2 * Copyright (c) 2007 Doug Rabson
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
/*
 * Module-wide state for the stand-alone ZFS reader.
 *
 * NOTE(review): this listing is a partial extract -- each line still
 * carries its original source line number and many intervening lines
 * are missing, so the fragments below are not complete definitions.
 */
31 * Stand-alone ZFS file reader.
38 * List of all vdevs, chained through v_alllink.
40 static vdev_list_t zfs_vdevs;
43 * List of all pools, chained through spa_link.
45 static spa_list_t zfs_pools;
/* CRC64 table used by the checksum code; presumably filled at init -- the filling loop fragment appears near the end of this listing. */
47 static uint64_t zfs_crc64_table[256];
/* One-entry block cache used by dnode_read(): last dnode and block number read, plus the buffer holding that block. */
48 static const dnode_phys_t *dnode_cache_obj = 0;
49 static uint64_t dnode_cache_bn;
50 static char *dnode_cache_buf;
/* Scratch buffer shared by the ZAP lookup/list routines. */
51 static char *zap_scratch;
/* Bump allocator over a fixed TEMP_SIZE arena (see fragment below). */
52 static char *zfs_temp_buf, *zfs_temp_end, *zfs_temp_ptr;
54 #define TEMP_SIZE (1024 * 1024)
/* Forward declaration: zio_read() is used before its definition. */
56 static int zio_read(spa_t *spa, const blkptr_t *bp, void *buf);
/*
 * Fragment of the one-time init routine (name not visible here --
 * presumably zfs_init): resets the vdev/pool lists and allocates the
 * shared buffers.
 */
61 STAILQ_INIT(&zfs_vdevs);
62 STAILQ_INIT(&zfs_pools);
64 zfs_temp_buf = malloc(TEMP_SIZE);
65 zfs_temp_end = zfs_temp_buf + TEMP_SIZE;
66 zfs_temp_ptr = zfs_temp_buf;
67 dnode_cache_buf = malloc(SPA_MAXBLOCKSIZE);
68 zap_scratch = malloc(SPA_MAXBLOCKSIZE);
/*
 * Fragments of the temporary-buffer allocator: zfs_alloc_temp() bumps
 * zfs_temp_ptr and reports exhaustion; the final line looks like the
 * matching reset/free routine rewinding the arena -- TODO confirm.
 */
74 zfs_alloc_temp(size_t sz)
78 if (zfs_temp_ptr + sz > zfs_temp_end) {
79 printf("ZFS: out of temporary buffer space\n");
92 zfs_temp_ptr = zfs_temp_buf;
96 xdr_int(const unsigned char **xdr, int *ip)
98 *ip = ((*xdr)[0] << 24)
107 xdr_u_int(const unsigned char **xdr, u_int *ip)
109 *ip = ((*xdr)[0] << 24)
118 xdr_uint64_t(const unsigned char **xdr, uint64_t *lp)
124 *lp = (((uint64_t) hi) << 32) | lo;
/*
 * nvlist_find: scan an XDR-encoded nvlist for the pair whose name and
 * type match `name'/`type'.  On a match, the element count is stored
 * through elementsp and the decoded value (or a pointer into the
 * encoded stream, for strings and nvlists) through valuep.
 * NOTE(review): partial extract -- lines are missing between fragments
 * (e.g. the initial header skip and the return statements).
 */
129 nvlist_find(const unsigned char *nvlist, const char *name, int type,
130 int* elementsp, void *valuep)
132 const unsigned char *p, *pair;
134 int encoded_size, decoded_size;
/* Each pair is prefixed by its encoded and decoded sizes; two zeroes terminate the list. */
141 xdr_int(&p, &encoded_size);
142 xdr_int(&p, &decoded_size);
143 while (encoded_size && decoded_size) {
144 int namelen, pairtype, elements;
145 const char *pairname;
147 xdr_int(&p, &namelen);
148 pairname = (const char*) p;
/* Names are padded to a 4-byte boundary in the XDR stream. */
149 p += roundup(namelen, 4);
150 xdr_int(&p, &pairtype);
152 if (!memcmp(name, pairname, namelen) && type == pairtype) {
153 xdr_int(&p, &elements);
155 *elementsp = elements;
156 if (type == DATA_TYPE_UINT64) {
157 xdr_uint64_t(&p, (uint64_t *) valuep);
159 } else if (type == DATA_TYPE_STRING) {
/* Strings and nvlists are returned as pointers into the encoded data, not copied. */
162 (*(const char**) valuep) = (const char*) p;
164 } else if (type == DATA_TYPE_NVLIST
165 || type == DATA_TYPE_NVLIST_ARRAY) {
166 (*(const unsigned char**) valuep) =
167 (const unsigned char*) p;
174 * Not the pair we are looking for, skip to the next one.
176 p = pair + encoded_size;
180 xdr_int(&p, &encoded_size);
181 xdr_int(&p, &decoded_size);
/*
 * Return the next nvlist in an nvlist array.
 *
 * The encoded stream starts with two XDR ints (encoding header),
 * followed by name/value pairs each prefixed with its encoded and
 * decoded sizes; a pair of zero sizes terminates the list.  Returns a
 * pointer to the byte just past the terminator, i.e. the start of the
 * next nvlist in the array.
 */
static const unsigned char *
nvlist_next(const unsigned char *nvlist)
{
	const unsigned char *p, *pair;
	int junk;
	int encoded_size, decoded_size;

	p = nvlist;
	xdr_int(&p, &junk);
	xdr_int(&p, &junk);

	pair = p;
	xdr_int(&p, &encoded_size);
	xdr_int(&p, &decoded_size);
	while (encoded_size && decoded_size) {
		/* Skip the whole pair using its recorded encoded size. */
		p = pair + encoded_size;

		pair = p;
		xdr_int(&p, &encoded_size);
		xdr_int(&p, &decoded_size);
	}

	return p;
}
/*
 * Debug helper: pretty-print an XDR-encoded nvlist, indenting nested
 * nvlists, and return a pointer past the list (so callers can walk
 * nvlist arrays).  Only a few value types are decoded; others fall
 * through the (partially missing) switch.
 * NOTE(review): partial extract -- the typenames table and the switch
 * are missing several entries/lines.
 */
217 static const unsigned char *
218 nvlist_print(const unsigned char *nvlist, unsigned int indent)
/* Indexed by the DATA_TYPE_* pair type decoded from the stream. */
220 static const char* typenames[] = {
231 "DATA_TYPE_BYTE_ARRAY",
232 "DATA_TYPE_INT16_ARRAY",
233 "DATA_TYPE_UINT16_ARRAY",
234 "DATA_TYPE_INT32_ARRAY",
235 "DATA_TYPE_UINT32_ARRAY",
236 "DATA_TYPE_INT64_ARRAY",
237 "DATA_TYPE_UINT64_ARRAY",
238 "DATA_TYPE_STRING_ARRAY",
241 "DATA_TYPE_NVLIST_ARRAY",
242 "DATA_TYPE_BOOLEAN_VALUE",
245 "DATA_TYPE_BOOLEAN_ARRAY",
246 "DATA_TYPE_INT8_ARRAY",
247 "DATA_TYPE_UINT8_ARRAY"
251 const unsigned char *p, *pair;
253 int encoded_size, decoded_size;
/* Same pair-walk as nvlist_find(): size prefixes, zero-terminated. */
260 xdr_int(&p, &encoded_size);
261 xdr_int(&p, &decoded_size);
262 while (encoded_size && decoded_size) {
263 int namelen, pairtype, elements;
264 const char *pairname;
266 xdr_int(&p, &namelen);
267 pairname = (const char*) p;
268 p += roundup(namelen, 4);
269 xdr_int(&p, &pairtype);
271 for (i = 0; i < indent; i++)
273 printf("%s %s", typenames[pairtype], pairname);
275 xdr_int(&p, &elements);
277 case DATA_TYPE_UINT64: {
279 xdr_uint64_t(&p, &val);
280 printf(" = 0x%llx\n", val);
284 case DATA_TYPE_STRING: {
287 printf(" = \"%s\"\n", p);
291 case DATA_TYPE_NVLIST:
/* Nested nvlists are printed recursively with one more indent level. */
293 nvlist_print(p, indent + 1);
296 case DATA_TYPE_NVLIST_ARRAY:
297 for (j = 0; j < elements; j++) {
299 p = nvlist_print(p, indent + 1);
300 if (j != elements - 1) {
301 for (i = 0; i < indent; i++)
303 printf("%s %s", typenames[pairtype], pairname);
312 p = pair + encoded_size;
315 xdr_int(&p, &encoded_size);
316 xdr_int(&p, &decoded_size);
/*
 * Read `size' bytes from the physical device backing `vdev' at
 * `offset' via its v_phys_read callback, then (when a block pointer is
 * supplied) verify the checksum of the data read.
 * NOTE(review): partial extract -- the size/offset sanity handling and
 * return statements between these fragments are missing.
 */
325 vdev_read_phys(vdev_t *vdev, const blkptr_t *bp, void *buf,
326 off_t offset, size_t size)
/* A vdev with no physical-read callback cannot be read from. */
331 if (!vdev->v_phys_read)
/* When a bp is given, the amount to read is its physical size. */
335 psize = BP_GET_PSIZE(bp);
340 /*printf("ZFS: reading %d bytes at 0x%llx to %p\n", psize, offset, buf);*/
341 rc = vdev->v_phys_read(vdev, vdev->v_read_priv, offset, buf, psize);
/* Verify the checksum recorded in the block pointer against what was read. */
344 if (bp && zio_checksum_error(bp, buf))
351 vdev_disk_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
352 off_t offset, size_t bytes)
355 return (vdev_read_phys(vdev, bp, buf,
356 offset + VDEV_LABEL_START_SIZE, bytes));
361 vdev_mirror_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
362 off_t offset, size_t bytes)
368 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
369 if (kid->v_state != VDEV_STATE_HEALTHY)
371 rc = kid->v_read(kid, bp, buf, offset, bytes);
380 vdev_find(uint64_t guid)
384 STAILQ_FOREACH(vdev, &zfs_vdevs, v_alllink)
385 if (vdev->v_guid == guid)
392 vdev_create(uint64_t guid, vdev_read_t *read)
396 vdev = malloc(sizeof(vdev_t));
397 memset(vdev, 0, sizeof(vdev_t));
398 STAILQ_INIT(&vdev->v_children);
400 vdev->v_state = VDEV_STATE_OFFLINE;
402 vdev->v_phys_read = 0;
403 vdev->v_read_priv = 0;
404 STAILQ_INSERT_TAIL(&zfs_vdevs, vdev, v_alllink);
/*
 * Build (or refresh) the in-core vdev tree from a vdev nvlist taken
 * from a label.  Creates the vdev on first sight, records ashift,
 * raidz parity and a display name, derives its state from the
 * OFFLINE/REMOVED/FAULTED/DEGRADED properties, and recurses over the
 * ZPOOL_CONFIG_CHILDREN array.  *vdevp receives the resulting vdev.
 * NOTE(review): partial extract -- returns, braces and several lines
 * between fragments are missing.
 */
410 vdev_init_from_nvlist(const unsigned char *nvlist, vdev_t **vdevp, int is_newer)
413 uint64_t guid, id, ashift, nparity;
417 const unsigned char *kids;
418 int nkids, i, is_new;
419 uint64_t is_offline, is_faulted, is_degraded, is_removed;
/* guid, id and type are mandatory in a vdev nvlist. */
421 if (nvlist_find(nvlist, ZPOOL_CONFIG_GUID,
422 DATA_TYPE_UINT64, 0, &guid)
423 || nvlist_find(nvlist, ZPOOL_CONFIG_ID,
424 DATA_TYPE_UINT64, 0, &id)
425 || nvlist_find(nvlist, ZPOOL_CONFIG_TYPE,
426 DATA_TYPE_STRING, 0, &type)) {
427 printf("ZFS: can't find vdev details\n");
/* This boot reader only understands plain disks, mirrors and raidz. */
431 if (strcmp(type, VDEV_TYPE_MIRROR)
432 && strcmp(type, VDEV_TYPE_DISK)
433 && strcmp(type, VDEV_TYPE_RAIDZ)) {
434 printf("ZFS: can only boot from disk, mirror or raidz vdevs\n");
/* State flags are optional; absence means healthy. */
438 is_offline = is_removed = is_faulted = is_degraded = 0;
440 nvlist_find(nvlist, ZPOOL_CONFIG_OFFLINE, DATA_TYPE_UINT64, 0,
442 nvlist_find(nvlist, ZPOOL_CONFIG_REMOVED, DATA_TYPE_UINT64, 0,
444 nvlist_find(nvlist, ZPOOL_CONFIG_FAULTED, DATA_TYPE_UINT64, 0,
446 nvlist_find(nvlist, ZPOOL_CONFIG_DEGRADED, DATA_TYPE_UINT64, 0,
/* First sight of this guid: create the vdev with the right read routine. */
449 vdev = vdev_find(guid);
453 if (!strcmp(type, VDEV_TYPE_MIRROR))
454 vdev = vdev_create(guid, vdev_mirror_read);
455 else if (!strcmp(type, VDEV_TYPE_RAIDZ))
456 vdev = vdev_create(guid, vdev_raidz_read);
458 vdev = vdev_create(guid, vdev_disk_read);
461 if (nvlist_find(nvlist, ZPOOL_CONFIG_ASHIFT,
462 DATA_TYPE_UINT64, 0, &ashift) == 0)
463 vdev->v_ashift = ashift;
466 if (nvlist_find(nvlist, ZPOOL_CONFIG_NPARITY,
467 DATA_TYPE_UINT64, 0, &nparity) == 0)
468 vdev->v_nparity = nparity;
/* Prefer the device path (stripped of "/dev/") as display name. */
471 if (nvlist_find(nvlist, ZPOOL_CONFIG_PATH,
472 DATA_TYPE_STRING, 0, &path) == 0) {
473 if (strncmp(path, "/dev/", 5) == 0)
475 vdev->v_name = strdup(path);
477 if (!strcmp(type, "raidz")) {
478 if (vdev->v_nparity == 1)
479 vdev->v_name = "raidz1";
481 vdev->v_name = "raidz2";
483 vdev->v_name = strdup(type);
488 vdev->v_state = VDEV_STATE_OFFLINE;
490 vdev->v_state = VDEV_STATE_REMOVED;
492 vdev->v_state = VDEV_STATE_FAULTED;
493 else if (is_degraded)
494 vdev->v_state = VDEV_STATE_DEGRADED;
496 vdev->v_state = VDEV_STATE_HEALTHY;
502 * We've already seen this vdev, but from an older
503 * vdev label, so let's refresh its state from the
/* Same flag-to-state mapping as above, applied on refresh. */
507 vdev->v_state = VDEV_STATE_OFFLINE;
509 vdev->v_state = VDEV_STATE_REMOVED;
511 vdev->v_state = VDEV_STATE_FAULTED;
512 else if (is_degraded)
513 vdev->v_state = VDEV_STATE_DEGRADED;
515 vdev->v_state = VDEV_STATE_HEALTHY;
/* Recurse over child vdevs; a missing CHILDREN entry is fine (leaf vdev). */
519 rc = nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN,
520 DATA_TYPE_NVLIST_ARRAY, &nkids, &kids);
522 * Its ok if we don't have any kids.
525 vdev->v_nchildren = nkids;
526 for (i = 0; i < nkids; i++) {
527 rc = vdev_init_from_nvlist(kids, &kid, is_newer);
531 STAILQ_INSERT_TAIL(&vdev->v_children, kid,
533 kids = nvlist_next(kids);
536 vdev->v_nchildren = 0;
/*
 * Recompute the state of an interior (mirror/raidz) vdev from the
 * states of its children.
 * NOTE(review): partial extract -- the kid-counting statements and
 * several conditions are missing between fragments.
 */
545 vdev_set_state(vdev_t *vdev)
552 * A mirror or raidz is healthy if all its kids are healthy. A
553 * mirror is degraded if any of its kids is healthy; a raidz
554 * is degraded if at most nparity kids are offline.
/* Only vdevs with children (mirror/raidz) need recomputation. */
556 if (STAILQ_FIRST(&vdev->v_children)) {
559 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
560 if (kid->v_state == VDEV_STATE_HEALTHY)
566 vdev->v_state = VDEV_STATE_HEALTHY;
568 if (vdev->v_read == vdev_mirror_read) {
570 vdev->v_state = VDEV_STATE_DEGRADED;
572 vdev->v_state = VDEV_STATE_OFFLINE;
574 } else if (vdev->v_read == vdev_raidz_read) {
/* raidz survives only up to nparity failed children. */
575 if (bad_kids > vdev->v_nparity) {
576 vdev->v_state = VDEV_STATE_OFFLINE;
578 vdev->v_state = VDEV_STATE_DEGRADED;
586 spa_find_by_guid(uint64_t guid)
590 STAILQ_FOREACH(spa, &zfs_pools, spa_link)
591 if (spa->spa_guid == guid)
600 spa_find_by_name(const char *name)
604 STAILQ_FOREACH(spa, &zfs_pools, spa_link)
605 if (!strcmp(spa->spa_name, name))
614 spa_create(uint64_t guid)
618 spa = malloc(sizeof(spa_t));
619 memset(spa, 0, sizeof(spa_t));
620 STAILQ_INIT(&spa->spa_vdevs);
621 spa->spa_guid = guid;
622 STAILQ_INSERT_TAIL(&zfs_pools, spa, spa_link);
/*
 * Status reporting helpers: state_name() maps a vdev_state_t to a
 * printable string, print_state() prints an indented NAME/STATE row,
 * vdev_status() recursively prints a vdev subtree, spa_status() prints
 * one pool (deriving the pool state from its top-level vdevs) and the
 * final fragment iterates all pools (presumably spa_all_status).
 * NOTE(review): partial extract -- function headers, locals and braces
 * are missing between fragments.
 */
628 state_name(vdev_state_t state)
630 static const char* names[] = {
/* On BOOT2-style builds there is no pager; fall back to plain printf. */
645 #define pager_printf printf
650 pager_printf(const char *fmt, ...)
656 vsprintf(line, fmt, args);
663 #define STATUS_FORMAT "        %s %s\n"
666 print_state(int indent, const char *name, vdev_state_t state)
672 for (i = 0; i < indent; i++)
675 pager_printf(STATUS_FORMAT, buf, state_name(state));
680 vdev_status(vdev_t *vdev, int indent)
683 print_state(indent, vdev->v_name, vdev->v_state);
685 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
686 vdev_status(kid, indent + 1);
691 spa_status(spa_t *spa)
694 int good_kids, bad_kids, degraded_kids;
697 pager_printf("  pool: %s\n", spa->spa_name);
698 pager_printf("config:\n\n");
699 pager_printf(STATUS_FORMAT, "NAME", "STATE");
/* Classify the pool's top-level vdevs to derive an overall pool state. */
704 STAILQ_FOREACH(vdev, &spa->spa_vdevs, v_childlink) {
705 if (vdev->v_state == VDEV_STATE_HEALTHY)
707 else if (vdev->v_state == VDEV_STATE_DEGRADED)
713 state = VDEV_STATE_CLOSED;
714 if (good_kids > 0 && (degraded_kids + bad_kids) == 0)
715 state = VDEV_STATE_HEALTHY;
716 else if ((good_kids + degraded_kids) > 0)
717 state = VDEV_STATE_DEGRADED;
719 print_state(0, spa->spa_name, state);
720 STAILQ_FOREACH(vdev, &spa->spa_vdevs, v_childlink) {
721 vdev_status(vdev, 1);
/* Fragment of the all-pools iterator. */
731 STAILQ_FOREACH(spa, &zfs_pools, spa_link) {
/*
 * Probe one physical device: read and validate its vdev label, create
 * or refresh the pool and vdev tree described by the label's nvlist,
 * wire the physical read routine into the matching leaf vdev, and scan
 * the uberblock ring for the most current uberblock (highest txg, then
 * highest timestamp).  On success *spap (when non-NULL) receives the
 * pool.
 * NOTE(review): partial extract -- error returns, loop headers and
 * many lines are missing between fragments; the trailing for-loop
 * fragment belongs to the CRC64 table initialisation, not to
 * vdev_probe itself.
 */
740 vdev_probe(vdev_phys_read_t *read, void *read_priv, spa_t **spap)
/* The label nvlist is decoded in place in the shared zap_scratch buffer. */
743 vdev_phys_t *vdev_label = (vdev_phys_t *) zap_scratch;
745 vdev_t *vdev, *top_vdev, *pool_vdev;
748 const unsigned char *nvlist;
751 uint64_t pool_txg, pool_guid;
752 const char *pool_name;
753 const unsigned char *vdevs;
756 const struct uberblock *up;
759 * Load the vdev label and figure out which
760 * uberblock is most current.
/* Use a temporary vdev so vdev_read_phys() can be reused before the real vdev exists. */
762 memset(&vtmp, 0, sizeof(vtmp));
763 vtmp.v_phys_read = read;
764 vtmp.v_read_priv = read_priv;
765 off = offsetof(vdev_label_t, vl_vdev_phys);
/* Build a fake blkptr describing the label region so the read is checksummed. */
767 BP_SET_LSIZE(&bp, sizeof(vdev_phys_t));
768 BP_SET_PSIZE(&bp, sizeof(vdev_phys_t));
769 BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL);
770 BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF);
771 ZIO_SET_CHECKSUM(&bp.blk_cksum, off, 0, 0, 0);
772 if (vdev_read_phys(&vtmp, &bp, vdev_label, off, 0))
/* Only XDR-encoded label nvlists are supported. */
775 if (vdev_label->vp_nvlist[0] != NV_ENCODE_XDR
779 nvlist = (const unsigned char *) vdev_label->vp_nvlist + 4;
781 if (nvlist_find(nvlist,
782 ZPOOL_CONFIG_VERSION,
783 DATA_TYPE_UINT64, 0, &val)) {
787 if (val > SPA_VERSION) {
788 printf("ZFS: unsupported ZFS version %u (should be %u)\n",
789 (unsigned) val, (unsigned) SPA_VERSION);
793 if (nvlist_find(nvlist,
794 ZPOOL_CONFIG_POOL_STATE,
795 DATA_TYPE_UINT64, 0, &val)) {
800 if (val != POOL_STATE_ACTIVE) {
802 * Don't print a message here. If we happen to reboot
803 * while where is an exported pool around, we don't
804 * need a cascade of confusing messages during boot.
806 /*printf("ZFS: pool is not active\n");*/
811 if (nvlist_find(nvlist,
812 ZPOOL_CONFIG_POOL_TXG,
813 DATA_TYPE_UINT64, 0, &pool_txg)
814 || nvlist_find(nvlist,
815 ZPOOL_CONFIG_POOL_GUID,
816 DATA_TYPE_UINT64, 0, &pool_guid)
817 || nvlist_find(nvlist,
818 ZPOOL_CONFIG_POOL_NAME,
819 DATA_TYPE_STRING, 0, &pool_name)) {
821 * Cache and spare devices end up here - just ignore
824 /*printf("ZFS: can't find pool details\n");*/
829 * Create the pool if this is the first time we've seen it.
831 spa = spa_find_by_guid(pool_guid);
833 spa = spa_create(pool_guid);
834 spa->spa_name = strdup(pool_name);
/* A newer label txg means this label's config supersedes what we had. */
836 if (pool_txg > spa->spa_txg) {
837 spa->spa_txg = pool_txg;
843 * Get the vdev tree and create our in-core copy of it.
844 * If we already have a vdev with this guid, this must
845 * be some kind of alias (overlapping slices, dangerously dedicated
848 if (nvlist_find(nvlist,
850 DATA_TYPE_UINT64, 0, &guid)) {
853 vdev = vdev_find(guid);
854 if (vdev && vdev->v_phys_read)	/* Has this vdev already been inited? */
857 if (nvlist_find(nvlist,
858 ZPOOL_CONFIG_VDEV_TREE,
859 DATA_TYPE_NVLIST, 0, &vdevs)) {
863 rc = vdev_init_from_nvlist(vdevs, &top_vdev, is_newer);
868 * Add the toplevel vdev to the pool if its not already there.
870 STAILQ_FOREACH(pool_vdev, &spa->spa_vdevs, v_childlink)
871 if (top_vdev == pool_vdev)
873 if (!pool_vdev && top_vdev)
874 STAILQ_INSERT_TAIL(&spa->spa_vdevs, top_vdev, v_childlink);
877 * We should already have created an incomplete vdev for this
878 * vdev. Find it and initialise it with our read proc.
880 vdev = vdev_find(guid);
882 vdev->v_phys_read = read;
883 vdev->v_read_priv = read_priv;
885 printf("ZFS: inconsistent nvlist contents\n");
890 * Re-evaluate top-level vdev state.
892 vdev_set_state(top_vdev);
895 * Ok, we are happy with the pool so far. Lets find
896 * the best uberblock and then we can actually access
897 * the contents of the pool.
/* Walk every slot in the label's uberblock ring. */
900 i < VDEV_UBERBLOCK_RING >> UBERBLOCK_SHIFT;
902 off = offsetof(vdev_label_t, vl_uberblock);
903 off += i << UBERBLOCK_SHIFT;
905 DVA_SET_OFFSET(&bp.blk_dva[0], off);
906 BP_SET_LSIZE(&bp, 1 << UBERBLOCK_SHIFT);
907 BP_SET_PSIZE(&bp, 1 << UBERBLOCK_SHIFT);
908 BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL);
909 BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF);
910 ZIO_SET_CHECKSUM(&bp.blk_cksum, off, 0, 0, 0);
911 if (vdev_read_phys(vdev, &bp, upbuf, off, 0))
914 up = (const struct uberblock *) upbuf;
915 if (up->ub_magic != UBERBLOCK_MAGIC)
/* Ignore uberblocks older than the pool's label txg. */
917 if (up->ub_txg < spa->spa_txg)
/* Keep the uberblock with the highest txg; break txg ties on timestamp. */
919 if (up->ub_txg > spa->spa_uberblock.ub_txg) {
920 spa->spa_uberblock = *up;
921 } else if (up->ub_txg == spa->spa_uberblock.ub_txg) {
922 if (up->ub_timestamp > spa->spa_uberblock.ub_timestamp)
923 spa->spa_uberblock = *up;
/* Fragment of the CRC64 table initialisation loop (separate routine). */
937 for (v = 0; v < 32; v++)
/*
 * Block I/O path: zio_read_gang() reassembles a gang block by reading
 * the gang header and recursively reading each constituent blkptr;
 * zio_read() reads one block, trying each of its DVAs in turn and
 * decompressing when the bp says so; dnode_read() reads a byte range
 * from an object by walking its indirect-block tree, with a one-entry
 * block cache.
 * NOTE(review): partial extract -- error returns, declarations and
 * loop bodies are missing between fragments.
 */
944 zio_read_gang(spa_t *spa, const blkptr_t *bp, const dva_t *dva, void *buf)
946 zio_gbh_phys_t zio_gb;
/* Locate the top-level vdev named by the DVA. */
952 vdevid = DVA_GET_VDEV(dva);
953 offset = DVA_GET_OFFSET(dva);
954 STAILQ_FOREACH(vdev, &spa->spa_vdevs, v_childlink)
955 if (vdev->v_id == vdevid)
957 if (!vdev || !vdev->v_read)
/* Read the gang header itself (no bp: it is checksummed separately). */
959 if (vdev->v_read(vdev, NULL, &zio_gb, offset, SPA_GANGBLOCKSIZE))
/* Recursively read each gang member, advancing through the caller's buffer. */
962 for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
963 blkptr_t *gbp = &zio_gb.zg_blkptr[i];
967 if (zio_read(spa, gbp, buf))
969 buf = (char*)buf + BP_GET_PSIZE(gbp);
976 zio_read(spa_t *spa, const blkptr_t *bp, void *buf)
978 int cpfunc = BP_GET_COMPRESS(bp);
979 size_t lsize = BP_GET_LSIZE(bp);
980 size_t psize = BP_GET_PSIZE(bp);
/* Compressed blocks are read into a temp buffer, then decompressed into buf. */
985 if (cpfunc != ZIO_COMPRESS_OFF)
986 pbuf = zfs_alloc_temp(psize);
/* Try each DVA copy of the block until one reads and verifies. */
990 for (i = 0; i < SPA_DVAS_PER_BP; i++) {
991 const dva_t *dva = &bp->blk_dva[i];
/* An all-zero DVA slot is unused. */
996 if (!dva->dva_word[0] && !dva->dva_word[1])
999 if (DVA_GET_GANG(dva)) {
1000 if (zio_read_gang(spa, bp, dva, buf))
1003 vdevid = DVA_GET_VDEV(dva);
1004 offset = DVA_GET_OFFSET(dva);
1005 STAILQ_FOREACH(vdev, &spa->spa_vdevs, v_childlink)
1006 if (vdev->v_id == vdevid)
1008 if (!vdev || !vdev->v_read) {
1011 if (vdev->v_read(vdev, bp, pbuf, offset, psize))
1014 if (cpfunc != ZIO_COMPRESS_OFF) {
1015 if (zio_decompress_data(cpfunc, pbuf, psize,
1023 printf("ZFS: i/o error - all block copies unavailable\n");
1029 dnode_read(spa_t *spa, const dnode_phys_t *dnode, off_t offset, void *buf, size_t buflen)
1031 int ibshift = dnode->dn_indblkshift - SPA_BLKPTRSHIFT;
1032 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
1033 int nlevels = dnode->dn_nlevels;
1037 * Note: bsize may not be a power of two here so we need to do an
1038 * actual divide rather than a bitshift.
1040 while (buflen > 0) {
1041 uint64_t bn = offset / bsize;
1042 int boff = offset % bsize;
1044 const blkptr_t *indbp;
/* Reads past the object's last block yield nothing. */
1047 if (bn > dnode->dn_maxblkid)
/* One-entry cache: skip the indirect walk if we just read this block. */
1050 if (dnode == dnode_cache_obj && bn == dnode_cache_bn)
/* Walk down the indirect-block tree from the dnode's embedded blkptrs. */
1053 indbp = dnode->dn_blkptr;
1054 for (i = 0; i < nlevels; i++) {
1056 * Copy the bp from the indirect array so that
1057 * we can re-use the scratch buffer for multi-level
1060 ibn = bn >> ((nlevels - i - 1) * ibshift);
1061 ibn &= ((1 << ibshift) - 1);
1063 rc = zio_read(spa, &bp, dnode_cache_buf);
1066 indbp = (const blkptr_t *) dnode_cache_buf;
1068 dnode_cache_obj = dnode;
1069 dnode_cache_bn = bn;
1073 * The buffer contains our data block. Copy what we
1074 * need from it and loop.
1077 if (i > buflen) i = buflen;
1078 memcpy(buf, &dnode_cache_buf[boff], i);
1079 buf = ((char*) buf) + i;
/*
 * Microzap lookup plus the fatzap name comparator.
 * NOTE(review): partial extract -- returns and some declarations are
 * missing between fragments.
 */
1088 * Lookup a value in a microzap directory. Assumes that the zap
1089 * scratch buffer contains the directory contents.
1092 mzap_lookup(spa_t *spa, const dnode_phys_t *dnode, const char *name, uint64_t *value)
1094 const mzap_phys_t *mz;
1095 const mzap_ent_phys_t *mze;
1100 * Microzap objects use exactly one block. Read the whole
/* dn_datablkszsec is in 512-byte sectors. */
1103 size = dnode->dn_datablkszsec * 512;
1105 mz = (const mzap_phys_t *) zap_scratch;
/* The first chunk-sized slot is the mzap header, hence the -1. */
1106 chunks = size / MZAP_ENT_LEN - 1;
1108 for (i = 0; i < chunks; i++) {
1109 mze = &mz->mz_chunk[i];
1110 if (!strcmp(mze->mze_name, name)) {
1111 *value = mze->mze_value;
1120 * Compare a name with a zap leaf entry. Return non-zero if the name
/* Walks the leaf's name-chunk chain, comparing ZAP_LEAF_ARRAY_BYTES at a time. */
1124 fzap_name_equal(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc, const char *name)
1127 const zap_leaf_chunk_t *nc;
1130 namelen = zc->l_entry.le_name_length;
1132 nc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_name_chunk);
1134 while (namelen > 0) {
1137 if (len > ZAP_LEAF_ARRAY_BYTES)
1138 len = ZAP_LEAF_ARRAY_BYTES;
1139 if (memcmp(p, nc->l_array.la_array, len))
1143 nc = &ZAP_LEAF_CHUNK(zl, nc->l_array.la_next);
1150 * Extract a uint64_t value from a zap leaf entry.
1153 fzap_leaf_value(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc)
1155 const zap_leaf_chunk_t *vc;
1160 vc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_value_chunk);
1161 for (i = 0, value = 0, p = vc->l_array.la_array; i < 8; i++) {
1162 value = (value << 8) | p[i];
/*
 * Fatzap lookup: hash the name, index the (embedded or external)
 * pointer table to find the leaf block, then follow that leaf's hash
 * chain until an entry with a matching hash and name is found.
 * NOTE(review): partial extract -- declarations, returns and error
 * checks are missing between fragments.
 */
1169 * Lookup a value in a fatzap directory. Assumes that the zap scratch
1170 * buffer contains the directory header.
1173 fzap_lookup(spa_t *spa, const dnode_phys_t *dnode, const char *name, uint64_t *value)
1175 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
/* Copy the header: zap_scratch will be overwritten by later reads. */
1176 zap_phys_t zh = *(zap_phys_t *) zap_scratch;
1182 if (zh.zap_magic != ZAP_MAGIC)
1185 z.zap_block_shift = ilog2(bsize);
1186 z.zap_phys = (zap_phys_t *) zap_scratch;
1189 * Figure out where the pointer table is and read it in if necessary.
1191 if (zh.zap_ptrtbl.zt_blk) {
1192 rc = dnode_read(spa, dnode, zh.zap_ptrtbl.zt_blk * bsize,
1193 zap_scratch, bsize);
1196 ptrtbl = (uint64_t *) zap_scratch;
1198 ptrtbl = &ZAP_EMBEDDED_PTRTBL_ENT(&z, 0);
1201 hash = zap_hash(zh.zap_salt, name);
1204 zl.l_bs = z.zap_block_shift;
/* Top zt_shift bits of the hash index the pointer table to a leaf block. */
1206 off_t off = ptrtbl[hash >> (64 - zh.zap_ptrtbl.zt_shift)] << zl.l_bs;
1207 zap_leaf_chunk_t *zc;
1209 rc = dnode_read(spa, dnode, off, zap_scratch, bsize);
1213 zl.l_phys = (zap_leaf_phys_t *) zap_scratch;
1216 * Make sure this chunk matches our hash.
1218 if (zl.l_phys->l_hdr.lh_prefix_len > 0
1219 && zl.l_phys->l_hdr.lh_prefix
1220 != hash >> (64 - zl.l_phys->l_hdr.lh_prefix_len))
1224 * Hash within the chunk to find our entry.
1226 int shift = (64 - ZAP_LEAF_HASH_SHIFT(&zl) - zl.l_phys->l_hdr.lh_prefix_len);
1227 int h = (hash >> shift) & ((1 << ZAP_LEAF_HASH_SHIFT(&zl)) - 1);
1228 h = zl.l_phys->l_hash[h];
/* 0xffff is the end-of-chain marker for leaf entry lists. */
1231 zc = &ZAP_LEAF_CHUNK(&zl, h);
1232 while (zc->l_entry.le_hash != hash) {
1233 if (zc->l_entry.le_next == 0xffff) {
1237 zc = &ZAP_LEAF_CHUNK(&zl, zc->l_entry.le_next);
1239 if (fzap_name_equal(&zl, zc, name)) {
1240 *value = fzap_leaf_value(&zl, zc);
1248 * Lookup a name in a zap object and return its value as a uint64_t.
1251 zap_lookup(spa_t *spa, const dnode_phys_t *dnode, const char *name, uint64_t *value)
1255 size_t size = dnode->dn_datablkszsec * 512;
1257 rc = dnode_read(spa, dnode, 0, zap_scratch, size);
1261 zap_type = *(uint64_t *) zap_scratch;
1262 if (zap_type == ZBT_MICRO)
1263 return mzap_lookup(spa, dnode, name, value);
1265 return fzap_lookup(spa, dnode, name, value);
/*
 * Directory listing helpers: mzap_list() prints the names in a
 * microzap block; fzap_list() walks every leaf block of a fatzap,
 * reassembles each entry's name from its chunk chain and prints it
 * with its first eight value bytes.
 * NOTE(review): partial extract -- declarations, returns and some loop
 * bookkeeping are missing between fragments.
 */
1271 * List a microzap directory. Assumes that the zap scratch buffer contains
1272 * the directory contents.
1275 mzap_list(spa_t *spa, const dnode_phys_t *dnode)
1277 const mzap_phys_t *mz;
1278 const mzap_ent_phys_t *mze;
1283 * Microzap objects use exactly one block. Read the whole
1286 size = dnode->dn_datablkszsec * 512;
1287 mz = (const mzap_phys_t *) zap_scratch;
1288 chunks = size / MZAP_ENT_LEN - 1;
1290 for (i = 0; i < chunks; i++) {
1291 mze = &mz->mz_chunk[i];
/* Empty name slots are unused entries. */
1292 if (mze->mze_name[0])
1293 //printf("%-32s 0x%llx\n", mze->mze_name, mze->mze_value);
1294 printf("%s\n", mze->mze_name);
1301 * List a fatzap directory. Assumes that the zap scratch buffer contains
1302 * the directory header.
1305 fzap_list(spa_t *spa, const dnode_phys_t *dnode)
1307 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
1308 zap_phys_t zh = *(zap_phys_t *) zap_scratch;
1312 if (zh.zap_magic != ZAP_MAGIC)
1315 z.zap_block_shift = ilog2(bsize);
1316 z.zap_phys = (zap_phys_t *) zap_scratch;
1319 * This assumes that the leaf blocks start at block 1. The
1320 * documentation isn't exactly clear on this.
1323 zl.l_bs = z.zap_block_shift;
1324 for (i = 0; i < zh.zap_num_leafs; i++) {
/* Leaf block i lives at block i+1 (block 0 is the header). */
1325 off_t off = (i + 1) << zl.l_bs;
1329 if (dnode_read(spa, dnode, off, zap_scratch, bsize))
1332 zl.l_phys = (zap_leaf_phys_t *) zap_scratch;
1334 for (j = 0; j < ZAP_LEAF_NUMCHUNKS(&zl); j++) {
1335 zap_leaf_chunk_t *zc, *nc;
1338 zc = &ZAP_LEAF_CHUNK(&zl, j);
1339 if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY)
1341 namelen = zc->l_entry.le_name_length;
/* Clamp to the local name buffer. */
1342 if (namelen > sizeof(name))
1343 namelen = sizeof(name);
1346 * Paste the name back together.
1348 nc = &ZAP_LEAF_CHUNK(&zl, zc->l_entry.le_name_chunk);
1350 while (namelen > 0) {
1353 if (len > ZAP_LEAF_ARRAY_BYTES)
1354 len = ZAP_LEAF_ARRAY_BYTES;
1355 memcpy(p, nc->l_array.la_array, len);
1358 nc = &ZAP_LEAF_CHUNK(&zl, nc->l_array.la_next);
1362 * Assume the first eight bytes of the value are
1365 value = fzap_leaf_value(&zl, zc);
1367 printf("%s 0x%llx\n", name, value);
1375 * List a zap directory.
1378 zap_list(spa_t *spa, const dnode_phys_t *dnode)
1381 size_t size = dnode->dn_datablkszsec * 512;
1383 if (dnode_read(spa, dnode, 0, zap_scratch, size))
1386 zap_type = *(uint64_t *) zap_scratch;
1387 if (zap_type == ZBT_MICRO)
1388 return mzap_list(spa, dnode);
1390 return fzap_list(spa, dnode);
1396 objset_get_dnode(spa_t *spa, const objset_phys_t *os, uint64_t objnum, dnode_phys_t *dnode)
1400 offset = objnum * sizeof(dnode_phys_t);
1401 return dnode_read(spa, &os->os_meta_dnode, offset,
1402 dnode, sizeof(dnode_phys_t));
1406 * Find the object set given the object number of its dataset object
1407 * and return its details in *objset
1410 zfs_mount_dataset(spa_t *spa, uint64_t objnum, objset_phys_t *objset)
1412 dnode_phys_t dataset;
1413 dsl_dataset_phys_t *ds;
1415 if (objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset)) {
1416 printf("ZFS: can't find dataset %llu\n", objnum);
1420 ds = (dsl_dataset_phys_t *) &dataset.dn_bonus;
1421 if (zio_read(spa, &ds->ds_bp, objset)) {
1422 printf("ZFS: can't read object set for dataset %llu\n", objnum);
1430 * Find the object set pointed to by the BOOTFS property or the root
1431 * dataset if there is none and return its details in *objset
1434 zfs_mount_root(spa_t *spa, objset_phys_t *objset)
1436 dnode_phys_t dir, propdir;
1437 uint64_t props, bootfs, root;
1440 * Start with the MOS directory object.
1442 if (objset_get_dnode(spa, &spa->spa_mos, DMU_POOL_DIRECTORY_OBJECT, &dir)) {
1443 printf("ZFS: can't read MOS object directory\n");
1448 * Lookup the pool_props and see if we can find a bootfs.
1450 if (zap_lookup(spa, &dir, DMU_POOL_PROPS, &props) == 0
1451 && objset_get_dnode(spa, &spa->spa_mos, props, &propdir) == 0
1452 && zap_lookup(spa, &propdir, "bootfs", &bootfs) == 0
1454 return zfs_mount_dataset(spa, bootfs, objset);
1457 * Lookup the root dataset directory
1459 if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET, &root)
1460 || objset_get_dnode(spa, &spa->spa_mos, root, &dir)) {
1461 printf("ZFS: can't find root dsl_dir\n");
1466 * Use the information from the dataset directory's bonus buffer
1467 * to find the dataset object and from that the object set itself.
1469 dsl_dir_phys_t *dd = (dsl_dir_phys_t *) &dir.dn_bonus;
1470 return zfs_mount_dataset(spa, dd->dd_head_dataset_obj, objset);
1474 zfs_mount_pool(spa_t *spa)
1477 * Find the MOS and work our way in from there.
1479 if (zio_read(spa, &spa->spa_uberblock.ub_rootbp, &spa->spa_mos)) {
1480 printf("ZFS: can't read MOS\n");
1485 * Find the root object set
1487 if (zfs_mount_root(spa, &spa->spa_root_objset)) {
1488 printf("Can't find root filesystem - giving up\n");
1496 * Lookup a file and return its dnode.
1499 zfs_lookup(spa_t *spa, const char *upath, dnode_phys_t *dnode)
1502 uint64_t objnum, rootnum, parentnum;
1504 const znode_phys_t *zp = (const znode_phys_t *) dn.dn_bonus;
1508 int symlinks_followed = 0;
1510 if (spa->spa_root_objset.os_type != DMU_OST_ZFS) {
1511 printf("ZFS: unexpected object set type %llu\n",
1512 spa->spa_root_objset.os_type);
1517 * Get the root directory dnode.
1519 rc = objset_get_dnode(spa, &spa->spa_root_objset, MASTER_NODE_OBJ, &dn);
1523 rc = zap_lookup(spa, &dn, ZFS_ROOT_OBJ, &rootnum);
1527 rc = objset_get_dnode(spa, &spa->spa_root_objset, rootnum, &dn);
1540 memcpy(element, p, q - p);
1548 if ((zp->zp_mode >> 12) != 0x4) {
1553 rc = zap_lookup(spa, &dn, element, &objnum);
1556 objnum = ZFS_DIRENT_OBJ(objnum);
1558 rc = objset_get_dnode(spa, &spa->spa_root_objset, objnum, &dn);
1563 * Check for symlink.
1565 if ((zp->zp_mode >> 12) == 0xa) {
1566 if (symlinks_followed > 10)
1568 symlinks_followed++;
1571 * Read the link value and copy the tail of our
1572 * current path onto the end.
1575 strcpy(&path[zp->zp_size], p);
1577 path[zp->zp_size] = 0;
1578 if (zp->zp_size + sizeof(znode_phys_t) <= dn.dn_bonuslen) {
1579 memcpy(path, &dn.dn_bonus[sizeof(znode_phys_t)],
1582 rc = dnode_read(spa, &dn, 0, path, zp->zp_size);
1588 * Restart with the new path, starting either at
1589 * the root or at the parent depending whether or
1590 * not the link is relative.
1597 objset_get_dnode(spa, &spa->spa_root_objset, objnum, &dn);