/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 * Copyright (c) 2015 Chunwei Chen. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_impl.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/callb.h>
#include <sys/zfeature.h>

int32_t zfs_pd_bytes_max = 50 * 1024 * 1024;	/* 50MB */
boolean_t send_holes_without_birth_time = B_TRUE;

#ifdef _KERNEL
SYSCTL_DECL(_vfs_zfs);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, send_holes_without_birth_time, CTLFLAG_RWTUN,
    &send_holes_without_birth_time, 0, "Send holes without birth time");
#endif
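
/*
 * Illustrative note (added, not from the original source): on FreeBSD the
 * knob above surfaces as vfs.zfs.send_holes_without_birth_time and, being
 * CTLFLAG_RWTUN, can be set at runtime or from loader.conf, e.g.:
 *
 *	sysctl vfs.zfs.send_holes_without_birth_time=0
 *
 * Clearing it allows holes with birth time 0 to be skipped during
 * traversal when the hole_birth feature data can be trusted; see the
 * blk_birth == 0 handling in traverse_visitbp() below.
 */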

typedef struct prefetch_data {
	kmutex_t pd_mtx;
	kcondvar_t pd_cv;
	int32_t pd_bytes_fetched;
	int pd_flags;
	boolean_t pd_cancel;
	boolean_t pd_exited;
	zbookmark_phys_t pd_resume;
} prefetch_data_t;

typedef struct traverse_data {
	spa_t *td_spa;
	uint64_t td_objset;
	blkptr_t *td_rootbp;
	uint64_t td_min_txg;
	zbookmark_phys_t *td_resume;
	int td_flags;
	prefetch_data_t *td_pfd;
	boolean_t td_paused;
	uint64_t td_hole_birth_enabled_txg;
	blkptr_cb_t *td_func;
	void *td_arg;
	boolean_t td_realloc_possible;
} traverse_data_t;

static int traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object);
static void prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *,
    uint64_t objset, uint64_t object);

static int
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	traverse_data_t *td = arg;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp))
		return (0);

	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(td->td_spa))
		return (0);

	SET_BOOKMARK(&zb, td->td_objset, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL, td->td_arg);

	return (0);
}

static int
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	traverse_data_t *td = arg;

	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp))
			return (0);

		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
		    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

		(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL,
		    td->td_arg);
	}
	return (0);
}

static void
traverse_zil(traverse_data_t *td, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed; plus, in read-only mode, blocks that are already stable.
	 */
	if (claim_txg == 0 && spa_writeable(td->td_spa))
		return;

	zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);

	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
	    claim_txg);

	zil_free(zilog);
}

typedef enum resume_skip {
	RESUME_SKIP_ALL,
	RESUME_SKIP_NONE,
	RESUME_SKIP_CHILDREN
} resume_skip_t;

/*
 * Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and
 * the block indicated by zb does not need to be visited at all. Returns
 * RESUME_SKIP_CHILDREN if we are resuming a post traversal and we reach the
 * resume point. This indicates that this block should be visited but not its
 * children (since they must have been visited in a previous traversal).
 * Otherwise returns RESUME_SKIP_NONE.
 */
static resume_skip_t
resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume)) {
		/*
		 * If we already visited this bp & everything below,
		 * don't bother doing it again.
		 */
		if (zbookmark_subtree_completed(dnp, zb, td->td_resume))
			return (RESUME_SKIP_ALL);

		/*
		 * If we found the block we're trying to resume from, zero
		 * the bookmark out to indicate that we have resumed.
		 */
		if (bcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
			bzero(td->td_resume, sizeof (*zb));
			if (td->td_flags & TRAVERSE_POST)
				return (RESUME_SKIP_CHILDREN);
		}
	}
	return (RESUME_SKIP_NONE);
}
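
/*
 * Worked example (added for illustration): suppose a previous traversal
 * paused at the level-0 block <objset 21, object 7, level 0, blkid 100>,
 * so td_resume holds that bookmark.  On the next run:
 *
 *	- bookmarks whose whole subtree precedes the resume point (object 5,
 *	  or object 7 blkids below 100) satisfy
 *	  zbookmark_subtree_completed(), so we return RESUME_SKIP_ALL;
 *	- the matching bookmark itself compares equal via bcmp(), td_resume
 *	  is zeroed, and a TRAVERSE_POST caller gets RESUME_SKIP_CHILDREN
 *	  since the children were visited before the pause;
 *	- everything later in traversal order gets RESUME_SKIP_NONE.
 */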

static void
traverse_prefetch_metadata(traverse_data_t *td,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

	if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
		return;
	/*
	 * If we are in the process of resuming, don't prefetch, because
	 * some children will not be needed (and in fact may have already
	 * been freed).
	 */
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume))
		return;
	if (BP_IS_HOLE(bp) || bp->blk_birth <= td->td_min_txg)
		return;
	if (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE)
		return;

	(void) arc_read(NULL, td->td_spa, bp, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
}

static boolean_t
prefetch_needed(prefetch_data_t *pfd, const blkptr_t *bp)
{
	ASSERT(pfd->pd_flags & TRAVERSE_PREFETCH_DATA);
	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) ||
	    BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG)
		return (B_FALSE);
	return (B_TRUE);
}

static int
traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	zbookmark_phys_t czb;
	int err = 0;
	arc_buf_t *buf = NULL;
	prefetch_data_t *pd = td->td_pfd;
	boolean_t hard = td->td_flags & TRAVERSE_HARD;

	switch (resume_skip_check(td, dnp, zb)) {
	case RESUME_SKIP_ALL:
		return (0);
	case RESUME_SKIP_CHILDREN:
		goto post;
	case RESUME_SKIP_NONE:
		break;
	default:
		ASSERT(0);
	}

	if (bp->blk_birth == 0) {
		/*
		 * Since this block has a birth time of 0 it must be one of
		 * two things: a hole created before the
		 * SPA_FEATURE_HOLE_BIRTH feature was enabled, or a hole
		 * which has always been a hole in an object.
		 *
		 * If a file is written sparsely, then the unwritten parts of
		 * the file were "always holes" -- that is, they have been
		 * holes since this object was allocated.  However, we (and
		 * our callers) can not necessarily tell when an object was
		 * allocated.  Therefore, if it's possible that this object
		 * was freed and then its object number reused, we need to
		 * visit all the holes with birth==0.
		 *
		 * If it isn't possible that the object number was reused,
		 * then if SPA_FEATURE_HOLE_BIRTH was enabled before we wrote
		 * all the blocks we will visit as part of this traversal,
		 * then this hole must have always existed, so we can skip
		 * it.  We visit blocks born after (exclusive) td_min_txg.
		 *
		 * Note that the meta-dnode cannot be reallocated.
		 */
		if (!send_holes_without_birth_time &&
		    (!td->td_realloc_possible ||
		    zb->zb_object == DMU_META_DNODE_OBJECT) &&
		    td->td_hole_birth_enabled_txg <= td->td_min_txg)
			return (0);
	} else if (bp->blk_birth <= td->td_min_txg) {
		return (0);
	}

	if (pd != NULL && !pd->pd_exited && prefetch_needed(pd, bp)) {
		uint64_t size = BP_GET_LSIZE(bp);
		mutex_enter(&pd->pd_mtx);
		ASSERT(pd->pd_bytes_fetched >= 0);
		while (pd->pd_bytes_fetched < size && !pd->pd_exited)
			cv_wait(&pd->pd_cv, &pd->pd_mtx);
		pd->pd_bytes_fetched -= size;
		cv_broadcast(&pd->pd_cv);
		mutex_exit(&pd->pd_mtx);
	}

	if (BP_IS_HOLE(bp)) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
		if (err != 0)
			goto post;
		return (0);
	}

	if (td->td_flags & TRAVERSE_PRE) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			goto post;
	}

	if (BP_GET_LEVEL(bp) > 0) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;
		cbp = buf->b_data;

		for (i = 0; i < epb; i++) {
			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			traverse_prefetch_metadata(td, &cbp[i], &czb);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i++) {
			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			err = traverse_visitbp(td, dnp, &cbp[i], &czb);
			if (err != 0)
				break;
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int i;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;
		dnode_phys_t *child_dnp = buf->b_data;

		for (i = 0; i < epb; i++) {
			prefetch_dnode_metadata(td, &child_dnp[i],
			    zb->zb_objset, zb->zb_blkid * epb + i);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i++) {
			err = traverse_dnode(td, &child_dnp[i],
			    zb->zb_objset, zb->zb_blkid * epb + i);
			if (err != 0)
				break;
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		arc_flags_t flags = ARC_FLAG_WAIT;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;

		objset_phys_t *osp = buf->b_data;
		prefetch_dnode_metadata(td, &osp->os_meta_dnode, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		/*
		 * See the block comment above for the goal of this variable.
		 * If the maxblkid of the meta-dnode is 0, then we know that
		 * we've never had more than DNODES_PER_BLOCK objects in the
		 * dataset, which means we can't have reused any object ids.
		 */
		if (osp->os_meta_dnode.dn_maxblkid == 0)
			td->td_realloc_possible = B_FALSE;

		if (arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			prefetch_dnode_metadata(td, &osp->os_groupused_dnode,
			    zb->zb_objset, DMU_GROUPUSED_OBJECT);
			prefetch_dnode_metadata(td, &osp->os_userused_dnode,
			    zb->zb_objset, DMU_USERUSED_OBJECT);
		}

		err = traverse_dnode(td, &osp->os_meta_dnode, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			err = traverse_dnode(td, &osp->os_groupused_dnode,
			    zb->zb_objset, DMU_GROUPUSED_OBJECT);
		}
		if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			err = traverse_dnode(td, &osp->os_userused_dnode,
			    zb->zb_objset, DMU_USERUSED_OBJECT);
		}
	}

	if (buf)
		arc_buf_destroy(buf, &buf);

post:
	if (err == 0 && (td->td_flags & TRAVERSE_POST))
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);

	if (hard && (err == EIO || err == ECKSUM)) {
		/*
		 * Ignore this disk error as requested by the HARD flag,
		 * and continue traversal.
		 */
		err = 0;
	}

	/*
	 * If we are stopping here, set td_resume.
	 */
	if (td->td_resume != NULL && err != 0 && !td->td_paused) {
		td->td_resume->zb_objset = zb->zb_objset;
		td->td_resume->zb_object = zb->zb_object;
		td->td_resume->zb_level = 0;
		/*
		 * If we have stopped on an indirect block (e.g. due to
		 * i/o error), we have not visited anything below it.
		 * Set the bookmark to the first level-0 block that we need
		 * to visit.  This way, the resuming code does not need to
		 * deal with resuming from indirect blocks.
		 *
		 * Note, if zb_level <= 0, dnp may be NULL, so we don't want
		 * to dereference it.
		 */
		td->td_resume->zb_blkid = zb->zb_blkid;
		if (zb->zb_level > 0) {
			td->td_resume->zb_blkid <<= zb->zb_level *
			    (dnp->dn_indblkshift - SPA_BLKPTRSHIFT);
		}
		td->td_paused = B_TRUE;
	}

	return (err);
}
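
/*
 * Worked example of the zb_blkid adjustment above (added for clarity,
 * using assumed typical values): an indirect block with
 * dn_indblkshift == 17 (128K) holds 2^(17 - SPA_BLKPTRSHIFT) =
 * 2^(17 - 7) = 1024 block pointers.  If traversal pauses on the level-1
 * block with zb_blkid == 5, the first level-0 block below it is
 * 5 << 10 == 5120, so the bookmark resumes at a level-0 block rather
 * than at an indirect block.
 */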

static void
prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j;
	zbookmark_phys_t czb;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		traverse_prefetch_metadata(td, &dnp->dn_blkptr[j], &czb);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		traverse_prefetch_metadata(td, &dnp->dn_spill, &czb);
	}
}

static int
traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j, err = 0;
	zbookmark_phys_t czb;

	if (object != DMU_META_DNODE_OBJECT && td->td_resume != NULL &&
	    object < td->td_resume->zb_object)
		return (0);

	if (td->td_flags & TRAVERSE_PRE) {
		SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
		    ZB_DNODE_BLKID);
		err = td->td_func(td->td_spa, NULL, NULL, &czb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			return (err);
	}

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		err = traverse_visitbp(td, dnp, &dnp->dn_blkptr[j], &czb);
		if (err != 0)
			break;
	}

	if (err == 0 && (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		err = traverse_visitbp(td, dnp, &dnp->dn_spill, &czb);
	}

	if (err == 0 && (td->td_flags & TRAVERSE_POST)) {
		SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
		    ZB_DNODE_BLKID);
		err = td->td_func(td->td_spa, NULL, NULL, &czb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
	}

	return (err);
}

/* ARGSUSED */
static int
traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	prefetch_data_t *pfd = arg;
	arc_flags_t aflags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

	ASSERT(pfd->pd_bytes_fetched >= 0);
	if (bp == NULL)
		return (0);
	if (pfd->pd_cancel)
		return (SET_ERROR(EINTR));

	if (!prefetch_needed(pfd, bp))
		return (0);

	mutex_enter(&pfd->pd_mtx);
	while (!pfd->pd_cancel && pfd->pd_bytes_fetched >= zfs_pd_bytes_max)
		cv_wait(&pfd->pd_cv, &pfd->pd_mtx);
	pfd->pd_bytes_fetched += BP_GET_LSIZE(bp);
	cv_broadcast(&pfd->pd_cv);
	mutex_exit(&pfd->pd_mtx);

	(void) arc_read(NULL, spa, bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &aflags, zb);

	return (0);
}
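
/*
 * Example of the flow control above (added for illustration): with the
 * default zfs_pd_bytes_max of 50MB, this thread issues speculative reads
 * until roughly 50MB of prefetched data is outstanding, then sleeps on
 * pd_cv.  As traverse_visitbp() consumes blocks, it subtracts their sizes
 * from pd_bytes_fetched and broadcasts, waking this thread to prefetch
 * further ahead.  E.g. after prefetching 400 128K blocks (400 * 128K =
 * 50MB) it blocks until the consumer catches up.
 */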

static void
traverse_prefetch_thread(void *arg)
{
	traverse_data_t *td_main = arg;
	traverse_data_t td = *td_main;
	zbookmark_phys_t czb;

	td.td_func = traverse_prefetcher;
	td.td_arg = td_main->td_pfd;
	td.td_pfd = NULL;
	td.td_resume = &td_main->td_pfd->pd_resume;

	SET_BOOKMARK(&czb, td.td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	(void) traverse_visitbp(&td, NULL, td.td_rootbp, &czb);

	mutex_enter(&td_main->td_pfd->pd_mtx);
	td_main->td_pfd->pd_exited = B_TRUE;
	cv_broadcast(&td_main->td_pfd->pd_cv);
	mutex_exit(&td_main->td_pfd->pd_mtx);
}

/*
 * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 * in syncing context).
 */
static int
traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	traverse_data_t td;
	prefetch_data_t pd = { 0 };
	zbookmark_phys_t czb;
	int err;

	ASSERT(ds == NULL || objset == ds->ds_object);
	ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));

	td.td_spa = spa;
	td.td_objset = objset;
	td.td_rootbp = rootbp;
	td.td_min_txg = txg_start;
	td.td_resume = resume;
	td.td_func = func;
	td.td_arg = arg;
	td.td_pfd = &pd;
	td.td_flags = flags;
	td.td_paused = B_FALSE;
	td.td_realloc_possible = (txg_start == 0 ? B_FALSE : B_TRUE);

	if (spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
		VERIFY(spa_feature_enabled_txg(spa,
		    SPA_FEATURE_HOLE_BIRTH, &td.td_hole_birth_enabled_txg));
	} else {
		td.td_hole_birth_enabled_txg = UINT64_MAX;
	}

	pd.pd_flags = flags;
	if (resume != NULL)
		pd.pd_resume = *resume;
	mutex_init(&pd.pd_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pd.pd_cv, NULL, CV_DEFAULT, NULL);

	/* See comment on ZIL traversal in dsl_scan_visitds. */
	if (ds != NULL && !ds->ds_is_snapshot && !BP_IS_HOLE(rootbp)) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;

		err = arc_read(NULL, td.td_spa, rootbp,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, NULL);
		if (err != 0)
			return (err);

		osp = buf->b_data;
		traverse_zil(&td, &osp->os_zil_header);
		arc_buf_destroy(buf, &buf);
	}

	if (!(flags & TRAVERSE_PREFETCH_DATA) ||
	    0 == taskq_dispatch(system_taskq, traverse_prefetch_thread,
	    &td, TQ_NOQUEUE))
		pd.pd_exited = B_TRUE;

	SET_BOOKMARK(&czb, td.td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	err = traverse_visitbp(&td, NULL, rootbp, &czb);

	mutex_enter(&pd.pd_mtx);
	pd.pd_cancel = B_TRUE;
	cv_broadcast(&pd.pd_cv);
	while (!pd.pd_exited)
		cv_wait(&pd.pd_cv, &pd.pd_mtx);
	mutex_exit(&pd.pd_mtx);

	mutex_destroy(&pd.pd_mtx);
	cv_destroy(&pd.pd_cv);

	return (err);
}

/*
 * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 * in syncing context).
 */
int
traverse_dataset_resume(dsl_dataset_t *ds, uint64_t txg_start,
    zbookmark_phys_t *resume,
    int flags, blkptr_cb_t func, void *arg)
{
	return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds, ds->ds_object,
	    &dsl_dataset_phys(ds)->ds_bp, txg_start, resume, flags, func, arg));
}

int
traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start,
    int flags, blkptr_cb_t func, void *arg)
{
	return (traverse_dataset_resume(ds, txg_start, NULL, flags, func, arg));
}
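
/*
 * Usage sketch (illustrative only, not part of this file): a minimal
 * blkptr_cb_t that counts level-0 data blocks, driven by
 * traverse_dataset().  The callback name below is hypothetical.
 *
 *	static int
 *	count_blocks_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
 *	    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
 *	{
 *		uint64_t *count = arg;
 *
 *		// bp is NULL for dnode-level callbacks; skip those
 *		if (bp != NULL && !BP_IS_HOLE(bp) && BP_GET_LEVEL(bp) == 0)
 *			(*count)++;
 *		return (0);
 *	}
 *
 *	uint64_t count = 0;
 *	(void) traverse_dataset(ds, 0,
 *	    TRAVERSE_PRE | TRAVERSE_PREFETCH_DATA, count_blocks_cb, &count);
 *
 * Returning nonzero from the callback aborts the traversal (or, with a
 * resume bookmark and traverse_dataset_resume(), pauses it so a later
 * call can pick up where it left off).
 */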

int
traverse_dataset_destroyed(spa_t *spa, blkptr_t *blkptr,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(spa, NULL, ZB_DESTROYED_OBJSET,
	    blkptr, txg_start, resume, flags, func, arg));
}

/*
 * NB: pool must not be changing on-disk (eg, from zdb or sync context).
 */
int
traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	int err;
	dsl_pool_t *dp = spa_get_dsl(spa);
	objset_t *mos = dp->dp_meta_objset;
	boolean_t hard = (flags & TRAVERSE_HARD);

	/* visit the MOS */
	err = traverse_impl(spa, NULL, 0, spa_get_rootblkptr(spa),
	    txg_start, NULL, flags, func, arg);
	if (err != 0)
		return (err);

	/* visit each dataset */
	for (uint64_t obj = 1; err == 0;
	    err = dmu_object_next(mos, &obj, B_FALSE, txg_start)) {
		dmu_object_info_t doi;

		err = dmu_object_info(mos, obj, &doi);
		if (err != 0) {
			if (hard)
				continue;
			break;
		}

		if (doi.doi_bonus_type == DMU_OT_DSL_DATASET) {
			dsl_dataset_t *ds;
			uint64_t txg = txg_start;

			dsl_pool_config_enter(dp, FTAG);
			err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
			dsl_pool_config_exit(dp, FTAG);
			if (err != 0) {
				if (hard)
					continue;
				break;
			}
			if (dsl_dataset_phys(ds)->ds_prev_snap_txg > txg)
				txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
			err = traverse_dataset(ds, txg, flags, func, arg);
			dsl_dataset_rele(ds, FTAG);
			if (err != 0)
				break;
		}
	}
	if (hard)
		return (0);
	return (err);
}
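
/*
 * Usage sketch (illustrative only): whole-pool walks in the style of zdb
 * combine traverse_pool() with TRAVERSE_HARD so that a single damaged
 * dataset does not abort the scan of the remaining ones, e.g.:
 *
 *	err = traverse_pool(spa, 0,
 *	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | TRAVERSE_HARD,
 *	    count_blocks_cb, &count);
 *
 * With TRAVERSE_HARD, EIO/ECKSUM from individual blocks are ignored
 * (see traverse_visitbp()) and per-dataset open failures are skipped
 * rather than propagated.
 */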