/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2014, Delphix. All rights reserved.
 * Copyright (c) 2019 Datto Inc.
 * Copyright (c) 2021, 2022, George Amanakis. All rights reserved.
 */
/*
 * Routines to manage the on-disk persistent error log.
 *
 * Each pool stores a log of all logical data errors seen during normal
 * operation. This is actually the union of two distinct logs: the last log,
 * and the current log. All errors seen are logged to the current log. When a
 * scrub completes, the current log becomes the last log, the last log is
 * thrown out, and the current log is reinitialized. This way, if an error is
 * somehow corrected, a new scrub will show that it no longer exists, and it
 * will be deleted from the log when the scrub completes.
 *
 * The log is stored using a ZAP object whose key is a string form of the
 * zbookmark_phys tuple (objset, object, level, blkid), and whose value is an
 * optional 'objset:object' human-readable string describing the data. When an
 * error is first logged, this string will be empty, indicating that no name
 * is known. This prevents us from having to issue a potentially large amount
 * of I/O to discover the object name during an error path. Instead, we do the
 * calculation when the data is requested, storing the result so that future
 * queries will be faster.
 *
 * If the head_errlog feature is enabled, a different on-disk format is used.
 * The error log of each head dataset is stored separately in the ZAP object
 * and keyed by the head id. This enables listing every dataset affected in
 * userland. In order to be able to track whether an error block has been
 * modified or added to snapshots since it was marked as an error, a new
 * tuple is introduced: zbookmark_err_phys_t. It allows the storage of the
 * birth transaction group of an error block on-disk. The birth transaction
 * group is used by check_filesystem() to assess whether this block was
 * freed, re-written or added to a snapshot since it was marked as an error.
 *
 * This log is then shipped into an nvlist where the key is the dataset name
 * and the value is the object name. Userland is then responsible for
 * uniquifying this list and displaying it to the user.
 */
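
/*
 * For illustration, the two on-disk layouts sketched as ZAP key/value pairs
 * (all numbers hypothetical), matching bookmark_to_name() and
 * errphys_to_name() below:
 *
 *   Without head_errlog, one flat ZAP per log:
 *     "425:7:0:1c4" -> ""        key: objset:object:level:blkid (hex);
 *                                value: the optional name string, empty
 *                                until the name is computed
 *
 *   With head_errlog, a ZAP of per-dataset error-log ZAPs:
 *     "54" -> <err_obj>          key: head dataset object number
 *   and within <err_obj>:
 *     "7:0:1c4:9a2" -> ""        key: object:level:blkid:birth (hex)
 */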
#include <sys/dmu_tx.h>
#include <sys/spa_impl.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_objset.h>
#include <sys/zfs_znode.h>

#define	NAME_MAX_LEN	64

typedef struct clones {
	uint64_t clone_ds;
	list_node_t node;
} clones_t;

/*
 * spa_upgrade_errlog_limit : A zfs module parameter that controls the number
 * of on-disk error log entries that will be converted to the new
 * format when enabling head_errlog. Defaults to 0 which converts
 * all error blocks.
 */
static uint_t spa_upgrade_errlog_limit = 0;
/*
 * Convert a bookmark to a string.
 */
bookmark_to_name(zbookmark_phys_t *zb, char *buf, size_t len)
	(void) snprintf(buf, len, "%llx:%llx:%llx:%llx",
	    (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
	    (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid);

/*
 * Convert an err_phys to a string.
 */
errphys_to_name(zbookmark_err_phys_t *zep, char *buf, size_t len)
	(void) snprintf(buf, len, "%llx:%llx:%llx:%llx",
	    (u_longlong_t)zep->zb_object, (u_longlong_t)zep->zb_level,
	    (u_longlong_t)zep->zb_blkid, (u_longlong_t)zep->zb_birth);

/*
 * Convert a string to an err_phys.
 */
name_to_errphys(char *buf, zbookmark_err_phys_t *zep)
	zep->zb_object = zfs_strtonum(buf, &buf);
	zep->zb_level = (int)zfs_strtonum(buf + 1, &buf);
	zep->zb_blkid = zfs_strtonum(buf + 1, &buf);
	zep->zb_birth = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == '\0');
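
/*
 * A minimal round-trip sketch (hypothetical values): errphys_to_name() and
 * name_to_errphys() are inverses. A zbookmark_err_phys_t of
 * { .zb_object = 0x7, .zb_level = 0, .zb_blkid = 0x1c4, .zb_birth = 0x9a2 }
 * encodes to the ZAP key "7:0:1c4:9a2" and parses back to the same tuple.
 */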
/*
 * Convert a string to a bookmark.
 */
name_to_bookmark(char *buf, zbookmark_phys_t *zb)
	zb->zb_objset = zfs_strtonum(buf, &buf);
	zb->zb_object = zfs_strtonum(buf + 1, &buf);
	zb->zb_level = (int)zfs_strtonum(buf + 1, &buf);
	zb->zb_blkid = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == '\0');

zep_to_zb(uint64_t dataset, zbookmark_err_phys_t *zep, zbookmark_phys_t *zb)
	zb->zb_objset = dataset;
	zb->zb_object = zep->zb_object;
	zb->zb_level = zep->zb_level;
	zb->zb_blkid = zep->zb_blkid;

name_to_object(char *buf, uint64_t *obj)
	*obj = zfs_strtonum(buf, &buf);
	ASSERT(*buf == '\0');
/*
 * Retrieve the head filesystem.
 */
static int get_head_ds(spa_t *spa, uint64_t dsobj, uint64_t *head_ds)
	dsl_dataset_t *ds;
	int error = dsl_dataset_hold_obj(spa->spa_dsl_pool,
	    dsobj, FTAG, &ds);

	*head_ds = dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj;
	dsl_dataset_rele(ds, FTAG);

/*
 * Log an uncorrectable error to the persistent error log. We add it to the
 * spa's list of pending errors. The changes are actually synced out to disk
 * during spa_errlog_sync().
 */
spa_log_error(spa_t *spa, const zbookmark_phys_t *zb, const uint64_t *birth)
	spa_error_entry_t search;
	spa_error_entry_t *new;
	avl_tree_t *tree;
	avl_index_t where;

	/*
	 * If we are trying to import a pool, ignore any errors, as we won't be
	 * writing to the pool any time soon.
	 */
	if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT)
		return;

	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * If we have had a request to rotate the log, log it to the next list
	 * instead of the current one.
	 */
	if (spa->spa_scrub_active || spa->spa_scrub_finished)
		tree = &spa->spa_errlist_scrub;
	else
		tree = &spa->spa_errlist_last;

	search.se_bookmark = *zb;
	if (avl_find(tree, &search, &where) != NULL) {
		mutex_exit(&spa->spa_errlist_lock);
		return;
	}

	new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP);
	new->se_bookmark = *zb;

	/*
	 * If the head_errlog feature is enabled, store the birth txg now. In
	 * case the file is deleted before spa_errlog_sync() runs, we will not
	 * be able to retrieve the birth txg.
	 */
	if (spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		new->se_zep.zb_object = zb->zb_object;
		new->se_zep.zb_level = zb->zb_level;
		new->se_zep.zb_blkid = zb->zb_blkid;

		/*
		 * birth may end up being NULL, e.g. in zio_done(). We
		 * will handle this in process_error_block().
		 */
		if (birth != NULL)
			new->se_zep.zb_birth = *birth;
	}

	avl_insert(tree, new, where);
	mutex_exit(&spa->spa_errlist_lock);
find_birth_txg(dsl_dataset_t *ds, zbookmark_err_phys_t *zep,
    uint64_t *birth_txg)
	objset_t *os;
	int error = dmu_objset_from_ds(ds, &os);

	dnode_t *dn;
	error = dnode_hold(os, zep->zb_object, FTAG, &dn);

	blkptr_t bp;
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	error = dbuf_dnode_findbp(dn, zep->zb_level, zep->zb_blkid, &bp, NULL,
	    NULL);
	if (error == 0 && BP_IS_HOLE(&bp))
		error = SET_ERROR(ENOENT);

	*birth_txg = bp.blk_birth;
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
/*
 * Copy the bookmark to the end of the user-space buffer which starts at
 * uaddr and has *count unused entries, and decrement *count by 1.
 */
copyout_entry(const zbookmark_phys_t *zb, void *uaddr, uint64_t *count)
	if (*count == 0)
		return (SET_ERROR(ENOMEM));

	*count -= 1;
	if (copyout(zb, (char *)uaddr + (*count) * sizeof (zbookmark_phys_t),
	    sizeof (zbookmark_phys_t)) != 0)
		return (SET_ERROR(EFAULT));
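
/*
 * Illustrative numbers: a caller that starts with *count == 3 sees
 * successive successful calls fill uaddr[2], then uaddr[1], then uaddr[0]
 * (slots of sizeof (zbookmark_phys_t) bytes each). The array is thus filled
 * from the back, with *count unused slots remaining at the front.
 */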
/*
 * Each time the error block is referenced by a snapshot or clone, add a
 * zbookmark_phys_t entry to the userspace array at uaddr. The array is
 * filled from the back and the in-out parameter *count is modified to be the
 * number of unused entries at the beginning of the array.
 */
check_filesystem(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep,
    void *uaddr, uint64_t *count, list_t *clones_list)
	dsl_dataset_t *ds;
	dsl_pool_t *dp = spa->spa_dsl_pool;

	int error = dsl_dataset_hold_obj(dp, head_ds, FTAG, &ds);
	if (error != 0)
		return (error);

	uint64_t latest_txg;
	uint64_t txg_to_consider = spa->spa_syncing_txg;
	boolean_t check_snapshot = B_TRUE;
	error = find_birth_txg(ds, zep, &latest_txg);
	/*
	 * If the filesystem is encrypted and the key is not loaded or the
	 * encrypted filesystem is not mounted, the error will be EACCES.
	 * In that case report an error in the head filesystem and return.
	 */
	if (error == EACCES) {
		dsl_dataset_rele(ds, FTAG);
		zbookmark_phys_t zb;
		zep_to_zb(head_ds, zep, &zb);
		error = copyout_entry(&zb, uaddr, count);
		return (error);
	}
	/*
	 * If find_birth_txg() errors out otherwise, let txg_to_consider be
	 * equal to the spa's syncing txg: if check_filesystem() errors out
	 * then affected snapshots or clones will not be checked.
	 */
	if (error == 0 && zep->zb_birth == latest_txg) {
		/* Block neither freed nor rewritten. */
		zbookmark_phys_t zb;
		zep_to_zb(head_ds, zep, &zb);
		error = copyout_entry(&zb, uaddr, count);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}
		check_snapshot = B_FALSE;
	} else if (error == 0) {
		txg_to_consider = latest_txg;
	}
	/*
	 * Retrieve the number of snapshots if the dataset is not a snapshot.
	 */
	uint64_t snap_count = 0;
	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {
		error = zap_count(spa->spa_meta_objset,
		    dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}
	}

	if (snap_count == 0) {
		/* Filesystem without snapshots. */
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	uint64_t *snap_obj_array = kmem_zalloc(snap_count * sizeof (uint64_t),
	    KM_SLEEP);

	int aff_snap_count = 0;
	uint64_t snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	uint64_t snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
	uint64_t zap_clone = dsl_dir_phys(ds->ds_dir)->dd_clones;

	dsl_dataset_rele(ds, FTAG);

	/* Check only snapshots created from this file system. */
	while (snap_obj != 0 && zep->zb_birth < snap_obj_txg &&
	    snap_obj_txg <= txg_to_consider) {

		error = dsl_dataset_hold_obj(dp, snap_obj, FTAG, &ds);
		if (error != 0)
			break;

		if (dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj != head_ds) {
			snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
			snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
			dsl_dataset_rele(ds, FTAG);
			continue;
		}

		boolean_t affected = B_TRUE;
		if (check_snapshot) {
			uint64_t blk_txg;
			error = find_birth_txg(ds, zep, &blk_txg);
			affected = (error == 0 && zep->zb_birth == blk_txg);
		}

		/* Report errors in snapshots. */
		if (affected) {
			snap_obj_array[aff_snap_count] = snap_obj;
			aff_snap_count++;

			zbookmark_phys_t zb;
			zep_to_zb(snap_obj, zep, &zb);
			error = copyout_entry(&zb, uaddr, count);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				goto out;
			}
		}

		snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
		snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
		dsl_dataset_rele(ds, FTAG);
	}
	if (zap_clone == 0 || aff_snap_count == 0) {
		error = 0;
		goto out;
	}

	zap_cursor_t *zc;
	zap_attribute_t *za;
	zc = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP);
	za = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP);

	for (zap_cursor_init(zc, spa->spa_meta_objset, zap_clone);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {

		dsl_dataset_t *clone;
		error = dsl_dataset_hold_obj(dp, za->za_first_integer,
		    FTAG, &clone);
		if (error != 0)
			continue;

		/*
		 * Only clones whose origins were affected could also
		 * have affected snapshots.
		 */
		boolean_t found = B_FALSE;
		for (int i = 0; i < snap_count; i++) {
			if (dsl_dir_phys(clone->ds_dir)->dd_origin_obj
			    == snap_obj_array[i])
				found = B_TRUE;
		}
		dsl_dataset_rele(clone, FTAG);

		if (!found)
			continue;

		clones_t *ct = kmem_zalloc(sizeof (*ct), KM_SLEEP);
		ct->clone_ds = za->za_first_integer;
		list_insert_tail(clones_list, ct);
	}
	zap_cursor_fini(zc);

	kmem_free(za, sizeof (*za));
	kmem_free(zc, sizeof (*zc));

out:
	kmem_free(snap_obj_array, snap_count * sizeof (uint64_t));
	return (error);
find_top_affected_fs(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep,
    uint64_t *top_affected_fs)
	uint64_t oldest_dsobj;
	int error = dsl_dataset_oldest_snapshot(spa, head_ds, zep->zb_birth,
	    &oldest_dsobj);
	if (error != 0)
		return (error);

	dsl_dataset_t *ds;
	error = dsl_dataset_hold_obj(spa->spa_dsl_pool, oldest_dsobj,
	    FTAG, &ds);
	if (error != 0)
		return (error);

	*top_affected_fs =
	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj;
	dsl_dataset_rele(ds, FTAG);
	return (0);

process_error_block(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep,
    void *uaddr, uint64_t *count)
	/*
	 * If zb_birth == 0 or head_ds == 0 it means we failed to retrieve the
	 * birth txg or the head filesystem of the block pointer. This may
	 * happen e.g. when an encrypted filesystem is not mounted or when
	 * the key is not loaded. In this case do not proceed to
	 * check_filesystem(), instead do the accounting here.
	 */
	if (zep->zb_birth == 0 || head_ds == 0) {
		zbookmark_phys_t zb;
		zep_to_zb(head_ds, zep, &zb);
		int error = copyout_entry(&zb, uaddr, count);
		if (error != 0)
			return (error);
		return (0);
	}
	uint64_t top_affected_fs;
	int error = find_top_affected_fs(spa, head_ds, zep, &top_affected_fs);
	if (error != 0)
		return (error);

	clones_t *ct;
	list_t clones_list;

	list_create(&clones_list, sizeof (clones_t),
	    offsetof(clones_t, node));

	error = check_filesystem(spa, top_affected_fs, zep,
	    uaddr, count, &clones_list);

	while ((ct = list_remove_head(&clones_list)) != NULL) {
		error = check_filesystem(spa, ct->clone_ds, zep,
		    uaddr, count, &clones_list);
		kmem_free(ct, sizeof (*ct));
	}

	while (!list_is_empty(&clones_list)) {
		ct = list_remove_head(&clones_list);
		kmem_free(ct, sizeof (*ct));
	}

	list_destroy(&clones_list);
	return (error);
/*
 * If a healed bookmark matches an entry in the error log we stash it in a
 * tree so that we can later remove the related log entries in sync context.
 */
spa_add_healed_error(spa_t *spa, uint64_t obj, zbookmark_phys_t *healed_zb)
	char name[NAME_MAX_LEN];

	if (obj == 0)
		return;

	bookmark_to_name(healed_zb, name, sizeof (name));
	mutex_enter(&spa->spa_errlog_lock);
	if (zap_contains(spa->spa_meta_objset, obj, name) == 0) {
		/*
		 * Found an error matching healed zb, add zb to our
		 * tree of healed errors
		 */
		avl_tree_t *tree = &spa->spa_errlist_healed;
		spa_error_entry_t search;
		spa_error_entry_t *new;
		avl_index_t where;

		search.se_bookmark = *healed_zb;
		mutex_enter(&spa->spa_errlist_lock);
		if (avl_find(tree, &search, &where) != NULL) {
			mutex_exit(&spa->spa_errlist_lock);
			mutex_exit(&spa->spa_errlog_lock);
			return;
		}
		new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP);
		new->se_bookmark = *healed_zb;
		avl_insert(tree, new, where);
		mutex_exit(&spa->spa_errlist_lock);
	}
	mutex_exit(&spa->spa_errlog_lock);
/*
 * If this error exists in the given tree, remove it.
 */
remove_error_from_list(spa_t *spa, avl_tree_t *t, const zbookmark_phys_t *zb)
	spa_error_entry_t search, *found;
	avl_index_t where;

	mutex_enter(&spa->spa_errlist_lock);
	search.se_bookmark = *zb;
	if ((found = avl_find(t, &search, &where)) != NULL) {
		avl_remove(t, found);
		kmem_free(found, sizeof (spa_error_entry_t));
	}
	mutex_exit(&spa->spa_errlist_lock);
/*
 * Removes all of the recv healed errors from both on-disk error logs.
 */
spa_remove_healed_errors(spa_t *spa, avl_tree_t *s, avl_tree_t *l, dmu_tx_t *tx)
	char name[NAME_MAX_LEN];
	spa_error_entry_t *se;
	void *cookie = NULL;

	ASSERT(MUTEX_HELD(&spa->spa_errlog_lock));

	while ((se = avl_destroy_nodes(&spa->spa_errlist_healed,
	    &cookie)) != NULL) {
		remove_error_from_list(spa, s, &se->se_bookmark);
		remove_error_from_list(spa, l, &se->se_bookmark);
		bookmark_to_name(&se->se_bookmark, name, sizeof (name));
		kmem_free(se, sizeof (spa_error_entry_t));
		(void) zap_remove(spa->spa_meta_objset,
		    spa->spa_errlog_last, name, tx);
		(void) zap_remove(spa->spa_meta_objset,
		    spa->spa_errlog_scrub, name, tx);
	}
/*
 * Stash away healed bookmarks to remove them from the on-disk error logs
 * later in spa_remove_healed_errors().
 */
spa_remove_error(spa_t *spa, zbookmark_phys_t *zb)
	char name[NAME_MAX_LEN];

	bookmark_to_name(zb, name, sizeof (name));

	spa_add_healed_error(spa, spa->spa_errlog_last, zb);
	spa_add_healed_error(spa, spa->spa_errlog_scrub, zb);

approx_errlog_size_impl(spa_t *spa, uint64_t spa_err_obj)
	if (spa_err_obj == 0)
		return (0);
	uint64_t total = 0;

	zap_cursor_t zc;
	zap_attribute_t za;
	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
	    zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {
		uint64_t count;
		if (zap_count(spa->spa_meta_objset, za.za_first_integer,
		    &count) == 0)
			total += count;
	}
	zap_cursor_fini(&zc);
	return (total);
/*
 * Return the approximate number of errors currently in the error log. This
 * will be nonzero if there are some errors, but otherwise it may be more
 * or less than the number of entries returned by spa_get_errlog().
 */
spa_approx_errlog_size(spa_t *spa)
	uint64_t total = 0;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		mutex_enter(&spa->spa_errlog_lock);
		uint64_t count;
		if (spa->spa_errlog_scrub != 0 &&
		    zap_count(spa->spa_meta_objset, spa->spa_errlog_scrub,
		    &count) == 0)
			total += count;

		if (spa->spa_errlog_last != 0 && !spa->spa_scrub_finished &&
		    zap_count(spa->spa_meta_objset, spa->spa_errlog_last,
		    &count) == 0)
			total += count;
		mutex_exit(&spa->spa_errlog_lock);
	} else {
		mutex_enter(&spa->spa_errlog_lock);
		total += approx_errlog_size_impl(spa, spa->spa_errlog_last);
		total += approx_errlog_size_impl(spa, spa->spa_errlog_scrub);
		mutex_exit(&spa->spa_errlog_lock);
	}

	mutex_enter(&spa->spa_errlist_lock);
	total += avl_numnodes(&spa->spa_errlist_last);
	total += avl_numnodes(&spa->spa_errlist_scrub);
	mutex_exit(&spa->spa_errlist_lock);
	return (total);
/*
 * This function sweeps through an on-disk error log and stores all bookmarks
 * as error bookmarks in a new ZAP object. At the end we discard the old one,
 * and spa_upgrade_errlog() will set the spa's on-disk error log to the new
 * ZAP object.
 */
sync_upgrade_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t *newobj,
    dmu_tx_t *tx)
	uint64_t count;

	*newobj = zap_create(spa->spa_meta_objset, DMU_OT_ERROR_LOG,
	    DMU_OT_NONE, 0, tx);

	/*
	 * If we cannot perform the upgrade we should clear the old on-disk
	 * error log.
	 */
	if (zap_count(spa->spa_meta_objset, spa_err_obj, &count) != 0) {
		VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx));
		return;
	}

	zap_cursor_t zc;
	zap_attribute_t za;
	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		if (spa_upgrade_errlog_limit != 0 &&
		    zc.zc_cd == spa_upgrade_errlog_limit)
			break;
		zbookmark_phys_t zb;
		name_to_bookmark(za.za_name, &zb);

		zbookmark_err_phys_t zep;
		zep.zb_object = zb.zb_object;
		zep.zb_level = zb.zb_level;
		zep.zb_blkid = zb.zb_blkid;
		zep.zb_birth = 0;

		/*
		 * In case of an error we should simply continue instead of
		 * returning prematurely. See the next comment.
		 */
		uint64_t head_ds;
		dsl_pool_t *dp = spa->spa_dsl_pool;
		dsl_dataset_t *ds;
		objset_t *os;

		int error = dsl_dataset_hold_obj(dp, zb.zb_objset, FTAG, &ds);
		if (error != 0)
			continue;

		head_ds = dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj;
		/*
		 * The objset and the dnode are required for getting the block
		 * pointer, which is used to determine if the block is a hole
		 * (BP_IS_HOLE()). If getting the objset or the dnode fails,
		 * do not create a zap entry (presuming we know the dataset)
		 * as this may create spurious errors that we cannot ever
		 * resolve. If an error is truly persistent, it should
		 * reappear after a scan.
		 */
		if (dmu_objset_from_ds(ds, &os) != 0) {
			dsl_dataset_rele(ds, FTAG);
			continue;
		}

		dnode_t *dn;
		if (dnode_hold(os, zep.zb_object, FTAG, &dn) != 0) {
			dsl_dataset_rele(ds, FTAG);
			continue;
		}
		blkptr_t bp;
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		error = dbuf_dnode_findbp(dn, zep.zb_level, zep.zb_blkid, &bp,
		    NULL, NULL);
		if (error == 0)
			zep.zb_birth = bp.blk_birth;

		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
		dsl_dataset_rele(ds, FTAG);

		if (error != 0 || BP_IS_HOLE(&bp))
			continue;
		uint64_t err_obj;
		error = zap_lookup_int_key(spa->spa_meta_objset, *newobj,
		    head_ds, &err_obj);

		if (error == ENOENT) {
			err_obj = zap_create(spa->spa_meta_objset,
			    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

			(void) zap_update_int_key(spa->spa_meta_objset,
			    *newobj, head_ds, err_obj, tx);
		}

		char buf[NAME_MAX_LEN];
		errphys_to_name(&zep, buf, sizeof (buf));

		const char *name = "";
		(void) zap_update(spa->spa_meta_objset, err_obj,
		    buf, 1, strlen(name) + 1, name, tx);
	}
	zap_cursor_fini(&zc);

	VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx));
spa_upgrade_errlog(spa_t *spa, dmu_tx_t *tx)
	uint64_t newobj = 0;

	mutex_enter(&spa->spa_errlog_lock);
	if (spa->spa_errlog_last != 0) {
		sync_upgrade_errlog(spa, spa->spa_errlog_last, &newobj, tx);
		spa->spa_errlog_last = newobj;
	}

	if (spa->spa_errlog_scrub != 0) {
		sync_upgrade_errlog(spa, spa->spa_errlog_scrub, &newobj, tx);
		spa->spa_errlog_scrub = newobj;
	}
	mutex_exit(&spa->spa_errlog_lock);
/*
 * If an error block is shared by two datasets it will be counted twice.
 */
process_error_log(spa_t *spa, uint64_t obj, void *uaddr, uint64_t *count)
	zap_cursor_t *zc;
	zap_attribute_t *za;

	zc = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP);
	za = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP);

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (zap_cursor_init(zc, spa->spa_meta_objset, obj);
		    zap_cursor_retrieve(zc, za) == 0;
		    zap_cursor_advance(zc)) {
			if (*count == 0) {
				zap_cursor_fini(zc);
				kmem_free(zc, sizeof (*zc));
				kmem_free(za, sizeof (*za));
				return (SET_ERROR(ENOMEM));
			}

			zbookmark_phys_t zb;
			name_to_bookmark(za->za_name, &zb);

			int error = copyout_entry(&zb, uaddr, count);
			if (error != 0) {
				zap_cursor_fini(zc);
				kmem_free(zc, sizeof (*zc));
				kmem_free(za, sizeof (*za));
				return (error);
			}
		}
		zap_cursor_fini(zc);
		kmem_free(zc, sizeof (*zc));
		kmem_free(za, sizeof (*za));
		return (0);
	}
	for (zap_cursor_init(zc, spa->spa_meta_objset, obj);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {

		zap_cursor_t *head_ds_cursor;
		zap_attribute_t *head_ds_attr;

		head_ds_cursor = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP);
		head_ds_attr = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP);

		uint64_t head_ds_err_obj = za->za_first_integer;
		uint64_t head_ds;
		name_to_object(za->za_name, &head_ds);
		for (zap_cursor_init(head_ds_cursor, spa->spa_meta_objset,
		    head_ds_err_obj); zap_cursor_retrieve(head_ds_cursor,
		    head_ds_attr) == 0; zap_cursor_advance(head_ds_cursor)) {

			zbookmark_err_phys_t head_ds_block;
			name_to_errphys(head_ds_attr->za_name, &head_ds_block);
			int error = process_error_block(spa, head_ds,
			    &head_ds_block, uaddr, count);

			if (error != 0) {
				zap_cursor_fini(head_ds_cursor);
				kmem_free(head_ds_cursor,
				    sizeof (*head_ds_cursor));
				kmem_free(head_ds_attr, sizeof (*head_ds_attr));

				zap_cursor_fini(zc);
				kmem_free(za, sizeof (*za));
				kmem_free(zc, sizeof (*zc));
				return (error);
			}
		}
		zap_cursor_fini(head_ds_cursor);
		kmem_free(head_ds_cursor, sizeof (*head_ds_cursor));
		kmem_free(head_ds_attr, sizeof (*head_ds_attr));
	}
	zap_cursor_fini(zc);
	kmem_free(za, sizeof (*za));
	kmem_free(zc, sizeof (*zc));
	return (0);
process_error_list(spa_t *spa, avl_tree_t *list, void *uaddr, uint64_t *count)
	spa_error_entry_t *se;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (se = avl_first(list); se != NULL;
		    se = AVL_NEXT(list, se)) {
			int error =
			    copyout_entry(&se->se_bookmark, uaddr, count);
			if (error != 0)
				return (error);
		}
		return (0);
	}

	for (se = avl_first(list); se != NULL; se = AVL_NEXT(list, se)) {
		uint64_t head_ds = 0;
		int error = get_head_ds(spa, se->se_bookmark.zb_objset,
		    &head_ds);

		/*
		 * If get_head_ds() errors out, set the head filesystem
		 * to the filesystem stored in the bookmark of the
		 * error block.
		 */
		if (error != 0)
			head_ds = se->se_bookmark.zb_objset;

		error = process_error_block(spa, head_ds,
		    &se->se_zep, uaddr, count);
		if (error != 0)
			return (error);
	}
	return (0);
/*
 * Copy all known errors to userland as an array of bookmarks. This is
 * actually a union of the on-disk last log and current log, as well as any
 * pending error requests.
 *
 * Because the act of reading the on-disk log could cause errors to be
 * generated, we have two separate locks: one for the error log and one for
 * the in-core error lists. We only need the error list lock to log an error,
 * so we grab the error log lock while we read the on-disk logs, and only
 * pick up the error list lock when we are finished.
 */
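
/*
 * Note the lock ordering this implies, also followed by spa_errlog_sync()
 * and spa_add_healed_error():
 *   dsl_pool config lock -> spa_errlog_lock -> spa_errlist_lock
 */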
spa_get_errlog(spa_t *spa, void *uaddr, uint64_t *count)
	int ret = 0;

#ifdef _KERNEL
	/*
	 * The pool config lock is needed to hold a dataset_t via (among other
	 * places) process_error_list() -> process_error_block() ->
	 * find_top_affected_fs(), and lock ordering requires that we get it
	 * before the spa_errlog_lock.
	 */
	dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
	mutex_enter(&spa->spa_errlog_lock);

	ret = process_error_log(spa, spa->spa_errlog_scrub, uaddr, count);

	if (!ret && !spa->spa_scrub_finished)
		ret = process_error_log(spa, spa->spa_errlog_last, uaddr,
		    count);

	mutex_enter(&spa->spa_errlist_lock);
	if (!ret)
		ret = process_error_list(spa, &spa->spa_errlist_scrub, uaddr,
		    count);
	if (!ret)
		ret = process_error_list(spa, &spa->spa_errlist_last, uaddr,
		    count);
	mutex_exit(&spa->spa_errlist_lock);

	mutex_exit(&spa->spa_errlog_lock);
	dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
#else
	(void) spa, (void) uaddr, (void) count;
#endif

	return (ret);
/*
 * Called when a scrub completes. This simply sets a bit which tells which
 * AVL tree to add new errors. spa_errlog_sync() is responsible for actually
 * syncing the changes to the underlying objects.
 */
spa_errlog_rotate(spa_t *spa)
	mutex_enter(&spa->spa_errlist_lock);
	spa->spa_scrub_finished = B_TRUE;
	mutex_exit(&spa->spa_errlist_lock);
/*
 * Discard any pending errors from the spa_t. Called when unloading a faulted
 * pool, as the errors encountered during the open cannot be synced to disk.
 */
spa_errlog_drain(spa_t *spa)
	spa_error_entry_t *se;
	void *cookie;

	mutex_enter(&spa->spa_errlist_lock);

	cookie = NULL;
	while ((se = avl_destroy_nodes(&spa->spa_errlist_last,
	    &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));

	cookie = NULL;
	while ((se = avl_destroy_nodes(&spa->spa_errlist_scrub,
	    &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));

	mutex_exit(&spa->spa_errlist_lock);
/*
 * Process a list of errors into the current on-disk log.
 */
sync_error_list(spa_t *spa, avl_tree_t *t, uint64_t *obj, dmu_tx_t *tx)
	spa_error_entry_t *se;
	char buf[NAME_MAX_LEN];
	void *cookie;

	if (avl_numnodes(t) == 0)
		return;

	/* create log if necessary */
	if (*obj == 0)
		*obj = zap_create(spa->spa_meta_objset, DMU_OT_ERROR_LOG,
		    DMU_OT_NONE, 0, tx);

	/* add errors to the current log */
	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
			bookmark_to_name(&se->se_bookmark, buf, sizeof (buf));

			const char *name = se->se_name ? se->se_name : "";
			(void) zap_update(spa->spa_meta_objset, *obj, buf, 1,
			    strlen(name) + 1, name, tx);
		}
	} else {
		for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
			zbookmark_err_phys_t zep;
			zep.zb_object = se->se_zep.zb_object;
			zep.zb_level = se->se_zep.zb_level;
			zep.zb_blkid = se->se_zep.zb_blkid;
			zep.zb_birth = se->se_zep.zb_birth;

			uint64_t head_ds = 0;
			int error = get_head_ds(spa, se->se_bookmark.zb_objset,
			    &head_ds);

			/*
			 * If get_head_ds() errors out, set the head filesystem
			 * to the filesystem stored in the bookmark of the
			 * error block.
			 */
			if (error != 0)
				head_ds = se->se_bookmark.zb_objset;

			uint64_t err_obj;
			error = zap_lookup_int_key(spa->spa_meta_objset,
			    *obj, head_ds, &err_obj);

			if (error == ENOENT) {
				err_obj = zap_create(spa->spa_meta_objset,
				    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

				(void) zap_update_int_key(spa->spa_meta_objset,
				    *obj, head_ds, err_obj, tx);
			}
			errphys_to_name(&zep, buf, sizeof (buf));

			const char *name = se->se_name ? se->se_name : "";
			(void) zap_update(spa->spa_meta_objset,
			    err_obj, buf, 1, strlen(name) + 1, name, tx);
		}
	}

	/* purge the error list */
	cookie = NULL;
	while ((se = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));
delete_errlog(spa_t *spa, uint64_t spa_err_obj, dmu_tx_t *tx)
	if (spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		zap_cursor_t zc;
		zap_attribute_t za;

		for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    zap_cursor_advance(&zc)) {
			VERIFY0(dmu_object_free(spa->spa_meta_objset,
			    za.za_first_integer, tx));
		}
		zap_cursor_fini(&zc);
	}
	VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx));
/*
 * Sync the error log out to disk. This is a little tricky because the act of
 * writing the error log requires the spa_errlist_lock. So, we need to lock
 * the error lists, take a copy of the lists, and then reinitialize them.
 * Then, we drop the error list lock and take the error log lock, at which
 * point we do the errlog processing. Then, if we encounter an I/O error
 * during this process, we can successfully add the error to the list. Note
 * that this will result in the perpetual recycling of errors, but it is an
 * unlikely situation and not a performance-critical operation.
 */
spa_errlog_sync(spa_t *spa, uint64_t txg)
	dmu_tx_t *tx;
	avl_tree_t scrub, last;
	int scrub_finished;

	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * Bail out early under normal circumstances.
	 */
	if (avl_numnodes(&spa->spa_errlist_scrub) == 0 &&
	    avl_numnodes(&spa->spa_errlist_last) == 0 &&
	    avl_numnodes(&spa->spa_errlist_healed) == 0 &&
	    !spa->spa_scrub_finished) {
		mutex_exit(&spa->spa_errlist_lock);
		return;
	}

	spa_get_errlists(spa, &last, &scrub);
	scrub_finished = spa->spa_scrub_finished;
	spa->spa_scrub_finished = B_FALSE;

	mutex_exit(&spa->spa_errlist_lock);
	/*
	 * The pool config lock is needed to hold a dataset_t via
	 * sync_error_list() -> get_head_ds(), and lock ordering
	 * requires that we get it before the spa_errlog_lock.
	 */
	dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
	mutex_enter(&spa->spa_errlog_lock);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

	/*
	 * Remove healed errors from the error lists.
	 */
	spa_remove_healed_errors(spa, &last, &scrub, tx);
	/*
	 * Sync out the current list of errors.
	 */
	sync_error_list(spa, &last, &spa->spa_errlog_last, tx);

	/*
	 * Rotate the log if necessary.
	 */
	if (scrub_finished) {
		if (spa->spa_errlog_last != 0)
			delete_errlog(spa, spa->spa_errlog_last, tx);
		spa->spa_errlog_last = spa->spa_errlog_scrub;
		spa->spa_errlog_scrub = 0;

		sync_error_list(spa, &scrub, &spa->spa_errlog_last, tx);
	}

	/*
	 * Sync out any pending scrub errors.
	 */
	sync_error_list(spa, &scrub, &spa->spa_errlog_scrub, tx);

	/*
	 * Update the MOS to reflect the new values.
	 */
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_LAST, sizeof (uint64_t), 1,
	    &spa->spa_errlog_last, tx);
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_SCRUB, sizeof (uint64_t), 1,
	    &spa->spa_errlog_scrub, tx);

	dmu_tx_commit(tx);

	mutex_exit(&spa->spa_errlog_lock);
	dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
delete_dataset_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t ds,
    dmu_tx_t *tx)
	if (spa_err_obj == 0)
		return;

	zap_cursor_t zc;
	zap_attribute_t za;
	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
	    zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {
		uint64_t head_ds;
		name_to_object(za.za_name, &head_ds);
		if (head_ds == ds) {
			(void) zap_remove(spa->spa_meta_objset, spa_err_obj,
			    za.za_name, tx);
			VERIFY0(dmu_object_free(spa->spa_meta_objset,
			    za.za_first_integer, tx));
		}
	}
	zap_cursor_fini(&zc);

spa_delete_dataset_errlog(spa_t *spa, uint64_t ds, dmu_tx_t *tx)
	mutex_enter(&spa->spa_errlog_lock);
	delete_dataset_errlog(spa, spa->spa_errlog_scrub, ds, tx);
	delete_dataset_errlog(spa, spa->spa_errlog_last, ds, tx);
	mutex_exit(&spa->spa_errlog_lock);
find_txg_ancestor_snapshot(spa_t *spa, uint64_t new_head, uint64_t old_head,
    uint64_t *txg)
	dsl_dataset_t *ds;
	dsl_pool_t *dp = spa->spa_dsl_pool;

	int error = dsl_dataset_hold_obj(dp, old_head, FTAG, &ds);
	if (error != 0)
		return (error);

	uint64_t prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	uint64_t prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;

	while (prev_obj != 0) {
		dsl_dataset_rele(ds, FTAG);
		if ((error = dsl_dataset_hold_obj(dp, prev_obj,
		    FTAG, &ds)) == 0 &&
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj == new_head)
			break;

		if (error != 0)
			return (error);

		prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
		prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	}
	dsl_dataset_rele(ds, FTAG);
	ASSERT(prev_obj != 0);
	*txg = prev_obj_txg;
	return (0);
swap_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t new_head, uint64_t
    old_head, dmu_tx_t *tx)
	if (spa_err_obj == 0)
		return;

	uint64_t old_head_errlog;
	int error = zap_lookup_int_key(spa->spa_meta_objset, spa_err_obj,
	    old_head, &old_head_errlog);

	/* If no error log, then there is nothing to do. */
	if (error != 0)
		return;

	uint64_t txg;
	error = find_txg_ancestor_snapshot(spa, new_head, old_head, &txg);
	if (error != 0)
		return;

	/*
	 * Create an error log if the file system being promoted does not
	 * already contain one.
	 */
	uint64_t new_head_errlog;
	error = zap_lookup_int_key(spa->spa_meta_objset, spa_err_obj, new_head,
	    &new_head_errlog);

	if (error != 0) {
		new_head_errlog = zap_create(spa->spa_meta_objset,
		    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

		(void) zap_update_int_key(spa->spa_meta_objset, spa_err_obj,
		    new_head, new_head_errlog, tx);
	}
	zap_cursor_t zc;
	zap_attribute_t za;
	zbookmark_err_phys_t err_block;
	for (zap_cursor_init(&zc, spa->spa_meta_objset, old_head_errlog);
	    zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {

		const char *name = "";
		name_to_errphys(za.za_name, &err_block);
		if (err_block.zb_birth < txg) {
			(void) zap_update(spa->spa_meta_objset, new_head_errlog,
			    za.za_name, 1, strlen(name) + 1, name, tx);

			(void) zap_remove(spa->spa_meta_objset, old_head_errlog,
			    za.za_name, tx);
		}
	}
	zap_cursor_fini(&zc);

spa_swap_errlog(spa_t *spa, uint64_t new_head_ds, uint64_t old_head_ds,
    dmu_tx_t *tx)
	mutex_enter(&spa->spa_errlog_lock);
	swap_errlog(spa, spa->spa_errlog_scrub, new_head_ds, old_head_ds, tx);
	swap_errlog(spa, spa->spa_errlog_last, new_head_ds, old_head_ds, tx);
	mutex_exit(&spa->spa_errlog_lock);
#if defined(_KERNEL)
/* error handling */
EXPORT_SYMBOL(spa_log_error);
EXPORT_SYMBOL(spa_approx_errlog_size);
EXPORT_SYMBOL(spa_get_errlog);
EXPORT_SYMBOL(spa_errlog_rotate);
EXPORT_SYMBOL(spa_errlog_drain);
EXPORT_SYMBOL(spa_errlog_sync);
EXPORT_SYMBOL(spa_get_errlists);
EXPORT_SYMBOL(spa_delete_dataset_errlog);
EXPORT_SYMBOL(spa_swap_errlog);
EXPORT_SYMBOL(sync_error_list);
EXPORT_SYMBOL(spa_upgrade_errlog);
#endif /* _KERNEL */
ZFS_MODULE_PARAM(zfs_spa, spa_, upgrade_errlog_limit, UINT, ZMOD_RW,
	"Limit the number of errors which will be upgraded to the new "
	"on-disk error log when enabling head_errlog");