 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>
#include <sys/trace_zfs.h>
 * ZFS must limit the rate of incoming writes to the rate at which it is able
 * to sync data modifications to the backend storage. Throttling by too much
 * creates an artificial limit; throttling by too little can only be sustained
 * for short periods and would lead to highly lumpy performance. On a per-pool
 * basis, ZFS tracks the amount of modified (dirty) data. As operations change
 * data, the amount of dirty data increases; as ZFS syncs out data, the amount
 * of dirty data decreases. When the amount of dirty data exceeds a
 * predetermined threshold, further modifications are blocked until the amount
 * of dirty data decreases (as data is synced out).
 * The limit on dirty data is tunable, and should be adjusted according to
 * both the IO capacity and available memory of the system. The larger the
 * window, the more ZFS is able to aggregate and amortize metadata (and data)
 * changes. However, memory is a limited resource, and allowing for more dirty
 * data comes at the cost of keeping other useful data in memory (for example
 * ZFS data cached by the ARC).
 * As buffers are modified dsl_pool_willuse_space() increments both the per-
 * txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
 * dirty space used; dsl_pool_undirty_space() decrements those values as data
 * is synced out from dsl_pool_sync(). While only the poolwide value is
 * relevant, the per-txg value is useful for debugging. The tunable
 * zfs_dirty_data_max determines the dirty space limit. Once that value is
 * exceeded, new writes are halted until space frees up.
 * The zfs_dirty_data_sync_percent tunable dictates the threshold at which we
 * ensure that there is a txg syncing (see the comment in txg.c for a full
 * description of transaction group stages).
 *
 * The IO scheduler uses both the dirty space limit and current amount of
 * dirty data as inputs. Those values affect the number of concurrent IOs ZFS
 * issues. See the comment in vdev_queue.c for details of the IO scheduler.
 *
 * The delay is also calculated based on the amount of dirty data. See the
 * comment above dmu_tx_delay() for details.
 * zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
 * capped at zfs_dirty_data_max_max. It can also be overridden with a module
 * parameter.
unsigned long zfs_dirty_data_max = 0;
unsigned long zfs_dirty_data_max_max = 0;
int zfs_dirty_data_max_percent = 10;
int zfs_dirty_data_max_max_percent = 25;
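/*
 * A rough sketch of how those defaults are resolved at module load (the
 * actual initialization happens in arc_init(), per the module parameter
 * notes at the end of this file); "allmem" here stands in for the total
 * system memory in bytes:
 *
 *	if (zfs_dirty_data_max == 0) {
 *		zfs_dirty_data_max = allmem * zfs_dirty_data_max_percent / 100;
 *		zfs_dirty_data_max = MIN(zfs_dirty_data_max,
 *		    zfs_dirty_data_max_max);
 *	}
 */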
 * If there's at least this much dirty data (as a percentage of
 * zfs_dirty_data_max), push out a txg. This should be less than
 * zfs_vdev_async_write_active_min_dirty_percent.
int zfs_dirty_data_sync_percent = 20;
 * Once there is this amount of dirty data, dmu_tx_delay() will kick in
 * and delay each transaction.
 * This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
int zfs_delay_min_dirty_percent = 60;
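/*
 * Expressed in bytes, the two thresholds above work out to (the same
 * derivation appears in dsl_pool_need_dirty_delay() below):
 *
 *	delay_min_bytes = zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100
 *	dirty_min_bytes = zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100
 *
 * A txg sync is ensured once dp_dirty_total exceeds dirty_min_bytes, and
 * dmu_tx_delay() starts delaying transactions once it exceeds delay_min_bytes.
 */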
 * This controls how quickly the delay approaches infinity.
 * Larger values cause it to delay more for a given amount of dirty data.
 * Therefore larger values will cause there to be less dirty data for a
 * given period of time.
 *
 * For the smoothest delay, this value should be about 1 billion divided
 * by the maximum number of operations per second. This will smoothly
 * handle between 10x and 1/10th this number.
 *
 * Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
 * multiply in dmu_tx_delay().
unsigned long zfs_delay_scale = 1000 * 1000 * 1000 / 2000;
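/*
 * For example, the default above works out to 1000000000 / 2000 = 500000,
 * i.e. it is tuned for a device that can sustain roughly 2000 operations
 * per second and, per the note above, should behave smoothly from roughly
 * 200 to 20000 operations per second.
 */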
 * This determines the number of threads used by the dp_sync_taskq.
int zfs_sync_taskq_batch_pct = 75;
 * These tunables determine the behavior of how zil_itxg_clean() is
 * called via zil_clean() in the context of spa_sync(). When an itxg
 * list needs to be cleaned, TQ_NOSLEEP will be used when dispatching.
 * If the dispatch fails, the call to zil_itxg_clean() will occur
 * synchronously in the context of spa_sync(), which can negatively
 * impact the performance of spa_sync() (e.g. in the case of the itxg
 * list having a large number of itxs that need to be cleaned).
 * Thus, these tunables can be used to manipulate the behavior of the
 * taskq used by zil_clean(); they determine the number of taskq entries
 * that are pre-populated when the taskq is first created (via the
 * "zfs_zil_clean_taskq_minalloc" tunable) and the maximum number of
 * taskq entries that are cached after an on-demand allocation (via the
 * "zfs_zil_clean_taskq_maxalloc").
 * The idea is that we want to try reasonably hard to ensure there will
 * already be a taskq entry pre-allocated by the time that it is needed
 * by zil_clean(). This way, we can avoid the possibility of an
 * on-demand allocation of a new taskq entry failing, which would
 * result in zil_itxg_clean() being called synchronously from zil_clean()
 * (which can adversely affect performance of spa_sync()).
 * Additionally, the number of threads used by the taskq can be
 * configured via the "zfs_zil_clean_taskq_nthr_pct" tunable.
int zfs_zil_clean_taskq_nthr_pct = 100;
int zfs_zil_clean_taskq_minalloc = 1024;
int zfs_zil_clean_taskq_maxalloc = 1024 * 1024;
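/*
 * A sketch of the dispatch-with-fallback pattern described above (the real
 * logic lives in zil_clean(); the argument names are illustrative only):
 *
 *	if (taskq_dispatch(dp->dp_zil_clean_taskq, zil_itxg_clean, itxs,
 *	    TQ_NOSLEEP) == TASKQID_INVALID)
 *		zil_itxg_clean(itxs);
 *
 * i.e. if no taskq entry can be obtained without sleeping, the itxg list is
 * cleaned synchronously in the context of spa_sync().
 */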
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
err = zap_lookup(dp->dp_meta_objset,
dsl_dir_phys(dp->dp_root_dir)->dd_child_dir_zapobj,
name, sizeof (obj), 1, &obj);
return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
blkptr_t *bp = spa_get_rootblkptr(spa);
dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
dp->dp_meta_rootbp = *bp;
rrw_init(&dp->dp_config_rwlock, B_TRUE);
txg_list_create(&dp->dp_dirty_datasets, spa,
offsetof(dsl_dataset_t, ds_dirty_link));
txg_list_create(&dp->dp_dirty_zilogs, spa,
offsetof(zilog_t, zl_dirty_link));
txg_list_create(&dp->dp_dirty_dirs, spa,
offsetof(dsl_dir_t, dd_dirty_link));
txg_list_create(&dp->dp_sync_tasks, spa,
offsetof(dsl_sync_task_t, dst_node));
txg_list_create(&dp->dp_early_sync_tasks, spa,
offsetof(dsl_sync_task_t, dst_node));
dp->dp_sync_taskq = taskq_create("dp_sync_taskq",
zfs_sync_taskq_batch_pct, minclsyspri, 1, INT_MAX,
TASKQ_THREADS_CPU_PCT);
dp->dp_zil_clean_taskq = taskq_create("dp_zil_clean_taskq",
zfs_zil_clean_taskq_nthr_pct, minclsyspri,
zfs_zil_clean_taskq_minalloc,
zfs_zil_clean_taskq_maxalloc,
TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);
dp->dp_zrele_taskq = taskq_create("z_zrele", boot_ncpus, defclsyspri,
boot_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
dp->dp_unlinked_drain_taskq = taskq_create("z_unlinked_drain",
boot_ncpus, defclsyspri, boot_ncpus, INT_MAX,
TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
 * Initialize the caller's dsl_pool_t structure before we actually open
 * the meta objset. This is done because a self-healing write zio may
 * be issued as part of dmu_objset_open_impl() and the spa needs its
 * dsl_pool_t initialized in order to handle the write.
err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
&dp->dp_meta_objset);
dsl_pool_open(dsl_pool_t *dp)
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
&dp->dp_root_dir_obj);
err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
NULL, dp, &dp->dp_root_dir);
err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
err = dsl_dataset_hold_obj(dp,
dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds);
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, dp,
&dp->dp_origin_snap);
dsl_dataset_rele(ds, FTAG);
dsl_dir_rele(dd, dp);
if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
VERIFY0(bpobj_open(&dp->dp_free_bpobj,
dp->dp_meta_objset, obj));
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj);
VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj,
dp->dp_meta_objset, obj));
} else if (err == ENOENT) {
 * We might not have created the remap bpobj yet.
 * Note: errors ignored, because these special dirs, used for
 * space accounting, are only created on demand.
(void) dsl_pool_open_special_dir(dp, LEAK_DIR_NAME,
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMPTY_BPOBJ)) {
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
&dp->dp_empty_bpobj);
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
&dp->dp_tmp_userrefs_obj);
err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);
rrw_exit(&dp->dp_config_rwlock, FTAG);
dsl_pool_close(dsl_pool_t *dp)
 * Drop our references from dsl_pool_open().
 *
 * Since we held the origin_snap from "syncing" context (which
 * includes pool-opening context), it actually only got a "ref"
 * and not a hold, so just drop that here.
if (dp->dp_origin_snap != NULL)
dsl_dataset_rele(dp->dp_origin_snap, dp);
if (dp->dp_mos_dir != NULL)
dsl_dir_rele(dp->dp_mos_dir, dp);
if (dp->dp_free_dir != NULL)
dsl_dir_rele(dp->dp_free_dir, dp);
if (dp->dp_leak_dir != NULL)
dsl_dir_rele(dp->dp_leak_dir, dp);
if (dp->dp_root_dir != NULL)
dsl_dir_rele(dp->dp_root_dir, dp);
bpobj_close(&dp->dp_free_bpobj);
bpobj_close(&dp->dp_obsolete_bpobj);
/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
if (dp->dp_meta_objset != NULL)
dmu_objset_evict(dp->dp_meta_objset);
txg_list_destroy(&dp->dp_dirty_datasets);
txg_list_destroy(&dp->dp_dirty_zilogs);
txg_list_destroy(&dp->dp_sync_tasks);
txg_list_destroy(&dp->dp_early_sync_tasks);
txg_list_destroy(&dp->dp_dirty_dirs);
taskq_destroy(dp->dp_zil_clean_taskq);
taskq_destroy(dp->dp_sync_taskq);
 * We can't set retry to TRUE since we're explicitly specifying
 * a spa to flush. This is good enough; any missed buffers for
 * this spa won't cause trouble, and they'll eventually fall
 * out of the ARC just like any other unused buffer.
arc_flush(dp->dp_spa, FALSE);
mmp_fini(dp->dp_spa);
dmu_buf_user_evict_wait();
rrw_destroy(&dp->dp_config_rwlock);
mutex_destroy(&dp->dp_lock);
cv_destroy(&dp->dp_spaceavail_cv);
taskq_destroy(dp->dp_unlinked_drain_taskq);
taskq_destroy(dp->dp_zrele_taskq);
if (dp->dp_blkstats != NULL) {
mutex_destroy(&dp->dp_blkstats->zab_lock);
vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
kmem_free(dp, sizeof (dsl_pool_t));
dsl_pool_create_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
 * Currently, we only create the obsolete_bpobj where there are
 * indirect vdevs with referenced mappings.
ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_DEVICE_REMOVAL));
/* create and open the obsolete_bpobj */
obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj, dp->dp_meta_objset, obj));
VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
spa_feature_incr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
dsl_pool_destroy_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
spa_feature_decr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
VERIFY0(zap_remove(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ, tx));
bpobj_free(dp->dp_meta_objset,
dp->dp_obsolete_bpobj.bpo_object, tx);
bpobj_close(&dp->dp_obsolete_bpobj);
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, dsl_crypto_params_t *dcp,
dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
objset_t *os __attribute__((unused));
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
/* create and open the MOS (meta-objset) */
dp->dp_meta_objset = dmu_objset_create_impl(spa,
NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);
spa->spa_meta_objset = dp->dp_meta_objset;
/* create the pool directory */
err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
/* Initialize scan structures */
VERIFY0(dsl_scan_init(dp, txg));
/* create and open the root dir */
dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
NULL, dp, &dp->dp_root_dir));
/* create and open the meta-objset dir */
(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
VERIFY0(dsl_pool_open_special_dir(dp,
MOS_DIR_NAME, &dp->dp_mos_dir));
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
/* create and open the free dir */
(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
VERIFY0(dsl_pool_open_special_dir(dp,
FREE_DIR_NAME, &dp->dp_free_dir));
/* create and open the free_bplist */
obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
VERIFY0(bpobj_open(&dp->dp_free_bpobj,
dp->dp_meta_objset, obj));
if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
dsl_pool_create_origin(dp, tx);
 * Some features may be needed when creating the root dataset, so we
 * create the feature objects here.
if (spa_version(spa) >= SPA_VERSION_FEATURES)
spa_feature_create_zap_objects(spa, tx);
if (dcp != NULL && dcp->cp_crypt != ZIO_CRYPT_OFF &&
dcp->cp_crypt != ZIO_CRYPT_INHERIT)
spa_feature_enable(spa, SPA_FEATURE_ENCRYPTION, tx);
/* create the root dataset */
obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, dcp, 0, tx);
/* create the root objset */
VERIFY0(dsl_dataset_hold_obj_flags(dp, obj,
DS_HOLD_FLAG_DECRYPT, FTAG, &ds));
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
os = dmu_objset_create_impl(dp->dp_spa, ds,
dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
zfs_create_fs(os, kcred, zplprops, tx);
dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
rrw_exit(&dp->dp_config_rwlock, FTAG);
 * Account for the meta-objset space in its placeholder dsl_dir.
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
int64_t used, int64_t comp, int64_t uncomp)
ASSERT3U(comp, ==, uncomp); /* it's all metadata */
mutex_enter(&dp->dp_lock);
dp->dp_mos_used_delta += used;
dp->dp_mos_compressed_delta += comp;
dp->dp_mos_uncompressed_delta += uncomp;
mutex_exit(&dp->dp_lock);
dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
dmu_objset_sync(dp->dp_meta_objset, zio, tx);
VERIFY0(zio_wait(zio));
dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
ASSERT(MUTEX_HELD(&dp->dp_lock));
ASSERT3U(-delta, <=, dp->dp_dirty_total);
dp->dp_dirty_total += delta;
 * Note: we signal even when increasing dp_dirty_total.
 * This ensures forward progress -- each thread wakes the next waiter.
if (dp->dp_dirty_total < zfs_dirty_data_max)
cv_signal(&dp->dp_spaceavail_cv);
dsl_early_sync_task_verify(dsl_pool_t *dp, uint64_t txg)
spa_t *spa = dp->dp_spa;
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
txg_list_t *tl = &vd->vdev_ms_list;
for (ms = txg_list_head(tl, TXG_CLEAN(txg)); ms;
ms = txg_list_next(tl, ms, TXG_CLEAN(txg))) {
VERIFY(range_tree_is_empty(ms->ms_freeing));
VERIFY(range_tree_is_empty(ms->ms_checkpointing));
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
objset_t *mos = dp->dp_meta_objset;
list_t synced_datasets;
list_create(&synced_datasets, sizeof (dsl_dataset_t),
offsetof(dsl_dataset_t, ds_synced_link));
tx = dmu_tx_create_assigned(dp, txg);
 * Run all early sync tasks before writing out any dirty blocks.
 * For more info on early sync tasks see block comment in
 * dsl_early_sync_task().
if (!txg_list_empty(&dp->dp_early_sync_tasks, txg)) {
dsl_sync_task_t *dst;
ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
while ((dst =
txg_list_remove(&dp->dp_early_sync_tasks, txg)) != NULL) {
ASSERT(dsl_early_sync_task_verify(dp, txg));
dsl_sync_task_sync(dst, tx);
ASSERT(dsl_early_sync_task_verify(dp, txg));
 * Write out all dirty blocks of dirty datasets.
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
 * We must not sync any non-MOS datasets twice, because
 * we may have taken a snapshot of them. However, we
 * may sync newly-created datasets on pass 2.
ASSERT(!list_link_active(&ds->ds_synced_link));
list_insert_tail(&synced_datasets, ds);
dsl_dataset_sync(ds, zio, tx);
VERIFY0(zio_wait(zio));
 * Update the long range free counter after
 * we're done syncing user data
mutex_enter(&dp->dp_lock);
ASSERT(spa_sync_pass(dp->dp_spa) == 1 ||
dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);
dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;
mutex_exit(&dp->dp_lock);
 * After the data blocks have been written (ensured by the zio_wait()
 * above), update the user/group/project space accounting. This happens
 * in tasks dispatched to dp_sync_taskq, so wait for them before
 * continuing.
for (ds = list_head(&synced_datasets); ds != NULL;
ds = list_next(&synced_datasets, ds)) {
dmu_objset_do_userquota_updates(ds->ds_objset, tx);
taskq_wait(dp->dp_sync_taskq);
 * Sync the datasets again to push out the changes due to
 * userspace updates. This must be done before we process the
 * sync tasks, so that any snapshots will have the correct
 * user accounting information (and we won't get confused
 * about which blocks are part of the snapshot).
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
objset_t *os = ds->ds_objset;
ASSERT(list_link_active(&ds->ds_synced_link));
dmu_buf_rele(ds->ds_dbuf, ds);
dsl_dataset_sync(ds, zio, tx);
 * Release any key mappings created by calls to
 * dsl_dataset_dirty() from the userquota accounting
 * code.
if (os->os_encrypted && !os->os_raw_receive &&
!os->os_next_write_raw[txg & TXG_MASK]) {
ASSERT3P(ds->ds_key_mapping, !=, NULL);
key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
VERIFY0(zio_wait(zio));
 * Now that the datasets have been completely synced, we can
 * clean up our in-memory structures accumulated while syncing:
 *
 * - move dead blocks from the pending deadlist and livelists
 *   to the on-disk versions
 * - release hold from dsl_dataset_dirty()
 * - release key mapping hold from dsl_dataset_dirty()
while ((ds = list_remove_head(&synced_datasets)) != NULL) {
objset_t *os = ds->ds_objset;
if (os->os_encrypted && !os->os_raw_receive &&
!os->os_next_write_raw[txg & TXG_MASK]) {
ASSERT3P(ds->ds_key_mapping, !=, NULL);
key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
dsl_dataset_sync_done(ds, tx);
while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
dsl_dir_sync(dd, tx);
 * The MOS's space is accounted for in the pool/$MOS
 * (dp_mos_dir). We can't modify the mos while we're syncing
 * it, so we remember the deltas and apply them here.
if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
dp->dp_mos_uncompressed_delta != 0) {
dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
dp->dp_mos_used_delta,
dp->dp_mos_compressed_delta,
dp->dp_mos_uncompressed_delta, tx);
dp->dp_mos_used_delta = 0;
dp->dp_mos_compressed_delta = 0;
dp->dp_mos_uncompressed_delta = 0;
if (dmu_objset_is_dirty(mos, txg)) {
dsl_pool_sync_mos(dp, tx);
 * We have written all of the accounted dirty data, so our
 * dp_space_towrite should now be zero. However, some seldom-used
 * code paths do not adhere to this (e.g. dbuf_undirty()). Shore up
 * the accounting of any dirtied space now.
 *
 * Note that, besides any dirty data from datasets, the amount of
 * dirty data in the MOS is also accounted by the pool. Therefore,
 * we want to do this cleanup after dsl_pool_sync_mos() so we don't
 * attempt to update the accounting for the same dirty data twice.
 * (i.e. at this point we only update the accounting for the space
 * that we know we "leaked").
dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);
 * If we modify a dataset in the same txg that we want to destroy it,
 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
 * dsl_dir_destroy_check() will fail if there are unexpected holds.
 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
 * and clearing the hold on it) before we process the sync_tasks.
 * The MOS data dirtied by the sync_tasks will be synced on the next
 * pass.
if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
dsl_sync_task_t *dst;
 * No more sync tasks should have been added while we
 * were syncing.
ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
dsl_sync_task_sync(dst, tx);
DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg);
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
while ((zilog = txg_list_head(&dp->dp_dirty_zilogs, txg))) {
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
 * We don't remove the zilog from the dp_dirty_zilogs
 * list until after we've cleaned it. This ensures that
 * callers of zilog_is_dirty() receive an accurate
 * answer when they are racing with the spa sync thread.
zil_clean(zilog, txg);
(void) txg_list_remove_this(&dp->dp_dirty_zilogs, zilog, txg);
ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
dmu_buf_rele(ds->ds_dbuf, zilog);
ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
 * TRUE if the current thread is the tx_sync_thread or if we
 * are being called from SPA context during pool initialization.
dsl_pool_sync_context(dsl_pool_t *dp)
return (curthread == dp->dp_tx.tx_sync_thread ||
spa_is_initializing(dp->dp_spa) ||
taskq_member(dp->dp_sync_taskq, curthread));
 * This function returns the amount of allocatable space in the pool
 * minus whatever space is currently reserved by ZFS for specific
 * purposes. Specifically:
 *
 * 1] Any reserved SLOP space
 * 2] Any space used by the checkpoint
 * 3] Any space used for deferred frees
 *
 * The latter 2 are especially important because they are needed to
 * rectify the SPA's and DMU's different understanding of how much space
 * is used. Now the DMU is aware of that extra space tracked by the SPA
 * without having to maintain a separate special dir (e.g. similar to
 * $MOS, $FREEING, and $LEAKED).
 *
 * Note: By deferred frees here, we mean the frees that were deferred
 * in spa_sync() after sync pass 1 (spa_deferred_bpobj), and not the
 * segments placed in ms_defer trees during metaslab_sync_done().
dsl_pool_adjustedsize(dsl_pool_t *dp, zfs_space_check_t slop_policy)
spa_t *spa = dp->dp_spa;
uint64_t space, resv, adjustedsize;
uint64_t spa_deferred_frees =
spa->spa_deferred_bpobj.bpo_phys->bpo_bytes;
space = spa_get_dspace(spa)
- spa_get_checkpoint_space(spa) - spa_deferred_frees;
resv = spa_get_slop_space(spa);
switch (slop_policy) {
case ZFS_SPACE_CHECK_NORMAL:
case ZFS_SPACE_CHECK_RESERVED:
case ZFS_SPACE_CHECK_EXTRA_RESERVED:
case ZFS_SPACE_CHECK_NONE:
panic("invalid slop policy value: %d", slop_policy);
adjustedsize = (space >= resv) ? (space - resv) : 0;
return (adjustedsize);
dsl_pool_unreserved_space(dsl_pool_t *dp, zfs_space_check_t slop_policy)
uint64_t poolsize = dsl_pool_adjustedsize(dp, slop_policy);
uint64_t deferred =
metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
uint64_t quota = (poolsize >= deferred) ? (poolsize - deferred) : 0;
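/*
 * A hypothetical caller-side check built on the two helpers above (not
 * taken from this file): reject an operation that needs "bytes" of new
 * space when it would not fit in the unreserved portion of the pool:
 *
 *	if (bytes > dsl_pool_unreserved_space(dp, ZFS_SPACE_CHECK_NORMAL))
 *		return (SET_ERROR(ENOSPC));
 */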
dsl_pool_need_dirty_delay(dsl_pool_t *dp)
uint64_t delay_min_bytes =
zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
uint64_t dirty_min_bytes =
zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;
mutex_enter(&dp->dp_lock);
dirty = dp->dp_dirty_total;
mutex_exit(&dp->dp_lock);
if (dirty > dirty_min_bytes)
return (dirty > delay_min_bytes);
dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
mutex_enter(&dp->dp_lock);
dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
dsl_pool_dirty_delta(dp, space);
mutex_exit(&dp->dp_lock);
dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
ASSERT3S(space, >=, 0);
mutex_enter(&dp->dp_lock);
if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
/* XXX writing something we didn't dirty? */
space = dp->dp_dirty_pertxg[txg & TXG_MASK];
ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
ASSERT3U(dp->dp_dirty_total, >=, space);
dsl_pool_dirty_delta(dp, -space);
mutex_exit(&dp->dp_lock);
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
dsl_dataset_t *ds, *prev = NULL;
err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
dsl_dataset_rele(ds, FTAG);
if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object)
dsl_dataset_rele(ds, FTAG);
prev = dp->dp_origin_snap;
 * The $ORIGIN can't have any data, or the accounting
 * would be wrong.
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
ASSERT0(dsl_dataset_phys(prev)->ds_bp.blk_birth);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
/* The origin doesn't get attached to itself */
if (ds->ds_object == prev->ds_object) {
dsl_dataset_rele(ds, FTAG);
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_prev_snap_obj = prev->ds_object;
dsl_dataset_phys(ds)->ds_prev_snap_txg =
dsl_dataset_phys(prev)->ds_creation_txg;
dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
dsl_dir_phys(ds->ds_dir)->dd_origin_obj = prev->ds_object;
dmu_buf_will_dirty(prev->ds_dbuf, tx);
dsl_dataset_phys(prev)->ds_num_children++;
if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
ASSERT(ds->ds_prev == NULL);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj,
ASSERT3U(dsl_dir_phys(ds->ds_dir)->dd_origin_obj, ==, prev->ds_object);
ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_obj, ==, prev->ds_object);
if (dsl_dataset_phys(prev)->ds_next_clones_obj == 0) {
dmu_buf_will_dirty(prev->ds_dbuf, tx);
dsl_dataset_phys(prev)->ds_next_clones_obj =
zap_create(dp->dp_meta_objset,
DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));
dsl_dataset_rele(ds, FTAG);
if (prev != dp->dp_origin_snap)
dsl_dataset_rele(prev, FTAG);
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dp->dp_origin_snap != NULL);
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
objset_t *mos = dp->dp_meta_objset;
if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
dsl_dataset_t *origin;
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));
if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
dsl_dir_phys(origin->ds_dir)->dd_clones =
zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dir_phys(origin->ds_dir)->dd_clones,
ds->ds_object, tx));
dsl_dataset_rele(origin, FTAG);
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
ASSERT(dmu_tx_is_syncing(tx));
(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
VERIFY0(dsl_pool_open_special_dir(dp,
FREE_DIR_NAME, &dp->dp_free_dir));
 * We can't use bpobj_alloc(), because spa_version() still
 * returns the old version, and we need a new-version bpobj with
 * subobj support. So call dmu_object_alloc() directly.
obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dp->dp_origin_snap == NULL);
ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));
/* create the origin dir, ds, & snap-ds */
dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
NULL, 0, kcred, NULL, tx);
VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
dp, &dp->dp_origin_snap));
dsl_dataset_rele(ds, FTAG);
dsl_pool_zrele_taskq(dsl_pool_t *dp)
return (dp->dp_zrele_taskq);
dsl_pool_unlinked_drain_taskq(dsl_pool_t *dp)
return (dp->dp_unlinked_drain_taskq);
 * Walk through the pool-wide zap object of temporary snapshot user holds
 * and release them.
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
objset_t *mos = dp->dp_meta_objset;
uint64_t zapobj = dp->dp_tmp_userrefs_obj;
ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
holds = fnvlist_alloc();
for (zap_cursor_init(&zc, mos, zapobj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
htag = strchr(za.za_name, '-');
if (nvlist_lookup_nvlist(holds, za.za_name, &tags) != 0) {
tags = fnvlist_alloc();
fnvlist_add_boolean(tags, htag);
fnvlist_add_nvlist(holds, za.za_name, tags);
fnvlist_add_boolean(tags, htag);
dsl_dataset_user_release_tmp(dp, holds);
fnvlist_free(holds);
zap_cursor_fini(&zc);
 * Create the pool-wide zap object for storing temporary snapshot holds.
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
objset_t *mos = dp->dp_meta_objset;
ASSERT(dp->dp_tmp_userrefs_obj == 0);
ASSERT(dmu_tx_is_syncing(tx));
dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
objset_t *mos = dp->dp_meta_objset;
uint64_t zapobj = dp->dp_tmp_userrefs_obj;
ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
ASSERT(dmu_tx_is_syncing(tx));
 * If the pool was created prior to SPA_VERSION_USERREFS, the
 * zap object for temporary holds might not exist yet.
dsl_pool_user_hold_create_obj(dp, tx);
zapobj = dp->dp_tmp_userrefs_obj;
return (SET_ERROR(ENOENT));
name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
error = zap_remove(mos, zapobj, name, tx);
 * Add a temporary hold for the given dataset object and tag.
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
uint64_t now, dmu_tx_t *tx)
return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
 * Release a temporary hold for the given dataset object and tag.
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0,
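/*
 * Illustrative pairing of the two calls above from syncing context (the
 * tag string and the surrounding sync task are made up for this sketch):
 *
 *	dsl_pool_user_hold(dp, ds->ds_object, "tmp_hold_tag",
 *	    gethrestime_sec(), tx);
 *	...
 *	dsl_pool_user_release(dp, ds->ds_object, "tmp_hold_tag", tx);
 */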
 * DSL Pool Configuration Lock
 *
 * The dp_config_rwlock protects against changes to DSL state (e.g. dataset
 * creation / destruction / rename / property setting). It must be held for
 * read to hold a dataset or dsl_dir. I.e. you must call
 * dsl_pool_config_enter() or dsl_pool_hold() before calling
 * dsl_{dataset,dir}_hold{_obj}. In most circumstances, the dp_config_rwlock
 * must be held continuously until all datasets and dsl_dirs are released.
 *
 * The only exception to this rule is that if a "long hold" is placed on
 * a dataset, then the dp_config_rwlock may be dropped while the dataset
 * is still held. The long hold will prevent the dataset from being
 * destroyed -- the destroy will fail with EBUSY. A long hold can be
 * obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
 * (by calling dsl_{dataset,objset}_{try}own{_obj}).
 *
 * Legitimate long-holders (including owners) should be long-running, cancelable
 * tasks that should cause "zfs destroy" to fail. This includes DMU
 * consumers (i.e. a ZPL filesystem being mounted or ZVOL being open),
 * "zfs send", and "zfs diff". There are several other long-holders whose
 * uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
 * The usual formula for long-holding would be:
 * dsl_pool_hold()
 * dsl_dataset_hold()
 * ... perform checks ...
 * dsl_dataset_long_hold()
 * dsl_pool_rele()
 * ... perform long-running task ...
 * dsl_dataset_long_rele()
 * dsl_dataset_rele()
 * Note that when the long hold is released, the dataset is still held but
 * the pool is not held. The dataset may change arbitrarily during this time
 * (e.g. it could be destroyed). Therefore you shouldn't do anything to the
 * dataset except release it.
 *
 * User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
 * or modifying operations.
 * Modifying operations should generally use dsl_sync_task(). The synctask
 * infrastructure enforces proper locking strategy with respect to the
 * dp_config_rwlock. See the comment above dsl_sync_task() for details.
 *
 * Read-only operations will manually hold the pool, then the dataset, obtain
 * information from the dataset, then release the pool and dataset.
 * dmu_objset_{hold,rele}() are convenience routines that also do the pool
 * hold/rele.
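/*
 * A minimal sketch of the read-only pattern described above (error handling
 * omitted, and "pool/fs" stands in for a real dataset name):
 *
 *	dsl_pool_hold("pool/fs", FTAG, &dp);
 *	dsl_dataset_hold(dp, "pool/fs", FTAG, &ds);
 *	... copy out whatever information is needed from ds ...
 *	dsl_dataset_rele(ds, FTAG);
 *	dsl_pool_rele(dp, FTAG);
 */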
dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
error = spa_open(name, &spa, tag);
*dp = spa_get_dsl(spa);
dsl_pool_config_enter(*dp, tag);
dsl_pool_rele(dsl_pool_t *dp, void *tag)
dsl_pool_config_exit(dp, tag);
spa_close(dp->dp_spa, tag);
dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
 * We use a "reentrant" reader-writer lock, but not reentrantly.
 *
 * The rrwlock can (with the track_all flag) track all reading threads,
 * which is very useful for debugging which code path failed to release
 * the lock, and for verifying that the *current* thread does hold
 * the lock.
 *
 * (Unlike a rwlock, which knows that N threads hold it for
 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
 * if any thread holds it for read, even if this thread doesn't).
ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag)
ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
rrw_exit(&dp->dp_config_rwlock, tag);
dsl_pool_config_held(dsl_pool_t *dp)
return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
dsl_pool_config_held_writer(dsl_pool_t *dp)
return (RRW_WRITE_HELD(&dp->dp_config_rwlock));
EXPORT_SYMBOL(dsl_pool_config_enter);
EXPORT_SYMBOL(dsl_pool_config_exit);
/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_percent, INT, ZMOD_RD,
"Max percent of RAM allowed to be dirty");
/* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). */
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max_percent, INT, ZMOD_RD,
"zfs_dirty_data_max upper bound as % of RAM");
ZFS_MODULE_PARAM(zfs, zfs_, delay_min_dirty_percent, INT, ZMOD_RW,
"Transaction delay threshold");
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max, ULONG, ZMOD_RW,
"Determines the dirty space limit");
/* zfs_dirty_data_max_max only applied at module load in arc_init(). */
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max, ULONG, ZMOD_RD,
"zfs_dirty_data_max upper bound in bytes");
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_sync_percent, INT, ZMOD_RW,
"Dirty data txg sync threshold as a percentage of zfs_dirty_data_max");
ZFS_MODULE_PARAM(zfs, zfs_, delay_scale, ULONG, ZMOD_RW,
"How quickly delay approaches infinity");
ZFS_MODULE_PARAM(zfs, zfs_, sync_taskq_batch_pct, INT, ZMOD_RW,
"Max percent of CPUs that are used to sync dirty data");
ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_nthr_pct, INT, ZMOD_RW,
"Max percent of CPUs that are used per dp_zil_clean_taskq");
ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_minalloc, INT, ZMOD_RW,
"Number of taskq entries that are pre-populated");
ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_maxalloc, INT, ZMOD_RW,
"Max number of taskq entries that are cached");