/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>

int zfs_no_write_throttle = 0;
int zfs_write_limit_shift = 3;			/* 1/8th of physical memory */
int zfs_txg_synctime = 5;			/* target secs to sync a txg */

uint64_t zfs_write_limit_min = 32 << 20;	/* min write limit is 32MB */
uint64_t zfs_write_limit_max = 0;		/* max data payload per txg */
uint64_t zfs_write_limit_inflated = 0;
uint64_t zfs_write_limit_override = 0;

kmutex_t zfs_write_limit_lock;

static pgcnt_t old_physmem = 0;

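/*
 * Illustrative sizing (added exposition, not original source): with
 * the default zfs_write_limit_shift of 3 on a system with 8GB of
 * physical memory, dsl_pool_sync() computes
 *
 *	zfs_write_limit_max      = ptob(physmem) >> 3 = 1GB
 *	zfs_write_limit_inflated = MAX(zfs_write_limit_min,
 *	    spa_get_asize(spa, zfs_write_limit_max))
 *
 * where spa_get_asize() applies a worst-case replication multiplier
 * (6x at this vintage: up to 2x for 512-byte RAID-Z blocks times up
 * to 3 DVAs per block pointer), giving an inflated limit of about
 * 6GB.  Setting zfs_write_limit_override bypasses this calculation,
 * and zfs_no_write_throttle disables throttling altogether.
 */
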
static int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dp->dp_root_dir->dd_phys->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_open_obj(dp, obj, name, dp, ddp));
}

static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rw_init(&dp->dp_config_rwlock, NULL, RW_DEFAULT, NULL);
	dp->dp_write_limit = zfs_write_limit_min;
	txg_init(dp, txg);

	txg_list_create(&dp->dp_dirty_datasets,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks,
	    offsetof(dsl_sync_task_group_t, dstg_node));
	list_create(&dp->dp_synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&dp->dp_scrub_cancel_lock, NULL, MUTEX_DEFAULT, NULL);

	dp->dp_vnrele_taskq = taskq_create("zfs_vn_rele_taskq", 1, minclsyspri,
	    1, 4, 0);

	return (dp);
}

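/*
 * Note (added exposition, not original source): the txg_list_create()
 * calls above build intrusive per-txg lists.  Rather than allocating
 * separate list nodes, each structure embeds its own link field, and
 * offsetof() tells the list code where that link lives inside the
 * containing structure; e.g. given
 *
 *	txg_list_create(&dp->dp_dirty_datasets,
 *	    offsetof(dsl_dataset_t, ds_dirty_link));
 *
 * txg_list_add()/txg_list_remove() can recover the dsl_dataset_t from
 * its ds_dirty_link with no lookaside allocation.  Each txg_list keeps
 * TXG_SIZE sub-lists indexed by (txg & TXG_MASK), so dirtying in txg N
 * never contends with syncing txg N-1.
 */
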
int
dsl_pool_open(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_impl_t *osi;

	rw_enter(&dp->dp_config_rwlock, RW_WRITER);
	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp, &osi);
	if (err)
		goto out;
	dp->dp_meta_objset = &osi->os;

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp, dd->dd_phys->dd_head_dataset_obj,
		    FTAG, &ds);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    dp, &dp->dp_origin_snap);
		if (err)
			goto out;
		dsl_dataset_rele(ds, FTAG);
		dsl_dir_close(dd, dp);
	}

	/* get scrub status */
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_FUNC, sizeof (uint32_t), 1,
	    &dp->dp_scrub_func);
	if (err == 0) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCRUB_QUEUE, sizeof (uint64_t), 1,
		    &dp->dp_scrub_queue_obj);
		if (err)
			goto out;
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCRUB_MIN_TXG, sizeof (uint64_t), 1,
		    &dp->dp_scrub_min_txg);
		if (err)
			goto out;
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCRUB_MAX_TXG, sizeof (uint64_t), 1,
		    &dp->dp_scrub_max_txg);
		if (err)
			goto out;
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
		    &dp->dp_scrub_bookmark);
		if (err)
			goto out;
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
		    &spa->spa_scrub_errors);
		if (err)
			goto out;

		if (spa_version(spa) < SPA_VERSION_DSL_SCRUB) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool.  Restart from the beginning, since the
			 * old software may have changed the pool in the
			 * meantime.
			 */
			dsl_pool_scrub_restart(dp);
		}
	} else {
		/*
		 * It's OK if there is no scrub in progress (and if
		 * there was an I/O error, ignore it).
		 */
		err = 0;
	}

out:
	rw_exit(&dp->dp_config_rwlock);
	if (err)
		dsl_pool_close(dp);
	else
		*dpp = dp;

	return (err);
}

void
dsl_pool_close(dsl_pool_t *dp)
{
	/* drop our references from dsl_pool_open() */

	/*
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap)
		dsl_dataset_drop_ref(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir)
		dsl_dir_close(dp->dp_mos_dir, dp);
	if (dp->dp_root_dir)
		dsl_dir_close(dp->dp_root_dir, dp);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset)
		dmu_objset_evict(NULL, dp->dp_meta_objset->os);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_dirs);
	txg_list_destroy(&dp->dp_sync_tasks);
	list_destroy(&dp->dp_synced_datasets);

	arc_flush(dp->dp_spa);
	txg_fini(dp);
	rw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	mutex_destroy(&dp->dp_scrub_cancel_lock);
	taskq_destroy(dp->dp_vnrele_taskq);
	if (dp->dp_blkstats)
		kmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	kmem_free(dp, sizeof (dsl_pool_t));
}

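/*
 * Aside (added commentary, not original source): note the asymmetry
 * above.  Because dsl_pool_open() runs in what counts as syncing
 * context, its dsl_dataset_hold_obj() on the origin snapshot acquired
 * only a lightweight "ref", so the matching release is
 * dsl_dataset_drop_ref() rather than dsl_dataset_rele(), which would
 * also try to release a full hold that was never taken.
 */
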
dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
	objset_impl_t *osip;
	dsl_dataset_t *ds;
	uint64_t dsobj;

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = &dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx)->os;

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT3U(err, ==, 0);

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY(0 == dsl_dir_open_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY(0 == dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/* create the root dataset */
	dsobj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, 0, tx);

	/* create the root objset */
	VERIFY(0 == dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	osip = dmu_objset_create_impl(dp->dp_spa, ds,
	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
#ifdef _KERNEL
	zfs_create_fs(&osip->os, kcred, zplprops, tx);
#endif
	dsl_dataset_rele(ds, FTAG);

	dmu_tx_commit(tx);

	return (dp);
}

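/*
 * Note (added exposition, not original source): ordering matters in
 * the bootstrap above.  zap_create_claim() is used instead of
 * zap_create() because the pool directory must land at the well-known
 * object number DMU_POOL_DIRECTORY_OBJECT (1); every later consumer,
 * including the zap_lookup() calls in dsl_pool_open(), locates all
 * other pool-wide metadata by starting from that fixed object.
 */
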
void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	dsl_sync_task_group_t *dstg;
	objset_impl_t *mosi = dp->dp_meta_objset->os;
	hrtime_t start, write_time;
	uint64_t data_written;
	int err;

	tx = dmu_tx_create_assigned(dp, txg);

	dp->dp_read_overhead = 0;
	start = gethrtime();
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while (ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) {
		if (!list_link_active(&ds->ds_synced_link))
			list_insert_tail(&dp->dp_synced_datasets, ds);
		else
			dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	DTRACE_PROBE(pool_sync__1setup);

	err = zio_wait(zio);
	write_time = gethrtime() - start;
	ASSERT(err == 0);
	DTRACE_PROBE(pool_sync__2rootzio);

	while (dstg = txg_list_remove(&dp->dp_sync_tasks, txg))
		dsl_sync_task_group_sync(dstg, tx);
	DTRACE_PROBE(pool_sync__3task);

	start = gethrtime();
	while (dd = txg_list_remove(&dp->dp_dirty_dirs, txg))
		dsl_dir_sync(dd, tx);
	write_time += gethrtime() - start;

	if (spa_sync_pass(dp->dp_spa) == 1)
		dsl_pool_scrub_sync(dp, tx);

	start = gethrtime();
	if (list_head(&mosi->os_dirty_dnodes[txg & TXG_MASK]) != NULL ||
	    list_head(&mosi->os_free_dnodes[txg & TXG_MASK]) != NULL) {
		zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
		dmu_objset_sync(mosi, zio, tx);
		err = zio_wait(zio);
		ASSERT(err == 0);
		dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
	}
	write_time += gethrtime() - start;
	DTRACE_PROBE2(pool_sync__4io, hrtime_t, write_time,
	    hrtime_t, dp->dp_read_overhead);
	write_time -= dp->dp_read_overhead;

	dmu_tx_commit(tx);

	data_written = dp->dp_space_towrite[txg & TXG_MASK];
	dp->dp_space_towrite[txg & TXG_MASK] = 0;
	ASSERT(dp->dp_tempreserved[txg & TXG_MASK] == 0);

	/*
	 * If the write limit max has not been explicitly set, set it
	 * to a fraction of available physical memory (default 1/8th).
	 * Note that we must inflate the limit because the spa
	 * inflates write sizes to account for data replication.
	 * Check this each sync phase to catch changing memory size.
	 */
	if (physmem != old_physmem && zfs_write_limit_shift) {
		mutex_enter(&zfs_write_limit_lock);
		old_physmem = physmem;
		zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
		zfs_write_limit_inflated = MAX(zfs_write_limit_min,
		    spa_get_asize(dp->dp_spa, zfs_write_limit_max));
		mutex_exit(&zfs_write_limit_lock);
	}

	/*
	 * Attempt to keep the sync time consistent by adjusting the
	 * amount of write traffic allowed into each transaction group.
	 * Weight the throughput calculation towards the current value:
	 * 	thru = 3/4 old_thru + 1/4 new_thru
	 */
	ASSERT(zfs_write_limit_min > 0);
	if (data_written > zfs_write_limit_min / 8 && write_time > 0) {
		uint64_t throughput = (data_written * NANOSEC) / write_time;

		if (dp->dp_throughput)
			dp->dp_throughput = throughput / 4 +
			    3 * dp->dp_throughput / 4;
		else
			dp->dp_throughput = throughput;
		dp->dp_write_limit = MIN(zfs_write_limit_inflated,
		    MAX(zfs_write_limit_min,
		    dp->dp_throughput * zfs_txg_synctime));
	}
}

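/*
 * Worked example (added exposition, not original source): suppose the
 * previous estimate dp->dp_throughput is 200MB/s and this txg wrote
 * data_written = 400MB in write_time = 1 second, i.e. a new sample of
 * 400MB/s.  The exponential moving average above yields
 *
 *	dp_throughput  = 400/4 + 3*200/4 = 250MB/s
 *	dp_write_limit = MIN(zfs_write_limit_inflated,
 *	    MAX(zfs_write_limit_min, 250MB/s * 5s)) = 1250MB
 *
 * with the default zfs_txg_synctime of 5.  Each txg is thus allowed
 * roughly as much dirty data as the pool has demonstrated it can sync
 * in zfs_txg_synctime seconds, clamped between zfs_write_limit_min
 * and zfs_write_limit_inflated.
 */
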
void
dsl_pool_zil_clean(dsl_pool_t *dp)
{
	dsl_dataset_t *ds;

	while (ds = list_head(&dp->dp_synced_datasets)) {
		list_remove(&dp->dp_synced_datasets, ds);
		ASSERT(ds->ds_user_ptr != NULL);
		zil_clean(((objset_impl_t *)ds->ds_user_ptr)->os_zil);
		dmu_buf_rele(ds->ds_dbuf, ds);
	}
}

/*
 * TRUE if the current thread is the tx_sync_thread or if we
 * are being called from SPA context during pool initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_get_dsl(dp->dp_spa) == NULL);
}

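/*
 * Usage note (added commentary, not original source): this predicate
 * is commonly consumed inside assertions to document that a function
 * may only run from syncing context, e.g.:
 *
 *	ASSERT(dsl_pool_sync_context(dp));
 *
 * The spa_get_dsl() == NULL arm covers pool creation, when the DSL
 * has not yet been attached to the spa and no sync thread exists.
 */
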
uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree)
{
	uint64_t space, resv;

	/*
	 * Reserve about 1.6% (1/64), or at least 32MB, for allocation
	 * efficiency.
	 * XXX The intent log is not accounted for, so it must fit
	 * within this slop.
	 *
	 * If we're trying to assess whether it's OK to do a free,
	 * cut the reservation in half to allow forward progress
	 * (e.g. make it possible to rm(1) files from a full pool).
	 */
	space = spa_get_dspace(dp->dp_spa);
	resv = MAX(space >> 6, SPA_MINDEVSIZE >> 1);
	if (netfree)
		resv >>= 1;

	return (space - resv);
}

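/*
 * Worked example (added exposition, not original source): on a pool
 * with 1TB of deflated space, resv = MAX(1TB >> 6, 32MB) = 16GB, so
 * writers see an adjusted size of 1008GB.  For a net-free operation
 * (netfree == B_TRUE) the reservation drops to 8GB, which is what
 * lets a deletion proceed on a pool that looks "full" to ordinary
 * writes.  SPA_MINDEVSIZE is 64MB, so SPA_MINDEVSIZE >> 1 supplies
 * the 32MB floor mentioned in the comment above.
 */
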
int
dsl_pool_tempreserve_space(dsl_pool_t *dp, uint64_t space, dmu_tx_t *tx)
{
	uint64_t reserved = 0;
	uint64_t write_limit = (zfs_write_limit_override ?
	    zfs_write_limit_override : dp->dp_write_limit);

	if (zfs_no_write_throttle) {
		atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK],
		    space);
		return (0);
	}

	/*
	 * Check to see if we have exceeded the maximum allowed IO for
	 * this transaction group.  We can do this without locks since
	 * a little slop here is ok.  Note that we do the reserved check
	 * with only half the requested reserve: this is because the
	 * reserve requests are worst-case, and we really don't want to
	 * throttle based off of worst-case estimates.
	 */
	if (write_limit > 0) {
		reserved = dp->dp_space_towrite[tx->tx_txg & TXG_MASK]
		    + dp->dp_tempreserved[tx->tx_txg & TXG_MASK] / 2;

		if (reserved && reserved > write_limit)
			return (ERESTART);
	}

	atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], space);

	/*
	 * If this transaction group is over 7/8ths capacity, delay
	 * the caller 1 clock tick.  This will slow down the "fill"
	 * rate until the sync process can catch up with us.
	 */
	if (reserved && reserved > (write_limit - (write_limit >> 3)))
		txg_delay(dp, tx->tx_txg, 1);

	return (0);
}

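/*
 * Worked example (added exposition, not original source): with
 * write_limit = 800MB, 600MB already dirtied in space_towrite, and
 * 300MB of outstanding worst-case tempreserves, the check computes
 * reserved = 600 + 300/2 = 750MB.  That is below the 800MB limit, so
 * the reservation is admitted, but it exceeds the 7/8ths threshold
 * (800 - 800/8 = 700MB), so txg_delay() stalls the caller one tick to
 * let the sync thread catch up.  At reserved > 800MB the caller would
 * instead get ERESTART and retry in a later txg.
 */
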
void
dsl_pool_tempreserve_clear(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	ASSERT(dp->dp_tempreserved[tx->tx_txg & TXG_MASK] >= space);
	atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], -space);
}

void
dsl_pool_memory_pressure(dsl_pool_t *dp)
{
	uint64_t space_inuse = 0;
	int i;

	if (dp->dp_write_limit == zfs_write_limit_min)
		return;

	for (i = 0; i < TXG_SIZE; i++) {
		space_inuse += dp->dp_space_towrite[i];
		space_inuse += dp->dp_tempreserved[i];
	}
	dp->dp_write_limit = MAX(zfs_write_limit_min,
	    MIN(dp->dp_write_limit, space_inuse / 4));
}

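/*
 * Note (added exposition, not original source): invoked when the
 * system reports memory pressure.  The write limit is clamped to one
 * quarter of the space currently dirtied or reserved across all
 * TXG_SIZE txgs, but never below zfs_write_limit_min; e.g. with a 1GB
 * limit and only 200MB in flight, the limit drops to
 * MAX(32MB, MIN(1GB, 200MB / 4)) = 50MB, shrinking future txgs until
 * the throughput feedback in dsl_pool_sync() grows it again.
 */
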
void
dsl_pool_willuse_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_space_towrite[tx->tx_txg & TXG_MASK] += space;
		mutex_exit(&dp->dp_lock);
	}
}

/* ARGSUSED */
static int
upgrade_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;
	dsl_pool_t *dp = spa_get_dsl(spa);

	err = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
	if (err)
		return (err);

	while (ds->ds_phys->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (prev->ds_phys->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		ASSERT(prev->ds_phys->ds_bp.blk_birth == 0);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_prev_snap_obj = prev->ds_object;
		ds->ds_phys->ds_prev_snap_txg = prev->ds_phys->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		ds->ds_dir->dd_phys->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		prev->ds_phys->ds_num_children++;

		if (ds->ds_phys->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
		}
	}

	ASSERT(ds->ds_dir->dd_phys->dd_origin_obj == prev->ds_object);
	ASSERT(ds->ds_phys->ds_prev_snap_obj == prev->ds_object);

	if (prev->ds_phys->ds_next_clones_obj == 0) {
		prev->ds_phys->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY(0 == zap_add_int(dp->dp_meta_objset,
	    prev->ds_phys->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}

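/*
 * Illustration (added exposition, not original source): for a dataset
 * whose snapshot chain ends without a parent, e.g.
 *
 *	before:  HEAD -> snap2 -> snap1 -> (nothing)
 *	after:   HEAD -> snap2 -> snap1 -> $ORIGIN
 *
 * the callback walks backwards until prev is no longer one of the
 * dataset's own snapshots, grafts the chain onto dp_origin_snap, and
 * bumps the origin's child count.  If the walk instead stops at
 * another dataset's snapshot (a real clone origin), only the final
 * step applies: the dataset is recorded in the origin's next_clones
 * ZAP so the dependency can also be walked in the forward direction.
 */
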
void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	(void) dmu_objset_find_spa(dp->dp_spa, NULL, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN);
}

void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);

	/* create the origin dir, ds, & snap-ds */
	rw_enter(&dp->dp_config_rwlock, RW_WRITER);
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, tx);
	VERIFY(0 == dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync(ds, ORIGIN_DIR_NAME, kcred, tx);
	VERIFY(0 == dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
	rw_exit(&dp->dp_config_rwlock);
}

taskq_t *
dsl_pool_vnrele_taskq(dsl_pool_t *dp)
{
	return (dp->dp_vnrele_taskq);
}