/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2015, STRATO AG, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 Nexenta Systems, Inc.
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/zfeature.h>
#include "zfs_namecheck.h"
/*
 * Needed to close a window in dnode_move() that allows the objset to be
 * freed before it can be safely accessed.
 */
krwlock_t os_lock;

/*
 * Tunable to override the maximum number of threads for the parallelization
 * of dmu_objset_find_dp, needed to speed up the import of pools with many
 * datasets.
 * Default is 4 times the number of leaf vdevs.
 */
int dmu_find_threads = 0;

/*
 * Backfill lower metadnode objects after this many have been freed.
 * Backfilling negatively impacts object creation rates, so only do it
 * if there are enough holes to fill.
 */
int dmu_rescan_dnode_threshold = 131072;

static void dmu_objset_find_dp_cb(void *arg);

void
dmu_objset_init(void)
{
        rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
}
spa_t *
dmu_objset_spa(objset_t *os)
{
        return (os->os_spa);
}

zilog_t *
dmu_objset_zil(objset_t *os)
{
        return (os->os_zil);
}

dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
        dsl_dataset_t *ds;

        if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
                return (ds->ds_dir->dd_pool);
        else
                return (spa_get_dsl(os->os_spa));
}

dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
        return (os->os_dsl_dataset);
}

dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
        return (os->os_phys->os_type);
}

void
dmu_objset_name(objset_t *os, char *buf)
{
        dsl_dataset_name(os->os_dsl_dataset, buf);
}

uint64_t
dmu_objset_id(objset_t *os)
{
        dsl_dataset_t *ds = os->os_dsl_dataset;

        return (ds ? ds->ds_object : 0);
}

uint64_t
dmu_objset_dnodesize(objset_t *os)
{
        return (os->os_dnodesize);
}

zfs_sync_type_t
dmu_objset_syncprop(objset_t *os)
{
        return (os->os_sync);
}

zfs_logbias_op_t
dmu_objset_logbias(objset_t *os)
{
        return (os->os_logbias);
}
static void
checksum_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;

        /*
         * Inheritance should have been done by now.
         */
        ASSERT(newval != ZIO_CHECKSUM_INHERIT);

        os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}

static void
compression_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;

        /*
         * Inheritance and range checking should have been done by now.
         */
        ASSERT(newval != ZIO_COMPRESS_INHERIT);

        os->os_compress = zio_compress_select(os->os_spa, newval,
            ZIO_COMPRESS_ON);
}

static void
copies_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;

        /*
         * Inheritance and range checking should have been done by now.
         */
        ASSERT(newval > 0);
        ASSERT(newval <= spa_max_replication(os->os_spa));

        os->os_copies = newval;
}

static void
dedup_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;
        spa_t *spa = os->os_spa;
        enum zio_checksum checksum;

        /*
         * Inheritance should have been done by now.
         */
        ASSERT(newval != ZIO_CHECKSUM_INHERIT);

        checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);

        os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
        os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
}

static void
primary_cache_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;

        /*
         * Inheritance and range checking should have been done by now.
         */
        ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
            newval == ZFS_CACHE_METADATA);

        os->os_primary_cache = newval;
}

static void
secondary_cache_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;

        /*
         * Inheritance and range checking should have been done by now.
         */
        ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
            newval == ZFS_CACHE_METADATA);

        os->os_secondary_cache = newval;
}

static void
sync_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;

        /*
         * Inheritance and range checking should have been done by now.
         */
        ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
            newval == ZFS_SYNC_DISABLED);

        os->os_sync = newval;
        if (os->os_zil)
                zil_set_sync(os->os_zil, newval);
}

static void
redundant_metadata_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;

        /*
         * Inheritance and range checking should have been done by now.
         */
        ASSERT(newval == ZFS_REDUNDANT_METADATA_ALL ||
            newval == ZFS_REDUNDANT_METADATA_MOST);

        os->os_redundant_metadata = newval;
}

static void
dnodesize_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;

        switch (newval) {
        case ZFS_DNSIZE_LEGACY:
                os->os_dnodesize = DNODE_MIN_SIZE;
                break;
        case ZFS_DNSIZE_AUTO:
                /*
                 * Choose a dnode size that will work well for most
                 * workloads if the user specified "auto".  Future code
                 * improvements could dynamically select a dnode size
                 * based on observed workload patterns.
                 */
                os->os_dnodesize = DNODE_MIN_SIZE * 2;
                break;
        default:
                os->os_dnodesize = newval;
                break;
        }
}
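/*
 * Note on the "auto" case above: with the current 512-byte DNODE_MIN_SIZE,
 * DNODE_MIN_SIZE * 2 selects 1K dnodes, leaving extra bonus-buffer room for
 * payloads such as system attributes; the exact doubling is a heuristic, as
 * the comment in the switch says.
 */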
static void
logbias_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;

        ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
            newval == ZFS_LOGBIAS_THROUGHPUT);
        os->os_logbias = newval;
        if (os->os_zil)
                zil_set_logbias(os->os_zil, newval);
}

static void
recordsize_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;

        os->os_recordsize = newval;
}

void
dmu_objset_byteswap(void *buf, size_t size)
{
        objset_phys_t *osp = buf;

        ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
        dnode_byteswap(&osp->os_meta_dnode);
        byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
        osp->os_type = BSWAP_64(osp->os_type);
        osp->os_flags = BSWAP_64(osp->os_flags);
        if (size == sizeof (objset_phys_t)) {
                dnode_byteswap(&osp->os_userused_dnode);
                dnode_byteswap(&osp->os_groupused_dnode);
        }
}

/*
 * The hash is a CRC-based hash of the objset_t pointer and the object number.
 */
static uint64_t
dnode_hash(const objset_t *os, uint64_t obj)
{
        uintptr_t osv = (uintptr_t)os;
        uint64_t crc = -1ULL;

        ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
        /*
         * The low 6 bits of the pointer don't have much entropy, because
         * the objset_t is larger than 2^6 bytes long.
         */
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 16)) & 0xFF];

        crc ^= (osv>>14) ^ (obj>>24);

        return (crc);
}
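/*
 * The four table lookups above consume only pointer bits 6-13 and the low
 * three bytes of obj; the final xor of (osv >> 14) and (obj >> 24) folds the
 * remaining high-order pointer and object bits into the hash so that they
 * still influence the result.
 */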
static unsigned int
dnode_multilist_index_func(multilist_t *ml, void *obj)
{
        dnode_t *dn = obj;
        return (dnode_hash(dn->dn_objset, dn->dn_object) %
            multilist_get_num_sublists(ml));
}
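/*
 * Hashing each dnode to a sublist spreads list operations across the
 * multilist's per-sublist locks, reducing contention when many threads
 * dirty or sync dnodes in the same objset concurrently.
 */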
/*
 * Instantiates the objset_t in-memory structure corresponding to the
 * objset_phys_t that's pointed to by the specified blkptr_t.
 */
int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_t **osp)
{
        objset_t *os;
        int i, err;

        ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

        /*
         * The $ORIGIN dataset (if it exists) doesn't have an associated
         * objset, so there's no reason to open it.  The $ORIGIN dataset
         * will not exist on pools older than SPA_VERSION_ORIGIN.
         */
        if (ds != NULL && spa_get_dsl(spa) != NULL &&
            spa_get_dsl(spa)->dp_origin_snap != NULL) {
                ASSERT3P(ds->ds_dir, !=,
                    spa_get_dsl(spa)->dp_origin_snap->ds_dir);
        }

        os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
        os->os_dsl_dataset = ds;
        os->os_spa = spa;
        os->os_rootbp = bp;
        if (!BP_IS_HOLE(os->os_rootbp)) {
                arc_flags_t aflags = ARC_FLAG_WAIT;
                zbookmark_phys_t zb;

                SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
                    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

                if (DMU_OS_IS_L2CACHEABLE(os))
                        aflags |= ARC_FLAG_L2CACHE;

                dprintf_bp(os->os_rootbp, "reading %s", "");
                err = arc_read(NULL, spa, os->os_rootbp,
                    arc_getbuf_func, &os->os_phys_buf,
                    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
                if (err != 0) {
                        kmem_free(os, sizeof (objset_t));
                        /* convert checksum errors into IO errors */
                        if (err == ECKSUM)
                                err = SET_ERROR(EIO);
                        return (err);
                }

                /* Increase the blocksize if we are permitted. */
                if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
                    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
                        arc_buf_t *buf = arc_alloc_buf(spa, &os->os_phys_buf,
                            ARC_BUFC_METADATA, sizeof (objset_phys_t));
                        bzero(buf->b_data, sizeof (objset_phys_t));
                        bcopy(os->os_phys_buf->b_data, buf->b_data,
                            arc_buf_size(os->os_phys_buf));
                        arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
                        os->os_phys_buf = buf;
                }

                os->os_phys = os->os_phys_buf->b_data;
                os->os_flags = os->os_phys->os_flags;
        } else {
                int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
                    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
                os->os_phys_buf = arc_alloc_buf(spa, &os->os_phys_buf,
                    ARC_BUFC_METADATA, size);
                os->os_phys = os->os_phys_buf->b_data;
                bzero(os->os_phys, size);
        }

        /*
         * Note: the changed_cb will be called once before the register
         * func returns, thus changing the checksum/compression from the
         * default (fletcher2/off).  Snapshots don't need to know about
         * checksum/compression/copies.
         */
        if (ds != NULL) {
                boolean_t needlock = B_FALSE;

                /*
                 * Note: it's valid to open the objset if the dataset is
                 * long-held, in which case the pool_config lock will not
                 * be held.
                 */
                if (!dsl_pool_config_held(dmu_objset_pool(os))) {
                        needlock = B_TRUE;
                        dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
                }
                err = dsl_prop_register(ds,
                    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
                    primary_cache_changed_cb, os);
                if (err == 0) {
                        err = dsl_prop_register(ds,
                            zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
                            secondary_cache_changed_cb, os);
                }
                if (!ds->ds_is_snapshot) {
                        if (err == 0) {
                                err = dsl_prop_register(ds,
                                    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
                                    checksum_changed_cb, os);
                        }
                        if (err == 0) {
                                err = dsl_prop_register(ds,
                                    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
                                    compression_changed_cb, os);
                        }
                        if (err == 0) {
                                err = dsl_prop_register(ds,
                                    zfs_prop_to_name(ZFS_PROP_COPIES),
                                    copies_changed_cb, os);
                        }
                        if (err == 0) {
                                err = dsl_prop_register(ds,
                                    zfs_prop_to_name(ZFS_PROP_DEDUP),
                                    dedup_changed_cb, os);
                        }
                        if (err == 0) {
                                err = dsl_prop_register(ds,
                                    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
                                    logbias_changed_cb, os);
                        }
                        if (err == 0) {
                                err = dsl_prop_register(ds,
                                    zfs_prop_to_name(ZFS_PROP_SYNC),
                                    sync_changed_cb, os);
                        }
                        if (err == 0) {
                                err = dsl_prop_register(ds,
                                    zfs_prop_to_name(
                                    ZFS_PROP_REDUNDANT_METADATA),
                                    redundant_metadata_changed_cb, os);
                        }
                        if (err == 0) {
                                err = dsl_prop_register(ds,
                                    zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
                                    recordsize_changed_cb, os);
                        }
                        if (err == 0) {
                                err = dsl_prop_register(ds,
                                    zfs_prop_to_name(ZFS_PROP_DNODESIZE),
                                    dnodesize_changed_cb, os);
                        }
                }
                if (needlock)
                        dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
                if (err != 0) {
                        arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
                        kmem_free(os, sizeof (objset_t));
                        return (err);
                }
        } else {
                /* It's the meta-objset. */
                os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
                os->os_compress = ZIO_COMPRESS_ON;
                os->os_copies = spa_max_replication(spa);
                os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
                os->os_dedup_verify = B_FALSE;
                os->os_logbias = ZFS_LOGBIAS_LATENCY;
                os->os_sync = ZFS_SYNC_STANDARD;
                os->os_primary_cache = ZFS_CACHE_ALL;
                os->os_secondary_cache = ZFS_CACHE_ALL;
                os->os_dnodesize = DNODE_MIN_SIZE;
        }

        /*
         * These properties will be filled in by the logic in zfs_get_zplprop()
         * when they are queried for the first time.
         */
        os->os_version = OBJSET_PROP_UNINITIALIZED;
        os->os_normalization = OBJSET_PROP_UNINITIALIZED;
        os->os_utf8only = OBJSET_PROP_UNINITIALIZED;
        os->os_casesensitivity = OBJSET_PROP_UNINITIALIZED;

        if (ds == NULL || !ds->ds_is_snapshot)
                os->os_zil_header = os->os_phys->os_zil_header;
        os->os_zil = zil_alloc(os, &os->os_zil_header);

        for (i = 0; i < TXG_SIZE; i++) {
                os->os_dirty_dnodes[i] = multilist_create(sizeof (dnode_t),
                    offsetof(dnode_t, dn_dirty_link[i]),
                    dnode_multilist_index_func);
        }
        list_create(&os->os_dnodes, sizeof (dnode_t),
            offsetof(dnode_t, dn_link));
        list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
            offsetof(dmu_buf_impl_t, db_link));

        mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&os->os_userused_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);
        os->os_obj_next_percpu_len = boot_ncpus;
        os->os_obj_next_percpu = kmem_zalloc(os->os_obj_next_percpu_len *
            sizeof (os->os_obj_next_percpu[0]), KM_SLEEP);

        dnode_special_open(os, &os->os_phys->os_meta_dnode,
            DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
        if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
                dnode_special_open(os, &os->os_phys->os_userused_dnode,
                    DMU_USERUSED_OBJECT, &os->os_userused_dnode);
                dnode_special_open(os, &os->os_phys->os_groupused_dnode,
                    DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode);
        }

        *osp = os;
        return (0);
}

int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
        int err = 0;

        /*
         * We shouldn't be doing anything with dsl_dataset_t's unless the
         * pool_config lock is held, or the dataset is long-held.
         */
        ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool) ||
            dsl_dataset_long_held(ds));

        mutex_enter(&ds->ds_opening_lock);
        if (ds->ds_objset == NULL) {
                objset_t *os;
                rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
                err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
                    ds, dsl_dataset_get_blkptr(ds), &os);
                rrw_exit(&ds->ds_bp_rwlock, FTAG);

                if (err == 0) {
                        mutex_enter(&ds->ds_lock);
                        ASSERT(ds->ds_objset == NULL);
                        ds->ds_objset = os;
                        mutex_exit(&ds->ds_lock);
                }
        }
        *osp = ds->ds_objset;
        mutex_exit(&ds->ds_opening_lock);
        return (err);
}
/*
 * Holds the pool while the objset is held.  Therefore only one objset
 * can be held at a time.
 */
int
dmu_objset_hold(const char *name, void *tag, objset_t **osp)
{
        dsl_pool_t *dp;
        dsl_dataset_t *ds;
        int err;

        err = dsl_pool_hold(name, tag, &dp);
        if (err != 0)
                return (err);
        err = dsl_dataset_hold(dp, name, tag, &ds);
        if (err != 0) {
                dsl_pool_rele(dp, tag);
                return (err);
        }

        err = dmu_objset_from_ds(ds, osp);
        if (err != 0) {
                dsl_dataset_rele(ds, tag);
                dsl_pool_rele(dp, tag);
        }

        return (err);
}
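/*
 * A minimal usage sketch (hypothetical caller, hypothetical dataset name):
 * the hold and release must pair up, and the pool hold taken above is
 * dropped again by dmu_objset_rele():
 *
 *	objset_t *os;
 *	if (dmu_objset_hold("tank/fs", FTAG, &os) == 0) {
 *		... short-term use of os ...
 *		dmu_objset_rele(os, FTAG);
 *	}
 */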
static int
dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
        int err;

        err = dmu_objset_from_ds(ds, osp);
        if (err != 0) {
                dsl_dataset_disown(ds, tag);
        } else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
                dsl_dataset_disown(ds, tag);
                return (SET_ERROR(EINVAL));
        } else if (!readonly && dsl_dataset_is_snapshot(ds)) {
                dsl_dataset_disown(ds, tag);
                return (SET_ERROR(EROFS));
        }
        return (err);
}

/*
 * dsl_pool must not be held when this is called.
 * Upon successful return, there will be a longhold on the dataset,
 * and the dsl_pool will not be held.
 */
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
        dsl_pool_t *dp;
        dsl_dataset_t *ds;
        int err;

        err = dsl_pool_hold(name, FTAG, &dp);
        if (err != 0)
                return (err);
        err = dsl_dataset_own(dp, name, tag, &ds);
        if (err != 0) {
                dsl_pool_rele(dp, FTAG);
                return (err);
        }
        err = dmu_objset_own_impl(ds, type, readonly, tag, osp);
        dsl_pool_rele(dp, FTAG);

        return (err);
}

int
dmu_objset_own_obj(dsl_pool_t *dp, uint64_t obj, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
        dsl_dataset_t *ds;
        int err;

        err = dsl_dataset_own_obj(dp, obj, tag, &ds);
        if (err != 0)
                return (err);

        return (dmu_objset_own_impl(ds, type, readonly, tag, osp));
}
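/*
 * Ownership sketch (hypothetical caller, hypothetical dataset name): owning
 * takes a longhold per the comment above dmu_objset_own(), so the objset
 * stays usable without the pool held until dmu_objset_disown() drops it:
 *
 *	objset_t *os;
 *	if (dmu_objset_own("tank/fs", DMU_OST_ZFS, B_FALSE, FTAG, &os) == 0) {
 *		... long-term use of os ...
 *		dmu_objset_disown(os, FTAG);
 *	}
 */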
void
dmu_objset_rele(objset_t *os, void *tag)
{
        dsl_pool_t *dp = dmu_objset_pool(os);
        dsl_dataset_rele(os->os_dsl_dataset, tag);
        dsl_pool_rele(dp, tag);
}

/*
 * When we are called, os MUST refer to an objset associated with a dataset
 * that is owned by 'tag'; that is, is held and long held by 'tag' and ds_owner
 * == tag.  We will then release and reacquire ownership of the dataset while
 * holding the pool config_rwlock to avoid intervening namespace or ownership
 * changes.
 *
 * This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to
 * release the hold on its dataset and acquire a new one on the dataset of the
 * same name so that it can be partially torn down and reconstructed.
 */
void
dmu_objset_refresh_ownership(dsl_dataset_t *ds, dsl_dataset_t **newds,
    void *tag)
{
        dsl_pool_t *dp;
        char name[ZFS_MAX_DATASET_NAME_LEN];

        VERIFY3P(ds, !=, NULL);
        VERIFY3P(ds->ds_owner, ==, tag);
        VERIFY(dsl_dataset_long_held(ds));

        dsl_dataset_name(ds, name);
        dp = ds->ds_dir->dd_pool;
        dsl_pool_config_enter(dp, FTAG);
        dsl_dataset_disown(ds, tag);
        VERIFY0(dsl_dataset_own(dp, name, tag, newds));
        dsl_pool_config_exit(dp, FTAG);
}
void
dmu_objset_disown(objset_t *os, void *tag)
{
        dsl_dataset_disown(os->os_dsl_dataset, tag);
}

void
dmu_objset_evict_dbufs(objset_t *os)
{
        dnode_t dn_marker;
        dnode_t *dn;

        mutex_enter(&os->os_lock);
        dn = list_head(&os->os_dnodes);
        while (dn != NULL) {
                /*
                 * Skip dnodes without holds.  We have to do this dance
                 * because dnode_add_ref() only works if there is already a
                 * hold.  If the dnode has no holds, then it has no dbufs.
                 */
                if (dnode_add_ref(dn, FTAG)) {
                        list_insert_after(&os->os_dnodes, dn, &dn_marker);
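                        /*
                         * With the marker in place it is safe to drop
                         * os_lock below: even if os_dnodes changes while
                         * this dnode's dbufs are evicted, the marker
                         * preserves our position for list_next().
                         */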
                        mutex_exit(&os->os_lock);

                        dnode_evict_dbufs(dn);
                        dnode_rele(dn, FTAG);

                        mutex_enter(&os->os_lock);
                        dn = list_next(&os->os_dnodes, &dn_marker);
                        list_remove(&os->os_dnodes, &dn_marker);
                } else {
                        dn = list_next(&os->os_dnodes, dn);
                }
        }
        mutex_exit(&os->os_lock);

        if (DMU_USERUSED_DNODE(os) != NULL) {
                dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os));
                dnode_evict_dbufs(DMU_USERUSED_DNODE(os));
        }
        dnode_evict_dbufs(DMU_META_DNODE(os));
}

/*
 * Objset eviction processing is split into two pieces.
 * The first marks the objset as evicting, evicts any dbufs that
 * have a refcount of zero, and then queues up the objset for the
 * second phase of eviction.  Once os->os_dnodes has been cleared by
 * dnode_buf_pageout()->dnode_destroy(), the second phase is executed.
 * The second phase closes the special dnodes, dequeues the objset from
 * the list of those undergoing eviction, and finally frees the objset.
 *
 * NOTE: Due to asynchronous eviction processing (invocation of
 * dnode_buf_pageout()), it is possible for the meta dnode for the
 * objset to have no holds even though os->os_dnodes is not empty.
 */
void
dmu_objset_evict(objset_t *os)
{
        dsl_dataset_t *ds = os->os_dsl_dataset;

        for (int t = 0; t < TXG_SIZE; t++)
                ASSERT(!dmu_objset_is_dirty(os, t));

        if (ds)
                dsl_prop_unregister_all(ds, os);

        dmu_objset_evict_dbufs(os);

        mutex_enter(&os->os_lock);
        spa_evicting_os_register(os->os_spa, os);
        if (list_is_empty(&os->os_dnodes)) {
                mutex_exit(&os->os_lock);
                dmu_objset_evict_done(os);
        } else {
                mutex_exit(&os->os_lock);
        }
}

void
dmu_objset_evict_done(objset_t *os)
{
        ASSERT3P(list_head(&os->os_dnodes), ==, NULL);

        dnode_special_close(&os->os_meta_dnode);
        if (DMU_USERUSED_DNODE(os)) {
                dnode_special_close(&os->os_userused_dnode);
                dnode_special_close(&os->os_groupused_dnode);
        }
        zil_free(os->os_zil);

        arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);

        /*
         * This is a barrier to prevent the objset from going away in
         * dnode_move() until we can safely ensure that the objset is still in
         * use.  We consider the objset valid before the barrier and invalid
         * after the barrier.
         */
        rw_enter(&os_lock, RW_READER);
        rw_exit(&os_lock);

        kmem_free(os->os_obj_next_percpu,
            os->os_obj_next_percpu_len * sizeof (os->os_obj_next_percpu[0]));

        mutex_destroy(&os->os_lock);
        mutex_destroy(&os->os_userused_lock);
        mutex_destroy(&os->os_obj_lock);
        mutex_destroy(&os->os_user_ptr_lock);
        for (int i = 0; i < TXG_SIZE; i++) {
                multilist_destroy(os->os_dirty_dnodes[i]);
        }
        spa_evicting_os_deregister(os->os_spa, os);
        kmem_free(os, sizeof (objset_t));
}
timestruc_t
dmu_objset_snap_cmtime(objset_t *os)
{
        return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
}

/* called from dsl for meta-objset */
objset_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_objset_type_t type, dmu_tx_t *tx)
{
        objset_t *os;
        dnode_t *mdn;

        ASSERT(dmu_tx_is_syncing(tx));

        if (ds != NULL)
                VERIFY0(dmu_objset_from_ds(ds, &os));
        else
                VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os));

        mdn = DMU_META_DNODE(os);

        dnode_allocate(mdn, DMU_OT_DNODE, DNODE_BLOCK_SIZE, DN_MAX_INDBLKSHIFT,
            DMU_OT_NONE, 0, DNODE_MIN_SLOTS, tx);

        /*
         * We don't want to have to increase the meta-dnode's nlevels
         * later, because then we could do it in quiescing context while
         * we are also accessing it in open context.
         *
         * This precaution is not necessary for the MOS (ds == NULL),
         * because the MOS is only updated in syncing context.
         * This is most fortunate: the MOS is the only objset that
         * needs to be synced multiple times as spa_sync() iterates
         * to convergence, so minimizing its dn_nlevels matters.
         */
        if (ds != NULL) {
                int levels = 1;

                /*
                 * Determine the number of levels necessary for the meta-dnode
                 * to contain DN_MAX_OBJECT dnodes.  Note that in order to
                 * ensure that we do not overflow 64 bits, there has to be
                 * a nlevels that gives us a number of blocks > DN_MAX_OBJECT
                 * but < 2^64.  Therefore,
                 * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT) (10) must be
                 * less than (64 - log2(DN_MAX_OBJECT)) (16).
                 */
                while ((uint64_t)mdn->dn_nblkptr <<
                    (mdn->dn_datablkshift - DNODE_SHIFT +
                    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
                    DN_MAX_OBJECT)
                        levels++;

                mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
                    mdn->dn_nlevels = levels;
        }
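        /*
         * Worked example (for illustration, assuming the usual constants):
         * the meta-dnode is allocated above with 16K data blocks and
         * 512-byte dnodes, so each L0 block holds 2^5 dnodes, and per the
         * comment above each indirect level multiplies capacity by 2^10.
         * With DN_MAX_OBJECT = 2^48, the loop typically settles on six
         * levels.
         */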
        ASSERT(type != DMU_OST_NONE);
        ASSERT(type != DMU_OST_ANY);
        ASSERT(type < DMU_OST_NUMTYPES);
        os->os_phys->os_type = type;
        if (dmu_objset_userused_enabled(os)) {
                os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
                os->os_flags = os->os_phys->os_flags;
        }

        dsl_dataset_dirty(ds, tx);

        return (os);
}

typedef struct dmu_objset_create_arg {
        const char *doca_name;
        cred_t *doca_cred;
        void (*doca_userfunc)(objset_t *os, void *arg,
            cred_t *cr, dmu_tx_t *tx);
        void *doca_userarg;
        dmu_objset_type_t doca_type;
        uint64_t doca_flags;
} dmu_objset_create_arg_t;
static int
dmu_objset_create_check(void *arg, dmu_tx_t *tx)
{
        dmu_objset_create_arg_t *doca = arg;
        dsl_pool_t *dp = dmu_tx_pool(tx);
        dsl_dir_t *pdd;
        const char *tail;
        int error;

        if (strchr(doca->doca_name, '@') != NULL)
                return (SET_ERROR(EINVAL));

        if (strlen(doca->doca_name) >= ZFS_MAX_DATASET_NAME_LEN)
                return (SET_ERROR(ENAMETOOLONG));

        if (dataset_nestcheck(doca->doca_name) != 0)
                return (SET_ERROR(ENAMETOOLONG));

        error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail);
        if (error != 0)
                return (error);
        if (tail == NULL) {
                dsl_dir_rele(pdd, FTAG);
                return (SET_ERROR(EEXIST));
        }
        error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
            doca->doca_cred);
        dsl_dir_rele(pdd, FTAG);

        return (error);
}

static void
dmu_objset_create_sync(void *arg, dmu_tx_t *tx)
{
        dmu_objset_create_arg_t *doca = arg;
        dsl_pool_t *dp = dmu_tx_pool(tx);
        dsl_dir_t *pdd;
        const char *tail;
        dsl_dataset_t *ds;
        uint64_t obj;
        blkptr_t *bp;
        objset_t *os;

        VERIFY0(dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail));

        obj = dsl_dataset_create_sync(pdd, tail, NULL, doca->doca_flags,
            doca->doca_cred, tx);

        VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
        rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
        bp = dsl_dataset_get_blkptr(ds);
        os = dmu_objset_create_impl(pdd->dd_pool->dp_spa,
            ds, bp, doca->doca_type, tx);
        rrw_exit(&ds->ds_bp_rwlock, FTAG);

        if (doca->doca_userfunc != NULL) {
                doca->doca_userfunc(os, doca->doca_userarg,
                    doca->doca_cred, tx);
        }

        spa_history_log_internal_ds(ds, "create", tx, "");
        dsl_dataset_rele(ds, FTAG);
        dsl_dir_rele(pdd, FTAG);
}

int
dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
    void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
{
        dmu_objset_create_arg_t doca;

        doca.doca_name = name;
        doca.doca_cred = CRED();
        doca.doca_flags = flags;
        doca.doca_userfunc = func;
        doca.doca_userarg = arg;
        doca.doca_type = type;

        return (dsl_sync_task(name,
            dmu_objset_create_check, dmu_objset_create_sync, &doca,
            5, ZFS_SPACE_CHECK_NORMAL));
}
typedef struct dmu_objset_clone_arg {
        const char *doca_clone;
        const char *doca_origin;
        cred_t *doca_cred;
} dmu_objset_clone_arg_t;

static int
dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
{
        dmu_objset_clone_arg_t *doca = arg;
        dsl_dir_t *pdd;
        const char *tail;
        int error;
        dsl_dataset_t *origin;
        dsl_pool_t *dp = dmu_tx_pool(tx);

        if (strchr(doca->doca_clone, '@') != NULL)
                return (SET_ERROR(EINVAL));

        if (strlen(doca->doca_clone) >= ZFS_MAX_DATASET_NAME_LEN)
                return (SET_ERROR(ENAMETOOLONG));

        error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail);
        if (error != 0)
                return (error);
        if (tail == NULL) {
                dsl_dir_rele(pdd, FTAG);
                return (SET_ERROR(EEXIST));
        }

        error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
            doca->doca_cred);
        if (error != 0) {
                dsl_dir_rele(pdd, FTAG);
                return (SET_ERROR(EDQUOT));
        }
        dsl_dir_rele(pdd, FTAG);

        error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin);
        if (error != 0)
                return (error);

        /* You can only clone snapshots, not the head datasets. */
        if (!origin->ds_is_snapshot) {
                dsl_dataset_rele(origin, FTAG);
                return (SET_ERROR(EINVAL));
        }
        dsl_dataset_rele(origin, FTAG);

        return (0);
}

static void
dmu_objset_clone_sync(void *arg, dmu_tx_t *tx)
{
        dmu_objset_clone_arg_t *doca = arg;
        dsl_pool_t *dp = dmu_tx_pool(tx);
        dsl_dir_t *pdd;
        const char *tail;
        dsl_dataset_t *origin, *ds;
        uint64_t obj;
        char namebuf[ZFS_MAX_DATASET_NAME_LEN];

        VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail));
        VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin));

        obj = dsl_dataset_create_sync(pdd, tail, origin, 0,
            doca->doca_cred, tx);

        VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
        dsl_dataset_name(origin, namebuf);
        spa_history_log_internal_ds(ds, "clone", tx,
            "origin=%s (%llu)", namebuf, origin->ds_object);
        dsl_dataset_rele(ds, FTAG);
        dsl_dataset_rele(origin, FTAG);
        dsl_dir_rele(pdd, FTAG);
}

int
dmu_objset_clone(const char *clone, const char *origin)
{
        dmu_objset_clone_arg_t doca;

        doca.doca_clone = clone;
        doca.doca_origin = origin;
        doca.doca_cred = CRED();

        return (dsl_sync_task(clone,
            dmu_objset_clone_check, dmu_objset_clone_sync, &doca,
            5, ZFS_SPACE_CHECK_NORMAL));
}
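/*
 * Example (hypothetical names): per the check above, the origin must be an
 * existing snapshot, and the clone name must not contain '@':
 *
 *	error = dmu_objset_clone("tank/clone", "tank/fs@snap");
 */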
static int
dmu_objset_remap_indirects_impl(objset_t *os, uint64_t last_removed_txg)
{
        int error = 0;
        uint64_t object = 0;
        while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
                error = dmu_object_remap_indirects(os, object,
                    last_removed_txg);
                /*
                 * If the ZPL removed the object before we managed to
                 * dnode_hold it, we would get an ENOENT.  If the ZPL declares
                 * its intent to remove the object (dnode_free) before we
                 * manage to dnode_hold it, we would get an EEXIST.  In either
                 * case, we want to continue remapping the other objects in
                 * the objset; in all other cases, we want to break early.
                 */
                if (error != 0 && error != ENOENT && error != EEXIST) {
                        break;
                }
        }
        if (error == ESRCH) {
                error = 0;
        }
        return (error);
}

int
dmu_objset_remap_indirects(const char *fsname)
{
        int error = 0;
        objset_t *os = NULL;
        uint64_t last_removed_txg;
        uint64_t remap_start_txg;
        dsl_dir_t *dd;

        error = dmu_objset_hold(fsname, FTAG, &os);
        if (error != 0)
                return (error);

        dd = dmu_objset_ds(os)->ds_dir;

        if (!spa_feature_is_enabled(dmu_objset_spa(os),
            SPA_FEATURE_OBSOLETE_COUNTS)) {
                dmu_objset_rele(os, FTAG);
                return (SET_ERROR(ENOTSUP));
        }

        if (dsl_dataset_is_snapshot(dmu_objset_ds(os))) {
                dmu_objset_rele(os, FTAG);
                return (SET_ERROR(EINVAL));
        }

        /*
         * If there has not been a removal, we're done.
         */
        last_removed_txg = spa_get_last_removal_txg(dmu_objset_spa(os));
        if (last_removed_txg == -1ULL) {
                dmu_objset_rele(os, FTAG);
                return (0);
        }

        /*
         * If we have remapped since the last removal, we're done.
         */
        if (dsl_dir_is_zapified(dd)) {
                uint64_t last_remap_txg;
                if (zap_lookup(spa_meta_objset(dmu_objset_spa(os)),
                    dd->dd_object, DD_FIELD_LAST_REMAP_TXG,
                    sizeof (last_remap_txg), 1, &last_remap_txg) == 0 &&
                    last_remap_txg > last_removed_txg) {
                        dmu_objset_rele(os, FTAG);
                        return (0);
                }
        }

        dsl_dataset_long_hold(dmu_objset_ds(os), FTAG);
        dsl_pool_rele(dmu_objset_pool(os), FTAG);

        remap_start_txg = spa_last_synced_txg(dmu_objset_spa(os));
        error = dmu_objset_remap_indirects_impl(os, last_removed_txg);
        if (error == 0) {
                /*
                 * We update the last_remap_txg to be the start txg so that
                 * we can guarantee that every block older than last_remap_txg
                 * that can be remapped has been remapped.
                 */
                error = dsl_dir_update_last_remap_txg(dd, remap_start_txg);
        }

        dsl_dataset_long_rele(dmu_objset_ds(os), FTAG);
        dsl_dataset_rele(dmu_objset_ds(os), FTAG);

        return (error);
}

int
dmu_objset_snapshot_one(const char *fsname, const char *snapname)
{
        int err;
        char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
        nvlist_t *snaps = fnvlist_alloc();

        fnvlist_add_boolean(snaps, longsnap);
        strfree(longsnap);
        err = dsl_dataset_snapshot(snaps, NULL, NULL);
        fnvlist_free(snaps);
        return (err);
}
static void
dmu_objset_sync_dnodes(multilist_sublist_t *list, dmu_tx_t *tx)
{
        dnode_t *dn;

        while ((dn = multilist_sublist_head(list)) != NULL) {
                ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
                ASSERT(dn->dn_dbuf->db_data_pending);
                /*
                 * Initialize dn_zio outside dnode_sync() because the
                 * meta-dnode needs to set it outside dnode_sync().
                 */
                dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
                ASSERT(dn->dn_zio);

                ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
                multilist_sublist_remove(list, dn);

                /*
                 * If we are not doing useraccounting (os_synced_dnodes ==
                 * NULL) we are done with this dnode for this txg.  Unset
                 * dn_dirty_txg if later txgs aren't dirtying it so that
                 * future holders do not get a stale value.  Otherwise, we
                 * will do this in userquota_updates_task() when processing
                 * has completely finished for this txg.
                 */
                multilist_t *newlist = dn->dn_objset->os_synced_dnodes;
                if (newlist != NULL) {
                        (void) dnode_add_ref(dn, newlist);
                        multilist_insert(newlist, dn);
                } else {
                        mutex_enter(&dn->dn_mtx);
                        if (dn->dn_dirty_txg == tx->tx_txg)
                                dn->dn_dirty_txg = 0;
                        mutex_exit(&dn->dn_mtx);
                }

                dnode_sync(dn, tx);
        }
}

static void
dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
        blkptr_t *bp = zio->io_bp;
        objset_t *os = arg;
        dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;

        ASSERT(!BP_IS_EMBEDDED(bp));
        ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET);
        ASSERT0(BP_GET_LEVEL(bp));

        /*
         * Update rootbp fill count: it should be the number of objects
         * allocated in the object set (not counting the "special"
         * objects that are stored in the objset_phys_t -- the meta
         * dnode and user/group accounting objects).
         */
        bp->blk_fill = 0;
        for (int i = 0; i < dnp->dn_nblkptr; i++)
                bp->blk_fill += BP_GET_FILL(&dnp->dn_blkptr[i]);
        if (os->os_dsl_dataset != NULL)
                rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_WRITER, FTAG);
        *os->os_rootbp = *bp;
        if (os->os_dsl_dataset != NULL)
                rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
}

static void
dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
{
        blkptr_t *bp = zio->io_bp;
        blkptr_t *bp_orig = &zio->io_bp_orig;
        objset_t *os = arg;

        if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
                ASSERT(BP_EQUAL(bp, bp_orig));
        } else {
                dsl_dataset_t *ds = os->os_dsl_dataset;
                dmu_tx_t *tx = os->os_synctx;

                (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
                dsl_dataset_block_born(ds, bp, tx);
        }
        kmem_free(bp, sizeof (*bp));
}

typedef struct sync_dnodes_arg {
        multilist_t *sda_list;
        int sda_sublist_idx;
        multilist_t *sda_newlist;
        dmu_tx_t *sda_tx;
} sync_dnodes_arg_t;

static void
sync_dnodes_task(void *arg)
{
        sync_dnodes_arg_t *sda = arg;

        multilist_sublist_t *ms =
            multilist_sublist_lock(sda->sda_list, sda->sda_sublist_idx);

        dmu_objset_sync_dnodes(ms, sda->sda_tx);

        multilist_sublist_unlock(ms);

        kmem_free(sda, sizeof (*sda));
}
/* called from dsl */
void
dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
{
        int txgoff;
        zbookmark_phys_t zb;
        zio_prop_t zp;
        zio_t *zio;
        list_t *list;
        dbuf_dirty_record_t *dr;
        blkptr_t *blkptr_copy = kmem_alloc(sizeof (*os->os_rootbp), KM_SLEEP);
        *blkptr_copy = *os->os_rootbp;

        dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

        ASSERT(dmu_tx_is_syncing(tx));
        /* XXX the write_done callback should really give us the tx... */
        os->os_synctx = tx;

        if (os->os_dsl_dataset == NULL) {
                /*
                 * This is the MOS.  If we have upgraded,
                 * spa_max_replication() could change, so reset
                 * os_copies here.
                 */
                os->os_copies = spa_max_replication(os->os_spa);
        }

        /*
         * Create the root block IO
         */
        SET_BOOKMARK(&zb, os->os_dsl_dataset ?
            os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
            ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
        arc_release(os->os_phys_buf, &os->os_phys_buf);

        dmu_write_policy(os, NULL, 0, 0, &zp);

        zio = arc_write(pio, os->os_spa, tx->tx_txg,
            blkptr_copy, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
            &zp, dmu_objset_write_ready, NULL, NULL, dmu_objset_write_done,
            os, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);

        /*
         * Sync special dnodes - the parent IO for the sync is the root block
         */
        DMU_META_DNODE(os)->dn_zio = zio;
        dnode_sync(DMU_META_DNODE(os), tx);

        os->os_phys->os_flags = os->os_flags;

        if (DMU_USERUSED_DNODE(os) &&
            DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
                DMU_USERUSED_DNODE(os)->dn_zio = zio;
                dnode_sync(DMU_USERUSED_DNODE(os), tx);
                DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
                dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
        }

        txgoff = tx->tx_txg & TXG_MASK;

        if (dmu_objset_userused_enabled(os)) {
                /*
                 * We must create the list here because it uses the
                 * dn_dirty_link[] of this txg.  But it may already
                 * exist because we call dsl_dataset_sync() twice per txg.
                 */
                if (os->os_synced_dnodes == NULL) {
                        os->os_synced_dnodes =
                            multilist_create(sizeof (dnode_t),
                            offsetof(dnode_t, dn_dirty_link[txgoff]),
                            dnode_multilist_index_func);
                } else {
                        ASSERT3U(os->os_synced_dnodes->ml_offset, ==,
                            offsetof(dnode_t, dn_dirty_link[txgoff]));
                }
        }

        multilist_t *ml = os->os_dirty_dnodes[txgoff];
        int num_sublists = multilist_get_num_sublists(ml);
        for (int i = 0; i < num_sublists; i++) {
                if (multilist_sublist_is_empty_idx(ml, i))
                        continue;
                sync_dnodes_arg_t *sda = kmem_alloc(sizeof (*sda), KM_SLEEP);
                sda->sda_list = ml;
                sda->sda_sublist_idx = i;
                sda->sda_tx = tx;
                (void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
                    sync_dnodes_task, sda, 0);
                /* callback frees sda */
        }
        taskq_wait(dmu_objset_pool(os)->dp_sync_taskq);

        list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
        while ((dr = list_head(list)) != NULL) {
                ASSERT0(dr->dr_dbuf->db_level);
                list_remove(list, dr);
                if (dr->dr_zio)
                        zio_nowait(dr->dr_zio);
        }

        /* Enable dnode backfill if enough objects have been freed. */
        if (os->os_freed_dnodes >= dmu_rescan_dnode_threshold) {
                os->os_rescan_dnodes = B_TRUE;
                os->os_freed_dnodes = 0;
        }

        /*
         * Free intent log blocks up to this tx.
         */
        zil_sync(os->os_zil, tx);
        os->os_phys->os_zil_header = os->os_zil_header;
        zio_nowait(zio);
}
boolean_t
dmu_objset_is_dirty(objset_t *os, uint64_t txg)
{
        return (!multilist_is_empty(os->os_dirty_dnodes[txg & TXG_MASK]));
}

static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];

void
dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
{
        used_cbs[ost] = cb;
}

boolean_t
dmu_objset_userused_enabled(objset_t *os)
{
        return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
            used_cbs[os->os_phys->os_type] != NULL &&
            DMU_USERUSED_DNODE(os) != NULL);
}

typedef struct userquota_node {
        uint64_t uqn_id;
        int64_t uqn_delta;
        avl_node_t uqn_node;
} userquota_node_t;

typedef struct userquota_cache {
        avl_tree_t uqc_user_deltas;
        avl_tree_t uqc_group_deltas;
} userquota_cache_t;

static int
userquota_compare(const void *l, const void *r)
{
        const userquota_node_t *luqn = l;
        const userquota_node_t *ruqn = r;

        if (luqn->uqn_id < ruqn->uqn_id)
                return (-1);
        if (luqn->uqn_id > ruqn->uqn_id)
                return (1);
        return (0);
}

static void
do_userquota_cacheflush(objset_t *os, userquota_cache_t *cache, dmu_tx_t *tx)
{
        void *cookie;
        userquota_node_t *uqn;

        ASSERT(dmu_tx_is_syncing(tx));

        cookie = NULL;
        while ((uqn = avl_destroy_nodes(&cache->uqc_user_deltas,
            &cookie)) != NULL) {
                /*
                 * os_userused_lock protects against concurrent calls to
                 * zap_increment_int().  It's needed because
                 * zap_increment_int() is not thread-safe (i.e. not atomic).
                 */
                mutex_enter(&os->os_userused_lock);
                VERIFY0(zap_increment_int(os, DMU_USERUSED_OBJECT,
                    uqn->uqn_id, uqn->uqn_delta, tx));
                mutex_exit(&os->os_userused_lock);
                kmem_free(uqn, sizeof (*uqn));
        }
        avl_destroy(&cache->uqc_user_deltas);

        cookie = NULL;
        while ((uqn = avl_destroy_nodes(&cache->uqc_group_deltas,
            &cookie)) != NULL) {
                mutex_enter(&os->os_userused_lock);
                VERIFY0(zap_increment_int(os, DMU_GROUPUSED_OBJECT,
                    uqn->uqn_id, uqn->uqn_delta, tx));
                mutex_exit(&os->os_userused_lock);
                kmem_free(uqn, sizeof (*uqn));
        }
        avl_destroy(&cache->uqc_group_deltas);
}
static void
userquota_update_cache(avl_tree_t *avl, uint64_t id, int64_t delta)
{
        userquota_node_t search = { .uqn_id = id };
        avl_index_t idx;

        userquota_node_t *uqn = avl_find(avl, &search, &idx);
        if (uqn == NULL) {
                uqn = kmem_zalloc(sizeof (*uqn), KM_SLEEP);
                uqn->uqn_id = id;
                avl_insert(avl, uqn, idx);
        }
        uqn->uqn_delta += delta;
}
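/*
 * The AVL trees above accumulate one delta per uid/gid while a sublist of
 * dirty dnodes is walked; do_userquota_cacheflush() then applies each
 * aggregated delta with a single zap_increment_int() call, rather than
 * updating the ZAP once per dnode.
 */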
static void
do_userquota_update(userquota_cache_t *cache, uint64_t used, uint64_t flags,
    uint64_t user, uint64_t group, boolean_t subtract)
{
        if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
                int64_t delta = DNODE_MIN_SIZE + used;
                if (subtract)
                        delta = -delta;

                userquota_update_cache(&cache->uqc_user_deltas, user, delta);
                userquota_update_cache(&cache->uqc_group_deltas, group, delta);
        }
}

typedef struct userquota_updates_arg {
        objset_t *uua_os;
        int uua_sublist_idx;
        dmu_tx_t *uua_tx;
} userquota_updates_arg_t;

static void
userquota_updates_task(void *arg)
{
        userquota_updates_arg_t *uua = arg;
        objset_t *os = uua->uua_os;
        dmu_tx_t *tx = uua->uua_tx;
        dnode_t *dn;
        userquota_cache_t cache = { 0 };

        multilist_sublist_t *list =
            multilist_sublist_lock(os->os_synced_dnodes, uua->uua_sublist_idx);

        ASSERT(multilist_sublist_head(list) == NULL ||
            dmu_objset_userused_enabled(os));
        avl_create(&cache.uqc_user_deltas, userquota_compare,
            sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
        avl_create(&cache.uqc_group_deltas, userquota_compare,
            sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));

        while ((dn = multilist_sublist_head(list)) != NULL) {
                int flags;
                ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
                ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
                    dn->dn_phys->dn_flags &
                    DNODE_FLAG_USERUSED_ACCOUNTED);

                flags = dn->dn_id_flags;
                if (flags & DN_ID_OLD_EXIST) {
                        do_userquota_update(&cache,
                            dn->dn_oldused, dn->dn_oldflags,
                            dn->dn_olduid, dn->dn_oldgid, B_TRUE);
                }
                if (flags & DN_ID_NEW_EXIST) {
                        do_userquota_update(&cache,
                            DN_USED_BYTES(dn->dn_phys),
                            dn->dn_phys->dn_flags, dn->dn_newuid,
                            dn->dn_newgid, B_FALSE);
                }

                mutex_enter(&dn->dn_mtx);
                dn->dn_oldused = 0;
                dn->dn_oldflags = 0;
                if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
                        dn->dn_olduid = dn->dn_newuid;
                        dn->dn_oldgid = dn->dn_newgid;
                        dn->dn_id_flags |= DN_ID_OLD_EXIST;
                        if (dn->dn_bonuslen == 0)
                                dn->dn_id_flags |= DN_ID_CHKED_SPILL;
                        else
                                dn->dn_id_flags |= DN_ID_CHKED_BONUS;
                }
                dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
                if (dn->dn_dirty_txg == spa_syncing_txg(os->os_spa))
                        dn->dn_dirty_txg = 0;
                mutex_exit(&dn->dn_mtx);

                multilist_sublist_remove(list, dn);
                dnode_rele(dn, os->os_synced_dnodes);
        }
        do_userquota_cacheflush(os, &cache, tx);
        multilist_sublist_unlock(list);
        kmem_free(uua, sizeof (*uua));
}

void
dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
{
        int num_sublists;

        if (!dmu_objset_userused_enabled(os))
                return;

        /* Allocate the user/groupused objects if necessary. */
        if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
                VERIFY0(zap_create_claim(os,
                    DMU_USERUSED_OBJECT,
                    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
                VERIFY0(zap_create_claim(os,
                    DMU_GROUPUSED_OBJECT,
                    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
        }

        num_sublists = multilist_get_num_sublists(os->os_synced_dnodes);
        for (int i = 0; i < num_sublists; i++) {
                if (multilist_sublist_is_empty_idx(os->os_synced_dnodes, i))
                        continue;
                userquota_updates_arg_t *uua =
                    kmem_alloc(sizeof (*uua), KM_SLEEP);
                uua->uua_os = os;
                uua->uua_sublist_idx = i;
                uua->uua_tx = tx;
                /* note: caller does taskq_wait() */
                (void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
                    userquota_updates_task, uua, 0);
                /* callback frees uua */
        }
}
/*
 * Returns a pointer to the data from which to determine the uid/gid.
 *
 * If a dirty record for the transaction group that is syncing can't
 * be found then NULL is returned.  In the NULL case it is assumed
 * the uid/gid aren't changing.
 */
static void *
dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
        dbuf_dirty_record_t *dr, **drp;
        void *data;

        if (db->db_dirtycnt == 0)
                return (db->db.db_data);  /* Nothing is changing */

        for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
                if (dr->dr_txg == tx->tx_txg)
                        break;

        if (dr == NULL) {
                data = NULL;
        } else {
                dnode_t *dn;

                DB_DNODE_ENTER(dr->dr_dbuf);
                dn = DB_DNODE(dr->dr_dbuf);

                if (dn->dn_bonuslen == 0 &&
                    dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
                        data = dr->dt.dl.dr_data->b_data;
                else
                        data = dr->dt.dl.dr_data;

                DB_DNODE_EXIT(dr->dr_dbuf);
        }

        return (data);
}

void
dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
{
        objset_t *os = dn->dn_objset;
        void *data = NULL;
        dmu_buf_impl_t *db = NULL;
        uint64_t *user = NULL;
        uint64_t *group = NULL;
        int flags = dn->dn_id_flags;
        int error;
        boolean_t have_spill = B_FALSE;

        if (!dmu_objset_userused_enabled(dn->dn_objset))
                return;

        if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
            DN_ID_CHKED_SPILL)))
                return;

        if (before && dn->dn_bonuslen != 0)
                data = DN_BONUS(dn->dn_phys);
        else if (!before && dn->dn_bonuslen != 0) {
                if (dn->dn_bonus) {
                        db = dn->dn_bonus;
                        mutex_enter(&db->db_mtx);
                        data = dmu_objset_userquota_find_data(db, tx);
                } else {
                        data = DN_BONUS(dn->dn_phys);
                }
        } else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
                int rf = 0;

                if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
                        rf |= DB_RF_HAVESTRUCT;
                error = dmu_spill_hold_by_dnode(dn,
                    rf | DB_RF_MUST_SUCCEED,
                    FTAG, (dmu_buf_t **)&db);
                ASSERT(error == 0);
                mutex_enter(&db->db_mtx);
                data = (before) ? db->db.db_data :
                    dmu_objset_userquota_find_data(db, tx);
                have_spill = B_TRUE;
        } else {
                mutex_enter(&dn->dn_mtx);
                dn->dn_id_flags |= DN_ID_CHKED_BONUS;
                mutex_exit(&dn->dn_mtx);
                return;
        }

        if (before) {
                ASSERT(data);
                user = &dn->dn_olduid;
                group = &dn->dn_oldgid;
        } else if (data) {
                user = &dn->dn_newuid;
                group = &dn->dn_newgid;
        }

        /*
         * Must always call the callback in case the object
         * type has changed and that type isn't an object type to track
         */
        error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data,
            user, group);

        /*
         * Preserve existing uid/gid when the callback can't determine
         * what the new uid/gid are and the callback returned EEXIST.
         * The EEXIST error tells us to just use the existing uid/gid.
         * If we don't know what the old values are then just assign
         * them to 0, since that is a new file being created.
         */
        if (!before && data == NULL && error == EEXIST) {
                if (flags & DN_ID_OLD_EXIST) {
                        dn->dn_newuid = dn->dn_olduid;
                        dn->dn_newgid = dn->dn_oldgid;
                } else {
                        dn->dn_newuid = 0;
                        dn->dn_newgid = 0;
                }
                error = 0;
        }

        if (db)
                mutex_exit(&db->db_mtx);

        mutex_enter(&dn->dn_mtx);
        if (error == 0 && before)
                dn->dn_id_flags |= DN_ID_OLD_EXIST;
        if (error == 0 && !before)
                dn->dn_id_flags |= DN_ID_NEW_EXIST;

        if (have_spill) {
                dn->dn_id_flags |= DN_ID_CHKED_SPILL;
        } else {
                dn->dn_id_flags |= DN_ID_CHKED_BONUS;
        }
        mutex_exit(&dn->dn_mtx);
        if (have_spill)
                dmu_buf_rele((dmu_buf_t *)db, FTAG);
}
boolean_t
dmu_objset_userspace_present(objset_t *os)
{
        return (os->os_phys->os_flags &
            OBJSET_FLAG_USERACCOUNTING_COMPLETE);
}

int
dmu_objset_userspace_upgrade(objset_t *os)
{
        uint64_t obj;
        int err = 0;

        if (dmu_objset_userspace_present(os))
                return (0);
        if (!dmu_objset_userused_enabled(os))
                return (SET_ERROR(ENOTSUP));
        if (dmu_objset_is_snapshot(os))
                return (SET_ERROR(EINVAL));

        /*
         * We simply need to mark every object dirty, so that it will be
         * synced out and now accounted.  If this is called
         * concurrently, or if we already did some work before crashing,
         * that's fine, since we track each object's accounted state
         * independently.
         */

        for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
                dmu_tx_t *tx;
                dmu_buf_t *db;
                int objerr;

                if (issig(JUSTLOOKING) && issig(FORREAL))
                        return (SET_ERROR(EINTR));

                objerr = dmu_bonus_hold(os, obj, FTAG, &db);
                if (objerr != 0)
                        continue;
                tx = dmu_tx_create(os);
                dmu_tx_hold_bonus(tx, obj);
                objerr = dmu_tx_assign(tx, TXG_WAIT);
                if (objerr != 0) {
                        dmu_tx_abort(tx);
                        continue;
                }
                dmu_buf_will_dirty(db, tx);
                dmu_buf_rele(db, FTAG);
                dmu_tx_commit(tx);
        }

        os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
        txg_wait_synced(dmu_objset_pool(os), 0);
        return (0);
}
void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
        dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
            usedobjsp, availobjsp);
}

uint64_t
dmu_objset_fsid_guid(objset_t *os)
{
        return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
}

void
dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
{
        stat->dds_type = os->os_phys->os_type;
        if (os->os_dsl_dataset)
                dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
}

void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
        ASSERT(os->os_dsl_dataset ||
            os->os_phys->os_type == DMU_OST_META);

        if (os->os_dsl_dataset != NULL)
                dsl_dataset_stats(os->os_dsl_dataset, nv);

        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
            os->os_phys->os_type);
        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
            dmu_objset_userspace_present(os));
}

boolean_t
dmu_objset_is_snapshot(objset_t *os)
{
        if (os->os_dsl_dataset != NULL)
                return (os->os_dsl_dataset->ds_is_snapshot);
        else
                return (B_FALSE);
}

int
dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
    boolean_t *conflict)
{
        dsl_dataset_t *ds = os->os_dsl_dataset;
        uint64_t ignored;

        if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
                return (SET_ERROR(ENOENT));

        return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
            dsl_dataset_phys(ds)->ds_snapnames_zapobj, name, 8, 1, &ignored,
            MT_NORMALIZE, real, maxlen, conflict));
}

int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
        dsl_dataset_t *ds = os->os_dsl_dataset;
        zap_cursor_t cursor;
        zap_attribute_t attr;

        ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));

        if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
                return (SET_ERROR(ENOENT));

        zap_cursor_init_serialized(&cursor,
            ds->ds_dir->dd_pool->dp_meta_objset,
            dsl_dataset_phys(ds)->ds_snapnames_zapobj, *offp);

        if (zap_cursor_retrieve(&cursor, &attr) != 0) {
                zap_cursor_fini(&cursor);
                return (SET_ERROR(ENOENT));
        }

        if (strlen(attr.za_name) + 1 > namelen) {
                zap_cursor_fini(&cursor);
                return (SET_ERROR(ENAMETOOLONG));
        }

        (void) strcpy(name, attr.za_name);
        if (idp)
                *idp = attr.za_first_integer;
        if (case_conflict)
                *case_conflict = attr.za_normalization_conflict;
        zap_cursor_advance(&cursor);
        *offp = zap_cursor_serialize(&cursor);
        zap_cursor_fini(&cursor);

        return (0);
}

int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
        dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
        zap_cursor_t cursor;
        zap_attribute_t attr;

        /* there is no next dir on a snapshot! */
        if (os->os_dsl_dataset->ds_object !=
            dsl_dir_phys(dd)->dd_head_dataset_obj)
                return (SET_ERROR(ENOENT));

        zap_cursor_init_serialized(&cursor,
            dd->dd_pool->dp_meta_objset,
            dsl_dir_phys(dd)->dd_child_dir_zapobj, *offp);

        if (zap_cursor_retrieve(&cursor, &attr) != 0) {
                zap_cursor_fini(&cursor);
                return (SET_ERROR(ENOENT));
        }

        if (strlen(attr.za_name) + 1 > namelen) {
                zap_cursor_fini(&cursor);
                return (SET_ERROR(ENAMETOOLONG));
        }

        (void) strcpy(name, attr.za_name);
        if (idp)
                *idp = attr.za_first_integer;
        zap_cursor_advance(&cursor);
        *offp = zap_cursor_serialize(&cursor);
        zap_cursor_fini(&cursor);

        return (0);
}
typedef struct dmu_objset_find_ctx {
        taskq_t *dc_tq;
        dsl_pool_t *dc_dp;
        uint64_t dc_ddobj;
        char *dc_ddname; /* last component of ddobj's name */
        int (*dc_func)(dsl_pool_t *, dsl_dataset_t *, void *);
        void *dc_arg;
        int dc_flags;
        kmutex_t *dc_error_lock;
        int *dc_error;
} dmu_objset_find_ctx_t;

static void
dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp)
{
        dsl_pool_t *dp = dcp->dc_dp;
        dsl_dir_t *dd;
        dsl_dataset_t *ds;
        zap_cursor_t zc;
        zap_attribute_t *attr;
        uint64_t thisobj;
        int err = 0;

        /* don't process if there already was an error */
        if (*dcp->dc_error != 0)
                goto out;

        /*
         * Note: passing the name (dc_ddname) here is optional, but it
         * improves performance because we don't need to call
         * zap_value_search() to determine the name.
         */
        err = dsl_dir_hold_obj(dp, dcp->dc_ddobj, dcp->dc_ddname, FTAG, &dd);
        if (err != 0)
                goto out;

        /* Don't visit hidden ($MOS & $ORIGIN) objsets. */
        if (dd->dd_myname[0] == '$') {
                dsl_dir_rele(dd, FTAG);
                goto out;
        }

        thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
        attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

        /*
         * Iterate over all children.
         */
        if (dcp->dc_flags & DS_FIND_CHILDREN) {
                for (zap_cursor_init(&zc, dp->dp_meta_objset,
                    dsl_dir_phys(dd)->dd_child_dir_zapobj);
                    zap_cursor_retrieve(&zc, attr) == 0;
                    (void) zap_cursor_advance(&zc)) {
                        ASSERT3U(attr->za_integer_length, ==,
                            sizeof (uint64_t));
                        ASSERT3U(attr->za_num_integers, ==, 1);

                        dmu_objset_find_ctx_t *child_dcp =
                            kmem_alloc(sizeof (*child_dcp), KM_SLEEP);
                        *child_dcp = *dcp;
                        child_dcp->dc_ddobj = attr->za_first_integer;
                        child_dcp->dc_ddname = spa_strdup(attr->za_name);
                        if (dcp->dc_tq != NULL)
                                (void) taskq_dispatch(dcp->dc_tq,
                                    dmu_objset_find_dp_cb, child_dcp,
                                    TQ_SLEEP);
                        else
                                dmu_objset_find_dp_impl(child_dcp);
                }
                zap_cursor_fini(&zc);
        }

        /*
         * Iterate over all snapshots.
         */
        if (dcp->dc_flags & DS_FIND_SNAPSHOTS) {
                err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

                if (err == 0) {
                        uint64_t snapobj;

                        snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
                        dsl_dataset_rele(ds, FTAG);

                        for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
                            zap_cursor_retrieve(&zc, attr) == 0;
                            (void) zap_cursor_advance(&zc)) {
                                ASSERT3U(attr->za_integer_length, ==,
                                    sizeof (uint64_t));
                                ASSERT3U(attr->za_num_integers, ==, 1);

                                err = dsl_dataset_hold_obj(dp,
                                    attr->za_first_integer, FTAG, &ds);
                                if (err != 0)
                                        break;
                                err = dcp->dc_func(dp, ds, dcp->dc_arg);
                                dsl_dataset_rele(ds, FTAG);
                                if (err != 0)
                                        break;
                        }
                        zap_cursor_fini(&zc);
                }
        }

        kmem_free(attr, sizeof (zap_attribute_t));

        if (err != 0) {
                dsl_dir_rele(dd, FTAG);
                goto out;
        }

        /*
         * Apply to self.
         */
        err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

        /*
         * Note: we hold the dir while calling dsl_dataset_hold_obj() so
         * that the dir will remain cached, and we won't have to re-instantiate
         * it (which could be expensive due to finding its name via
         * zap_value_search()).
         */
        dsl_dir_rele(dd, FTAG);
        if (err != 0)
                goto out;
        err = dcp->dc_func(dp, ds, dcp->dc_arg);
        dsl_dataset_rele(ds, FTAG);

out:
        if (err != 0) {
                mutex_enter(dcp->dc_error_lock);
                /* only keep first error */
                if (*dcp->dc_error == 0)
                        *dcp->dc_error = err;
                mutex_exit(dcp->dc_error_lock);
        }

        if (dcp->dc_ddname != NULL)
                spa_strfree(dcp->dc_ddname);
        kmem_free(dcp, sizeof (*dcp));
}
static void
dmu_objset_find_dp_cb(void *arg)
{
        dmu_objset_find_ctx_t *dcp = arg;
        dsl_pool_t *dp = dcp->dc_dp;

        /*
         * We need to get a pool_config_lock here, as there are several
         * asserts on pool_config_held down the stack.  Getting a lock via
         * dsl_pool_config_enter is risky, as it might be stalled by a
         * pending writer.  This would deadlock, as the write lock can
         * only be granted when our parent thread gives up the lock.
         * The _prio interface gives us priority over a pending writer.
         */
        dsl_pool_config_enter_prio(dp, FTAG);

        dmu_objset_find_dp_impl(dcp);

        dsl_pool_config_exit(dp, FTAG);
}

/*
 * Find objsets under and including ddobj, call func(ds) on each.
 * The order for the enumeration is completely undefined.
 * func is called with dsl_pool_config held.
 */
int
dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj,
    int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags)
{
        int error = 0;
        taskq_t *tq = NULL;
        int ntasks;
        dmu_objset_find_ctx_t *dcp;
        kmutex_t err_lock;

        mutex_init(&err_lock, NULL, MUTEX_DEFAULT, NULL);
        dcp = kmem_alloc(sizeof (*dcp), KM_SLEEP);
        dcp->dc_tq = NULL;
        dcp->dc_dp = dp;
        dcp->dc_ddobj = ddobj;
        dcp->dc_ddname = NULL;
        dcp->dc_func = func;
        dcp->dc_arg = arg;
        dcp->dc_flags = flags;
        dcp->dc_error_lock = &err_lock;
        dcp->dc_error = &error;

        if ((flags & DS_FIND_SERIALIZE) || dsl_pool_config_held_writer(dp)) {
                /*
                 * In case a write lock is held we can't make use of
                 * parallelism, as down the stack of the worker threads
                 * the lock is asserted via dsl_pool_config_held.
                 * In case of a read lock this is solved by getting a read
                 * lock in each worker thread, which isn't possible in case
                 * of a writer lock.  So we fall back to the synchronous path
                 * here.
                 * In the future it might be possible to get some magic into
                 * dsl_pool_config_held in a way that it returns true for
                 * the worker threads so that a single lock held from this
                 * thread suffices.  For now, stay single threaded.
                 */
                dmu_objset_find_dp_impl(dcp);
                mutex_destroy(&err_lock);

                return (error);
        }

        ntasks = dmu_find_threads;
        if (ntasks == 0)
                ntasks = vdev_count_leaves(dp->dp_spa) * 4;
        tq = taskq_create("dmu_objset_find", ntasks, minclsyspri, ntasks,
            INT_MAX, 0);
        if (tq == NULL) {
                kmem_free(dcp, sizeof (*dcp));
                mutex_destroy(&err_lock);

                return (SET_ERROR(ENOMEM));
        }
        dcp->dc_tq = tq;

        /* dcp will be freed by task */
        (void) taskq_dispatch(tq, dmu_objset_find_dp_cb, dcp, TQ_SLEEP);

        /*
         * PORTING: this code relies on the property of taskq_wait to wait
         * until no more tasks are queued and no more tasks are active.  As
         * we always queue new tasks from within other tasks, taskq_wait
         * reliably waits for the full recursion to finish, even though we
         * enqueue new tasks after taskq_wait has been called.
         * On platforms other than illumos, taskq_wait may not have this
         * property.
         */
        taskq_wait(tq);
        taskq_destroy(tq);
        mutex_destroy(&err_lock);

        return (error);
}
/*
 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
 * The dp_config_rwlock must not be held when this is called, and it
 * will not be held when the callback is called.
 * Therefore this function should only be used when the pool is not changing
 * (e.g. in syncing context), or the callback can deal with the possible races.
 */
static int
dmu_objset_find_impl(spa_t *spa, const char *name,
    int func(const char *, void *), void *arg, int flags)
{
        dsl_dir_t *dd;
        dsl_pool_t *dp = spa_get_dsl(spa);
        dsl_dataset_t *ds;
        zap_cursor_t zc;
        zap_attribute_t *attr;
        char *child;
        uint64_t thisobj;
        int err;

        dsl_pool_config_enter(dp, FTAG);

        err = dsl_dir_hold(dp, name, FTAG, &dd, NULL);
        if (err != 0) {
                dsl_pool_config_exit(dp, FTAG);
                return (err);
        }

        /* Don't visit hidden ($MOS & $ORIGIN) objsets. */
        if (dd->dd_myname[0] == '$') {
                dsl_dir_rele(dd, FTAG);
                dsl_pool_config_exit(dp, FTAG);
                return (0);
        }

        thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
        attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

        /*
         * Iterate over all children.
         */
        if (flags & DS_FIND_CHILDREN) {
                for (zap_cursor_init(&zc, dp->dp_meta_objset,
                    dsl_dir_phys(dd)->dd_child_dir_zapobj);
                    zap_cursor_retrieve(&zc, attr) == 0;
                    (void) zap_cursor_advance(&zc)) {
                        ASSERT3U(attr->za_integer_length, ==,
                            sizeof (uint64_t));
                        ASSERT3U(attr->za_num_integers, ==, 1);

                        child = kmem_asprintf("%s/%s", name, attr->za_name);
                        dsl_pool_config_exit(dp, FTAG);
                        err = dmu_objset_find_impl(spa, child,
                            func, arg, flags);
                        dsl_pool_config_enter(dp, FTAG);
                        strfree(child);
                        if (err != 0)
                                break;
                }
                zap_cursor_fini(&zc);

                if (err != 0) {
                        dsl_dir_rele(dd, FTAG);
                        dsl_pool_config_exit(dp, FTAG);
                        kmem_free(attr, sizeof (zap_attribute_t));
                        return (err);
                }
        }

        /*
         * Iterate over all snapshots.
         */
        if (flags & DS_FIND_SNAPSHOTS) {
                err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

                if (err == 0) {
                        uint64_t snapobj;

                        snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
                        dsl_dataset_rele(ds, FTAG);

                        for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
                            zap_cursor_retrieve(&zc, attr) == 0;
                            (void) zap_cursor_advance(&zc)) {
                                ASSERT3U(attr->za_integer_length, ==,
                                    sizeof (uint64_t));
                                ASSERT3U(attr->za_num_integers, ==, 1);

                                child = kmem_asprintf("%s@%s",
                                    name, attr->za_name);
                                dsl_pool_config_exit(dp, FTAG);
                                err = func(child, arg);
                                dsl_pool_config_enter(dp, FTAG);
                                strfree(child);
                                if (err != 0)
                                        break;
                        }
                        zap_cursor_fini(&zc);
                }
        }

        dsl_dir_rele(dd, FTAG);
        kmem_free(attr, sizeof (zap_attribute_t));
        dsl_pool_config_exit(dp, FTAG);

        if (err != 0)
                return (err);

        /* Apply to self. */
        return (func(name, arg));
}

/*
 * See comment above dmu_objset_find_impl().
 */
int
dmu_objset_find(char *name, int func(const char *, void *), void *arg,
    int flags)
{
        spa_t *spa;
        int error;

        error = spa_open(name, &spa, FTAG);
        if (error != 0)
                return (error);
        error = dmu_objset_find_impl(spa, name, func, arg, flags);
        spa_close(spa, FTAG);
        return (error);
}

void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
        ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
        os->os_user_ptr = user_ptr;
}

void *
dmu_objset_get_user(objset_t *os)
{
        ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
        return (os->os_user_ptr);
}
/*
 * Determine name of filesystem, given name of snapshot.
 * buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes
 */
int
dmu_fsname(const char *snapname, char *buf)
{
        char *atp = strchr(snapname, '@');
        if (atp == NULL)
                return (SET_ERROR(EINVAL));
        if (atp - snapname >= ZFS_MAX_DATASET_NAME_LEN)
                return (SET_ERROR(ENAMETOOLONG));
        (void) strlcpy(buf, snapname, atp - snapname + 1);
        return (0);
}
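/*
 * For example, given "tank/home@tuesday", the strlcpy above copies the
 * leading "tank/home" into buf; the size argument counts the terminating
 * NUL, hence the +1.
 */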
/*
 * Call when we think we're going to write/free space in open context to track
 * the amount of dirty data in the open txg, which is also the amount
 * of memory that can not be evicted until this txg syncs.
 */
void
dmu_objset_willuse_space(objset_t *os, int64_t space, dmu_tx_t *tx)
{
        dsl_dataset_t *ds = os->os_dsl_dataset;
        int64_t aspace = spa_get_worst_case_asize(os->os_spa, space);

        if (ds != NULL) {
                dsl_dir_willuse_space(ds->ds_dir, aspace, tx);
        }

        dsl_pool_dirty_space(dmu_tx_pool(tx), space, tx);
}