/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/sa.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>

/*
 * Needed to close a window in dnode_move() that allows the objset to be freed
 * before it can be safely accessed.
 */
krwlock_t os_lock;

void
dmu_objset_init(void)
{
	rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
}

void
dmu_objset_fini(void)
{
	rw_destroy(&os_lock);
}

spa_t *
dmu_objset_spa(objset_t *os)
{
	return (os->os_spa);
}

zilog_t *
dmu_objset_zil(objset_t *os)
{
	return (os->os_zil);
}

dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
	dsl_dataset_t *ds;

	if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
		return (ds->ds_dir->dd_pool);
	else
		return (spa_get_dsl(os->os_spa));
}

dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
	return (os->os_dsl_dataset);
}

dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
	return (os->os_phys->os_type);
}

void
dmu_objset_name(objset_t *os, char *buf)
{
	dsl_dataset_name(os->os_dsl_dataset, buf);
}

uint64_t
dmu_objset_id(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	return (ds ? ds->ds_object : 0);
}

zfs_sync_type_t
dmu_objset_syncprop(objset_t *os)
{
	return (os->os_sync);
}

zfs_logbias_op_t
dmu_objset_logbias(objset_t *os)
{
	return (os->os_logbias);
}

static void
checksum_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}

static void
compression_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval != ZIO_COMPRESS_INHERIT);

	os->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
}

static void
copies_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval > 0);
	ASSERT(newval <= spa_max_replication(os->os_spa));

	os->os_copies = newval;
}

static void
dedup_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;
	spa_t *spa = os->os_spa;
	enum zio_checksum checksum;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);

	os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
	os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
}

static void
primary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_primary_cache = newval;
}

static void
secondary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_secondary_cache = newval;
}

static void
sync_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
	    newval == ZFS_SYNC_DISABLED);

	os->os_sync = newval;
	if (os->os_zil)
		zil_set_sync(os->os_zil, newval);
}

static void
logbias_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
	    newval == ZFS_LOGBIAS_THROUGHPUT);
	os->os_logbias = newval;
	if (os->os_zil)
		zil_set_logbias(os->os_zil, newval);
}

void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
	osp->os_flags = BSWAP_64(osp->os_flags);
	if (size == sizeof (objset_phys_t)) {
		dnode_byteswap(&osp->os_userused_dnode);
		dnode_byteswap(&osp->os_groupused_dnode);
	}
}

int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_t **osp)
{
	objset_t *os;
	int i, err;

	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

	os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
	os->os_dsl_dataset = ds;
	os->os_spa = spa;
	os->os_rootbp = bp;
	if (!BP_IS_HOLE(os->os_rootbp)) {
		uint32_t aflags = ARC_WAIT;
		zbookmark_t zb;
		SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
		    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

		if (DMU_OS_IS_L2CACHEABLE(os))
			aflags |= ARC_L2CACHE;
		if (DMU_OS_IS_L2COMPRESSIBLE(os))
			aflags |= ARC_L2COMPRESS;

		dprintf_bp(os->os_rootbp, "reading %s", "");
		err = arc_read(NULL, spa, os->os_rootbp,
		    arc_getbuf_func, &os->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err != 0) {
			kmem_free(os, sizeof (objset_t));
			/* convert checksum errors into IO errors */
			if (err == ECKSUM)
				err = SET_ERROR(EIO);
			return (err);
		}

		/* Increase the blocksize if we are permitted. */
		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
		    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
			arc_buf_t *buf = arc_buf_alloc(spa,
			    sizeof (objset_phys_t), &os->os_phys_buf,
			    ARC_BUFC_METADATA);
			bzero(buf->b_data, sizeof (objset_phys_t));
			bcopy(os->os_phys_buf->b_data, buf->b_data,
			    arc_buf_size(os->os_phys_buf));
			(void) arc_buf_remove_ref(os->os_phys_buf,
			    &os->os_phys_buf);
			os->os_phys_buf = buf;
		}

		os->os_phys = os->os_phys_buf->b_data;
		os->os_flags = os->os_phys->os_flags;
	} else {
		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
		os->os_phys_buf = arc_buf_alloc(spa, size,
		    &os->os_phys_buf, ARC_BUFC_METADATA);
		os->os_phys = os->os_phys_buf->b_data;
		bzero(os->os_phys, size);
	}

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off).  Snapshots don't need to know about
	 * checksum/compression/copies.
	 */
	if (ds) {
		err = dsl_prop_register(ds,
		    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
		    primary_cache_changed_cb, os);
		if (err == 0) {
			err = dsl_prop_register(ds,
			    zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
			    secondary_cache_changed_cb, os);
		}
		if (!dsl_dataset_is_snapshot(ds)) {
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
				    checksum_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    compression_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COPIES),
				    copies_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_DEDUP),
				    dedup_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
				    logbias_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_SYNC),
				    sync_changed_cb, os);
			}
		}
		if (err != 0) {
			VERIFY(arc_buf_remove_ref(os->os_phys_buf,
			    &os->os_phys_buf));
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	} else if (ds == NULL) {
		/* It's the meta-objset. */
		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		os->os_compress = ZIO_COMPRESS_LZJB;
		os->os_copies = spa_max_replication(spa);
		os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
		os->os_dedup_verify = 0;
		os->os_logbias = ZFS_LOGBIAS_LATENCY;
		os->os_sync = ZFS_SYNC_STANDARD;
		os->os_primary_cache = ZFS_CACHE_ALL;
		os->os_secondary_cache = ZFS_CACHE_ALL;
	}

	if (ds == NULL || !dsl_dataset_is_snapshot(ds))
		os->os_zil_header = os->os_phys->os_zil_header;
	os->os_zil = zil_alloc(os, &os->os_zil_header);

	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
		list_create(&os->os_free_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
	}
	list_create(&os->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);

	DMU_META_DNODE(os) = dnode_special_open(os,
	    &os->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT,
	    &os->os_meta_dnode);
	if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
		DMU_USERUSED_DNODE(os) = dnode_special_open(os,
		    &os->os_phys->os_userused_dnode, DMU_USERUSED_OBJECT,
		    &os->os_userused_dnode);
		DMU_GROUPUSED_DNODE(os) = dnode_special_open(os,
		    &os->os_phys->os_groupused_dnode, DMU_GROUPUSED_OBJECT,
		    &os->os_groupused_dnode);
	}

	/*
	 * We should be the only thread trying to do this because we
	 * have ds_opening_lock.
	 */
	if (ds) {
		mutex_enter(&ds->ds_lock);
		ASSERT(ds->ds_objset == NULL);
		ds->ds_objset = os;
		mutex_exit(&ds->ds_lock);
	}

	*osp = os;
	return (0);
}

int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
	int err = 0;

	mutex_enter(&ds->ds_opening_lock);
	*osp = ds->ds_objset;
	if (*osp == NULL) {
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, dsl_dataset_get_blkptr(ds), osp);
	}
	mutex_exit(&ds->ds_opening_lock);
	return (err);
}

/*
 * Holds the pool while the objset is held.  Therefore only one objset
 * can be held at a time.
 */
int
dmu_objset_hold(const char *name, void *tag, objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;

	err = dsl_pool_hold(name, tag, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_hold(dp, name, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	err = dmu_objset_from_ds(ds, osp);
	if (err != 0) {
		dsl_dataset_rele(ds, tag);
		dsl_pool_rele(dp, tag);
	}

	return (err);
}
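
/*
 * Illustrative usage sketch (editor's note, not part of the original
 * source; the dataset name is hypothetical).  A read-only consumer pairs
 * dmu_objset_hold() with dmu_objset_rele() under the same tag:
 *
 *	objset_t *os;
 *	if (dmu_objset_hold("tank/fs", FTAG, &os) == 0) {
 *		... inspect os ...
 *		dmu_objset_rele(os, FTAG);
 *	}
 */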

/*
 * dsl_pool must not be held when this is called.
 * Upon successful return, there will be a longhold on the dataset,
 * and the dsl_pool will not be held.
 */
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;

	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_own(dp, name, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	err = dmu_objset_from_ds(ds, osp);
	dsl_pool_rele(dp, FTAG);
	if (err != 0) {
		dsl_dataset_disown(ds, tag);
	} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
		dsl_dataset_disown(ds, tag);
		return (SET_ERROR(EINVAL));
	} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
		dsl_dataset_disown(ds, tag);
		return (SET_ERROR(EROFS));
	}
	return (err);
}
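
/*
 * Illustrative usage sketch (editor's note; the dataset name is
 * hypothetical).  A consumer needing a long-term, possibly writable
 * reference uses dmu_objset_own() and must later call
 * dmu_objset_disown() with the same tag:
 *
 *	objset_t *os;
 *	if (dmu_objset_own("tank/fs", DMU_OST_ZFS, B_FALSE,
 *	    FTAG, &os) == 0) {
 *		... read-write use of os ...
 *		dmu_objset_disown(os, FTAG);
 *	}
 */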

void
dmu_objset_rele(objset_t *os, void *tag)
{
	dsl_pool_t *dp = dmu_objset_pool(os);
	dsl_dataset_rele(os->os_dsl_dataset, tag);
	dsl_pool_rele(dp, tag);
}

void
dmu_objset_disown(objset_t *os, void *tag)
{
	dsl_dataset_disown(os->os_dsl_dataset, tag);
}

void
dmu_objset_evict_dbufs(objset_t *os)
{
	dnode_t *dn;

	mutex_enter(&os->os_lock);

	/* process the mdn last, since the other dnodes have holds on it */
	list_remove(&os->os_dnodes, DMU_META_DNODE(os));
	list_insert_tail(&os->os_dnodes, DMU_META_DNODE(os));

	/*
	 * Find the first dnode with holds.  We have to do this dance
	 * because dnode_add_ref() only works if you already have a
	 * hold.  If there are no holds then it has no dbufs so OK to
	 * skip.
	 */
	for (dn = list_head(&os->os_dnodes);
	    dn && !dnode_add_ref(dn, FTAG);
	    dn = list_next(&os->os_dnodes, dn))
		continue;

	while (dn) {
		dnode_t *next_dn = dn;

		do {
			next_dn = list_next(&os->os_dnodes, next_dn);
		} while (next_dn && !dnode_add_ref(next_dn, FTAG));

		mutex_exit(&os->os_lock);
		dnode_evict_dbufs(dn);
		dnode_rele(dn, FTAG);
		mutex_enter(&os->os_lock);
		dn = next_dn;
	}
	mutex_exit(&os->os_lock);
}

void
dmu_objset_evict(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	for (int t = 0; t < TXG_SIZE; t++)
		ASSERT(!dmu_objset_is_dirty(os, t));

	if (ds) {
		if (!dsl_dataset_is_snapshot(ds)) {
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
			    checksum_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
			    compression_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_COPIES),
			    copies_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_DEDUP),
			    dedup_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
			    logbias_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_SYNC),
			    sync_changed_cb, os));
		}
		VERIFY0(dsl_prop_unregister(ds,
		    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
		    primary_cache_changed_cb, os));
		VERIFY0(dsl_prop_unregister(ds,
		    zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
		    secondary_cache_changed_cb, os));
	}

	if (os->os_sa)
		sa_tear_down(os);

	dmu_objset_evict_dbufs(os);

	dnode_special_close(&os->os_meta_dnode);
	if (DMU_USERUSED_DNODE(os)) {
		dnode_special_close(&os->os_userused_dnode);
		dnode_special_close(&os->os_groupused_dnode);
	}
	zil_free(os->os_zil);

	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);

	VERIFY(arc_buf_remove_ref(os->os_phys_buf, &os->os_phys_buf));

	/*
	 * This is a barrier to prevent the objset from going away in
	 * dnode_move() until we can safely ensure that the objset is still in
	 * use.  We consider the objset valid before the barrier and invalid
	 * after the barrier.
	 */
	rw_enter(&os_lock, RW_READER);
	rw_exit(&os_lock);

	mutex_destroy(&os->os_lock);
	mutex_destroy(&os->os_obj_lock);
	mutex_destroy(&os->os_user_ptr_lock);
	kmem_free(os, sizeof (objset_t));
}

timestruc_t
dmu_objset_snap_cmtime(objset_t *os)
{
	return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
}

/* called from dsl for meta-objset */
objset_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_objset_type_t type, dmu_tx_t *tx)
{
	objset_t *os;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));

	if (ds != NULL)
		VERIFY0(dmu_objset_from_ds(ds, &os));
	else
		VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os));

	mdn = DMU_META_DNODE(os);

	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quiescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
	 */
	if (ds != NULL) {
		int levels = 1;

		/*
		 * Determine the number of levels necessary for the
		 * meta-dnode to contain DN_MAX_OBJECT dnodes.
		 */
		while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
		    DN_MAX_OBJECT * sizeof (dnode_phys_t))
			levels++;

		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
		    mdn->dn_nlevels = levels;
	}
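
	/*
	 * Worked example (editor's note; assumes the usual constants,
	 * which are not spelled out in this file: DNODE_BLOCK_SHIFT == 14,
	 * DN_MAX_INDBLKSHIFT == 14, SPA_BLKPTRSHIFT == 7, a 512-byte
	 * dnode_phys_t, DN_MAX_OBJECT == 2^48, and dn_nblkptr == 3).
	 * One level covers 3 << 14 bytes of dnodes; each extra level
	 * multiplies coverage by 2^(14 - 7) == 128 block pointers per
	 * indirect block.  The loop needs 3 << (14 + 6 * 7) >= 2^57
	 * (== 2^48 dnodes * 512 bytes), i.e. it settles on levels == 7.
	 */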

	ASSERT(type != DMU_OST_NONE);
	ASSERT(type != DMU_OST_ANY);
	ASSERT(type < DMU_OST_NUMTYPES);
	os->os_phys->os_type = type;
	if (dmu_objset_userused_enabled(os)) {
		os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
		os->os_flags = os->os_phys->os_flags;
	}

	dsl_dataset_dirty(ds, tx);

	return (os);
}

typedef struct dmu_objset_create_arg {
	const char *doca_name;
	cred_t *doca_cred;
	void (*doca_userfunc)(objset_t *os, void *arg,
	    cred_t *cr, dmu_tx_t *tx);
	void *doca_userarg;
	dmu_objset_type_t doca_type;
	uint64_t doca_flags;
} dmu_objset_create_arg_t;

/*ARGSUSED*/
static int
dmu_objset_create_check(void *arg, dmu_tx_t *tx)
{
	dmu_objset_create_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	int error;

	if (strchr(doca->doca_name, '@') != NULL)
		return (SET_ERROR(EINVAL));

	error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail);
	if (error != 0)
		return (error);
	if (tail == NULL) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EEXIST));
	}
	dsl_dir_rele(pdd, FTAG);

	return (0);
}

static void
dmu_objset_create_sync(void *arg, dmu_tx_t *tx)
{
	dmu_objset_create_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	dsl_dataset_t *ds;
	uint64_t obj;
	blkptr_t *bp;
	objset_t *os;

	VERIFY0(dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail));

	obj = dsl_dataset_create_sync(pdd, tail, NULL, doca->doca_flags,
	    doca->doca_cred, tx);

	VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
	bp = dsl_dataset_get_blkptr(ds);
	os = dmu_objset_create_impl(pdd->dd_pool->dp_spa,
	    ds, bp, doca->doca_type, tx);

	if (doca->doca_userfunc != NULL) {
		doca->doca_userfunc(os, doca->doca_userarg,
		    doca->doca_cred, tx);
	}

	spa_history_log_internal_ds(ds, "create", tx, "");
	dsl_dataset_rele(ds, FTAG);
	dsl_dir_rele(pdd, FTAG);
}

int
dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
    void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
{
	dmu_objset_create_arg_t doca;

	doca.doca_name = name;
	doca.doca_cred = CRED();
	doca.doca_flags = flags;
	doca.doca_userfunc = func;
	doca.doca_userarg = arg;
	doca.doca_type = type;

	return (dsl_sync_task(name,
	    dmu_objset_create_check, dmu_objset_create_sync, &doca, 5));
}
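
/*
 * Illustrative call (editor's note; the dataset name, callback, and
 * argument below are hypothetical).  The callback runs in syncing
 * context within the same tx that creates the objset:
 *
 *	err = dmu_objset_create("tank/newfs", DMU_OST_ZFS, 0,
 *	    my_create_cb, my_arg);
 */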

typedef struct dmu_objset_clone_arg {
	const char *doca_clone;
	const char *doca_origin;
	cred_t *doca_cred;
} dmu_objset_clone_arg_t;

/*ARGSUSED*/
static int
dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
{
	dmu_objset_clone_arg_t *doca = arg;
	dsl_dir_t *pdd;
	const char *tail;
	int error;
	dsl_dataset_t *origin;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	if (strchr(doca->doca_clone, '@') != NULL)
		return (SET_ERROR(EINVAL));

	error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail);
	if (error != 0)
		return (error);
	if (tail == NULL) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EEXIST));
	}
	/* You can't clone across pools. */
	if (pdd->dd_pool != dp) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EXDEV));
	}
	dsl_dir_rele(pdd, FTAG);

	error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin);
	if (error != 0)
		return (error);

	/* You can't clone across pools. */
	if (origin->ds_dir->dd_pool != dp) {
		dsl_dataset_rele(origin, FTAG);
		return (SET_ERROR(EXDEV));
	}

	/* You can only clone snapshots, not the head datasets. */
	if (!dsl_dataset_is_snapshot(origin)) {
		dsl_dataset_rele(origin, FTAG);
		return (SET_ERROR(EINVAL));
	}
	dsl_dataset_rele(origin, FTAG);

	return (0);
}

static void
dmu_objset_clone_sync(void *arg, dmu_tx_t *tx)
{
	dmu_objset_clone_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	dsl_dataset_t *origin, *ds;
	uint64_t obj;
	char namebuf[MAXNAMELEN];

	VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail));
	VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin));

	obj = dsl_dataset_create_sync(pdd, tail, origin, 0,
	    doca->doca_cred, tx);

	VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
	dsl_dataset_name(origin, namebuf);
	spa_history_log_internal_ds(ds, "clone", tx,
	    "origin=%s (%llu)", namebuf, origin->ds_object);
	dsl_dataset_rele(ds, FTAG);
	dsl_dataset_rele(origin, FTAG);
	dsl_dir_rele(pdd, FTAG);
}

int
dmu_objset_clone(const char *clone, const char *origin)
{
	dmu_objset_clone_arg_t doca;

	doca.doca_clone = clone;
	doca.doca_origin = origin;
	doca.doca_cred = CRED();

	return (dsl_sync_task(clone,
	    dmu_objset_clone_check, dmu_objset_clone_sync, &doca, 5));
}
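
/*
 * Illustrative call (editor's note; the names are hypothetical).  The
 * origin must be a snapshot in the same pool, as enforced by
 * dmu_objset_clone_check():
 *
 *	err = dmu_objset_clone("tank/clone", "tank/fs@snap1");
 */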

int
dmu_objset_snapshot_one(const char *fsname, const char *snapname)
{
	int err;
	char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
	nvlist_t *snaps = fnvlist_alloc();

	fnvlist_add_boolean(snaps, longsnap);
	strfree(longsnap);
	err = dsl_dataset_snapshot(snaps, NULL, NULL);
	fnvlist_free(snaps);
	return (err);
}

static void
dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx)
{
	dnode_t *dn;

	while (dn = list_head(list)) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		ASSERT(dn->dn_dbuf->db_data_pending);
		/*
		 * Initialize dn_zio outside dnode_sync() because the
		 * meta-dnode needs to set it outside dnode_sync().
		 */
		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
		ASSERT(dn->dn_zio);

		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
		list_remove(list, dn);

		if (newlist) {
			(void) dnode_add_ref(dn, newlist);
			list_insert_tail(newlist, dn);
		}

		dnode_sync(dn, tx);
	}
}

/* ARGSUSED */
static void
dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	blkptr_t *bp = zio->io_bp;
	objset_t *os = arg;
	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;

	ASSERT3P(bp, ==, os->os_rootbp);
	ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET);
	ASSERT0(BP_GET_LEVEL(bp));

	/*
	 * Update rootbp fill count: it should be the number of objects
	 * allocated in the object set (not counting the "special"
	 * objects that are stored in the objset_phys_t -- the meta
	 * dnode and user/group accounting objects).
	 */
	bp->blk_fill = 0;
	for (int i = 0; i < dnp->dn_nblkptr; i++)
		bp->blk_fill += dnp->dn_blkptr[i].blk_fill;
}

/* ARGSUSED */
static void
dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	objset_t *os = arg;

	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		dmu_tx_t *tx = os->os_synctx;

		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}
}

/* called from dsl */
void
dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
{
	int txgoff;
	zbookmark_t zb;
	zio_prop_t zp;
	zio_t *zio;
	list_t *list;
	list_t *newlist = NULL;
	dbuf_dirty_record_t *dr;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	ASSERT(dmu_tx_is_syncing(tx));
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	if (os->os_dsl_dataset == NULL) {
		/*
		 * This is the MOS.  If we have upgraded,
		 * spa_max_replication() could change, so reset
		 * os_copies here.
		 */
		os->os_copies = spa_max_replication(os->os_spa);
	}

	/*
	 * Create the root block IO
	 */
	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	arc_release(os->os_phys_buf, &os->os_phys_buf);

	dmu_write_policy(os, NULL, 0, 0, &zp);

	zio = arc_write(pio, os->os_spa, tx->tx_txg,
	    os->os_rootbp, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
	    DMU_OS_IS_L2COMPRESSIBLE(os), &zp, dmu_objset_write_ready,
	    dmu_objset_write_done, os, ZIO_PRIORITY_ASYNC_WRITE,
	    ZIO_FLAG_MUSTSUCCEED, &zb);

	/*
	 * Sync special dnodes - the parent IO for the sync is the root block
	 */
	DMU_META_DNODE(os)->dn_zio = zio;
	dnode_sync(DMU_META_DNODE(os), tx);

	os->os_phys->os_flags = os->os_flags;

	if (DMU_USERUSED_DNODE(os) &&
	    DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
		DMU_USERUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_USERUSED_DNODE(os), tx);
		DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
	}

	txgoff = tx->tx_txg & TXG_MASK;

	if (dmu_objset_userused_enabled(os)) {
		newlist = &os->os_synced_dnodes;
		/*
		 * We must create the list here because it uses the
		 * dn_dirty_link[] of this txg.
		 */
		list_create(newlist, sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[txgoff]));
	}

	dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], newlist, tx);
	dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx);

	list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
	while (dr = list_head(list)) {
		ASSERT0(dr->dr_dbuf->db_level);
		list_remove(list, dr);
		if (dr->dr_zio)
			zio_nowait(dr->dr_zio);
	}
	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);
	os->os_phys->os_zil_header = os->os_zil_header;
	zio_nowait(zio);
}

boolean_t
dmu_objset_is_dirty(objset_t *os, uint64_t txg)
{
	return (!list_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]) ||
	    !list_is_empty(&os->os_free_dnodes[txg & TXG_MASK]));
}

static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];

void
dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
{
	used_cbs[ost] = cb;
}

boolean_t
dmu_objset_userused_enabled(objset_t *os)
{
	return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
	    used_cbs[os->os_phys->os_type] != NULL &&
	    DMU_USERUSED_DNODE(os) != NULL);
}

static void
do_userquota_update(objset_t *os, uint64_t used, uint64_t flags,
    uint64_t user, uint64_t group, boolean_t subtract, dmu_tx_t *tx)
{
	if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
		int64_t delta = DNODE_SIZE + used;
		if (subtract)
			delta = -delta;
		VERIFY3U(0, ==, zap_increment_int(os, DMU_USERUSED_OBJECT,
		    user, delta, tx));
		VERIFY3U(0, ==, zap_increment_int(os, DMU_GROUPUSED_OBJECT,
		    group, delta, tx));
	}
}
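
/*
 * Illustrative arithmetic (editor's note; assumes DNODE_SIZE == 512,
 * which is not defined in this file): syncing an accounted object that
 * uses 4096 bytes applies a delta of +4608 to both the owner's entry in
 * DMU_USERUSED_OBJECT and the group's entry in DMU_GROUPUSED_OBJECT;
 * freeing it (subtract == B_TRUE) applies -4608 to each.
 */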

void
dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
{
	dnode_t *dn;
	list_t *list = &os->os_synced_dnodes;

	ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os));

	while (dn = list_head(list)) {
		int flags;
		ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
		ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
		    dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED);

		/* Allocate the user/groupused objects if necessary. */
		if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
			VERIFY(0 == zap_create_claim(os,
			    DMU_USERUSED_OBJECT,
			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
			VERIFY(0 == zap_create_claim(os,
			    DMU_GROUPUSED_OBJECT,
			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
		}

		/*
		 * We intentionally modify the zap object even if the
		 * net delta is zero.  Otherwise the block of the zap
		 * object could be shared between datasets, but it needs
		 * to be different between them after a bprewrite.
		 */

		flags = dn->dn_id_flags;
		ASSERT(flags);
		if (flags & DN_ID_OLD_EXIST) {
			do_userquota_update(os, dn->dn_oldused, dn->dn_oldflags,
			    dn->dn_olduid, dn->dn_oldgid, B_TRUE, tx);
		}
		if (flags & DN_ID_NEW_EXIST) {
			do_userquota_update(os, DN_USED_BYTES(dn->dn_phys),
			    dn->dn_phys->dn_flags, dn->dn_newuid,
			    dn->dn_newgid, B_FALSE, tx);
		}

		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = 0;
		dn->dn_oldflags = 0;
		if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
			dn->dn_olduid = dn->dn_newuid;
			dn->dn_oldgid = dn->dn_newgid;
			dn->dn_id_flags |= DN_ID_OLD_EXIST;
			if (dn->dn_bonuslen == 0)
				dn->dn_id_flags |= DN_ID_CHKED_SPILL;
			else
				dn->dn_id_flags |= DN_ID_CHKED_BONUS;
		}
		dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
		mutex_exit(&dn->dn_mtx);

		list_remove(list, dn);
		dnode_rele(dn, list);
	}
}

/*
 * Returns a pointer to the data from which to find the uid/gid.
 *
 * If a dirty record for the transaction group that is syncing can't
 * be found, then NULL is returned.  In the NULL case it is assumed
 * the uid/gid aren't changing.
 */
static void *
dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr, **drp;
	void *data;

	if (db->db_dirtycnt == 0)
		return (db->db.db_data);  /* Nothing is changing */

	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg == tx->tx_txg)
			break;

	if (dr == NULL) {
		data = NULL;
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(dr->dr_dbuf);
		dn = DB_DNODE(dr->dr_dbuf);

		if (dn->dn_bonuslen == 0 &&
		    dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
			data = dr->dt.dl.dr_data->b_data;
		else
			data = dr->dt.dl.dr_data;

		DB_DNODE_EXIT(dr->dr_dbuf);
	}

	return (data);
}

void
dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	void *data = NULL;
	dmu_buf_impl_t *db = NULL;
	uint64_t *user = NULL;
	uint64_t *group = NULL;
	int flags = dn->dn_id_flags;
	int error;
	boolean_t have_spill = B_FALSE;

	if (!dmu_objset_userused_enabled(dn->dn_objset))
		return;

	if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
	    DN_ID_CHKED_SPILL)))
		return;

	if (before && dn->dn_bonuslen != 0)
		data = DN_BONUS(dn->dn_phys);
	else if (!before && dn->dn_bonuslen != 0) {
		if (dn->dn_bonus) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
			data = dmu_objset_userquota_find_data(db, tx);
		} else {
			data = DN_BONUS(dn->dn_phys);
		}
	} else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
		int rf = 0;

		if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
			rf |= DB_RF_HAVESTRUCT;
		error = dmu_spill_hold_by_dnode(dn,
		    rf | DB_RF_MUST_SUCCEED,
		    FTAG, (dmu_buf_t **)&db);
		ASSERT(error == 0);
		mutex_enter(&db->db_mtx);
		data = (before) ? db->db.db_data :
		    dmu_objset_userquota_find_data(db, tx);
		have_spill = B_TRUE;
	} else {
		mutex_enter(&dn->dn_mtx);
		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
		mutex_exit(&dn->dn_mtx);
		return;
	}

	if (before) {
		ASSERT(data);
		user = &dn->dn_olduid;
		group = &dn->dn_oldgid;
	} else if (data) {
		user = &dn->dn_newuid;
		group = &dn->dn_newgid;
	}

	/*
	 * Must always call the callback in case the object
	 * type has changed and that type isn't an object type to track.
	 */
	error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data,
	    user, group);

	/*
	 * Preserve existing uid/gid when the callback can't determine
	 * what the new uid/gid are and the callback returned EEXIST.
	 * The EEXIST error tells us to just use the existing uid/gid.
	 * If we don't know what the old values are then just assign
	 * them to 0, since that is a new file being created.
	 */
	if (!before && data == NULL && error == EEXIST) {
		if (flags & DN_ID_OLD_EXIST) {
			dn->dn_newuid = dn->dn_olduid;
			dn->dn_newgid = dn->dn_oldgid;
		} else {
			dn->dn_newuid = 0;
			dn->dn_newgid = 0;
		}
		error = 0;
	}

	if (db)
		mutex_exit(&db->db_mtx);

	mutex_enter(&dn->dn_mtx);
	if (error == 0 && before)
		dn->dn_id_flags |= DN_ID_OLD_EXIST;
	if (error == 0 && !before)
		dn->dn_id_flags |= DN_ID_NEW_EXIST;

	if (have_spill) {
		dn->dn_id_flags |= DN_ID_CHKED_SPILL;
	} else {
		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
	}
	mutex_exit(&dn->dn_mtx);
	if (have_spill)
		dmu_buf_rele((dmu_buf_t *)db, FTAG);
}

boolean_t
dmu_objset_userspace_present(objset_t *os)
{
	return (os->os_phys->os_flags &
	    OBJSET_FLAG_USERACCOUNTING_COMPLETE);
}

int
dmu_objset_userspace_upgrade(objset_t *os)
{
	uint64_t obj;
	int err = 0;

	if (dmu_objset_userspace_present(os))
		return (0);
	if (!dmu_objset_userused_enabled(os))
		return (SET_ERROR(ENOTSUP));
	if (dmu_objset_is_snapshot(os))
		return (SET_ERROR(EINVAL));

	/*
	 * We simply need to mark every object dirty, so that it will be
	 * synced out and now accounted.  If this is called
	 * concurrently, or if we already did some work before crashing,
	 * that's fine, since we track each object's accounted state
	 * independently.
	 */

	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
		dmu_tx_t *tx;
		dmu_buf_t *db;
		int objerr;

		if (issig(JUSTLOOKING) && issig(FORREAL))
			return (SET_ERROR(EINTR));

		objerr = dmu_bonus_hold(os, obj, FTAG, &db);
		if (objerr != 0)
			continue;
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, obj);
		objerr = dmu_tx_assign(tx, TXG_WAIT);
		if (objerr != 0) {
			dmu_tx_abort(tx);
			continue;
		}
		dmu_buf_will_dirty(db, tx);
		dmu_buf_rele(db, FTAG);
		dmu_tx_commit(tx);
	}

	os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}

void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
	    usedobjsp, availobjsp);
}

uint64_t
dmu_objset_fsid_guid(objset_t *os)
{
	return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
}

void
dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
{
	stat->dds_type = os->os_phys->os_type;
	if (os->os_dsl_dataset)
		dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
}

void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
	ASSERT(os->os_dsl_dataset ||
	    os->os_phys->os_type == DMU_OST_META);

	if (os->os_dsl_dataset != NULL)
		dsl_dataset_stats(os->os_dsl_dataset, nv);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
	    os->os_phys->os_type);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
	    dmu_objset_userspace_present(os));
}

boolean_t
dmu_objset_is_snapshot(objset_t *os)
{
	if (os->os_dsl_dataset != NULL)
		return (dsl_dataset_is_snapshot(os->os_dsl_dataset));
	else
		return (B_FALSE);
}

int
dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
    boolean_t *conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	uint64_t ignored;

	if (ds->ds_phys->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, name, 8, 1, &ignored, MT_FIRST,
	    real, maxlen, conflict));
}

int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));

	if (ds->ds_phys->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	zap_cursor_init_serialized(&cursor,
	    ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	if (case_conflict)
		*case_conflict = attr.za_normalization_conflict;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}
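
/*
 * Illustrative iteration sketch (editor's note; assumes the caller
 * holds the pool config lock, as asserted above).  The serialized
 * cursor in *offp lets the caller resume across calls:
 *
 *	uint64_t off = 0;
 *	char snap[MAXNAMELEN];
 *	while (dmu_snapshot_list_next(os, sizeof (snap), snap,
 *	    NULL, &off, NULL) == 0) {
 *		... visit snapshot "snap" ...
 *	}
 */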

int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	/* there is no next dir on a snapshot! */
	if (os->os_dsl_dataset->ds_object !=
	    dd->dd_phys->dd_head_dataset_obj)
		return (SET_ERROR(ENOENT));

	zap_cursor_init_serialized(&cursor,
	    dd->dd_pool->dp_meta_objset,
	    dd->dd_phys->dd_child_dir_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}

/*
 * Find objsets under and including ddobj, call func(ds) on each.
 */
int
dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj,
    int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	uint64_t thisobj;
	int err;

	ASSERT(dsl_pool_config_held(dp));

	err = dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd);
	if (err != 0)
		return (err);

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		return (0);
	}

	thisobj = dd->dd_phys->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			err = dmu_objset_find_dp(dp, attr->za_first_integer,
			    func, arg, flags);
			if (err != 0)
				break;
		}
		zap_cursor_fini(&zc);

		if (err != 0) {
			dsl_dir_rele(dd, FTAG);
			kmem_free(attr, sizeof (zap_attribute_t));
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (flags & DS_FIND_SNAPSHOTS) {
		dsl_dataset_t *ds;
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				err = dsl_dataset_hold_obj(dp,
				    attr->za_first_integer, FTAG, &ds);
				if (err != 0)
					break;
				err = func(dp, ds, arg);
				dsl_dataset_rele(ds, FTAG);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_rele(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));

	if (err != 0)
		return (err);

	/*
	 * Apply to self.
	 */
	err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
	if (err != 0)
		return (err);
	err = func(dp, ds, arg);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}

/*
 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
 * The dp_config_rwlock must not be held when this is called, and it
 * will not be held when the callback is called.
 * Therefore this function should only be used when the pool is not changing
 * (e.g. in syncing context), or the callback can deal with the possible races.
 */
static int
dmu_objset_find_impl(spa_t *spa, const char *name,
    int func(const char *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = spa_get_dsl(spa);
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	char *child;
	uint64_t thisobj;
	int err;

	dsl_pool_config_enter(dp, FTAG);

	err = dsl_dir_hold(dp, name, FTAG, &dd, NULL);
	if (err != 0) {
		dsl_pool_config_exit(dp, FTAG);
		return (err);
	}

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		dsl_pool_config_exit(dp, FTAG);
		return (0);
	}

	thisobj = dd->dd_phys->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			child = kmem_asprintf("%s/%s", name, attr->za_name);
			dsl_pool_config_exit(dp, FTAG);
			err = dmu_objset_find_impl(spa, child,
			    func, arg, flags);
			dsl_pool_config_enter(dp, FTAG);
			strfree(child);
			if (err != 0)
				break;
		}
		zap_cursor_fini(&zc);

		if (err != 0) {
			dsl_dir_rele(dd, FTAG);
			dsl_pool_config_exit(dp, FTAG);
			kmem_free(attr, sizeof (zap_attribute_t));
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (flags & DS_FIND_SNAPSHOTS) {
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				child = kmem_asprintf("%s@%s",
				    name, attr->za_name);
				dsl_pool_config_exit(dp, FTAG);
				err = func(child, arg);
				dsl_pool_config_enter(dp, FTAG);
				strfree(child);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_rele(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));
	dsl_pool_config_exit(dp, FTAG);

	if (err != 0)
		return (err);

	/* Apply to self. */
	return (func(name, arg));
}

/*
 * See comment above dmu_objset_find_impl().
 */
int
dmu_objset_find(char *name, int func(const char *, void *), void *arg,
    int flags)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	error = dmu_objset_find_impl(spa, name, func, arg, flags);
	spa_close(spa, FTAG);
	return (error);
}

void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	os->os_user_ptr = user_ptr;
}

void *
dmu_objset_get_user(objset_t *os)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	return (os->os_user_ptr);
}

/*
 * Determine the name of the filesystem, given the name of a snapshot.
 * buf must be at least MAXNAMELEN bytes.
 */
int
dmu_fsname(const char *snapname, char *buf)
{
	char *atp = strchr(snapname, '@');
	if (atp == NULL)
		return (SET_ERROR(EINVAL));
	if (atp - snapname >= MAXNAMELEN)
		return (SET_ERROR(ENAMETOOLONG));
	(void) strlcpy(buf, snapname, atp - snapname + 1);
	return (0);
}
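
/*
 * Example (editor's note; the snapshot name is hypothetical):
 * dmu_fsname("tank/home@yesterday", buf) stores "tank/home" in buf
 * and returns 0.
 */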