 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fs/zfs.h>
/*
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *	- Lookup a spa_t by name
 *	- Add or remove a spa_t from the namespace
 *	- Increase spa_refcount from non-zero
 *	- Check if spa_refcount is zero
 *	- Rename a spa_t
 *	- Add/remove/attach/detach devices
 *	- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.  The
 *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU.  Internally we check against SPA_MINREF, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock (per-spa crazy rwlock)
 *
 *	This SPA special is a recursive rwlock, capable of being acquired from
 *	asynchronous threads.  It protects the spa_t from config changes,
 *	and must be held in the following circumstances:
 *
 *	- RW_READER to perform I/O to the spa
 *	- RW_WRITER to change the vdev config
 *
 * spa_config_cache_lock (per-spa mutex)
 *
 *	This mutex prevents the spa_config nvlist from being updated.  No
 *	other locks are required to obtain this lock, although implicitly you
 *	must have the namespace lock or non-zero refcount to have any kind
 *	of spa_t pointer at all.
 *
 * The locking order is fairly straightforward:
 *
 *	spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *	spa_refcount		->	spa_config_lock
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *	spa_namespace_lock	->	spa_config_lock
 *
 *	The namespace lock must always be taken before the config lock.
 *
 * The spa_namespace_lock and spa_config_cache_lock can be acquired directly
 * and are globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock is manipulated using the following functions:
 *
 *	spa_config_enter()	Acquire the config lock as RW_READER or
 *				RW_WRITER.  At least one reference on the spa_t
 *				must exist before acquiring the lock.
 *
 *	spa_config_exit()	Release the config lock.
 *
 *	spa_config_held()	Returns true if the config lock is currently
 *				held in the given state.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * The spa_name() function also requires either the spa_namespace_lock
 * or the spa_config_lock, as both are needed to do a rename.  spa_rename() is
 * also implemented within this file since it requires manipulation of the
 * namespace.
 */
static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;

kmem_cache_t *spa_buffer_pool;
/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 */
int zfs_recover = 0;

SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.recover", &zfs_recover);
SYSCTL_INT(_vfs_zfs, OID_AUTO, recover, CTLFLAG_RDTUN, &zfs_recover, 0,
    "Try to recover from otherwise-fatal errors.");
#define	SPA_MINREF	5	/* spa_refcnt for an open-but-idle pool */
/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
static spa_t *
spa_lookup(const char *name)
{
	spa_t search, *spa;
	avl_index_t where;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	search.spa_name = (char *)name;
	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}
/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, const char *altroot)
{
	spa_t *spa;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	spa->spa_name = spa_strdup(name);
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;

	mutex_init(&spa->spa_config_cache_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_scrub_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);

	refcount_create(&spa->spa_refcount);
	refcount_create(&spa->spa_config_lock.scl_count);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot)
		spa->spa_root = spa_strdup(altroot);

	return (spa);
}
/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
	ASSERT(spa->spa_scrub_thread == NULL);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root)
		spa_strfree(spa->spa_root);

	if (spa->spa_name)
		spa_strfree(spa->spa_name);

	spa_config_set(spa, NULL);

	refcount_destroy(&spa->spa_refcount);
	refcount_destroy(&spa->spa_config_lock.scl_count);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_scrub_cv);

	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_config_cache_lock);

	kmem_free(spa, sizeof (spa_t));
}
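
/*
 * Illustrative sketch (not part of the original file): creating a new
 * namespace entry per the contract above -- the caller holds
 * spa_namespace_lock and rules out a duplicate name via spa_lookup().
 */
static spa_t *
spa_example_create(const char *name, const char *altroot)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (spa_lookup(name) != NULL)
		return (NULL);		/* name already in use */

	return (spa_add(name, altroot));
}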
/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}
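
/*
 * Illustrative sketch (not part of the original file): walking every pool
 * in the namespace with spa_next(), starting from NULL per its contract.
 */
static void
spa_example_walk_namespace(void)
{
	spa_t *spa = NULL;

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		/* ... examine 'spa'; it cannot be removed while we hold the lock ... */
	}
	mutex_exit(&spa_namespace_lock);
}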
/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > SPA_MINREF ||
	    MUTEX_HELD(&spa_namespace_lock));

	(void) refcount_add(&spa->spa_refcount, tag);
}
/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > SPA_MINREF ||
	    MUTEX_HELD(&spa_namespace_lock));

	(void) refcount_remove(&spa->spa_refcount, tag);
}
/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against SPA_MINREF, which is the
 * number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == SPA_MINREF);
}
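
/*
 * Illustrative sketch (not part of the original file): the reference-count
 * protocol described above.  The namespace lock is only required when the
 * refcount may be zero; dropping a reference never requires it.
 */
static void
spa_example_ref(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	spa_open_ref(spa, FTAG);	/* refcount may be zero here */
	mutex_exit(&spa_namespace_lock);

	/* ... use the spa_t; it cannot be freed while we hold a reference ... */

	spa_close(spa, FTAG);		/* no locking required */
}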
/*
 * ==========================================================================
 * SPA spare tracking
 * ==========================================================================
 */
/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */
typedef struct spa_spare {
	uint64_t	spare_guid;
	uint64_t	spare_pool;
	avl_node_t	spare_avl;
	int		spare_count;
} spa_spare_t;
static int
spa_spare_compare(const void *a, const void *b)
{
	const spa_spare_t *sa = a;
	const spa_spare_t *sb = b;

	if (sa->spare_guid < sb->spare_guid)
		return (-1);
	else if (sa->spare_guid > sb->spare_guid)
		return (1);
	else
		return (0);
}
void
spa_spare_add(vdev_t *vd)
{
	avl_index_t where;
	spa_spare_t search;
	spa_spare_t *spare;

	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);

	search.spare_guid = vd->vdev_guid;
	if ((spare = avl_find(&spa_spare_avl, &search, &where)) != NULL) {
		spare->spare_count++;
	} else {
		spare = kmem_zalloc(sizeof (spa_spare_t), KM_SLEEP);
		spare->spare_guid = vd->vdev_guid;
		spare->spare_count = 1;
		avl_insert(&spa_spare_avl, spare, where);
	}
	vd->vdev_isspare = B_TRUE;

	mutex_exit(&spa_spare_lock);
}
void
spa_spare_remove(vdev_t *vd)
{
	spa_spare_t search;
	spa_spare_t *spare;
	avl_index_t where;

	mutex_enter(&spa_spare_lock);

	search.spare_guid = vd->vdev_guid;
	spare = avl_find(&spa_spare_avl, &search, &where);

	ASSERT(vd->vdev_isspare);
	ASSERT(spare != NULL);

	if (--spare->spare_count == 0) {
		avl_remove(&spa_spare_avl, spare);
		kmem_free(spare, sizeof (spa_spare_t));
	} else if (spare->spare_pool == spa_guid(vd->vdev_spa)) {
		spare->spare_pool = 0ULL;
	}

	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}
boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool)
{
	spa_spare_t search, *found;
	avl_index_t where;

	mutex_enter(&spa_spare_lock);

	search.spare_guid = guid;
	found = avl_find(&spa_spare_avl, &search, &where);

	if (pool) {
		if (found)
			*pool = found->spare_pool;
		else
			*pool = 0ULL;
	}

	mutex_exit(&spa_spare_lock);

	return (found != NULL);
}
void
spa_spare_activate(vdev_t *vd)
{
	spa_spare_t search, *found;
	avl_index_t where;

	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);

	search.spare_guid = vd->vdev_guid;
	found = avl_find(&spa_spare_avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->spare_pool == 0ULL);

	found->spare_pool = spa_guid(vd->vdev_spa);
	mutex_exit(&spa_spare_lock);
}
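
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * consult the spare tree.  A guid present in the tree is a spare; a
 * nonzero spare_pool guid means it is actively replacing a device.
 */
static boolean_t
spa_example_spare_active(uint64_t guid)
{
	uint64_t pool = 0;

	if (!spa_spare_exists(guid, &pool))
		return (B_FALSE);	/* not registered as a spare */

	return (pool != 0ULL);		/* active in the pool with guid 'pool' */
}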
/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
/*
 * Acquire the config lock.  The config lock is a special rwlock that allows for
 * recursive enters.  Because these enters come from the same thread as well as
 * asynchronous threads working on behalf of the owner, we must unilaterally
 * allow all reads access as long as at least one reader is held (even if a
 * write is requested).  This has the side effect of write starvation, but
 * write locks are extremely rare, and a solution to this problem would be
 * significantly more complex (if even possible).
 *
 * We would like to assert that the namespace lock isn't held, but this is a
 * valid use during create.
 */
void
spa_config_enter(spa_t *spa, krw_t rw, void *tag)
{
	spa_config_lock_t *scl = &spa->spa_config_lock;

	mutex_enter(&scl->scl_lock);

	if (scl->scl_writer != curthread) {
		if (rw == RW_READER) {
			while (scl->scl_writer != NULL)
				cv_wait(&scl->scl_cv, &scl->scl_lock);
		} else {
			while (scl->scl_writer != NULL ||
			    !refcount_is_zero(&scl->scl_count))
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			scl->scl_writer = curthread;
		}
	}

	(void) refcount_add(&scl->scl_count, tag);

	mutex_exit(&scl->scl_lock);
}
/*
 * Release the spa config lock, notifying any waiters in the process.
 */
void
spa_config_exit(spa_t *spa, void *tag)
{
	spa_config_lock_t *scl = &spa->spa_config_lock;

	mutex_enter(&scl->scl_lock);

	ASSERT(!refcount_is_zero(&scl->scl_count));
	if (refcount_remove(&scl->scl_count, tag) == 0) {
		cv_broadcast(&scl->scl_cv);
		scl->scl_writer = NULL;  /* OK in either case */
	}

	mutex_exit(&scl->scl_lock);
}
/*
 * Returns true if the config lock is held in the given manner.
 */
boolean_t
spa_config_held(spa_t *spa, krw_t rw)
{
	spa_config_lock_t *scl = &spa->spa_config_lock;
	boolean_t held;

	mutex_enter(&scl->scl_lock);
	if (rw == RW_WRITER)
		held = (scl->scl_writer == curthread);
	else
		held = !refcount_is_zero(&scl->scl_count);
	mutex_exit(&scl->scl_lock);

	return (held);
}
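
/*
 * Illustrative sketch (not part of the original file): the reader-side
 * pattern implied by the rules above.  Recursive enters from the same
 * thread are legal, which is what lets asynchronous I/O threads take the
 * lock on behalf of an owner that already holds it.
 */
static void
spa_example_config_reader(spa_t *spa)
{
	spa_config_enter(spa, RW_READER, FTAG);
	ASSERT(spa_config_held(spa, RW_READER));

	/* ... issue I/O; the vdev config cannot change here ... */

	spa_config_exit(spa, FTAG);
}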
/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */
/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	/*
	 * Suspend scrub activity while we mess with the config.
	 */
	spa_scrub_suspend(spa);

	mutex_enter(&spa_namespace_lock);

	spa_config_enter(spa, RW_WRITER, spa);

	return (spa_last_synced_txg(spa) + 1);
}
/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions have
 * synced to disk, and then update the global configuration cache with the new
 * information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	/*
	 * If the config changed, notify the scrub thread that it must restart.
	 */
	if (error == 0 && !list_is_empty(&spa->spa_dirty_list)) {
		config_changed = B_TRUE;
		spa_scrub_restart(spa, txg);
	}

	spa_config_exit(spa, spa);

	/*
	 * Allow scrubbing to resume.
	 */
	spa_scrub_resume(spa);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl.smo_object == 0);
		vdev_free(vd);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync();

	mutex_exit(&spa_namespace_lock);

	return (error);
}
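
/*
 * Illustrative sketch (not part of the original file): the canonical
 * bracket used by vdev add/remove/attach/detach code paths.
 */
static int
spa_example_vdev_change(spa_t *spa)
{
	uint64_t txg;
	int error = 0;

	txg = spa_vdev_enter(spa);	/* scrub suspended, both locks held */

	/* ... modify the vdev configuration here, setting 'error' ... */

	return (spa_vdev_exit(spa, NULL, txg, error));
}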
/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */
/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, RW_WRITER, FTAG);

	avl_remove(&spa_namespace_avl, spa);
	spa_strfree(spa->spa_name);
	spa->spa_name = spa_strdup(newname);
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync();

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}
/*
 * Determine whether a pool with given pool_guid exists.  If device_guid is
 * non-zero, determine whether the pool exists *and* contains a device with the
 * specified device_guid.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of
			 * adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa != NULL);
}
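
/*
 * Illustrative sketch (not part of the original file): a pool-only
 * existence check, passing a device_guid of 0 per the comment above.
 */
static boolean_t
spa_example_pool_exists(uint64_t pool_guid)
{
	boolean_t exists;

	mutex_enter(&spa_namespace_lock);
	exists = spa_guid_exists(pool_guid, 0);
	mutex_exit(&spa_namespace_lock);

	return (exists);
}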
char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}
uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}
void
sprintf_blkptr(char *buf, int len, const blkptr_t *bp)
{
	int d;

	if (bp == NULL) {
		(void) snprintf(buf, len, "<NULL>");
		return;
	}

	if (BP_IS_HOLE(bp)) {
		(void) snprintf(buf, len, "<hole>");
		return;
	}

	(void) snprintf(buf, len, "[L%llu %s] %llxL/%llxP ",
	    (u_longlong_t)BP_GET_LEVEL(bp),
	    dmu_ot[BP_GET_TYPE(bp)].ot_name,
	    (u_longlong_t)BP_GET_LSIZE(bp),
	    (u_longlong_t)BP_GET_PSIZE(bp));

	for (d = 0; d < BP_GET_NDVAS(bp); d++) {
		const dva_t *dva = &bp->blk_dva[d];
		(void) snprintf(buf + strlen(buf), len - strlen(buf),
		    "DVA[%d]=<%llu:%llx:%llx> ", d,
		    (u_longlong_t)DVA_GET_VDEV(dva),
		    (u_longlong_t)DVA_GET_OFFSET(dva),
		    (u_longlong_t)DVA_GET_ASIZE(dva));
	}

	(void) snprintf(buf + strlen(buf), len - strlen(buf),
	    "%s %s %s %s birth=%llu fill=%llu cksum=%llx:%llx:%llx:%llx",
	    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name,
	    zio_compress_table[BP_GET_COMPRESS(bp)].ci_name,
	    BP_GET_BYTEORDER(bp) == 0 ? "BE" : "LE",
	    BP_IS_GANG(bp) ? "gang" : "contiguous",
	    (u_longlong_t)bp->blk_birth,
	    (u_longlong_t)bp->blk_fill,
	    (u_longlong_t)bp->blk_cksum.zc_word[0],
	    (u_longlong_t)bp->blk_cksum.zc_word[1],
	    (u_longlong_t)bp->blk_cksum.zc_word[2],
	    (u_longlong_t)bp->blk_cksum.zc_word[3]);
}
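
/*
 * Illustrative sketch (not part of the original file): formatting a block
 * pointer for debug output.  The buffer size here is an assumption; real
 * consumers size it to hold the worst-case string.
 */
static void
spa_example_print_bp(const blkptr_t *bp)
{
	char buf[320];	/* assumed large enough for the worst case */

	sprintf_blkptr(buf, sizeof (buf), bp);

	/* ... log or print 'buf' ... */
}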
void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, RW_WRITER, FTAG);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}
void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}
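
/*
 * Illustrative sketch (not part of the original file): the intended call
 * pattern -- a consistency check that would otherwise panic is downgraded
 * to a warning when vfs.zfs.recover is set.
 */
static void
spa_example_check_asize(uint64_t asize, uint64_t expected)
{
	if (asize != expected)
		zfs_panic_recover("asize %llu != expected %llu",
		    (u_longlong_t)asize, (u_longlong_t)expected);
}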
/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */
krwlock_t *
spa_traverse_rwlock(spa_t *spa)
{
	return (&spa->spa_traverse_lock);
}

int
spa_traverse_wanted(spa_t *spa)
{
	return (spa->spa_traverse_wanted);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}
char *
spa_name(spa_t *spa)
{
	/*
	 * Accessing the name requires holding either the namespace lock or the
	 * config lock, both of which are required to do a rename.
	 */
	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	    spa_config_held(spa, RW_READER) || spa_config_held(spa, RW_WRITER));

	return (spa->spa_name);
}
uint64_t
spa_guid(spa_t *spa)
{
	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_load_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev != NULL)
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (spa->spa_load_guid);
}
uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

int
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}
/*
 * In the future, this may select among different metaslab classes
 * depending on the zdp.  For now, there's no such distinction.
 */
metaslab_class_t *
spa_metaslab_class_select(spa_t *spa)
{
	return (spa->spa_normal_class);
}
/*
 * Return how much space is allocated in the pool (i.e. sum of all asize).
 */
uint64_t
spa_get_alloc(spa_t *spa)
{
	return (spa->spa_root_vdev->vdev_stat.vs_alloc);
}
/*
 * Return how much (raid-z inflated) space there is in the pool.
 */
uint64_t
spa_get_space(spa_t *spa)
{
	return (spa->spa_root_vdev->vdev_stat.vs_space);
}
/*
 * Return the amount of raid-z-deflated space in the pool.
 */
uint64_t
spa_get_dspace(spa_t *spa)
{
	if (spa->spa_deflate)
		return (spa->spa_root_vdev->vdev_stat.vs_dspace);
	else
		return (spa->spa_root_vdev->vdev_stat.vs_space);
}
/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	/*
	 * For now, the worst case is 512-byte RAID-Z blocks, in which
	 * case the space requirement is exactly 2x; so just assume that.
	 * Add to this the fact that we can have up to 3 DVAs per bp, and
	 * we have to multiply by a total of 6x.
	 */
	return (lsize * 6);
}
uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}
int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of ZFS_VERSION == ZFS_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < ZFS_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}
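
/*
 * Illustrative sketch (not part of the original file): consumers clamp a
 * requested copies count to what the pool version supports.
 */
static int
spa_example_clamp_copies(spa_t *spa, int requested)
{
	return (MIN(requested, spa_max_replication(spa)));
}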
uint64_t
bp_get_dasize(spa_t *spa, const blkptr_t *bp)
{
	int i;
	uint64_t sz = 0;

	if (!spa->spa_deflate)
		return (BP_GET_ASIZE(bp));

	for (i = 0; i < SPA_DVAS_PER_BP; i++) {
		vdev_t *vd =
		    vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[i]));
		sz += (DVA_GET_ASIZE(&bp->blk_dva[i]) >> SPA_MINBLOCKSHIFT) *
		    vd->vdev_deflate_ratio;
	}

	return (sz << SPA_MINBLOCKSHIFT);
}
/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */
static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}
int
spa_busy(void)
{
	return (spa_active_count);
}
void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_spare_t),
	    offsetof(spa_spare_t, spare_avl));

	refcount_init();
	unique_init();
	zio_init();
	dmu_init();
	zil_init();
	spa_config_load();
}

void
spa_fini(void)
{
	spa_evict_all();

	zil_fini();
	dmu_fini();
	zio_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
}