/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019 Datto Inc.
 */
#ifndef	_SYS_SPA_IMPL_H
#define	_SYS_SPA_IMPL_H

#include <sys/spa_checkpoint.h>
#include <sys/spa_log_spacemap.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_removal.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/dsl_pool.h>
#include <sys/uberblock_impl.h>
#include <sys/zfs_context.h>
#include <sys/zfs_refcount.h>
#include <sys/bplist.h>
#include <sys/bpobj.h>
#include <sys/dsl_crypt.h>
#include <sys/zfeature.h>
#include <sys/dsl_deadlist.h>
#include <zfeature_common.h>
typedef struct spa_alloc {
	kmutex_t	spa_alloc_lock;
	avl_tree_t	spa_alloc_tree;
} ____cacheline_aligned spa_alloc_t;
typedef struct spa_error_entry {
	zbookmark_phys_t	se_bookmark;
	char			*se_name;
	avl_node_t		se_avl;
	zbookmark_err_phys_t	se_zep;	/* not accounted in avl_find */
} spa_error_entry_t;
typedef struct spa_history_phys {
	uint64_t sh_pool_create_len;	/* ending offset of zpool create */
	uint64_t sh_phys_max_off;	/* physical EOF */
	uint64_t sh_bof;		/* logical BOF */
	uint64_t sh_eof;		/* logical EOF */
	uint64_t sh_records_lost;	/* num of records overwritten */
} spa_history_phys_t;
/*
 * All members must be uint64_t, for byteswap purposes.
 */
typedef struct spa_removing_phys {
	uint64_t sr_state; /* dsl_scan_state_t */

	/*
	 * The vdev ID that we most recently attempted to remove,
	 * or -1 if no removal has been attempted.
	 */
	uint64_t sr_removing_vdev;

	/*
	 * The vdev ID that we most recently successfully removed,
	 * or -1 if no devices have been removed.
	 */
	uint64_t sr_prev_indirect_vdev;

	uint64_t sr_start_time;
	uint64_t sr_end_time;

	/*
	 * Note that we cannot use the space map's or indirect mapping's
	 * accounting as a substitute for these values, because we need to
	 * count frees of not-yet-copied data as though the copy had been
	 * done.  Otherwise, we could get into a situation where copied >
	 * to_copy, or we complete before copied == to_copy.
	 */
	uint64_t sr_to_copy;	/* bytes that need to be copied */
	uint64_t sr_copied;	/* bytes that have been copied or freed */
} spa_removing_phys_t;
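
/*
 * Illustrative sketch only (not part of the on-disk format or of any ZFS
 * interface): given the accounting above, a consumer could derive a rough
 * completion percentage for a removal.  The helper name is hypothetical.
 */
static inline uint64_t
spa_removing_progress_pct(const spa_removing_phys_t *srp)
{
	if (srp->sr_to_copy == 0)
		return (100);
	/* sr_copied also counts freed-but-never-copied bytes, see above */
	return ((srp->sr_copied * 100) / srp->sr_to_copy);
}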
/*
 * This struct is stored as an entry in the DMU_POOL_DIRECTORY_OBJECT
 * (with key DMU_POOL_CONDENSING_INDIRECT).  It is present if a condense
 * of an indirect vdev's mapping object is in progress.
 */
typedef struct spa_condensing_indirect_phys {
	/*
	 * The vdev ID of the indirect vdev whose indirect mapping is
	 * being condensed.
	 */
	uint64_t	scip_vdev;

	/*
	 * The vdev's old obsolete spacemap.  This spacemap's contents are
	 * being integrated into the new mapping.
	 */
	uint64_t	scip_prev_obsolete_sm_object;

	/*
	 * The new mapping object that is being created.
	 */
	uint64_t	scip_next_mapping_object;
} spa_condensing_indirect_phys_t;
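
/*
 * Hedged sketch: by convention, a nonzero scip_next_mapping_object marks a
 * condense as in progress.  This predicate only restates that convention;
 * the name is hypothetical and it is not part of the real interface.
 */
static inline boolean_t
spa_condensing_indirect_in_progress(const spa_condensing_indirect_phys_t *scip)
{
	return (scip->scip_next_mapping_object != 0);
}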
struct spa_aux_vdev {
	uint64_t	sav_object;		/* MOS object for device list */
	nvlist_t	*sav_config;		/* cached device config */
	vdev_t		**sav_vdevs;		/* devices */
	int		sav_count;		/* number of devices */
	boolean_t	sav_sync;		/* sync the device list */
	nvlist_t	**sav_pending;		/* pending device additions */
	uint_t		sav_npending;		/* # pending devices */
};
typedef struct spa_config_lock {
	kthread_t	*scl_writer;
	int		scl_write_wanted;
} ____cacheline_aligned spa_config_lock_t;
typedef struct spa_config_dirent {
	list_node_t	scd_link;
	char		*scd_path;
} spa_config_dirent_t;
typedef enum zio_taskq_type {
	ZIO_TASKQ_ISSUE = 0,
	ZIO_TASKQ_ISSUE_HIGH,
	ZIO_TASKQ_INTERRUPT,
	ZIO_TASKQ_INTERRUPT_HIGH,
	ZIO_TASKQ_TYPES
} zio_taskq_type_t;
/*
 * State machine for the zpool-poolname process.  The state transitions
 * are done as follows:
 *
 *	From		   To			Routine
 *	PROC_NONE	-> PROC_CREATED		spa_activate()
 *	PROC_CREATED	-> PROC_ACTIVE		spa_thread()
 *	PROC_ACTIVE	-> PROC_DEACTIVATE	spa_deactivate()
 *	PROC_DEACTIVATE	-> PROC_GONE		spa_thread()
 *	PROC_GONE	-> PROC_NONE		spa_deactivate()
 */
typedef enum spa_proc_state {
	SPA_PROC_NONE,		/* spa_proc = &p0, no process created */
	SPA_PROC_CREATED,	/* spa_activate() has proc, is waiting */
	SPA_PROC_ACTIVE,	/* taskqs created, spa_proc set */
	SPA_PROC_DEACTIVATE,	/* spa_deactivate() requests process exit */
	SPA_PROC_GONE		/* spa_thread() is exiting, spa_proc = &p0 */
} spa_proc_state_t;
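
/*
 * Hedged sketch: the legal transitions listed above, expressed as a
 * hypothetical validity check.  The real transitions are made under
 * spa_proc_lock in spa.c; this helper is illustrative only.
 */
static inline boolean_t
spa_proc_transition_valid(spa_proc_state_t from, spa_proc_state_t to)
{
	return ((from == SPA_PROC_NONE && to == SPA_PROC_CREATED) ||
	    (from == SPA_PROC_CREATED && to == SPA_PROC_ACTIVE) ||
	    (from == SPA_PROC_ACTIVE && to == SPA_PROC_DEACTIVATE) ||
	    (from == SPA_PROC_DEACTIVATE && to == SPA_PROC_GONE) ||
	    (from == SPA_PROC_GONE && to == SPA_PROC_NONE));
}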
typedef struct spa_taskqs {
	uint_t		stqs_count;
	taskq_t		**stqs_taskq;
} spa_taskqs_t;
/* one for each thread in the spa sync taskq */
typedef struct spa_syncthread_info {
	kthread_t	*sti_thread;
	taskq_t		*sti_wr_iss_tq;	/* assigned wr_iss taskq */
} spa_syncthread_info_t;
typedef enum spa_all_vdev_zap_action {
	AVZ_ACTION_NONE = 0,
	AVZ_ACTION_DESTROY,	/* Destroy all per-vdev ZAPs and the AVZ. */
	AVZ_ACTION_REBUILD,	/* Populate the new AVZ, see spa_avz_rebuild */
	AVZ_ACTION_INITIALIZE
} spa_avz_action_t;
typedef enum spa_config_source {
	SPA_CONFIG_SRC_NONE = 0,
	SPA_CONFIG_SRC_SCAN,		/* scan of path (default: /dev/dsk) */
	SPA_CONFIG_SRC_CACHEFILE,	/* any cachefile */
	SPA_CONFIG_SRC_TRYIMPORT,	/* returned from call to tryimport */
	SPA_CONFIG_SRC_SPLIT,		/* new pool in a pool split */
	SPA_CONFIG_SRC_MOS		/* MOS, but not always from right txg */
} spa_config_source_t;
struct spa {
	/*
	 * Fields protected by spa_namespace_lock.
	 */
	char spa_name[ZFS_MAX_DATASET_NAME_LEN]; /* pool name */
	char *spa_comment; /* comment */
	avl_node_t spa_avl; /* node in spa_namespace_avl */
	nvlist_t *spa_config; /* last synced config */
	nvlist_t *spa_config_syncing; /* currently syncing config */
	nvlist_t *spa_config_splitting; /* config for splitting */
	nvlist_t *spa_load_info; /* info and errors from load */
	uint64_t spa_config_txg; /* txg of last config change */
	uint32_t spa_sync_pass; /* iterate-to-convergence */
	pool_state_t spa_state; /* pool state */
	int spa_inject_ref; /* injection references */
	uint8_t spa_sync_on; /* sync threads are running */
	spa_load_state_t spa_load_state; /* current load operation */
	boolean_t spa_indirect_vdevs_loaded; /* mappings loaded? */
	boolean_t spa_trust_config; /* do we trust vdev tree? */
	boolean_t spa_is_splitting; /* in the middle of a split? */
	spa_config_source_t spa_config_source; /* where config comes from? */
	uint64_t spa_import_flags; /* import specific flags */
	spa_taskqs_t spa_zio_taskq[ZIO_TYPES][ZIO_TASKQ_TYPES];
	dsl_pool_t *spa_dsl_pool;
	boolean_t spa_is_initializing; /* true while opening pool */
	boolean_t spa_is_exporting; /* true while exporting pool */
	kthread_t *spa_load_thread; /* loading, no namespace lock */
	metaslab_class_t *spa_normal_class; /* normal data class */
	metaslab_class_t *spa_log_class; /* intent log data class */
	metaslab_class_t *spa_embedded_log_class; /* log on normal vdevs */
	metaslab_class_t *spa_special_class; /* special allocation class */
	metaslab_class_t *spa_dedup_class; /* dedup allocation class */
	uint64_t spa_first_txg; /* first txg after spa_open() */
	uint64_t spa_final_txg; /* txg of export/destroy */
	uint64_t spa_freeze_txg; /* freeze pool at this txg */
	uint64_t spa_load_max_txg; /* best initial ub_txg */
	uint64_t spa_claim_max_txg; /* highest claimed birth txg */
	inode_timespec_t spa_loaded_ts; /* 1st successful open time */
	objset_t *spa_meta_objset; /* copy of dp->dp_meta_objset */
	kmutex_t spa_evicting_os_lock; /* Evicting objset list lock */
	list_t spa_evicting_os_list; /* Objsets being evicted. */
	kcondvar_t spa_evicting_os_cv; /* Objset Eviction Completion */
	txg_list_t spa_vdev_txg_list; /* per-txg dirty vdev list */
	vdev_t *spa_root_vdev; /* top-level vdev container */
	uint64_t spa_min_ashift; /* of vdevs in normal class */
	uint64_t spa_max_ashift; /* of vdevs in normal class */
	uint64_t spa_min_alloc; /* of vdevs in normal class */
	uint64_t spa_gcd_alloc; /* of vdevs in normal class */
	uint64_t spa_config_guid; /* config pool guid */
	uint64_t spa_load_guid; /* spa_load initialized guid */
	uint64_t spa_last_synced_guid; /* last synced guid */
	list_t spa_config_dirty_list; /* vdevs with dirty config */
	list_t spa_state_dirty_list; /* vdevs with dirty state */
	/*
	 * spa_allocs is an array whose length is stored in spa_alloc_count.
	 * There is one tree and one lock for each allocator, to help improve
	 * allocation performance in write-heavy workloads.  (An illustrative
	 * selection sketch follows this structure definition.)
	 */
	spa_alloc_t *spa_allocs;
	int spa_alloc_count;
	int spa_active_allocator; /* selectable allocator */
	/* per-allocator sync thread taskqs */
	taskq_t *spa_sync_tq;
	spa_syncthread_info_t *spa_syncthreads;
	spa_aux_vdev_t spa_spares; /* hot spares */
	spa_aux_vdev_t spa_l2cache; /* L2ARC cache devices */
	boolean_t spa_aux_sync_uber; /* need to sync aux uber */
	nvlist_t *spa_label_features; /* Features for reading MOS */
	uint64_t spa_config_object; /* MOS object for pool config */
	uint64_t spa_config_generation; /* config generation number */
	uint64_t spa_syncing_txg; /* txg currently syncing */
	bpobj_t spa_deferred_bpobj; /* deferred-free bplist */
	bplist_t spa_free_bplist[TXG_SIZE]; /* bplist of stuff to free */
	zio_cksum_salt_t spa_cksum_salt; /* secret salt for cksum */
	/* checksum context templates */
	kmutex_t spa_cksum_tmpls_lock;
	void *spa_cksum_tmpls[ZIO_CHECKSUM_FUNCTIONS];
	uberblock_t spa_ubsync; /* last synced uberblock */
	uberblock_t spa_uberblock; /* current uberblock */
	boolean_t spa_extreme_rewind; /* rewind past deferred frees */
	kmutex_t spa_scrub_lock; /* resilver/scrub lock */
	uint64_t spa_scrub_inflight; /* in-flight scrub bytes */

	/* in-flight verification bytes */
	uint64_t spa_load_verify_bytes;
	kcondvar_t spa_scrub_io_cv; /* scrub I/O completion */
	uint8_t spa_scrub_active; /* active or suspended? */
	uint8_t spa_scrub_type; /* type of scrub we're doing */
	uint8_t spa_scrub_finished; /* indicator to rotate logs */
	uint8_t spa_scrub_started; /* started since last boot */
	uint8_t spa_scrub_reopen; /* scrub doing vdev_reopen */
	uint64_t spa_scan_pass_start; /* start time per pass/reboot */
	uint64_t spa_scan_pass_scrub_pause; /* scrub pause time */
	uint64_t spa_scan_pass_scrub_spent_paused; /* total paused */
	uint64_t spa_scan_pass_exam; /* examined bytes per pass */
	uint64_t spa_scan_pass_issued; /* issued bytes per pass */

	/* error scrub pause time in milliseconds */
	uint64_t spa_scan_pass_errorscrub_pause;
	/* total error scrub paused time in milliseconds */
	uint64_t spa_scan_pass_errorscrub_spent_paused;
	/*
	 * We are in the middle of a resilver, and another resilver
	 * is needed once this one completes.  This is set iff any
	 * vdev_resilver_deferred is set.
	 */
	boolean_t spa_resilver_deferred;
	kmutex_t spa_async_lock; /* protect async state */
	kthread_t *spa_async_thread; /* thread doing async task */
	int spa_async_suspended; /* async tasks suspended */
	kcondvar_t spa_async_cv; /* wait for thread_exit() */
	uint16_t spa_async_tasks; /* async task mask */
	uint64_t spa_missing_tvds; /* unopenable tvds on load */
	uint64_t spa_missing_tvds_allowed; /* allow loading spa? */

	uint64_t spa_nonallocating_dspace;
	spa_removing_phys_t spa_removing_phys;
	spa_vdev_removal_t *spa_vdev_removal;

	spa_condensing_indirect_phys_t spa_condensing_indirect_phys;
	spa_condensing_indirect_t *spa_condensing_indirect;
	zthr_t *spa_condense_zthr; /* zthr doing condense. */

	vdev_raidz_expand_t *spa_raidz_expand;
	zthr_t *spa_raidz_expand_zthr;

	uint64_t spa_checkpoint_txg; /* the txg of the checkpoint */
	spa_checkpoint_info_t spa_checkpoint_info; /* checkpoint accounting */
	zthr_t *spa_checkpoint_discard_zthr;

	space_map_t *spa_syncing_log_sm; /* current log space map */
	avl_tree_t spa_sm_logs_by_txg;
	kmutex_t spa_flushed_ms_lock; /* for metaslabs_by_flushed */
	avl_tree_t spa_metaslabs_by_flushed;
	spa_unflushed_stats_t spa_unflushed_stats;
	list_t spa_log_summary;
	uint64_t spa_log_flushall_txg;

	zthr_t *spa_livelist_delete_zthr; /* deleting livelists */
	zthr_t *spa_livelist_condense_zthr; /* condensing livelists */
	uint64_t spa_livelists_to_delete; /* set of livelists to free */
	livelist_condense_entry_t spa_to_condense; /* next to condense */

	char *spa_root; /* alternate root directory */
	uint64_t spa_ena; /* spa-wide ereport ENA */
	int spa_last_open_failed; /* error if last open failed */
	uint64_t spa_last_ubsync_txg; /* "best" uberblock txg */
	uint64_t spa_last_ubsync_txg_ts; /* timestamp from that ub */
	uint64_t spa_load_txg; /* ub txg that loaded */
	uint64_t spa_load_txg_ts; /* timestamp from that ub */
	uint64_t spa_load_meta_errors; /* verify metadata err count */
	uint64_t spa_load_data_errors; /* verify data err count */
	uint64_t spa_verify_min_txg; /* start txg of verify scrub */
	kmutex_t spa_errlog_lock; /* error log lock */
	uint64_t spa_errlog_last; /* last error log object */
	uint64_t spa_errlog_scrub; /* scrub error log object */
	kmutex_t spa_errlist_lock; /* error list/ereport lock */
	avl_tree_t spa_errlist_last; /* last error list */
	avl_tree_t spa_errlist_scrub; /* scrub error list */
	avl_tree_t spa_errlist_healed; /* list of healed blocks */
	uint64_t spa_deflate; /* should we deflate? */
	uint64_t spa_history; /* history object */
	kmutex_t spa_history_lock; /* history lock */
	vdev_t *spa_pending_vdev; /* pending vdev additions */
	kmutex_t spa_props_lock; /* property lock */
	uint64_t spa_pool_props_object; /* object for properties */
	uint64_t spa_bootfs; /* default boot filesystem */
	uint64_t spa_failmode; /* failure mode for the pool */
	uint64_t spa_deadman_failmode; /* failure mode for deadman */
	uint64_t spa_delegation; /* delegation on/off */
	list_t spa_config_list; /* previous cache file(s) */
	/* per-CPU array of root of async I/O: */
	zio_t **spa_async_zio_root;
	zio_t *spa_suspend_zio_root; /* root of all suspended I/O */
	zio_t *spa_txg_zio[TXG_SIZE]; /* spa_sync() waits for this */
	kmutex_t spa_suspend_lock; /* protects suspend_zio_root */
	kcondvar_t spa_suspend_cv; /* notification of resume */
	zio_suspend_reason_t spa_suspended; /* pool is suspended */
	uint8_t spa_claiming; /* pool is doing zil_claim() */
	boolean_t spa_is_root; /* pool is root */
	int spa_minref; /* num refs when first opened */
	spa_mode_t spa_mode; /* SPA_MODE_{READ|WRITE} */
	boolean_t spa_read_spacemaps; /* spacemaps available if ro */
	spa_log_state_t spa_log_state; /* log state */
	uint64_t spa_autoexpand; /* lun expansion on/off */
	ddt_t *spa_ddt[ZIO_CHECKSUM_FUNCTIONS]; /* in-core DDTs */
	uint64_t spa_ddt_stat_object; /* DDT statistics */
	uint64_t spa_dedup_dspace; /* Cache get_dedup_dspace() */
	uint64_t spa_dedup_checksum; /* default dedup checksum */
	uint64_t spa_dspace; /* dspace in normal class */
	struct brt *spa_brt; /* in-core BRT */
	kmutex_t spa_vdev_top_lock; /* dueling offline/remove */
	kmutex_t spa_proc_lock; /* protects spa_proc* */
	kcondvar_t spa_proc_cv; /* spa_proc_state transitions */
	spa_proc_state_t spa_proc_state; /* see definition */
	proc_t *spa_proc; /* "zpool-poolname" process */
	uintptr_t spa_did; /* if procp != p0, did of t1 */
	boolean_t spa_autoreplace; /* autoreplace set in open */
	int spa_vdev_locks; /* locks grabbed */
	uint64_t spa_creation_version; /* version at pool creation */
	uint64_t spa_prev_software_version; /* See ub_software_version */
	uint64_t spa_feat_for_write_obj; /* required to write to pool */
	uint64_t spa_feat_for_read_obj; /* required to read from pool */
	uint64_t spa_feat_desc_obj; /* Feature descriptions */
	uint64_t spa_feat_enabled_txg_obj; /* Feature enabled txg */
	kmutex_t spa_feat_stats_lock; /* protects spa_feat_stats */
	nvlist_t *spa_feat_stats; /* Cache of enabled features */
	/* cache feature refcounts */
	uint64_t spa_feat_refcount_cache[SPA_FEATURES];
	taskqid_t spa_deadman_tqid; /* Task id */
	uint64_t spa_deadman_calls; /* number of deadman calls */
	hrtime_t spa_sync_starttime; /* starting time of spa_sync */
	uint64_t spa_deadman_synctime; /* deadman sync expiration */
	uint64_t spa_deadman_ziotime; /* deadman zio expiration */
	uint64_t spa_all_vdev_zaps; /* ZAP of per-vd ZAP obj #s */
	spa_avz_action_t spa_avz_action; /* destroy/rebuild AVZ? */
	uint64_t spa_autotrim; /* automatic background trim? */
	uint64_t spa_errata; /* errata issues detected */
	spa_stats_t spa_stats; /* assorted spa statistics */
	spa_keystore_t spa_keystore; /* loaded crypto keys */

	/* arc_memory_throttle() parameters during low memory condition */
	uint64_t spa_lowmem_page_load; /* memory load during txg */
	uint64_t spa_lowmem_last_txg; /* txg window start */

	hrtime_t spa_ccw_fail_time; /* Conf cache write fail time */
	taskq_t *spa_zvol_taskq; /* Taskq for minor management */
	taskq_t *spa_metaslab_taskq; /* Taskq for metaslab preload */
	taskq_t *spa_prefetch_taskq; /* Taskq for prefetch threads */
	taskq_t *spa_upgrade_taskq; /* Taskq for upgrade jobs */
	uint64_t spa_multihost; /* multihost aware (mmp) */
	mmp_thread_t spa_mmp; /* multihost mmp thread */
	list_t spa_leaf_list; /* list of leaf vdevs */
	uint64_t spa_leaf_list_gen; /* track leaf_list changes */
	uint32_t spa_hostid; /* cached system hostid */

	/* synchronization for threads in spa_wait */
	kmutex_t spa_activities_lock;
	kcondvar_t spa_activities_cv;
	kcondvar_t spa_waiters_cv;
	int spa_waiters; /* number of waiting threads */
	boolean_t spa_waiters_cancel; /* waiters should return */

	char *spa_compatibility; /* compatibility file(s) */
	/*
	 * spa_refcount & spa_config_lock must be the last elements
	 * because zfs_refcount_t changes size based on compilation options.
	 * In order for the MDB module to function correctly, the other
	 * fields must remain in the same location.
	 */
	spa_config_lock_t spa_config_lock[SCL_LOCKS]; /* config changes */
	zfs_refcount_t spa_refcount; /* number of opens */
};
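
/*
 * Hedged sketch for the per-allocator comment above: one hypothetical way a
 * caller could map a key to one of the spa_allocs entries.  The real
 * selection policy lives in the allocation code, not in this header, and
 * the helper name is made up for illustration.
 */
static inline spa_alloc_t *
spa_allocator_for(spa_t *spa, uint64_t key)
{
	return (&spa->spa_allocs[key % (uint64_t)spa->spa_alloc_count]);
}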
extern char *spa_config_path;
extern const char *zfs_deadman_failmode;
extern uint_t spa_slop_shift;
extern void spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent, zio_t *zio);
extern void spa_taskq_dispatch_sync(spa_t *, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags);
extern void spa_load_spares(spa_t *spa);
extern void spa_load_l2cache(spa_t *spa);
extern sysevent_t *spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl,
    const char *name);
extern void spa_event_post(sysevent_t *ev);
extern int param_set_deadman_failmode_common(const char *val);
extern void spa_set_deadman_synctime(hrtime_t ns);
extern void spa_set_deadman_ziotime(hrtime_t ns);
extern const char *spa_history_zone(void);
extern const char *zfs_active_allocator;
extern int param_set_active_allocator_common(const char *val);
#endif	/* _SYS_SPA_IMPL_H */