/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 */

#ifndef _SYS_VDEV_IMPL_H
#define _SYS_VDEV_IMPL_H

#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/metaslab.h>
#include <sys/nvpair.h>
#include <sys/space_map.h>
#include <sys/vdev.h>
#include <sys/dkio.h>
#include <sys/uberblock_impl.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_removal.h>

#ifdef  __cplusplus
extern "C" {
#endif

/*
 * Virtual device descriptors.
 *
 * All storage pool operations go through the virtual device framework,
 * which provides data replication and I/O scheduling.
 */

/*
 * Forward declarations that lots of things need.
 */
typedef struct vdev_queue vdev_queue_t;
typedef struct vdev_cache vdev_cache_t;
typedef struct vdev_cache_entry vdev_cache_entry_t;
struct abd;

extern int zfs_vdev_queue_depth_pct;
extern uint32_t zfs_vdev_async_write_max_active;

/*
 * Virtual device operations
 */
typedef int     vdev_open_func_t(vdev_t *vd, uint64_t *size, uint64_t *max_size,
    uint64_t *logical_ashift, uint64_t *physical_ashift);
typedef void    vdev_close_func_t(vdev_t *vd);
typedef uint64_t vdev_asize_func_t(vdev_t *vd, uint64_t psize);
typedef void    vdev_io_start_func_t(zio_t *zio);
typedef void    vdev_io_done_func_t(zio_t *zio);
typedef void    vdev_state_change_func_t(vdev_t *vd, int, int);
typedef boolean_t vdev_need_resilver_func_t(vdev_t *vd, uint64_t, size_t);
typedef void    vdev_hold_func_t(vdev_t *vd);
typedef void    vdev_rele_func_t(vdev_t *vd);

typedef void    vdev_remap_cb_t(uint64_t inner_offset, vdev_t *vd,
    uint64_t offset, uint64_t size, void *arg);
typedef void    vdev_remap_func_t(vdev_t *vd, uint64_t offset, uint64_t size,
    vdev_remap_cb_t callback, void *arg);

typedef struct vdev_ops {
        vdev_open_func_t                *vdev_op_open;
        vdev_close_func_t               *vdev_op_close;
        vdev_asize_func_t               *vdev_op_asize;
        vdev_io_start_func_t            *vdev_op_io_start;
        vdev_io_done_func_t             *vdev_op_io_done;
        vdev_state_change_func_t        *vdev_op_state_change;
        vdev_need_resilver_func_t       *vdev_op_need_resilver;
        vdev_hold_func_t                *vdev_op_hold;
        vdev_rele_func_t                *vdev_op_rele;
        vdev_remap_func_t               *vdev_op_remap;
        char                            vdev_op_type[16];
        boolean_t                       vdev_op_leaf;
} vdev_ops_t;

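/*
 * Illustrative sketch only (the "example" names below are hypothetical):
 * a vdev type registers itself by providing a vdev_ops_t whose entries
 * point at its implementation functions.  Optional hooks such as
 * vdev_op_state_change, vdev_op_need_resilver and vdev_op_remap may be
 * left NULL by types that do not support them.  See the tables declared
 * near the end of this header (vdev_file_ops, vdev_mirror_ops, ...) for
 * the real instances.
 *
 *      vdev_ops_t vdev_example_ops = {
 *              vdev_example_open,
 *              vdev_example_close,
 *              vdev_default_asize,     <- common helper declared below
 *              vdev_example_io_start,
 *              vdev_example_io_done,
 *              NULL,                   <- vdev_op_state_change
 *              NULL,                   <- vdev_op_need_resilver
 *              vdev_example_hold,
 *              vdev_example_rele,
 *              NULL,                   <- vdev_op_remap
 *              "example",              <- vdev_op_type
 *              B_TRUE                  <- vdev_op_leaf
 *      };
 */
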
/*
 * Virtual device properties
 */
struct vdev_cache_entry {
        struct abd      *ve_abd;
        uint64_t        ve_offset;
        uint64_t        ve_lastused;
        avl_node_t      ve_offset_node;
        avl_node_t      ve_lastused_node;
        uint32_t        ve_hits;
        uint16_t        ve_missed_update;
        zio_t           *ve_fill_io;
};

struct vdev_cache {
        avl_tree_t      vc_offset_tree;
        avl_tree_t      vc_lastused_tree;
        kmutex_t        vc_lock;
};

typedef struct vdev_queue_class {
        uint32_t        vqc_active;

        /*
         * Sorted by offset or timestamp, depending on whether the queue
         * is LBA-ordered or FIFO.
         */
        avl_tree_t      vqc_queued_tree;
} vdev_queue_class_t;

struct vdev_queue {
        vdev_t          *vq_vdev;
        vdev_queue_class_t vq_class[ZIO_PRIORITY_NUM_QUEUEABLE];
        avl_tree_t      vq_active_tree;
        avl_tree_t      vq_read_offset_tree;
        avl_tree_t      vq_write_offset_tree;
        uint64_t        vq_last_offset;
        hrtime_t        vq_io_complete_ts; /* time last i/o completed */
        kmutex_t        vq_lock;
        uint64_t        vq_lastoffset;
};
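
/*
 * Rough sketch of how the queue is used (vdev_queue.c is authoritative):
 * each queued zio sits in the vqc_queued_tree of the class matching its
 * priority and, for reads and writes, also in vq_read_offset_tree or
 * vq_write_offset_tree, which keep LBA order so that adjacent I/Os can
 * be aggregated.  Once issued to the device a zio moves to
 * vq_active_tree, and vq_io_complete_ts records when the most recent
 * I/O finished.
 */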

/*
 * On-disk indirect vdev state.
 *
 * An indirect vdev is described exclusively in the MOS config of a pool.
 * The config for an indirect vdev includes several fields, which are
 * accessed in memory by a vdev_indirect_config_t.
 */
typedef struct vdev_indirect_config {
        /*
         * Object (in MOS) which contains the indirect mapping. This object
         * contains an array of vdev_indirect_mapping_entry_phys_t ordered by
         * vimep_src. The bonus buffer for this object is a
         * vdev_indirect_mapping_phys_t. This object is allocated when a vdev
         * removal is initiated.
         *
         * Note that this object can be empty if none of the data on the vdev
         * has been copied yet.
         */
        uint64_t        vic_mapping_object;

        /*
         * Object (in MOS) which contains the birth times for the mapping
         * entries. This object contains an array of
         * vdev_indirect_birth_entry_phys_t sorted by vibe_offset. The bonus
         * buffer for this object is a vdev_indirect_birth_phys_t. This object
         * is allocated when a vdev removal is initiated.
         *
         * Note that this object can be empty if none of the data on the vdev
         * has been copied yet.
         */
        uint64_t        vic_births_object;

        /*
         * This is the ID of the vdev that was removed prior to this one, or
         * UINT64_MAX if there are no previously removed vdevs.
         */
        uint64_t        vic_prev_indirect_vdev;
} vdev_indirect_config_t;

/*
 * Virtual device descriptor
 */
struct vdev {
        /*
         * Common to all vdev types.
         */
        uint64_t        vdev_id;        /* child number in vdev parent  */
        uint64_t        vdev_guid;      /* unique ID for this vdev      */
        uint64_t        vdev_guid_sum;  /* self guid + all child guids  */
        uint64_t        vdev_orig_guid; /* orig. guid prior to remove   */
        uint64_t        vdev_asize;     /* allocatable device capacity  */
        uint64_t        vdev_min_asize; /* min acceptable asize         */
        uint64_t        vdev_max_asize; /* max acceptable asize         */
        uint64_t        vdev_ashift;    /* block alignment shift        */
        /*
         * Logical block alignment shift
         *
         * The smallest sized/aligned I/O supported by the device.
         */
        uint64_t        vdev_logical_ashift;
        /*
         * Physical block alignment shift
         *
         * The device supports logical I/Os with vdev_logical_ashift
         * size/alignment, but optimum performance will be achieved by
         * aligning/sizing requests to vdev_physical_ashift.  Smaller
         * requests may be inflated or incur device level read-modify-write
         * operations.
         *
         * May be 0 to indicate no preference (i.e. use vdev_logical_ashift).
         */
        uint64_t        vdev_physical_ashift;
        uint64_t        vdev_state;     /* see VDEV_STATE_* #defines    */
        uint64_t        vdev_prevstate; /* used when reopening a vdev   */
        vdev_ops_t      *vdev_ops;      /* vdev operations              */
        spa_t           *vdev_spa;      /* spa for this vdev            */
        void            *vdev_tsd;      /* type-specific data           */
        vnode_t         *vdev_name_vp;  /* vnode for pathname           */
        vnode_t         *vdev_devid_vp; /* vnode for devid              */
        vdev_t          *vdev_top;      /* top-level vdev               */
        vdev_t          *vdev_parent;   /* parent vdev                  */
        vdev_t          **vdev_child;   /* array of children            */
        uint64_t        vdev_children;  /* number of children           */
        vdev_stat_t     vdev_stat;      /* virtual device statistics    */
        boolean_t       vdev_expanding; /* expand the vdev?             */
        boolean_t       vdev_reopening; /* reopen in progress?          */
        int             vdev_open_error; /* error on last open          */
        kthread_t       *vdev_open_thread; /* thread opening children   */
        uint64_t        vdev_crtxg;     /* txg when top-level was added */

        /*
         * Top-level vdev state.
         */
        uint64_t        vdev_ms_array;  /* metaslab array object        */
        uint64_t        vdev_ms_shift;  /* metaslab size shift          */
        uint64_t        vdev_ms_count;  /* number of metaslabs          */
        metaslab_group_t *vdev_mg;      /* metaslab group               */
        metaslab_t      **vdev_ms;      /* metaslab array               */
        txg_list_t      vdev_ms_list;   /* per-txg dirty metaslab lists */
        txg_list_t      vdev_dtl_list;  /* per-txg dirty DTL lists      */
        txg_node_t      vdev_txg_node;  /* per-txg dirty vdev linkage   */
        boolean_t       vdev_remove_wanted; /* async remove wanted?     */
        boolean_t       vdev_probe_wanted; /* async probe wanted?       */
        list_node_t     vdev_config_dirty_node; /* config dirty list    */
        list_node_t     vdev_state_dirty_node; /* state dirty list      */
        uint64_t        vdev_deflate_ratio; /* deflation ratio (x512)   */
        uint64_t        vdev_islog;     /* is an intent log device      */
        uint64_t        vdev_removing;  /* device is being removed?     */
        boolean_t       vdev_ishole;    /* is a hole in the namespace   */
        kmutex_t        vdev_queue_lock; /* protects vdev_queue_depth   */
        uint64_t        vdev_top_zap;

        /* pool checkpoint related */
        space_map_t     *vdev_checkpoint_sm;    /* contains reserved blocks */

        /*
         * Values stored in the config for an indirect or removing vdev.
         */
        vdev_indirect_config_t  vdev_indirect_config;

        /*
         * The vdev_indirect_rwlock protects the vdev_indirect_mapping
         * pointer from changing on indirect vdevs (when it is condensed).
         * Note that removing (not yet indirect) vdevs have different
         * access patterns (the mapping is not accessed from open context,
         * e.g. from zio_read) and locking strategy (e.g. svr_lock).
         */
        krwlock_t vdev_indirect_rwlock;
        vdev_indirect_mapping_t *vdev_indirect_mapping;
        vdev_indirect_births_t *vdev_indirect_births;

        /*
         * In memory data structures used to manage the obsolete sm, for
         * indirect or removing vdevs.
         *
         * The vdev_obsolete_segments is the in-core record of the segments
         * that are no longer referenced anywhere in the pool (due to
         * being freed or remapped and not referenced by any snapshots).
         * During a sync, segments are added to vdev_obsolete_segments
         * via vdev_indirect_mark_obsolete(); at the end of each sync
         * pass, this is appended to vdev_obsolete_sm via
         * vdev_indirect_sync_obsolete().  The vdev_obsolete_lock
         * protects against concurrent modifications of vdev_obsolete_segments
         * from multiple zio threads.
         */
        kmutex_t        vdev_obsolete_lock;
        range_tree_t    *vdev_obsolete_segments;
        space_map_t     *vdev_obsolete_sm;

        /*
         * The queue depth parameters determine how many async writes are
         * still pending (i.e. allocated but not yet issued to disk) per
         * top-level vdev (vdev_async_write_queue_depth) and the maximum
         * allowed (vdev_max_async_write_queue_depth). These values only
         * apply to top-level vdevs.
         */
        uint64_t        vdev_async_write_queue_depth;
        uint64_t        vdev_max_async_write_queue_depth;
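
        /*
         * For context (a hedged sketch; see the allocation throttle in
         * spa_sync() and metaslab.c for the authoritative code): the
         * per-top-level queue depth limit is derived from the two
         * tunables declared near the top of this header, roughly
         *
         *      zfs_vdev_async_write_max_active *
         *          zfs_vdev_queue_depth_pct / 100
         *
         * which with the usual defaults (10 and 1000) allows on the
         * order of 100 async writes to be allocated but not yet issued
         * per top-level vdev before allocations are throttled.
         */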

        /*
         * Protects the vdev_scan_io_queue field itself as well as the
         * structure's contents (when present).
         */
        kmutex_t                        vdev_scan_io_queue_lock;
        struct dsl_scan_io_queue        *vdev_scan_io_queue;

        /*
         * Leaf vdev state.
         */
        range_tree_t    *vdev_dtl[DTL_TYPES]; /* dirty time logs        */
        space_map_t     *vdev_dtl_sm;   /* dirty time log space map     */
        txg_node_t      vdev_dtl_node;  /* per-txg dirty DTL linkage    */
        uint64_t        vdev_dtl_object; /* DTL object                  */
        uint64_t        vdev_psize;     /* physical device capacity     */
        uint64_t        vdev_wholedisk; /* true if this is a whole disk */
        uint64_t        vdev_offline;   /* persistent offline state     */
        uint64_t        vdev_faulted;   /* persistent faulted state     */
        uint64_t        vdev_degraded;  /* persistent degraded state    */
        uint64_t        vdev_removed;   /* persistent removed state     */
        uint64_t        vdev_resilver_txg; /* persistent resilvering state */
        uint64_t        vdev_nparity;   /* number of parity devices for raidz */
        char            *vdev_path;     /* vdev path (if any)           */
        char            *vdev_devid;    /* vdev devid (if any)          */
        char            *vdev_physpath; /* vdev device path (if any)    */
        char            *vdev_fru;      /* physical FRU location        */
        uint64_t        vdev_not_present; /* not present during import  */
        uint64_t        vdev_unspare;   /* unspare when resilvering done */
        boolean_t       vdev_nowritecache; /* true if flushwritecache failed */
        boolean_t       vdev_notrim;    /* true if trim failed */
        boolean_t       vdev_checkremove; /* temporary online test      */
        boolean_t       vdev_forcefault; /* force online fault          */
        boolean_t       vdev_splitting; /* split or repair in progress  */
        boolean_t       vdev_delayed_close; /* delayed device close?    */
        boolean_t       vdev_tmpoffline; /* device taken offline temporarily? */
        boolean_t       vdev_detached;  /* device detached?             */
        boolean_t       vdev_cant_read; /* vdev is failing all reads    */
        boolean_t       vdev_cant_write; /* vdev is failing all writes  */
        boolean_t       vdev_isspare;   /* was a hot spare              */
        boolean_t       vdev_isl2cache; /* was a l2cache device         */
        vdev_queue_t    vdev_queue;     /* I/O deadline schedule queue  */
        vdev_cache_t    vdev_cache;     /* physical block cache         */
        spa_aux_vdev_t  *vdev_aux;      /* for l2cache and spares vdevs */
        zio_t           *vdev_probe_zio; /* root of current probe       */
        vdev_aux_t      vdev_label_aux; /* on-disk aux state            */
        struct trim_map *vdev_trimmap;  /* map of outstanding trims     */
        uint16_t        vdev_rotation_rate; /* rotational rate of the media */
#define VDEV_RATE_UNKNOWN       0
#define VDEV_RATE_NON_ROTATING  1
        uint64_t        vdev_leaf_zap;

        /*
         * For DTrace to work in userland (libzpool) context, these fields must
         * remain at the end of the structure.  DTrace will use the kernel's
         * CTF definition for 'struct vdev', and since the size of a kmutex_t is
         * larger in userland, the offsets for the rest of the fields would be
         * incorrect.
         */
        kmutex_t        vdev_dtl_lock;  /* vdev_dtl_{map,resilver}      */
        kmutex_t        vdev_stat_lock; /* vdev_stat                    */
        kmutex_t        vdev_probe_lock; /* protects vdev_probe_zio     */
};

#define VDEV_RAIDZ_MAXPARITY    3

#define VDEV_PAD_SIZE           (8 << 10)
/* 2 padding areas (vl_pad1 and vl_pad2) to skip */
#define VDEV_SKIP_SIZE          (VDEV_PAD_SIZE * 2)
#define VDEV_PHYS_SIZE          (112 << 10)
#define VDEV_UBERBLOCK_RING     (128 << 10)

/* The largest uberblock we support is 8k. */
#define MAX_UBERBLOCK_SHIFT (13)
#define VDEV_UBERBLOCK_SHIFT(vd)        \
        MIN(MAX((vd)->vdev_top->vdev_ashift, UBERBLOCK_SHIFT), \
            MAX_UBERBLOCK_SHIFT)
#define VDEV_UBERBLOCK_COUNT(vd)        \
        (VDEV_UBERBLOCK_RING >> VDEV_UBERBLOCK_SHIFT(vd))
#define VDEV_UBERBLOCK_OFFSET(vd, n)    \
        offsetof(vdev_label_t, vl_uberblock[(n) << VDEV_UBERBLOCK_SHIFT(vd)])
#define VDEV_UBERBLOCK_SIZE(vd)         (1ULL << VDEV_UBERBLOCK_SHIFT(vd))

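/*
 * Worked example (assuming UBERBLOCK_SHIFT is 10, i.e. 1 KiB uberblocks,
 * as defined in uberblock_impl.h): a top-level vdev with vdev_ashift = 12
 * gets VDEV_UBERBLOCK_SHIFT() = MIN(MAX(12, 10), 13) = 12, so each slot in
 * the 128 KiB ring is 4 KiB and VDEV_UBERBLOCK_COUNT() is 32 per label.
 * With vdev_ashift = 9 the shift is 10 (1 KiB slots, 128 per label), and
 * larger ashifts are capped by MAX_UBERBLOCK_SHIFT (8 KiB slots, 16 per
 * label).
 */
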
typedef struct vdev_phys {
        char            vp_nvlist[VDEV_PHYS_SIZE - sizeof (zio_eck_t)];
        zio_eck_t       vp_zbt;
} vdev_phys_t;

typedef struct vdev_label {
        char            vl_pad1[VDEV_PAD_SIZE];                 /*  8K */
        char            vl_pad2[VDEV_PAD_SIZE];                 /*  8K */
        vdev_phys_t     vl_vdev_phys;                           /* 112K */
        char            vl_uberblock[VDEV_UBERBLOCK_RING];      /* 128K */
} vdev_label_t;                                                 /* 256K total */

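/*
 * Byte layout of a single label, as implied by the structure above:
 * vl_pad1 at offset 0 and vl_pad2 at 8 KiB (together VDEV_SKIP_SIZE,
 * 16 KiB), the vdev_phys_t nvlist region at 16 KiB, and the uberblock
 * ring at 128 KiB, giving sizeof (vdev_label_t) = 256 KiB.
 */
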
/*
 * vdev_dirty() flags
 */
#define VDD_METASLAB    0x01
#define VDD_DTL         0x02

/* Offset of embedded boot loader region on each label */
#define VDEV_BOOT_OFFSET        (2 * sizeof (vdev_label_t))
/*
 * Size of embedded boot loader region on each label.
 * The total size of the first two labels plus the boot area is 4MB.
 */
#define VDEV_BOOT_SIZE          (7ULL << 19)                    /* 3.5M */

/*
 * Size of label regions at the start and end of each leaf device.
 */
#define VDEV_LABEL_START_SIZE   (2 * sizeof (vdev_label_t) + VDEV_BOOT_SIZE)
#define VDEV_LABEL_END_SIZE     (2 * sizeof (vdev_label_t))
#define VDEV_LABELS             4
#define VDEV_BEST_LABEL         VDEV_LABELS

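/*
 * For reference: VDEV_BOOT_SIZE is 7 << 19 bytes = 3.5 MiB, so
 * VDEV_LABEL_START_SIZE = 2 * 256 KiB + 3.5 MiB = 4 MiB, matching the
 * "first two labels plus the boot area" note above.  Of the VDEV_LABELS
 * (4) copies kept per leaf, labels 0 and 1 sit at the front of the device
 * inside VDEV_LABEL_START_SIZE and labels 2 and 3 occupy the trailing
 * VDEV_LABEL_END_SIZE bytes.
 */
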
#define VDEV_ALLOC_LOAD         0
#define VDEV_ALLOC_ADD          1
#define VDEV_ALLOC_SPARE        2
#define VDEV_ALLOC_L2CACHE      3
#define VDEV_ALLOC_ROOTPOOL     4
#define VDEV_ALLOC_SPLIT        5
#define VDEV_ALLOC_ATTACH       6

/*
 * Allocate or free a vdev
 */
extern vdev_t *vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid,
    vdev_ops_t *ops);
extern int vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *config,
    vdev_t *parent, uint_t id, int alloctype);
extern void vdev_free(vdev_t *vd);

/*
 * Add or remove children and parents
 */
extern void vdev_add_child(vdev_t *pvd, vdev_t *cvd);
extern void vdev_remove_child(vdev_t *pvd, vdev_t *cvd);
extern void vdev_compact_children(vdev_t *pvd);
extern vdev_t *vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops);
extern void vdev_remove_parent(vdev_t *cvd);

/*
 * vdev sync load and sync
 */
extern boolean_t vdev_log_state_valid(vdev_t *vd);
extern int vdev_load(vdev_t *vd);
extern int vdev_dtl_load(vdev_t *vd);
extern void vdev_sync(vdev_t *vd, uint64_t txg);
extern void vdev_sync_done(vdev_t *vd, uint64_t txg);
extern void vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg);
extern void vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg);

/*
 * Available vdev types.
 */
extern vdev_ops_t vdev_root_ops;
extern vdev_ops_t vdev_mirror_ops;
extern vdev_ops_t vdev_replacing_ops;
extern vdev_ops_t vdev_raidz_ops;
#ifdef _KERNEL
extern vdev_ops_t vdev_geom_ops;
#else
extern vdev_ops_t vdev_disk_ops;
#endif
extern vdev_ops_t vdev_file_ops;
extern vdev_ops_t vdev_missing_ops;
extern vdev_ops_t vdev_hole_ops;
extern vdev_ops_t vdev_spare_ops;
extern vdev_ops_t vdev_indirect_ops;

/*
 * Common size functions
 */
extern uint64_t vdev_default_asize(vdev_t *vd, uint64_t psize);
extern uint64_t vdev_get_min_asize(vdev_t *vd);
extern void vdev_set_min_asize(vdev_t *vd);

/*
 * Global variables
 */
extern int vdev_standard_sm_blksz;
/* zdb uses this tunable, so it must be declared here to make lint happy. */
extern int zfs_vdev_cache_size;
extern uint_t zfs_geom_probe_vdev_key;

/*
 * Functions from vdev_indirect.c
 */
extern void vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx);
extern boolean_t vdev_indirect_should_condense(vdev_t *vd);
extern void spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx);
extern int vdev_obsolete_sm_object(vdev_t *vd);
extern boolean_t vdev_obsolete_counts_are_precise(vdev_t *vd);

#ifdef illumos
/*
 * Other miscellaneous functions
 */
int vdev_checkpoint_sm_object(vdev_t *vd);

/*
 * The vdev_buf_t is used to translate between zio_t and buf_t, and back again.
 */
typedef struct vdev_buf {
        buf_t   vb_buf;         /* buffer that describes the io */
        zio_t   *vb_io;         /* pointer back to the original zio_t */
} vdev_buf_t;
#endif

#ifdef  __cplusplus
}
#endif

#endif  /* _SYS_VDEV_IMPL_H */