/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Toomas Soome <tsoome@me.com>
 * Copyright 2017 Joyent, Inc.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dir.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/space_reftree.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
#include <sys/abd.h>
#include <sys/trim_map.h>

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vdev, CTLFLAG_RW, 0, "ZFS VDEV");

/*
 * Virtual device management.
 */

/*
 * The limit for ZFS to automatically increase a top-level vdev's ashift
 * from logical ashift to physical ashift.
 *
 * Example: one or more 512B emulation child vdevs
 *          child->vdev_ashift = 9 (512 bytes)
 *          child->vdev_physical_ashift = 12 (4096 bytes)
 *          zfs_max_auto_ashift = 11 (2048 bytes)
 *          zfs_min_auto_ashift = 9 (512 bytes)
 *
 * On pool creation or the addition of a new top-level vdev, ZFS will
 * increase the ashift of the top-level vdev to 2048 as limited by
 * zfs_max_auto_ashift.
 *
 * Example: one or more 512B emulation child vdevs
 *          child->vdev_ashift = 9 (512 bytes)
 *          child->vdev_physical_ashift = 12 (4096 bytes)
 *          zfs_max_auto_ashift = 13 (8192 bytes)
 *          zfs_min_auto_ashift = 9 (512 bytes)
 *
 * On pool creation or the addition of a new top-level vdev, ZFS will
 * increase the ashift of the top-level vdev to 4096 to match the
 * max vdev_physical_ashift.
 *
 * Example: one or more 512B emulation child vdevs
 *          child->vdev_ashift = 9 (512 bytes)
 *          child->vdev_physical_ashift = 9 (512 bytes)
 *          zfs_max_auto_ashift = 13 (8192 bytes)
 *          zfs_min_auto_ashift = 12 (4096 bytes)
 *
 * On pool creation or the addition of a new top-level vdev, ZFS will
 * increase the ashift of the top-level vdev to 4096 to match the
 * zfs_min_auto_ashift.
 */
static uint64_t zfs_max_auto_ashift = SPA_MAXASHIFT;
static uint64_t zfs_min_auto_ashift = SPA_MINASHIFT;

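/*
 * Taken together, the examples above amount to clamping the reported
 * physical ashift into [zfs_min_auto_ashift, zfs_max_auto_ashift] when
 * a top-level vdev is created.  A minimal sketch of that rule (an
 * illustration consistent with the examples above, not the exact
 * in-tree logic, which lives in vdev_open()):
 *
 *	ashift = MAX(child_logical_ashift, MAX(zfs_min_auto_ashift,
 *	    MIN(child_physical_ashift, zfs_max_auto_ashift)));
 *
 * For the three examples this yields ashift 11, 12 and 12.
 */
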
static int
sysctl_vfs_zfs_max_auto_ashift(SYSCTL_HANDLER_ARGS)
{
        uint64_t val;
        int err;

        val = zfs_max_auto_ashift;
        err = sysctl_handle_64(oidp, &val, 0, req);
        if (err != 0 || req->newptr == NULL)
                return (err);

        if (val > SPA_MAXASHIFT || val < zfs_min_auto_ashift)
                return (EINVAL);

        zfs_max_auto_ashift = val;

        return (0);
}
SYSCTL_PROC(_vfs_zfs, OID_AUTO, max_auto_ashift,
    CTLTYPE_U64 | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(uint64_t),
    sysctl_vfs_zfs_max_auto_ashift, "QU",
    "Max ashift used when optimizing for logical -> physical sector size on "
    "new top-level vdevs.");

static int
sysctl_vfs_zfs_min_auto_ashift(SYSCTL_HANDLER_ARGS)
{
        uint64_t val;
        int err;

        val = zfs_min_auto_ashift;
        err = sysctl_handle_64(oidp, &val, 0, req);
        if (err != 0 || req->newptr == NULL)
                return (err);

        if (val < SPA_MINASHIFT || val > zfs_max_auto_ashift)
                return (EINVAL);

        zfs_min_auto_ashift = val;

        return (0);
}
SYSCTL_PROC(_vfs_zfs, OID_AUTO, min_auto_ashift,
    CTLTYPE_U64 | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(uint64_t),
    sysctl_vfs_zfs_min_auto_ashift, "QU",
    "Min ashift used when creating new top-level vdevs.");

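/*
 * Example usage of the two tunables above from the command line
 * (illustrative; the handlers reject any setting that would leave
 * min_auto_ashift greater than max_auto_ashift):
 *
 *	# sysctl vfs.zfs.min_auto_ashift=12
 *	# sysctl vfs.zfs.max_auto_ashift=13
 */
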
static vdev_ops_t *vdev_ops_table[] = {
        &vdev_root_ops,
        &vdev_raidz_ops,
        &vdev_mirror_ops,
        &vdev_replacing_ops,
        &vdev_spare_ops,
#ifdef _KERNEL
        &vdev_geom_ops,
#else
        &vdev_disk_ops,
#endif
        &vdev_file_ops,
        &vdev_missing_ops,
        &vdev_hole_ops,
        &vdev_indirect_ops,
        NULL
};


/* maximum number of metaslabs per top-level vdev */
int vdev_max_ms_count = 200;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, max_ms_count, CTLFLAG_RDTUN,
    &vdev_max_ms_count, 0,
    "Maximum number of metaslabs per top-level vdev");

/* minimum number of metaslabs per top-level vdev */
int vdev_min_ms_count = 16;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, min_ms_count, CTLFLAG_RDTUN,
    &vdev_min_ms_count, 0,
    "Minimum number of metaslabs per top-level vdev");

/* see comment in vdev_metaslab_set_size() */
int vdev_default_ms_shift = 29;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, default_ms_shift, CTLFLAG_RDTUN,
    &vdev_default_ms_shift, 0,
    "Shift between vdev size and number of metaslabs");

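/*
 * Worked example of how the three tunables above interact, assuming
 * the sizing rule in vdev_metaslab_set_size(): with the default 2^29
 * (512MB) metaslab size, a 1TB top-level vdev would get 2048
 * metaslabs, above vdev_max_ms_count (200), so the metaslab size is
 * grown until the count drops to roughly 200.  Conversely, a 4GB vdev
 * would get only 8 metaslabs, below vdev_min_ms_count (16), so the
 * metaslab size is shrunk instead.
 */
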
boolean_t vdev_validate_skip = B_FALSE;

/*
 * Since the DTL space map of a vdev is not expected to have a lot of
 * entries, we default its block size to 4K.
 */
int vdev_dtl_sm_blksz = (1 << 12);
SYSCTL_INT(_vfs_zfs, OID_AUTO, dtl_sm_blksz, CTLFLAG_RDTUN,
    &vdev_dtl_sm_blksz, 0,
    "Block size for DTL space map.  Power of 2 greater than or equal to 4096.");

/*
 * vdev-wide space maps that have lots of entries written to them at
 * the end of each transaction can benefit from a higher I/O bandwidth
 * (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
 */
int vdev_standard_sm_blksz = (1 << 17);
SYSCTL_INT(_vfs_zfs, OID_AUTO, standard_sm_blksz, CTLFLAG_RDTUN,
    &vdev_standard_sm_blksz, 0,
    "Block size for standard space map.  Power of 2 greater than or equal to 4096.");

/*PRINTFLIKE2*/
void
vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
{
        va_list adx;
        char buf[256];

        va_start(adx, fmt);
        (void) vsnprintf(buf, sizeof (buf), fmt, adx);
        va_end(adx);

        if (vd->vdev_path != NULL) {
                zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
                    vd->vdev_path, buf);
        } else {
                zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
                    vd->vdev_ops->vdev_op_type,
                    (u_longlong_t)vd->vdev_id,
                    (u_longlong_t)vd->vdev_guid, buf);
        }
}

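/*
 * For illustration, a call such as the one used later in this file,
 *
 *	vdev_dbgmsg(vd, "metaslab_init failed [error=%d]", error);
 *
 * logs "disk vdev '/dev/da0': metaslab_init failed [error=5]" when a
 * path is set, or "raidz-1 vdev (guid 42): ..." for pathless interior
 * vdevs (the device name and guid here are hypothetical).
 */
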
void
vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
{
        char state[20];

        if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
                zfs_dbgmsg("%*svdev %u: %s", indent, "", vd->vdev_id,
                    vd->vdev_ops->vdev_op_type);
                return;
        }

        switch (vd->vdev_state) {
        case VDEV_STATE_UNKNOWN:
                (void) snprintf(state, sizeof (state), "unknown");
                break;
        case VDEV_STATE_CLOSED:
                (void) snprintf(state, sizeof (state), "closed");
                break;
        case VDEV_STATE_OFFLINE:
                (void) snprintf(state, sizeof (state), "offline");
                break;
        case VDEV_STATE_REMOVED:
                (void) snprintf(state, sizeof (state), "removed");
                break;
        case VDEV_STATE_CANT_OPEN:
                (void) snprintf(state, sizeof (state), "can't open");
                break;
        case VDEV_STATE_FAULTED:
                (void) snprintf(state, sizeof (state), "faulted");
                break;
        case VDEV_STATE_DEGRADED:
                (void) snprintf(state, sizeof (state), "degraded");
                break;
        case VDEV_STATE_HEALTHY:
                (void) snprintf(state, sizeof (state), "healthy");
                break;
        default:
                (void) snprintf(state, sizeof (state), "<state %u>",
                    (uint_t)vd->vdev_state);
        }

        zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
            "", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
            vd->vdev_islog ? " (log)" : "",
            (u_longlong_t)vd->vdev_guid,
            vd->vdev_path ? vd->vdev_path : "N/A", state);

        for (uint64_t i = 0; i < vd->vdev_children; i++)
                vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
}

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
        vdev_ops_t *ops, **opspp;

        for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
                if (strcmp(ops->vdev_op_type, type) == 0)
                        break;

        return (ops);
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
        uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
        uint64_t csize;

        for (int c = 0; c < vd->vdev_children; c++) {
                csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
                asize = MAX(asize, csize);
        }

        return (asize);
}

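/*
 * For illustration: with a top-level ashift of 12 (4KB sectors), a
 * psize of 1000000 bytes is rounded up by P2ROUNDUP to
 * 245 * 4096 = 1003520 bytes; a mirror then returns the largest such
 * value across its children.
 */
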
/*
 * Get the minimum allocatable size. We define the allocatable size as
 * the vdev's asize rounded to the nearest metaslab. This allows us to
 * replace or attach devices which don't have the same physical size but
 * can still satisfy the same number of allocations.
 */
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
        vdev_t *pvd = vd->vdev_parent;

        /*
         * If our parent is NULL (inactive spare or cache) or is the root,
         * just return our own asize.
         */
        if (pvd == NULL)
                return (vd->vdev_asize);

        /*
         * The top-level vdev just returns the allocatable size rounded
         * to the nearest metaslab.
         */
        if (vd == vd->vdev_top)
                return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));

        /*
         * The allocatable space for a raidz vdev is N * sizeof(smallest child),
         * so each child must provide at least 1/Nth of its asize.
         */
        if (pvd->vdev_ops == &vdev_raidz_ops)
                return ((pvd->vdev_min_asize + pvd->vdev_children - 1) /
                    pvd->vdev_children);

        return (pvd->vdev_min_asize);
}

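/*
 * For illustration: a raidz parent with 4 children and a min_asize of
 * 1TB requires each child to provide at least 1TB / 4 = 256GB; the
 * "+ pvd->vdev_children - 1" above simply rounds up when the division
 * is not exact.
 */
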
void
vdev_set_min_asize(vdev_t *vd)
{
        vd->vdev_min_asize = vdev_get_min_asize(vd);

        for (int c = 0; c < vd->vdev_children; c++)
                vdev_set_min_asize(vd->vdev_child[c]);
}

vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
        vdev_t *rvd = spa->spa_root_vdev;

        ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

        if (vdev < rvd->vdev_children) {
                ASSERT(rvd->vdev_child[vdev] != NULL);
                return (rvd->vdev_child[vdev]);
        }

        return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
        vdev_t *mvd;

        if (vd->vdev_guid == guid)
                return (vd);

        for (int c = 0; c < vd->vdev_children; c++)
                if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
                    NULL)
                        return (mvd);

        return (NULL);
}

static int
vdev_count_leaves_impl(vdev_t *vd)
{
        int n = 0;

        if (vd->vdev_ops->vdev_op_leaf)
                return (1);

        for (int c = 0; c < vd->vdev_children; c++)
                n += vdev_count_leaves_impl(vd->vdev_child[c]);

        return (n);
}

int
vdev_count_leaves(spa_t *spa)
{
        return (vdev_count_leaves_impl(spa->spa_root_vdev));
}

void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
        size_t oldsize, newsize;
        uint64_t id = cvd->vdev_id;
        vdev_t **newchild;
        spa_t *spa = cvd->vdev_spa;

        ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
        ASSERT(cvd->vdev_parent == NULL);

        cvd->vdev_parent = pvd;

        if (pvd == NULL)
                return;

        ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

        oldsize = pvd->vdev_children * sizeof (vdev_t *);
        pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
        newsize = pvd->vdev_children * sizeof (vdev_t *);

        newchild = kmem_zalloc(newsize, KM_SLEEP);
        if (pvd->vdev_child != NULL) {
                bcopy(pvd->vdev_child, newchild, oldsize);
                kmem_free(pvd->vdev_child, oldsize);
        }

        pvd->vdev_child = newchild;
        pvd->vdev_child[id] = cvd;

        cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
        ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

        /*
         * Walk up all ancestors to update guid sum.
         */
        for (; pvd != NULL; pvd = pvd->vdev_parent)
                pvd->vdev_guid_sum += cvd->vdev_guid_sum;
}

void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
        int c;
        uint_t id = cvd->vdev_id;

        ASSERT(cvd->vdev_parent == pvd);

        if (pvd == NULL)
                return;

        ASSERT(id < pvd->vdev_children);
        ASSERT(pvd->vdev_child[id] == cvd);

        pvd->vdev_child[id] = NULL;
        cvd->vdev_parent = NULL;

        for (c = 0; c < pvd->vdev_children; c++)
                if (pvd->vdev_child[c])
                        break;

        if (c == pvd->vdev_children) {
                kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
                pvd->vdev_child = NULL;
                pvd->vdev_children = 0;
        }

        /*
         * Walk up all ancestors to update guid sum.
         */
        for (; pvd != NULL; pvd = pvd->vdev_parent)
                pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
}

/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
        vdev_t **newchild, *cvd;
        int oldc = pvd->vdev_children;
        int newc;

        ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

        for (int c = newc = 0; c < oldc; c++)
                if (pvd->vdev_child[c])
                        newc++;

        newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);

        for (int c = newc = 0; c < oldc; c++) {
                if ((cvd = pvd->vdev_child[c]) != NULL) {
                        newchild[newc] = cvd;
                        cvd->vdev_id = newc++;
                }
        }

        kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
        pvd->vdev_child = newchild;
        pvd->vdev_children = newc;
}

/*
 * Allocate and minimally initialize a vdev_t.
 */
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
        vdev_t *vd;
        vdev_indirect_config_t *vic;

        vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
        vic = &vd->vdev_indirect_config;

        if (spa->spa_root_vdev == NULL) {
                ASSERT(ops == &vdev_root_ops);
                spa->spa_root_vdev = vd;
                spa->spa_load_guid = spa_generate_guid(NULL);
        }

        if (guid == 0 && ops != &vdev_hole_ops) {
                if (spa->spa_root_vdev == vd) {
                        /*
                         * The root vdev's guid will also be the pool guid,
                         * which must be unique among all pools.
                         */
                        guid = spa_generate_guid(NULL);
                } else {
                        /*
                         * Any other vdev's guid must be unique within the pool.
                         */
                        guid = spa_generate_guid(spa);
                }
                ASSERT(!spa_guid_exists(spa_guid(spa), guid));
        }

        vd->vdev_spa = spa;
        vd->vdev_id = id;
        vd->vdev_guid = guid;
        vd->vdev_guid_sum = guid;
        vd->vdev_ops = ops;
        vd->vdev_state = VDEV_STATE_CLOSED;
        vd->vdev_ishole = (ops == &vdev_hole_ops);
        vic->vic_prev_indirect_vdev = UINT64_MAX;

        rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
        mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
        vd->vdev_obsolete_segments = range_tree_create(NULL, NULL);

        mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&vd->vdev_queue_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL);

        for (int t = 0; t < DTL_TYPES; t++) {
                vd->vdev_dtl[t] = range_tree_create(NULL, NULL);
        }
        txg_list_create(&vd->vdev_ms_list, spa,
            offsetof(struct metaslab, ms_txg_node));
        txg_list_create(&vd->vdev_dtl_list, spa,
            offsetof(struct vdev, vdev_dtl_node));
        vd->vdev_stat.vs_timestamp = gethrtime();
        vdev_queue_init(vd);
        vdev_cache_init(vd);

        return (vd);
}

/*
 * Allocate a new vdev.  The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
        vdev_ops_t *ops;
        char *type;
        uint64_t guid = 0, islog, nparity;
        vdev_t *vd;
        vdev_indirect_config_t *vic;

        ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
                return (SET_ERROR(EINVAL));

        if ((ops = vdev_getops(type)) == NULL)
                return (SET_ERROR(EINVAL));

        /*
         * If this is a load, get the vdev guid from the nvlist.
         * Otherwise, vdev_alloc_common() will generate one for us.
         */
        if (alloctype == VDEV_ALLOC_LOAD) {
                uint64_t label_id;

                if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
                    label_id != id)
                        return (SET_ERROR(EINVAL));

                if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
                        return (SET_ERROR(EINVAL));
        } else if (alloctype == VDEV_ALLOC_SPARE) {
                if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
                        return (SET_ERROR(EINVAL));
        } else if (alloctype == VDEV_ALLOC_L2CACHE) {
                if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
                        return (SET_ERROR(EINVAL));
        } else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
                if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
                        return (SET_ERROR(EINVAL));
        }

        /*
         * The first allocated vdev must be of type 'root'.
         */
        if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
                return (SET_ERROR(EINVAL));

        /*
         * Determine whether we're a log vdev.
         */
        islog = 0;
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
        if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
                return (SET_ERROR(ENOTSUP));

        if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
                return (SET_ERROR(ENOTSUP));

        /*
         * Set the nparity property for RAID-Z vdevs.
         */
        nparity = -1ULL;
        if (ops == &vdev_raidz_ops) {
                if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
                    &nparity) == 0) {
                        if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
                                return (SET_ERROR(EINVAL));
                        /*
                         * Previous versions could only support 1 or 2 parity
                         * devices.
                         */
                        if (nparity > 1 &&
                            spa_version(spa) < SPA_VERSION_RAIDZ2)
                                return (SET_ERROR(ENOTSUP));
                        if (nparity > 2 &&
                            spa_version(spa) < SPA_VERSION_RAIDZ3)
                                return (SET_ERROR(ENOTSUP));
                } else {
                        /*
                         * We require the parity to be specified for SPAs that
                         * support multiple parity levels.
                         */
                        if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
                                return (SET_ERROR(EINVAL));
                        /*
                         * Otherwise, we default to 1 parity device for RAID-Z.
                         */
                        nparity = 1;
                }
        } else {
                nparity = 0;
        }
        ASSERT(nparity != -1ULL);

        vd = vdev_alloc_common(spa, id, guid, ops);
        vic = &vd->vdev_indirect_config;

        vd->vdev_islog = islog;
        vd->vdev_nparity = nparity;

        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
                vd->vdev_path = spa_strdup(vd->vdev_path);
        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
                vd->vdev_devid = spa_strdup(vd->vdev_devid);
        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
            &vd->vdev_physpath) == 0)
                vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
                vd->vdev_fru = spa_strdup(vd->vdev_fru);

        /*
         * Set the whole_disk property.  If it's not specified, leave the value
         * as -1.
         */
        if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
            &vd->vdev_wholedisk) != 0)
                vd->vdev_wholedisk = -1ULL;

        ASSERT0(vic->vic_mapping_object);
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
            &vic->vic_mapping_object);
        ASSERT0(vic->vic_births_object);
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
            &vic->vic_births_object);
        ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
            &vic->vic_prev_indirect_vdev);

        /*
         * Look for the 'not present' flag.  This will only be set if the device
         * was not present at the time of import.
         */
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
            &vd->vdev_not_present);

        /*
         * Get the alignment requirement.
         */
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

        /*
         * Retrieve the vdev creation time.
         */
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
            &vd->vdev_crtxg);

        /*
         * If we're a top-level vdev, try to load the allocation parameters.
         */
        if (parent && !parent->vdev_parent &&
            (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
                    &vd->vdev_ms_array);
                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
                    &vd->vdev_ms_shift);
                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
                    &vd->vdev_asize);
                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
                    &vd->vdev_removing);
                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
                    &vd->vdev_top_zap);
        } else {
                ASSERT0(vd->vdev_top_zap);
        }

        if (parent && !parent->vdev_parent && alloctype != VDEV_ALLOC_ATTACH) {
                ASSERT(alloctype == VDEV_ALLOC_LOAD ||
                    alloctype == VDEV_ALLOC_ADD ||
                    alloctype == VDEV_ALLOC_SPLIT ||
                    alloctype == VDEV_ALLOC_ROOTPOOL);
                vd->vdev_mg = metaslab_group_create(islog ?
                    spa_log_class(spa) : spa_normal_class(spa), vd);
        }

        if (vd->vdev_ops->vdev_op_leaf &&
            (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
                (void) nvlist_lookup_uint64(nv,
                    ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
        } else {
                ASSERT0(vd->vdev_leaf_zap);
        }

        /*
         * If we're a leaf vdev, try to load the DTL object and other state.
         */

        if (vd->vdev_ops->vdev_op_leaf &&
            (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
            alloctype == VDEV_ALLOC_ROOTPOOL)) {
                if (alloctype == VDEV_ALLOC_LOAD) {
                        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
                            &vd->vdev_dtl_object);
                        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
                            &vd->vdev_unspare);
                }

                if (alloctype == VDEV_ALLOC_ROOTPOOL) {
                        uint64_t spare = 0;

                        if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
                            &spare) == 0 && spare)
                                spa_spare_add(vd);
                }

                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
                    &vd->vdev_offline);

                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
                    &vd->vdev_resilver_txg);

                /*
                 * When importing a pool, we want to ignore the persistent fault
                 * state, as the diagnosis made on another system may not be
                 * valid in the current context.  Local vdevs will
                 * remain in the faulted state.
                 */
                if (spa_load_state(spa) == SPA_LOAD_OPEN) {
                        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
                            &vd->vdev_faulted);
                        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
                            &vd->vdev_degraded);
                        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
                            &vd->vdev_removed);

                        if (vd->vdev_faulted || vd->vdev_degraded) {
                                char *aux;

                                vd->vdev_label_aux =
                                    VDEV_AUX_ERR_EXCEEDED;
                                if (nvlist_lookup_string(nv,
                                    ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
                                    strcmp(aux, "external") == 0)
                                        vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
                        }
                }
        }

        /*
         * Add ourselves to the parent's list of children.
         */
        vdev_add_child(parent, vd);

        *vdp = vd;

        return (0);
}

void
vdev_free(vdev_t *vd)
{
        spa_t *spa = vd->vdev_spa;

        /*
         * Scan queues are normally destroyed at the end of a scan. If the
         * queue exists here, that implies the vdev is being removed while
         * the scan is still running.
         */
        if (vd->vdev_scan_io_queue != NULL) {
                mutex_enter(&vd->vdev_scan_io_queue_lock);
                dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue);
                vd->vdev_scan_io_queue = NULL;
                mutex_exit(&vd->vdev_scan_io_queue_lock);
        }

        /*
         * vdev_free() implies closing the vdev first.  This is simpler than
         * trying to ensure complicated semantics for all callers.
         */
        vdev_close(vd);

        ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
        ASSERT(!list_link_active(&vd->vdev_state_dirty_node));

        /*
         * Free all children.
         */
        for (int c = 0; c < vd->vdev_children; c++)
                vdev_free(vd->vdev_child[c]);

        ASSERT(vd->vdev_child == NULL);
        ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

        /*
         * Discard allocation state.
         */
        if (vd->vdev_mg != NULL) {
                vdev_metaslab_fini(vd);
                metaslab_group_destroy(vd->vdev_mg);
        }

        ASSERT0(vd->vdev_stat.vs_space);
        ASSERT0(vd->vdev_stat.vs_dspace);
        ASSERT0(vd->vdev_stat.vs_alloc);

        /*
         * Remove this vdev from its parent's child list.
         */
        vdev_remove_child(vd->vdev_parent, vd);

        ASSERT(vd->vdev_parent == NULL);

        /*
         * Clean up vdev structure.
         */
        vdev_queue_fini(vd);
        vdev_cache_fini(vd);

        if (vd->vdev_path)
                spa_strfree(vd->vdev_path);
        if (vd->vdev_devid)
                spa_strfree(vd->vdev_devid);
        if (vd->vdev_physpath)
                spa_strfree(vd->vdev_physpath);
        if (vd->vdev_fru)
                spa_strfree(vd->vdev_fru);

        if (vd->vdev_isspare)
                spa_spare_remove(vd);
        if (vd->vdev_isl2cache)
                spa_l2cache_remove(vd);

        txg_list_destroy(&vd->vdev_ms_list);
        txg_list_destroy(&vd->vdev_dtl_list);

        mutex_enter(&vd->vdev_dtl_lock);
        space_map_close(vd->vdev_dtl_sm);
        for (int t = 0; t < DTL_TYPES; t++) {
                range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
                range_tree_destroy(vd->vdev_dtl[t]);
        }
        mutex_exit(&vd->vdev_dtl_lock);

        EQUIV(vd->vdev_indirect_births != NULL,
            vd->vdev_indirect_mapping != NULL);
        if (vd->vdev_indirect_births != NULL) {
                vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
                vdev_indirect_births_close(vd->vdev_indirect_births);
        }

        if (vd->vdev_obsolete_sm != NULL) {
                ASSERT(vd->vdev_removing ||
                    vd->vdev_ops == &vdev_indirect_ops);
                space_map_close(vd->vdev_obsolete_sm);
                vd->vdev_obsolete_sm = NULL;
        }
        range_tree_destroy(vd->vdev_obsolete_segments);
        rw_destroy(&vd->vdev_indirect_rwlock);
        mutex_destroy(&vd->vdev_obsolete_lock);

        mutex_destroy(&vd->vdev_queue_lock);
        mutex_destroy(&vd->vdev_dtl_lock);
        mutex_destroy(&vd->vdev_stat_lock);
        mutex_destroy(&vd->vdev_probe_lock);
        mutex_destroy(&vd->vdev_scan_io_queue_lock);

        if (vd == spa->spa_root_vdev)
                spa->spa_root_vdev = NULL;

        kmem_free(vd, sizeof (vdev_t));
}

/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
        spa_t *spa = svd->vdev_spa;
        metaslab_t *msp;
        vdev_t *vd;
        int t;

        ASSERT(tvd == tvd->vdev_top);

        tvd->vdev_ms_array = svd->vdev_ms_array;
        tvd->vdev_ms_shift = svd->vdev_ms_shift;
        tvd->vdev_ms_count = svd->vdev_ms_count;
        tvd->vdev_top_zap = svd->vdev_top_zap;

        svd->vdev_ms_array = 0;
        svd->vdev_ms_shift = 0;
        svd->vdev_ms_count = 0;
        svd->vdev_top_zap = 0;

        if (tvd->vdev_mg)
                ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
        tvd->vdev_mg = svd->vdev_mg;
        tvd->vdev_ms = svd->vdev_ms;

        svd->vdev_mg = NULL;
        svd->vdev_ms = NULL;

        if (tvd->vdev_mg != NULL)
                tvd->vdev_mg->mg_vd = tvd;

        tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
        svd->vdev_checkpoint_sm = NULL;

        tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
        tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
        tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

        svd->vdev_stat.vs_alloc = 0;
        svd->vdev_stat.vs_space = 0;
        svd->vdev_stat.vs_dspace = 0;

        for (t = 0; t < TXG_SIZE; t++) {
                while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
                        (void) txg_list_add(&tvd->vdev_ms_list, msp, t);
                while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
                        (void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
                if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
                        (void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
        }

        if (list_link_active(&svd->vdev_config_dirty_node)) {
                vdev_config_clean(svd);
                vdev_config_dirty(tvd);
        }

        if (list_link_active(&svd->vdev_state_dirty_node)) {
                vdev_state_clean(svd);
                vdev_state_dirty(tvd);
        }

        tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
        svd->vdev_deflate_ratio = 0;

        tvd->vdev_islog = svd->vdev_islog;
        svd->vdev_islog = 0;

        dsl_scan_io_queue_vdev_xfer(svd, tvd);
}

static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
        if (vd == NULL)
                return;

        vd->vdev_top = tvd;

        for (int c = 0; c < vd->vdev_children; c++)
                vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev.
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
        spa_t *spa = cvd->vdev_spa;
        vdev_t *pvd = cvd->vdev_parent;
        vdev_t *mvd;

        ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

        mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

        mvd->vdev_asize = cvd->vdev_asize;
        mvd->vdev_min_asize = cvd->vdev_min_asize;
        mvd->vdev_max_asize = cvd->vdev_max_asize;
        mvd->vdev_psize = cvd->vdev_psize;
        mvd->vdev_ashift = cvd->vdev_ashift;
        mvd->vdev_logical_ashift = cvd->vdev_logical_ashift;
        mvd->vdev_physical_ashift = cvd->vdev_physical_ashift;
        mvd->vdev_state = cvd->vdev_state;
        mvd->vdev_crtxg = cvd->vdev_crtxg;

        vdev_remove_child(pvd, cvd);
        vdev_add_child(pvd, mvd);
        cvd->vdev_id = mvd->vdev_children;
        vdev_add_child(mvd, cvd);
        vdev_top_update(cvd->vdev_top, cvd->vdev_top);

        if (mvd == mvd->vdev_top)
                vdev_top_transfer(cvd, mvd);

        return (mvd);
}

/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
        vdev_t *mvd = cvd->vdev_parent;
        vdev_t *pvd = mvd->vdev_parent;

        ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

        ASSERT(mvd->vdev_children == 1);
        ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
            mvd->vdev_ops == &vdev_replacing_ops ||
            mvd->vdev_ops == &vdev_spare_ops);
        cvd->vdev_ashift = mvd->vdev_ashift;
        cvd->vdev_logical_ashift = mvd->vdev_logical_ashift;
        cvd->vdev_physical_ashift = mvd->vdev_physical_ashift;

        vdev_remove_child(mvd, cvd);
        vdev_remove_child(pvd, mvd);

        /*
         * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
         * Otherwise, we could have detached an offline device, and when we
         * go to import the pool we'll think we have two top-level vdevs,
         * instead of a different version of the same top-level vdev.
         */
        if (mvd->vdev_top == mvd) {
                uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
                cvd->vdev_orig_guid = cvd->vdev_guid;
                cvd->vdev_guid += guid_delta;
                cvd->vdev_guid_sum += guid_delta;
        }
        cvd->vdev_id = mvd->vdev_id;
        vdev_add_child(pvd, cvd);
        vdev_top_update(cvd->vdev_top, cvd->vdev_top);

        if (cvd == cvd->vdev_top)
                vdev_top_transfer(mvd, cvd);

        ASSERT(mvd->vdev_children == 0);
        vdev_free(mvd);
}

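/*
 * Illustrative numbers for the guid handoff above: if mvd (guid 1000)
 * is a one-way mirror over cvd (guid 400), detaching makes cvd's guid
 * 400 + (1000 - 400) = 1000, so the surviving vdev keeps the top-level
 * guid recorded in the pool config, and vdev_guid_sum shifts by the
 * same delta.
 */
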
int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
        spa_t *spa = vd->vdev_spa;
        objset_t *mos = spa->spa_meta_objset;
        uint64_t m;
        uint64_t oldc = vd->vdev_ms_count;
        uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
        metaslab_t **mspp;
        int error;

        ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));

        /*
         * This vdev is not being allocated from yet or is a hole.
         */
        if (vd->vdev_ms_shift == 0)
                return (0);

        ASSERT(!vd->vdev_ishole);

        ASSERT(oldc <= newc);

        mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);

        if (oldc != 0) {
                bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
                kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
        }

        vd->vdev_ms = mspp;
        vd->vdev_ms_count = newc;

        for (m = oldc; m < newc; m++) {
                uint64_t object = 0;

                /*
                 * vdev_ms_array may be 0 if we are creating the "fake"
                 * metaslabs for an indirect vdev for zdb's leak detection.
                 * See zdb_leak_init().
                 */
                if (txg == 0 && vd->vdev_ms_array != 0) {
                        error = dmu_read(mos, vd->vdev_ms_array,
                            m * sizeof (uint64_t), sizeof (uint64_t), &object,
                            DMU_READ_PREFETCH);
                        if (error != 0) {
                                vdev_dbgmsg(vd, "unable to read the metaslab "
                                    "array [error=%d]", error);
                                return (error);
                        }
                }

                error = metaslab_init(vd->vdev_mg, m, object, txg,
                    &(vd->vdev_ms[m]));
                if (error != 0) {
                        vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
                            error);
                        return (error);
                }
        }

        if (txg == 0)
                spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);

        /*
         * If the vdev is being removed we don't activate
         * the metaslabs since we want to ensure that no new
         * allocations are performed on this device.
         */
        if (oldc == 0 && !vd->vdev_removing)
                metaslab_group_activate(vd->vdev_mg);

        if (txg == 0)
                spa_config_exit(spa, SCL_ALLOC, FTAG);

        return (0);
}

void
vdev_metaslab_fini(vdev_t *vd)
{
        if (vd->vdev_checkpoint_sm != NULL) {
                ASSERT(spa_feature_is_active(vd->vdev_spa,
                    SPA_FEATURE_POOL_CHECKPOINT));
                space_map_close(vd->vdev_checkpoint_sm);
                /*
                 * Even though we close the space map, we need to set its
                 * pointer to NULL. The reason is that vdev_metaslab_fini()
                 * may be called multiple times for certain operations
                 * (e.g., when destroying a pool) so we need to ensure that
                 * this clause never executes twice. This logic is similar
                 * to the one used for the vdev_ms clause below.
                 */
                vd->vdev_checkpoint_sm = NULL;
        }

        if (vd->vdev_ms != NULL) {
                uint64_t count = vd->vdev_ms_count;

                metaslab_group_passivate(vd->vdev_mg);
                for (uint64_t m = 0; m < count; m++) {
                        metaslab_t *msp = vd->vdev_ms[m];

                        if (msp != NULL)
                                metaslab_fini(msp);
                }
                kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
                vd->vdev_ms = NULL;

                vd->vdev_ms_count = 0;
        }
        ASSERT0(vd->vdev_ms_count);
}

typedef struct vdev_probe_stats {
        boolean_t       vps_readable;
        boolean_t       vps_writeable;
        int             vps_flags;
} vdev_probe_stats_t;

static void
vdev_probe_done(zio_t *zio)
{
        spa_t *spa = zio->io_spa;
        vdev_t *vd = zio->io_vd;
        vdev_probe_stats_t *vps = zio->io_private;

        ASSERT(vd->vdev_probe_zio != NULL);

        if (zio->io_type == ZIO_TYPE_READ) {
                if (zio->io_error == 0)
                        vps->vps_readable = 1;
                if (zio->io_error == 0 && spa_writeable(spa)) {
                        zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
                            zio->io_offset, zio->io_size, zio->io_abd,
                            ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
                            ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
                } else {
                        abd_free(zio->io_abd);
                }
        } else if (zio->io_type == ZIO_TYPE_WRITE) {
                if (zio->io_error == 0)
                        vps->vps_writeable = 1;
                abd_free(zio->io_abd);
        } else if (zio->io_type == ZIO_TYPE_NULL) {
                zio_t *pio;

                vd->vdev_cant_read |= !vps->vps_readable;
                vd->vdev_cant_write |= !vps->vps_writeable;

                if (vdev_readable(vd) &&
                    (vdev_writeable(vd) || !spa_writeable(spa))) {
                        zio->io_error = 0;
                } else {
                        ASSERT(zio->io_error != 0);
                        vdev_dbgmsg(vd, "failed probe");
                        zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
                            spa, vd, NULL, 0, 0);
                        zio->io_error = SET_ERROR(ENXIO);
                }

                mutex_enter(&vd->vdev_probe_lock);
                ASSERT(vd->vdev_probe_zio == zio);
                vd->vdev_probe_zio = NULL;
                mutex_exit(&vd->vdev_probe_lock);

                zio_link_t *zl = NULL;
                while ((pio = zio_walk_parents(zio, &zl)) != NULL)
                        if (!vdev_accessible(vd, pio))
                                pio->io_error = SET_ERROR(ENXIO);

                kmem_free(vps, sizeof (*vps));
        }
}

/*
 * Determine whether this device is accessible.
 *
 * Read and write to several known locations: the pad regions of each
 * vdev label but the first, which we leave alone in case it contains
 * a VTOC.
 */
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
        spa_t *spa = vd->vdev_spa;
        vdev_probe_stats_t *vps = NULL;
        zio_t *pio;

        ASSERT(vd->vdev_ops->vdev_op_leaf);

        /*
         * Don't probe the probe.
         */
        if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
                return (NULL);

        /*
         * To prevent 'probe storms' when a device fails, we create
         * just one probe i/o at a time.  All zios that want to probe
         * this vdev will become parents of the probe io.
         */
        mutex_enter(&vd->vdev_probe_lock);

        if ((pio = vd->vdev_probe_zio) == NULL) {
                vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);

                vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
                    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
                    ZIO_FLAG_TRYHARD;

                if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
                        /*
                         * vdev_cant_read and vdev_cant_write can only
                         * transition from TRUE to FALSE when we have the
                         * SCL_ZIO lock as writer; otherwise they can only
                         * transition from FALSE to TRUE.  This ensures that
                         * any zio looking at these values can assume that
                         * failures persist for the life of the I/O.  That's
                         * important because when a device has intermittent
                         * connectivity problems, we want to ensure that
                         * they're ascribed to the device (ENXIO) and not
                         * the zio (EIO).
                         *
                         * Since we hold SCL_ZIO as writer here, clear both
                         * values so the probe can reevaluate from first
                         * principles.
                         */
                        vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
                        vd->vdev_cant_read = B_FALSE;
                        vd->vdev_cant_write = B_FALSE;
                }

                vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
                    vdev_probe_done, vps,
                    vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);

                /*
                 * We can't change the vdev state in this context, so we
                 * kick off an async task to do it on our behalf.
                 */
                if (zio != NULL) {
                        vd->vdev_probe_wanted = B_TRUE;
                        spa_async_request(spa, SPA_ASYNC_PROBE);
                }
        }

        if (zio != NULL)
                zio_add_child(zio, pio);

        mutex_exit(&vd->vdev_probe_lock);

        if (vps == NULL) {
                ASSERT(zio != NULL);
                return (NULL);
        }

        for (int l = 1; l < VDEV_LABELS; l++) {
                zio_nowait(zio_read_phys(pio, vd,
                    vdev_label_offset(vd->vdev_psize, l,
                    offsetof(vdev_label_t, vl_pad2)), VDEV_PAD_SIZE,
                    abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
                    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
                    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
        }

        if (zio == NULL)
                return (pio);

        zio_nowait(pio);
        return (NULL);
}

static void
vdev_open_child(void *arg)
{
        vdev_t *vd = arg;

        vd->vdev_open_thread = curthread;
        vd->vdev_open_error = vdev_open(vd);
        vd->vdev_open_thread = NULL;
}

boolean_t
vdev_uses_zvols(vdev_t *vd)
{
        if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
            strlen(ZVOL_DIR)) == 0)
                return (B_TRUE);
        for (int c = 0; c < vd->vdev_children; c++)
                if (vdev_uses_zvols(vd->vdev_child[c]))
                        return (B_TRUE);
        return (B_FALSE);
}

void
vdev_open_children(vdev_t *vd)
{
        taskq_t *tq;
        int children = vd->vdev_children;

        /*
         * in order to handle pools on top of zvols, do the opens
         * in a single thread so that the same thread holds the
         * spa_namespace_lock
         */
        if (B_TRUE || vdev_uses_zvols(vd)) {
                for (int c = 0; c < children; c++)
                        vd->vdev_child[c]->vdev_open_error =
                            vdev_open(vd->vdev_child[c]);
                return;
        }
        tq = taskq_create("vdev_open", children, minclsyspri,
            children, children, TASKQ_PREPOPULATE);

        for (int c = 0; c < children; c++)
                VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
                    TQ_SLEEP) != 0);

        taskq_destroy(tq);
}

/*
 * Compute the raidz-deflation ratio.  Note, we hard-code
 * in 128k (1 << 17) because it is the "typical" blocksize.
 * Even though SPA_MAXBLOCKSIZE changed, this algorithm cannot change,
 * otherwise it would inconsistently account for existing bp's.
 */
static void
vdev_set_deflate_ratio(vdev_t *vd)
{
        if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
                vd->vdev_deflate_ratio = (1 << 17) /
                    (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
        }
}

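/*
 * Worked example: for a plain disk or mirror, psize_to_asize(128K) is
 * 128K, so vdev_deflate_ratio = 131072 / (131072 >> 9) = 512.  A raidz
 * vdev returns a larger asize for the same 128K psize (parity and
 * padding), giving a proportionally smaller ratio and thus deflated
 * space accounting.
 */
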
1448 /*
1449  * Prepare a virtual device for access.
1450  */
1451 int
1452 vdev_open(vdev_t *vd)
1453 {
1454         spa_t *spa = vd->vdev_spa;
1455         int error;
1456         uint64_t osize = 0;
1457         uint64_t max_osize = 0;
1458         uint64_t asize, max_asize, psize;
1459         uint64_t logical_ashift = 0;
1460         uint64_t physical_ashift = 0;
1461
1462         ASSERT(vd->vdev_open_thread == curthread ||
1463             spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1464         ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
1465             vd->vdev_state == VDEV_STATE_CANT_OPEN ||
1466             vd->vdev_state == VDEV_STATE_OFFLINE);
1467
1468         vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
1469         vd->vdev_cant_read = B_FALSE;
1470         vd->vdev_cant_write = B_FALSE;
1471         vd->vdev_notrim = B_FALSE;
1472         vd->vdev_min_asize = vdev_get_min_asize(vd);
1473
1474         /*
1475          * If this vdev is not removed, check its fault status.  If it's
1476          * faulted, bail out of the open.
1477          */
1478         if (!vd->vdev_removed && vd->vdev_faulted) {
1479                 ASSERT(vd->vdev_children == 0);
1480                 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
1481                     vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
1482                 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1483                     vd->vdev_label_aux);
1484                 return (SET_ERROR(ENXIO));
1485         } else if (vd->vdev_offline) {
1486                 ASSERT(vd->vdev_children == 0);
1487                 vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
1488                 return (SET_ERROR(ENXIO));
1489         }
1490
1491         error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize,
1492             &logical_ashift, &physical_ashift);
1493
1494         /*
1495          * Reset the vdev_reopening flag so that we actually close
1496          * the vdev on error.
1497          */
1498         vd->vdev_reopening = B_FALSE;
1499         if (zio_injection_enabled && error == 0)
1500                 error = zio_handle_device_injection(vd, NULL, ENXIO);
1501
1502         if (error) {
1503                 if (vd->vdev_removed &&
1504                     vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
1505                         vd->vdev_removed = B_FALSE;
1506
1507                 if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) {
1508                         vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE,
1509                             vd->vdev_stat.vs_aux);
1510                 } else {
1511                         vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1512                             vd->vdev_stat.vs_aux);
1513                 }
1514                 return (error);
1515         }
1516
1517         vd->vdev_removed = B_FALSE;
1518
1519         /*
1520          * Recheck the faulted flag now that we have confirmed that
1521          * the vdev is accessible.  If we're faulted, bail.
1522          */
1523         if (vd->vdev_faulted) {
1524                 ASSERT(vd->vdev_children == 0);
1525                 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
1526                     vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
1527                 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1528                     vd->vdev_label_aux);
1529                 return (SET_ERROR(ENXIO));
1530         }
1531
1532         if (vd->vdev_degraded) {
1533                 ASSERT(vd->vdev_children == 0);
1534                 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
1535                     VDEV_AUX_ERR_EXCEEDED);
1536         } else {
1537                 vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
1538         }
1539
1540         /*
1541          * For hole or missing vdevs we just return success.
1542          */
1543         if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
1544                 return (0);
1545
1546         if (zfs_trim_enabled && !vd->vdev_notrim && vd->vdev_ops->vdev_op_leaf)
1547                 trim_map_create(vd);
1548
1549         for (int c = 0; c < vd->vdev_children; c++) {
1550                 if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
1551                         vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
1552                             VDEV_AUX_NONE);
1553                         break;
1554                 }
1555         }
1556
1557         osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
1558         max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
1559
1560         if (vd->vdev_children == 0) {
1561                 if (osize < SPA_MINDEVSIZE) {
1562                         vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1563                             VDEV_AUX_TOO_SMALL);
1564                         return (SET_ERROR(EOVERFLOW));
1565                 }
1566                 psize = osize;
1567                 asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
1568                 max_asize = max_osize - (VDEV_LABEL_START_SIZE +
1569                     VDEV_LABEL_END_SIZE);
1570         } else {
1571                 if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
1572                     (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
1573                         vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1574                             VDEV_AUX_TOO_SMALL);
1575                         return (SET_ERROR(EOVERFLOW));
1576                 }
1577                 psize = 0;
1578                 asize = osize;
1579                 max_asize = max_osize;
1580         }
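        /*
         * Illustrative arithmetic, assuming the usual on-disk layout of four
         * 256KB labels (two at each end) plus a 3.5MB boot region up front:
         * VDEV_LABEL_START_SIZE is then 4MB and VDEV_LABEL_END_SIZE is 512KB,
         * so a 10GB leaf ends up with psize = 10GB and
         * asize = 10GB - 4.5MB.
         */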
1581
1582         vd->vdev_psize = psize;
1583
1584         /*
1585          * Make sure the allocatable size hasn't shrunk too much.
1586          */
1587         if (asize < vd->vdev_min_asize) {
1588                 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1589                     VDEV_AUX_BAD_LABEL);
1590                 return (SET_ERROR(EINVAL));
1591         }
1592
1593         vd->vdev_physical_ashift =
1594             MAX(physical_ashift, vd->vdev_physical_ashift);
1595         vd->vdev_logical_ashift = MAX(logical_ashift, vd->vdev_logical_ashift);
1596         vd->vdev_ashift = MAX(vd->vdev_logical_ashift, vd->vdev_ashift);
1597
1598         if (vd->vdev_logical_ashift > SPA_MAXASHIFT) {
1599                 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1600                     VDEV_AUX_ASHIFT_TOO_BIG);
1601                 return (SET_ERROR(EINVAL));
1602         }
1603
1604         if (vd->vdev_asize == 0) {
1605                 /*
1606                  * This is the first-ever open, so use the computed values.
1607                  * For testing purposes, a higher ashift can be requested.
1608                  */
1609                 vd->vdev_asize = asize;
1610                 vd->vdev_max_asize = max_asize;
1611         } else {
1612                 /*
1613                  * Make sure the alignment requirement hasn't increased.
1614                  */
1615                 if (vd->vdev_ashift > vd->vdev_top->vdev_ashift &&
1616                     vd->vdev_ops->vdev_op_leaf) {
1617                         vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1618                             VDEV_AUX_BAD_LABEL);
1619                         return (SET_ERROR(EINVAL));
1620                 }
1621                 vd->vdev_max_asize = max_asize;
1622         }
1623
1624         /*
1625          * If all children are healthy, we update asize if either:
1626          *
1627          * - The asize has increased, due to a device expansion caused by
1628          *   dynamic LUN growth or vdev replacement, and automatic
1629          *   expansion is enabled, making the additional space available.
1630          *
1631          * - The asize has decreased, due to a device shrink, usually from
1632          *   a vdev replace with a smaller device.  This keeps calculations
1633          *   based on max_asize and asize (e.g. esize) valid; it's safe
1634          *   since we've already validated that asize exceeds vdev_min_asize.
1635          */
1636         if (vd->vdev_state == VDEV_STATE_HEALTHY &&
1637             ((asize > vd->vdev_asize &&
1638             (vd->vdev_expanding || spa->spa_autoexpand)) ||
1639             (asize < vd->vdev_asize)))
1640                 vd->vdev_asize = asize;
1641
1642         vdev_set_min_asize(vd);
1643
1644         /*
1645          * Ensure we can issue some IO before declaring the
1646          * vdev open for business.
1647          */
1648         if (vd->vdev_ops->vdev_op_leaf &&
1649             (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
1650                 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1651                     VDEV_AUX_ERR_EXCEEDED);
1652                 return (error);
1653         }
1654
1655         /*
1656          * Track the min and max ashift values for normal data devices.
1657          */
1658         if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
1659             !vd->vdev_islog && vd->vdev_aux == NULL) {
1660                 if (vd->vdev_ashift > spa->spa_max_ashift)
1661                         spa->spa_max_ashift = vd->vdev_ashift;
1662                 if (vd->vdev_ashift < spa->spa_min_ashift)
1663                         spa->spa_min_ashift = vd->vdev_ashift;
1664         }
1665
1666         /*
1667          * If a leaf vdev has a DTL, and seems healthy, then kick off a
1668          * resilver.  But don't do this if we are doing a reopen for a scrub,
1669          * since this would just restart the scrub we are already doing.
1670          */
1671         if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
1672             vdev_resilver_needed(vd, NULL, NULL))
1673                 spa_async_request(spa, SPA_ASYNC_RESILVER);
1674
1675         return (0);
1676 }
1677
1678 /*
1679  * Called once the vdevs are all opened, this routine validates the label
1680  * contents. This needs to be done before vdev_load() so that we don't
1681  * inadvertently do repair I/Os to the wrong device.
1682  *
1683  * This function will only return failure if one of the vdevs indicates that it
1684  * has since been destroyed or exported.  This is only possible if
1685  * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
1686  * will be updated but the function will return 0.
1687  */
1688 int
1689 vdev_validate(vdev_t *vd)
1690 {
1691         spa_t *spa = vd->vdev_spa;
1692         nvlist_t *label;
1693         uint64_t guid = 0, aux_guid = 0, top_guid;
1694         uint64_t state;
1695         nvlist_t *nvl;
1696         uint64_t txg;
1697
1698         if (vdev_validate_skip)
1699                 return (0);
1700
1701         for (uint64_t c = 0; c < vd->vdev_children; c++)
1702                 if (vdev_validate(vd->vdev_child[c]) != 0)
1703                         return (SET_ERROR(EBADF));
1704
1705         /*
1706          * If the device has already failed, or was marked offline, don't do
1707          * any further validation.  Otherwise, label I/O will fail and we will
1708          * overwrite the previous state.
1709          */
1710         if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd))
1711                 return (0);
1712
1713         /*
1714          * If we are performing an extreme rewind, we allow for a label that
1715          * was modified at a point after the current txg.
1716          * If the config lock is not held, do not check the txg: spa_sync could
1717          * be updating the vdev's label before updating spa_last_synced_txg.
1718          */
1719         if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
1720             spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
1721                 txg = UINT64_MAX;
1722         else
1723                 txg = spa_last_synced_txg(spa);
1724
1725         if ((label = vdev_label_read_config(vd, txg)) == NULL) {
1726                 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1727                     VDEV_AUX_BAD_LABEL);
1728                 vdev_dbgmsg(vd, "vdev_validate: failed reading config for "
1729                     "txg %llu", (u_longlong_t)txg);
1730                 return (0);
1731         }
1732
1733         /*
1734          * Determine if this vdev has been split off into another
1735          * pool.  If so, then refuse to open it.
1736          */
1737         if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
1738             &aux_guid) == 0 && aux_guid == spa_guid(spa)) {
1739                 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1740                     VDEV_AUX_SPLIT_POOL);
1741                 nvlist_free(label);
1742                 vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool");
1743                 return (0);
1744         }
1745
1746         if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) {
1747                 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1748                     VDEV_AUX_CORRUPT_DATA);
1749                 nvlist_free(label);
1750                 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
1751                     ZPOOL_CONFIG_POOL_GUID);
1752                 return (0);
1753         }
1754
1755         /*
1756          * If config is not trusted then ignore the spa guid check. This is
1757          * necessary because if the machine crashed during a re-guid the new
1758          * guid might have been written to all of the vdev labels, but not the
1759          * cached config. The check will be performed again once we have the
1760          * trusted config from the MOS.
1761          */
1762         if (spa->spa_trust_config && guid != spa_guid(spa)) {
1763                 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1764                     VDEV_AUX_CORRUPT_DATA);
1765                 nvlist_free(label);
1766                 vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't "
1767                     "match config (%llu != %llu)", (u_longlong_t)guid,
1768                     (u_longlong_t)spa_guid(spa));
1769                 return (0);
1770         }
1771
1772         if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
1773             != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
1774             &aux_guid) != 0)
1775                 aux_guid = 0;
1776
1777         if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) {
1778                 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1779                     VDEV_AUX_CORRUPT_DATA);
1780                 nvlist_free(label);
1781                 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
1782                     ZPOOL_CONFIG_GUID);
1783                 return (0);
1784         }
1785
1786         if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid)
1787             != 0) {
1788                 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1789                     VDEV_AUX_CORRUPT_DATA);
1790                 nvlist_free(label);
1791                 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
1792                     ZPOOL_CONFIG_TOP_GUID);
1793                 return (0);
1794         }
1795
1796         /*
1797          * If this vdev just became a top-level vdev because its sibling was
1798          * detached, it will have adopted the parent's vdev guid -- but the
1799          * label may or may not be on disk yet. Fortunately, either version
1800          * of the label will have the same top guid, so if we're a top-level
1801          * vdev, we can safely compare to that instead.
1802          * However, if the config comes from a cachefile that failed to update
1803          * after the detach, a top-level vdev will appear as a non top-level
1804          * vdev in the config. Also relax the constraints if we perform an
1805          * extreme rewind.
1806          *
1807          * If we split this vdev off instead, then we also check the
1808          * original pool's guid. We don't want to consider the vdev
1809          * corrupt if it is partway through a split operation.
1810          */
1811         if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) {
1812                 boolean_t mismatch = B_FALSE;
1813                 if (spa->spa_trust_config && !spa->spa_extreme_rewind) {
1814                         if (vd != vd->vdev_top || vd->vdev_guid != top_guid)
1815                                 mismatch = B_TRUE;
1816                 } else {
1817                         if (vd->vdev_guid != top_guid &&
1818                             vd->vdev_top->vdev_guid != guid)
1819                                 mismatch = B_TRUE;
1820                 }
1821
1822                 if (mismatch) {
1823                         vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1824                             VDEV_AUX_CORRUPT_DATA);
1825                         nvlist_free(label);
1826                         vdev_dbgmsg(vd, "vdev_validate: config guid "
1827                             "doesn't match label guid");
1828                         vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu",
1829                             (u_longlong_t)vd->vdev_guid,
1830                             (u_longlong_t)vd->vdev_top->vdev_guid);
1831                         vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, "
1832                             "aux_guid %llu", (u_longlong_t)guid,
1833                             (u_longlong_t)top_guid, (u_longlong_t)aux_guid);
1834                         return (0);
1835                 }
1836         }
1837
1838         if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
1839             &state) != 0) {
1840                 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1841                     VDEV_AUX_CORRUPT_DATA);
1842                 nvlist_free(label);
1843                 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
1844                     ZPOOL_CONFIG_POOL_STATE);
1845                 return (0);
1846         }
1847
1848         nvlist_free(label);
1849
1850         /*
1851          * If this is a verbatim import, no need to check the
1852          * state of the pool.
1853          */
1854         if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
1855             spa_load_state(spa) == SPA_LOAD_OPEN &&
1856             state != POOL_STATE_ACTIVE) {
1857                 vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) "
1858                     "for spa %s", (u_longlong_t)state, spa->spa_name);
1859                 return (SET_ERROR(EBADF));
1860         }
1861
1862         /*
1863          * If we were able to open and validate a vdev that was
1864          * previously marked permanently unavailable, clear that state
1865          * now.
1866          */
1867         if (vd->vdev_not_present)
1868                 vd->vdev_not_present = 0;
1869
1870         return (0);
1871 }
1872
1873 static void
1874 vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd)
1875 {
1876         if (svd->vdev_path != NULL && dvd->vdev_path != NULL) {
1877                 if (strcmp(svd->vdev_path, dvd->vdev_path) != 0) {
1878                         zfs_dbgmsg("vdev_copy_path: vdev %llu: path changed "
1879                             "from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
1880                             dvd->vdev_path, svd->vdev_path);
1881                         spa_strfree(dvd->vdev_path);
1882                         dvd->vdev_path = spa_strdup(svd->vdev_path);
1883                 }
1884         } else if (svd->vdev_path != NULL) {
1885                 dvd->vdev_path = spa_strdup(svd->vdev_path);
1886                 zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'",
1887                     (u_longlong_t)dvd->vdev_guid, dvd->vdev_path);
1888         }
1889 }
1890
1891 /*
1892  * Recursively copy vdev paths from one vdev to another. Source and destination
1893  * vdev trees must have the same geometry; otherwise an error is returned.
1894  * paths from userland config into MOS config.
1895  */
1896 int
1897 vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd)
1898 {
1899         if ((svd->vdev_ops == &vdev_missing_ops) ||
1900             (svd->vdev_ishole && dvd->vdev_ishole) ||
1901             (dvd->vdev_ops == &vdev_indirect_ops))
1902                 return (0);
1903
1904         if (svd->vdev_ops != dvd->vdev_ops) {
1905                 vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s",
1906                     svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type);
1907                 return (SET_ERROR(EINVAL));
1908         }
1909
1910         if (svd->vdev_guid != dvd->vdev_guid) {
1911                 vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != "
1912                     "%llu)", (u_longlong_t)svd->vdev_guid,
1913                     (u_longlong_t)dvd->vdev_guid);
1914                 return (SET_ERROR(EINVAL));
1915         }
1916
1917         if (svd->vdev_children != dvd->vdev_children) {
1918                 vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: "
1919                     "%llu != %llu", (u_longlong_t)svd->vdev_children,
1920                     (u_longlong_t)dvd->vdev_children);
1921                 return (SET_ERROR(EINVAL));
1922         }
1923
1924         for (uint64_t i = 0; i < svd->vdev_children; i++) {
1925                 int error = vdev_copy_path_strict(svd->vdev_child[i],
1926                     dvd->vdev_child[i]);
1927                 if (error != 0)
1928                         return (error);
1929         }
1930
1931         if (svd->vdev_ops->vdev_op_leaf)
1932                 vdev_copy_path_impl(svd, dvd);
1933
1934         return (0);
1935 }
1936
1937 static void
1938 vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd)
1939 {
1940         ASSERT(stvd->vdev_top == stvd);
1941         ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id);
1942
1943         for (uint64_t i = 0; i < dvd->vdev_children; i++) {
1944                 vdev_copy_path_search(stvd, dvd->vdev_child[i]);
1945         }
1946
1947         if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd))
1948                 return;
1949
1950         /*
1951          * The idea here is that while a vdev can shift positions within
1952          * a top vdev (when replacing, attaching mirror, etc.) it cannot
1953          * step outside of it.
1954          */
1955         vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid);
1956
1957         if (vd == NULL || vd->vdev_ops != dvd->vdev_ops)
1958                 return;
1959
1960         ASSERT(vd->vdev_ops->vdev_op_leaf);
1961
1962         vdev_copy_path_impl(vd, dvd);
1963 }
1964
1965 /*
1966  * Recursively copy vdev paths from one root vdev to another. Source and
1967  * destination vdev trees may differ in geometry. For each destination leaf
1968  * vdev, search for a vdev with the same guid and top vdev id in the source.
1969  * Intended to copy paths from userland config into MOS config.
1970  */
1971 void
1972 vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd)
1973 {
1974         uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children);
1975         ASSERT(srvd->vdev_ops == &vdev_root_ops);
1976         ASSERT(drvd->vdev_ops == &vdev_root_ops);
1977
1978         for (uint64_t i = 0; i < children; i++) {
1979                 vdev_copy_path_search(srvd->vdev_child[i],
1980                     drvd->vdev_child[i]);
1981         }
1982 }
1983
1984 /*
1985  * Close a virtual device.
1986  */
1987 void
1988 vdev_close(vdev_t *vd)
1989 {
1990         spa_t *spa = vd->vdev_spa;
1991         vdev_t *pvd = vd->vdev_parent;
1992
1993         ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1994
1995         /*
1996          * If our parent is reopening, then we are as well, unless we are
1997          * going offline.
1998          */
1999         if (pvd != NULL && pvd->vdev_reopening)
2000                 vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);
2001
2002         vd->vdev_ops->vdev_op_close(vd);
2003
2004         vdev_cache_purge(vd);
2005
2006         if (vd->vdev_ops->vdev_op_leaf)
2007                 trim_map_destroy(vd);
2008
2009         /*
2010          * We record the previous state before we close it, so that if we are
2011          * doing a reopen(), we don't generate FMA ereports if we notice that
2012          * it's still faulted.
2013          */
2014         vd->vdev_prevstate = vd->vdev_state;
2015
2016         if (vd->vdev_offline)
2017                 vd->vdev_state = VDEV_STATE_OFFLINE;
2018         else
2019                 vd->vdev_state = VDEV_STATE_CLOSED;
2020         vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
2021 }
2022
2023 void
2024 vdev_hold(vdev_t *vd)
2025 {
2026         spa_t *spa = vd->vdev_spa;
2027
2028         ASSERT(spa_is_root(spa));
2029         if (spa->spa_state == POOL_STATE_UNINITIALIZED)
2030                 return;
2031
2032         for (int c = 0; c < vd->vdev_children; c++)
2033                 vdev_hold(vd->vdev_child[c]);
2034
2035         if (vd->vdev_ops->vdev_op_leaf)
2036                 vd->vdev_ops->vdev_op_hold(vd);
2037 }
2038
2039 void
2040 vdev_rele(vdev_t *vd)
2041 {
2042         spa_t *spa = vd->vdev_spa;
2043
2044         ASSERT(spa_is_root(spa));
2045         for (int c = 0; c < vd->vdev_children; c++)
2046                 vdev_rele(vd->vdev_child[c]);
2047
2048         if (vd->vdev_ops->vdev_op_leaf)
2049                 vd->vdev_ops->vdev_op_rele(vd);
2050 }
2051
2052 /*
2053  * Reopen all interior vdevs and any unopened leaves.  We don't actually
2054  * reopen leaf vdevs which had previously been opened as they might deadlock
2055  * on the spa_config_lock.  Instead we only obtain the leaf's physical size.
2056  * If the leaf has never been opened then open it, as usual.
2057  */
2058 void
2059 vdev_reopen(vdev_t *vd)
2060 {
2061         spa_t *spa = vd->vdev_spa;
2062
2063         ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2064
2065         /* set the reopening flag unless we're taking the vdev offline */
2066         vd->vdev_reopening = !vd->vdev_offline;
2067         vdev_close(vd);
2068         (void) vdev_open(vd);
2069
2070         /*
2071          * Call vdev_validate() here to make sure we have the same device.
2072          * Otherwise, a device with an invalid label could be successfully
2073          * opened in response to vdev_reopen().
2074          */
2075         if (vd->vdev_aux) {
2076                 (void) vdev_validate_aux(vd);
2077                 if (vdev_readable(vd) && vdev_writeable(vd) &&
2078                     vd->vdev_aux == &spa->spa_l2cache &&
2079                     !l2arc_vdev_present(vd))
2080                         l2arc_add_vdev(spa, vd);
2081         } else {
2082                 (void) vdev_validate(vd);
2083         }
2084
2085         /*
2086          * Reassess parent vdev's health.
2087          */
2088         vdev_propagate_state(vd);
2089 }
2090
2091 int
2092 vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
2093 {
2094         int error;
2095
2096         /*
2097          * Normally, partial opens (e.g. of a mirror) are allowed.
2098          * For a create, however, we want to fail the request if
2099          * there are any components we can't open.
2100          */
2101         error = vdev_open(vd);
2102
2103         if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
2104                 vdev_close(vd);
2105                 return (error ? error : ENXIO);
2106         }
2107
2108         /*
2109          * Recursively load DTLs and initialize all labels.
2110          */
2111         if ((error = vdev_dtl_load(vd)) != 0 ||
2112             (error = vdev_label_init(vd, txg, isreplacing ?
2113             VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
2114                 vdev_close(vd);
2115                 return (error);
2116         }
2117
2118         return (0);
2119 }
2120
2121 void
2122 vdev_metaslab_set_size(vdev_t *vd)
2123 {
2124         uint64_t asize = vd->vdev_asize;
2125         uint64_t ms_shift = 0;
2126
2127         /*
2128          * For vdevs that are bigger than 8G the metaslab size stays fixed,
2129          * so the number of metaslabs grows linearly with vdev_asize,
2130          * starting from 16 metaslabs.
2131          * So for a vdev_asize of 8G we get 16 metaslabs, for 16G we get 32,
2132          * and so on, until we hit the maximum metaslab count limit
2133          * [vdev_max_ms_count], at which point the metaslab count stays
2134          * the same.
2135          */
2136         ms_shift = vdev_default_ms_shift;
2137
2138         if ((asize >> ms_shift) < vdev_min_ms_count) {
2139                 /*
2140                  * For devices that are less than 8G we want to have
2141                  * exactly 16 metaslabs. We don't want fewer, as integer
2142                  * division rounds down, so fewer metaslabs mean more
2143                  * wasted space. We don't want more, as these vdevs are
2144                  * small and in the likely event that we are running
2145                  * out of space, the SPA will have a hard time finding
2146                  * space due to fragmentation.
2147                  */
2148                 ms_shift = highbit64(asize / vdev_min_ms_count);
2149                 ms_shift = MAX(ms_shift, SPA_MAXBLOCKSHIFT);
2150
2151         } else if ((asize >> ms_shift) > vdev_max_ms_count) {
2152                 ms_shift = highbit64(asize / vdev_max_ms_count);
2153         }
2154
2155         vd->vdev_ms_shift = ms_shift;
2156         ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT);
2157 }
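
/*
 * Worked examples (illustrative), assuming the stock tunables
 * vdev_default_ms_shift = 29 (512MB metaslabs), vdev_min_ms_count = 16 and
 * vdev_max_ms_count = 200:
 *
 *   asize = 100GB:  100GB >> 29 = 200, which does not exceed the maximum,
 *                   so we keep ms_shift = 29 and get 200 metaslabs of 512MB.
 *
 *   asize = 1TB:    1TB >> 29 = 2048 > 200, so ms_shift becomes
 *                   highbit64(1TB / 200) = 33, i.e. 128 metaslabs of 8GB.
 */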
2158
2159 /*
2160  * Maximize performance by inflating the configured ashift for top level
2161  * vdevs to be as close to the physical ashift as possible while maintaining
2162  * administrator defined limits and ensuring it doesn't go below the
2163  * logical ashift.
2164  */
2165 void
2166 vdev_ashift_optimize(vdev_t *vd)
2167 {
2168         if (vd == vd->vdev_top) {
2169                 if (vd->vdev_ashift < vd->vdev_physical_ashift) {
2170                         vd->vdev_ashift = MIN(
2171                             MAX(zfs_max_auto_ashift, vd->vdev_ashift),
2172                             MAX(zfs_min_auto_ashift, vd->vdev_physical_ashift));
2173                 } else {
2174                         /*
2175                          * Unusual case where logical ashift > physical ashift
2176                          * so we can't cap the calculated ashift based on max
2177                          * ashift as that would cause failures.
2178                          * We still check if we need to increase it to match
2179                          * the min ashift.
2180                          */
2181                         vd->vdev_ashift = MAX(zfs_min_auto_ashift,
2182                             vd->vdev_ashift);
2183                 }
2184         }
2185 }
2186
2187 void
2188 vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
2189 {
2190         ASSERT(vd == vd->vdev_top);
2191         /* indirect vdevs don't have metaslabs or dtls */
2192         ASSERT(vdev_is_concrete(vd) || flags == 0);
2193         ASSERT(ISP2(flags));
2194         ASSERT(spa_writeable(vd->vdev_spa));
2195
2196         if (flags & VDD_METASLAB)
2197                 (void) txg_list_add(&vd->vdev_ms_list, arg, txg);
2198
2199         if (flags & VDD_DTL)
2200                 (void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
2201
2202         (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
2203 }
2204
2205 void
2206 vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
2207 {
2208         for (int c = 0; c < vd->vdev_children; c++)
2209                 vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
2210
2211         if (vd->vdev_ops->vdev_op_leaf)
2212                 vdev_dirty(vd->vdev_top, flags, vd, txg);
2213 }
2214
2215 /*
2216  * DTLs.
2217  *
2218  * A vdev's DTL (dirty time log) is the set of transaction groups for which
2219  * the vdev has less than perfect replication.  There are four kinds of DTL:
2220  *
2221  * DTL_MISSING: txgs for which the vdev has no valid copies of the data
2222  *
2223  * DTL_PARTIAL: txgs for which data is available, but not fully replicated
2224  *
2225  * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
2226  *      scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
2227  *      txgs that was scrubbed.
2228  *
2229  * DTL_OUTAGE: txgs which cannot currently be read, whether due to
2230  *      persistent errors or just some device being offline.
2231  *      Unlike the other three, the DTL_OUTAGE map is not generally
2232  *      maintained; it's only computed when needed, typically to
2233  *      determine whether a device can be detached.
2234  *
2235  * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
2236  * either has the data or it doesn't.
2237  *
2238  * For interior vdevs such as mirror and RAID-Z the picture is more complex.
2239  * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
2240  * if any child is less than fully replicated, then so is its parent.
2241  * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
2242  * comprising only those txgs which appear in more than 'maxfaults' children;
2243  * those are the txgs we don't have enough replication to read.  For example,
2244  * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
2245  * thus, its DTL_MISSING consists of the set of txgs that appear in more than
2246  * two child DTL_MISSING maps.
2247  *
2248  * It should be clear from the above that to compute the DTLs and outage maps
2249  * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
2250  * Therefore, that is all we keep on disk.  When loading the pool, or after
2251  * a configuration change, we generate all other DTLs from first principles.
2252  */
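
/*
 * Illustrative example (not part of the original source): consider a two-way
 * mirror whose children have DTL_MISSING of [100, 200) and [150, 250) (txg
 * ranges).  With minref == 2 (any kind of mirror), the parent's DTL_MISSING
 * is the intersection [150, 200); its DTL_PARTIAL (minref == 1) is the
 * union [100, 250).
 */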
2253 void
2254 vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2255 {
2256         range_tree_t *rt = vd->vdev_dtl[t];
2257
2258         ASSERT(t < DTL_TYPES);
2259         ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2260         ASSERT(spa_writeable(vd->vdev_spa));
2261
2262         mutex_enter(&vd->vdev_dtl_lock);
2263         if (!range_tree_contains(rt, txg, size))
2264                 range_tree_add(rt, txg, size);
2265         mutex_exit(&vd->vdev_dtl_lock);
2266 }
2267
2268 boolean_t
2269 vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2270 {
2271         range_tree_t *rt = vd->vdev_dtl[t];
2272         boolean_t dirty = B_FALSE;
2273
2274         ASSERT(t < DTL_TYPES);
2275         ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2276
2277         /*
2278          * While we are loading the pool, the DTLs have not been loaded yet.
2279          * Ignore the DTLs and try all devices.  This avoids a recursive
2280          * mutex enter on the vdev_dtl_lock, and also makes us try hard
2281          * when loading the pool (relying on the checksum to ensure that
2282          * we get the right data -- note that while loading, we are
2283          * only reading the MOS, which is always checksummed).
2284          */
2285         if (vd->vdev_spa->spa_load_state != SPA_LOAD_NONE)
2286                 return (B_FALSE);
2287
2288         mutex_enter(&vd->vdev_dtl_lock);
2289         if (!range_tree_is_empty(rt))
2290                 dirty = range_tree_contains(rt, txg, size);
2291         mutex_exit(&vd->vdev_dtl_lock);
2292
2293         return (dirty);
2294 }
2295
2296 boolean_t
2297 vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
2298 {
2299         range_tree_t *rt = vd->vdev_dtl[t];
2300         boolean_t empty;
2301
2302         mutex_enter(&vd->vdev_dtl_lock);
2303         empty = range_tree_is_empty(rt);
2304         mutex_exit(&vd->vdev_dtl_lock);
2305
2306         return (empty);
2307 }
2308
2309 /*
2310  * Returns B_TRUE if the vdev determines the offset needs to be resilvered.
2311  */
2312 boolean_t
2313 vdev_dtl_need_resilver(vdev_t *vd, uint64_t offset, size_t psize)
2314 {
2315         ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2316
2317         if (vd->vdev_ops->vdev_op_need_resilver == NULL ||
2318             vd->vdev_ops->vdev_op_leaf)
2319                 return (B_TRUE);
2320
2321         return (vd->vdev_ops->vdev_op_need_resilver(vd, offset, psize));
2322 }
2323
2324 /*
2325  * Returns the lowest txg in the DTL range.
2326  */
2327 static uint64_t
2328 vdev_dtl_min(vdev_t *vd)
2329 {
2330         range_seg_t *rs;
2331
2332         ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
2333         ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
2334         ASSERT0(vd->vdev_children);
2335
2336         rs = avl_first(&vd->vdev_dtl[DTL_MISSING]->rt_root);
2337         return (rs->rs_start - 1);
2338 }
2339
2340 /*
2341  * Returns the highest txg in the DTL.
2342  */
2343 static uint64_t
2344 vdev_dtl_max(vdev_t *vd)
2345 {
2346         range_seg_t *rs;
2347
2348         ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
2349         ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
2350         ASSERT0(vd->vdev_children);
2351
2352         rs = avl_last(&vd->vdev_dtl[DTL_MISSING]->rt_root);
2353         return (rs->rs_end);
2354 }
2355
2356 /*
2357  * Determine if a resilvering vdev should remove any DTL entries from
2358  * its range. If the vdev was resilvering for the entire duration of the
2359  * scan then it should excise that range from its DTLs. Otherwise, this
2360  * vdev is considered partially resilvered and should leave its DTL
2361  * entries intact. The comment in vdev_dtl_reassess() describes how we
2362  * excise the DTLs.
2363  */
2364 static boolean_t
2365 vdev_dtl_should_excise(vdev_t *vd)
2366 {
2367         spa_t *spa = vd->vdev_spa;
2368         dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
2369
2370         ASSERT0(scn->scn_phys.scn_errors);
2371         ASSERT0(vd->vdev_children);
2372
2373         if (vd->vdev_state < VDEV_STATE_DEGRADED)
2374                 return (B_FALSE);
2375
2376         if (vd->vdev_resilver_txg == 0 ||
2377             range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
2378                 return (B_TRUE);
2379
2380         /*
2381          * When a resilver is initiated the scan will assign the scn_max_txg
2382          * value to the highest txg value that exists in all DTLs. If this
2383          * device's max DTL is not part of this scan (i.e. it is not in
2384          * the range (scn_min_txg, scn_max_txg]), then it is not eligible
2385          * for excision.
2386          */
2387         if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
2388                 ASSERT3U(scn->scn_phys.scn_min_txg, <=, vdev_dtl_min(vd));
2389                 ASSERT3U(scn->scn_phys.scn_min_txg, <, vd->vdev_resilver_txg);
2390                 ASSERT3U(vd->vdev_resilver_txg, <=, scn->scn_phys.scn_max_txg);
2391                 return (B_TRUE);
2392         }
2393         return (B_FALSE);
2394 }
2395
2396 /*
2397  * Reassess DTLs after a config change or scrub completion.
2398  */
2399 void
2400 vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
2401 {
2402         spa_t *spa = vd->vdev_spa;
2403         avl_tree_t reftree;
2404         int minref;
2405
2406         ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
2407
2408         for (int c = 0; c < vd->vdev_children; c++)
2409                 vdev_dtl_reassess(vd->vdev_child[c], txg,
2410                     scrub_txg, scrub_done);
2411
2412         if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
2413                 return;
2414
2415         if (vd->vdev_ops->vdev_op_leaf) {
2416                 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
2417
2418                 mutex_enter(&vd->vdev_dtl_lock);
2419
2420                 /*
2421                  * If we've completed a scan cleanly then determine
2422                  * if this vdev should remove any DTLs. We only want to
2423                  * excise regions on vdevs that were available during
2424                  * the entire duration of this scan.
2425                  */
2426                 if (scrub_txg != 0 &&
2427                     (spa->spa_scrub_started ||
2428                     (scn != NULL && scn->scn_phys.scn_errors == 0)) &&
2429                     vdev_dtl_should_excise(vd)) {
2430                         /*
2431                          * We completed a scrub up to scrub_txg.  If we
2432                          * did it without rebooting, then the scrub dtl
2433                          * will be valid, so excise the old region and
2434                          * fold in the scrub dtl.  Otherwise, leave the
2435                          * dtl as-is if there was an error.
2436                          *
2437                          * There's a little trick here: to excise the beginning
2438                          * of the DTL_MISSING map, we put it into a reference
2439                          * tree and then add a segment with refcnt -1 that
2440                          * covers the range [0, scrub_txg).  This means
2441                          * that each txg in that range has refcnt -1 or 0.
2442                          * We then add DTL_SCRUB with a refcnt of 2, so that
2443                          * entries in the range [0, scrub_txg) will have a
2444                          * positive refcnt -- either 1 or 2.  We then convert
2445                          * the reference tree into the new DTL_MISSING map.
2446                          */
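                        /*
                         * Worked example (illustrative): suppose DTL_MISSING
                         * is [50, 150), scrub_txg is 100 and DTL_SCRUB is
                         * [70, 80).  The resulting reference counts are:
                         *   [50, 70):   1 - 1     = 0  (repaired, excised)
                         *   [70, 80):   1 - 1 + 2 = 2  (scrub failed, kept)
                         *   [80, 100):  1 - 1     = 0  (repaired, excised)
                         *   [100, 150): 1         = 1  (beyond scrub, kept)
                         * so the regenerated DTL_MISSING is [70, 80) plus
                         * [100, 150).
                         */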
2447                         space_reftree_create(&reftree);
2448                         space_reftree_add_map(&reftree,
2449                             vd->vdev_dtl[DTL_MISSING], 1);
2450                         space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
2451                         space_reftree_add_map(&reftree,
2452                             vd->vdev_dtl[DTL_SCRUB], 2);
2453                         space_reftree_generate_map(&reftree,
2454                             vd->vdev_dtl[DTL_MISSING], 1);
2455                         space_reftree_destroy(&reftree);
2456                 }
2457                 range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
2458                 range_tree_walk(vd->vdev_dtl[DTL_MISSING],
2459                     range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
2460                 if (scrub_done)
2461                         range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
2462                 range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
2463                 if (!vdev_readable(vd))
2464                         range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
2465                 else
2466                         range_tree_walk(vd->vdev_dtl[DTL_MISSING],
2467                             range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
2468
2469                 /*
2470                  * If the vdev was resilvering and no longer has any
2471                  * DTLs then reset its resilvering flag and dirty
2472                  * the top level so that we persist the change.
2473                  */
2474                 if (vd->vdev_resilver_txg != 0 &&
2475                     range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
2476                     range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) {
2477                         vd->vdev_resilver_txg = 0;
2478                         vdev_config_dirty(vd->vdev_top);
2479                 }
2480
2481                 mutex_exit(&vd->vdev_dtl_lock);
2482
2483                 if (txg != 0)
2484                         vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
2485                 return;
2486         }
2487
2488         mutex_enter(&vd->vdev_dtl_lock);
2489         for (int t = 0; t < DTL_TYPES; t++) {
2490                 /* account for child's outage in parent's missing map */
2491                 int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
2492                 if (t == DTL_SCRUB)
2493                         continue;                       /* leaf vdevs only */
2494                 if (t == DTL_PARTIAL)
2495                         minref = 1;                     /* i.e. non-zero */
2496                 else if (vd->vdev_nparity != 0)
2497                         minref = vd->vdev_nparity + 1;  /* RAID-Z */
2498                 else
2499                         minref = vd->vdev_children;     /* any kind of mirror */
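                /*
                 * For example (illustrative): in a 3-way mirror minref == 3,
                 * so a txg missing from only two children can still be read
                 * from the third; it lands in DTL_PARTIAL (minref == 1) but
                 * not in DTL_MISSING.  Likewise for raidz2 (vdev_nparity == 2),
                 * minref == 3, since up to two missing children are tolerable.
                 */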
2500                 space_reftree_create(&reftree);
2501                 for (int c = 0; c < vd->vdev_children; c++) {
2502                         vdev_t *cvd = vd->vdev_child[c];
2503                         mutex_enter(&cvd->vdev_dtl_lock);
2504                         space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
2505                         mutex_exit(&cvd->vdev_dtl_lock);
2506                 }
2507                 space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref);
2508                 space_reftree_destroy(&reftree);
2509         }
2510         mutex_exit(&vd->vdev_dtl_lock);
2511 }
2512
2513 int
2514 vdev_dtl_load(vdev_t *vd)
2515 {
2516         spa_t *spa = vd->vdev_spa;
2517         objset_t *mos = spa->spa_meta_objset;
2518         int error = 0;
2519
2520         if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
2521                 ASSERT(vdev_is_concrete(vd));
2522
2523                 error = space_map_open(&vd->vdev_dtl_sm, mos,
2524                     vd->vdev_dtl_object, 0, -1ULL, 0);
2525                 if (error)
2526                         return (error);
2527                 ASSERT(vd->vdev_dtl_sm != NULL);
2528
2529                 mutex_enter(&vd->vdev_dtl_lock);
2530
2531                 /*
2532                  * Now that we've opened the space_map we need to update
2533                  * the in-core DTL.
2534                  */
2535                 space_map_update(vd->vdev_dtl_sm);
2536
2537                 error = space_map_load(vd->vdev_dtl_sm,
2538                     vd->vdev_dtl[DTL_MISSING], SM_ALLOC);
2539                 mutex_exit(&vd->vdev_dtl_lock);
2540
2541                 return (error);
2542         }
2543
2544         for (int c = 0; c < vd->vdev_children; c++) {
2545                 error = vdev_dtl_load(vd->vdev_child[c]);
2546                 if (error != 0)
2547                         break;
2548         }
2549
2550         return (error);
2551 }
2552
2553 void
2554 vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
2555 {
2556         spa_t *spa = vd->vdev_spa;
2557
2558         VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
2559         VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
2560             zapobj, tx));
2561 }
2562
2563 uint64_t
2564 vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
2565 {
2566         spa_t *spa = vd->vdev_spa;
2567         uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
2568             DMU_OT_NONE, 0, tx);
2569
2570         ASSERT(zap != 0);
2571         VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
2572             zap, tx));
2573
2574         return (zap);
2575 }
2576
2577 void
2578 vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
2579 {
2580         if (vd->vdev_ops != &vdev_hole_ops &&
2581             vd->vdev_ops != &vdev_missing_ops &&
2582             vd->vdev_ops != &vdev_root_ops &&
2583             !vd->vdev_top->vdev_removing) {
2584                 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) {
2585                         vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx);
2586                 }
2587                 if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
2588                         vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
2589                 }
2590         }
2591         for (uint64_t i = 0; i < vd->vdev_children; i++) {
2592                 vdev_construct_zaps(vd->vdev_child[i], tx);
2593         }
2594 }
2595
2596 void
2597 vdev_dtl_sync(vdev_t *vd, uint64_t txg)
2598 {
2599         spa_t *spa = vd->vdev_spa;
2600         range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
2601         objset_t *mos = spa->spa_meta_objset;
2602         range_tree_t *rtsync;
2603         dmu_tx_t *tx;
2604         uint64_t object = space_map_object(vd->vdev_dtl_sm);
2605
2606         ASSERT(vdev_is_concrete(vd));
2607         ASSERT(vd->vdev_ops->vdev_op_leaf);
2608
2609         tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
2610
2611         if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
2612                 mutex_enter(&vd->vdev_dtl_lock);
2613                 space_map_free(vd->vdev_dtl_sm, tx);
2614                 space_map_close(vd->vdev_dtl_sm);
2615                 vd->vdev_dtl_sm = NULL;
2616                 mutex_exit(&vd->vdev_dtl_lock);
2617
2618                 /*
2619                  * We only destroy the leaf ZAP for detached leaves or for
2620                  * removed log devices. Removed data devices handle leaf ZAP
2621                  * cleanup later, once cancellation is no longer possible.
2622                  */
2623                 if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached ||
2624                     vd->vdev_top->vdev_islog)) {
2625                         vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx);
2626                         vd->vdev_leaf_zap = 0;
2627                 }
2628
2629                 dmu_tx_commit(tx);
2630                 return;
2631         }
2632
2633         if (vd->vdev_dtl_sm == NULL) {
2634                 uint64_t new_object;
2635
2636                 new_object = space_map_alloc(mos, vdev_dtl_sm_blksz, tx);
2637                 VERIFY3U(new_object, !=, 0);
2638
2639                 VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
2640                     0, -1ULL, 0));
2641                 ASSERT(vd->vdev_dtl_sm != NULL);
2642         }
2643
2644         rtsync = range_tree_create(NULL, NULL);
2645
2646         mutex_enter(&vd->vdev_dtl_lock);
2647         range_tree_walk(rt, range_tree_add, rtsync);
2648         mutex_exit(&vd->vdev_dtl_lock);
2649
2650         space_map_truncate(vd->vdev_dtl_sm, vdev_dtl_sm_blksz, tx);
2651         space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx);
2652         range_tree_vacate(rtsync, NULL, NULL);
2653
2654         range_tree_destroy(rtsync);
2655
2656         /*
2657          * If the object for the space map has changed then dirty
2658          * the top level so that we update the config.
2659          */
2660         if (object != space_map_object(vd->vdev_dtl_sm)) {
2661                 vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, "
2662                     "new object %llu", (u_longlong_t)txg, spa_name(spa),
2663                     (u_longlong_t)object,
2664                     (u_longlong_t)space_map_object(vd->vdev_dtl_sm));
2665                 vdev_config_dirty(vd->vdev_top);
2666         }
2667
2668         dmu_tx_commit(tx);
2669
2670         mutex_enter(&vd->vdev_dtl_lock);
2671         space_map_update(vd->vdev_dtl_sm);
2672         mutex_exit(&vd->vdev_dtl_lock);
2673 }
2674
2675 /*
2676  * Determine whether the specified vdev can be offlined/detached/removed
2677  * without losing data.
2678  */
2679 boolean_t
2680 vdev_dtl_required(vdev_t *vd)
2681 {
2682         spa_t *spa = vd->vdev_spa;
2683         vdev_t *tvd = vd->vdev_top;
2684         uint8_t cant_read = vd->vdev_cant_read;
2685         boolean_t required;
2686
2687         ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2688
2689         if (vd == spa->spa_root_vdev || vd == tvd)
2690                 return (B_TRUE);
2691
2692         /*
2693          * Temporarily mark the device as unreadable, and then determine
2694          * whether this results in any DTL outages in the top-level vdev.
2695          * If not, we can safely offline/detach/remove the device.
2696          */
2697         vd->vdev_cant_read = B_TRUE;
2698         vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
2699         required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
2700         vd->vdev_cant_read = cant_read;
2701         vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
2702
2703         if (!required && zio_injection_enabled)
2704                 required = !!zio_handle_device_injection(vd, NULL, ECHILD);
2705
2706         return (required);
2707 }
2708
2709 /*
2710  * Determine if resilver is needed, and if so the txg range.
2711  */
2712 boolean_t
2713 vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
2714 {
2715         boolean_t needed = B_FALSE;
2716         uint64_t thismin = UINT64_MAX;
2717         uint64_t thismax = 0;
2718
2719         if (vd->vdev_children == 0) {
2720                 mutex_enter(&vd->vdev_dtl_lock);
2721                 if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
2722                     vdev_writeable(vd)) {
2723
2724                         thismin = vdev_dtl_min(vd);
2725                         thismax = vdev_dtl_max(vd);
2726                         needed = B_TRUE;
2727                 }
2728                 mutex_exit(&vd->vdev_dtl_lock);
2729         } else {
2730                 for (int c = 0; c < vd->vdev_children; c++) {
2731                         vdev_t *cvd = vd->vdev_child[c];
2732                         uint64_t cmin, cmax;
2733
2734                         if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
2735                                 thismin = MIN(thismin, cmin);
2736                                 thismax = MAX(thismax, cmax);
2737                                 needed = B_TRUE;
2738                         }
2739                 }
2740         }
2741
2742         if (needed && minp) {
2743                 *minp = thismin;
2744                 *maxp = thismax;
2745         }
2746         return (needed);
2747 }
2748
2749 /*
2750  * Gets the checkpoint space map object from the vdev's ZAP.
2751  * Returns the spacemap object, or 0 if it wasn't in the ZAP
2752  * or the ZAP doesn't exist yet.
2753  */
2754 int
2755 vdev_checkpoint_sm_object(vdev_t *vd)
2756 {
2757         ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
2758         if (vd->vdev_top_zap == 0) {
2759                 return (0);
2760         }
2761
2762         uint64_t sm_obj = 0;
2763         int err = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
2764             VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, &sm_obj);
2765
2766         ASSERT(err == 0 || err == ENOENT);
2767
2768         return (sm_obj);
2769 }
2770
2771 int
2772 vdev_load(vdev_t *vd)
2773 {
2774         int error = 0;
2775         /*
2776          * Recursively load all children.
2777          */
2778         for (int c = 0; c < vd->vdev_children; c++) {
2779                 error = vdev_load(vd->vdev_child[c]);
2780                 if (error != 0) {
2781                         return (error);
2782                 }
2783         }
2784
2785         vdev_set_deflate_ratio(vd);
2786
2787         /*
2788          * If this is a top-level vdev, initialize its metaslabs.
2789          */
2790         if (vd == vd->vdev_top && vdev_is_concrete(vd)) {
2791                 if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
2792                         vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2793                             VDEV_AUX_CORRUPT_DATA);
2794                         vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, "
2795                             "asize=%llu", (u_longlong_t)vd->vdev_ashift,
2796                             (u_longlong_t)vd->vdev_asize);
2797                         return (SET_ERROR(ENXIO));
2798                 } else if ((error = vdev_metaslab_init(vd, 0)) != 0) {
2799                         vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
2800                             "[error=%d]", error);
2801                         vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2802                             VDEV_AUX_CORRUPT_DATA);
2803                         return (error);
2804                 }
2805
2806                 uint64_t checkpoint_sm_obj = vdev_checkpoint_sm_object(vd);
2807                 if (checkpoint_sm_obj != 0) {
2808                         objset_t *mos = spa_meta_objset(vd->vdev_spa);
2809                         ASSERT(vd->vdev_asize != 0);
2810                         ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
2811
2812                         if ((error = space_map_open(&vd->vdev_checkpoint_sm,
2813                             mos, checkpoint_sm_obj, 0, vd->vdev_asize,
2814                             vd->vdev_ashift))) {
2815                                 vdev_dbgmsg(vd, "vdev_load: space_map_open "
2816                                     "failed for checkpoint spacemap (obj %llu) "
2817                                     "[error=%d]",
2818                                     (u_longlong_t)checkpoint_sm_obj, error);
2819                                 return (error);
2820                         }
2821                         ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
2822                         space_map_update(vd->vdev_checkpoint_sm);
2823
2824                         /*
2825                          * Since the checkpoint_sm contains free entries
2826                          * exclusively, we can use sm_alloc to indicate the
2827                          * cumulative checkpointed space that has been freed.
2828                          */
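                        /*
                         * Illustrative example: only frees are appended, so
                         * sm_alloc only decreases; after 1GB of checkpointed
                         * frees, sm_alloc == -1GB and negating it yields
                         * vs_checkpoint_space == 1GB.
                         */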
2829                         vd->vdev_stat.vs_checkpoint_space =
2830                             -vd->vdev_checkpoint_sm->sm_alloc;
2831                         vd->vdev_spa->spa_checkpoint_info.sci_dspace +=
2832                             vd->vdev_stat.vs_checkpoint_space;
2833                 }
2834         }
2835
2836         /*
2837          * If this is a leaf vdev, load its DTL.
2838          */
2839         if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
2840                 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2841                     VDEV_AUX_CORRUPT_DATA);
2842                 vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
2843                     "[error=%d]", error);
2844                 return (error);
2845         }
2846
2847         uint64_t obsolete_sm_object = vdev_obsolete_sm_object(vd);
2848         if (obsolete_sm_object != 0) {
2849                 objset_t *mos = vd->vdev_spa->spa_meta_objset;
2850                 ASSERT(vd->vdev_asize != 0);
2851                 ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
2852
2853                 if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
2854                     obsolete_sm_object, 0, vd->vdev_asize, 0))) {
2855                         vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2856                             VDEV_AUX_CORRUPT_DATA);
2857                         vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
2858                             "obsolete spacemap (obj %llu) [error=%d]",
2859                             (u_longlong_t)obsolete_sm_object, error);
2860                         return (error);
2861                 }
2862                 space_map_update(vd->vdev_obsolete_sm);
2863         }
2864
2865         return (0);
2866 }
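
/*
 * A minimal caller sketch, assuming pool-load context (the helper name
 * load_vdev_metadata() is hypothetical; the real call site lives in spa.c
 * and loads the root vdev, which recurses over its children).
 */
static int
load_vdev_metadata(spa_t *spa)
{
	int error = vdev_load(spa->spa_root_vdev);

	if (error != 0)
		zfs_dbgmsg("vdev_load failed [error=%d]", error);
	return (error);
}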
2867
2868 /*
2869  * The special vdev case is used for hot spares and l2cache devices.  Its
2870  * sole purpose is to set the vdev state for the associated vdev.  To do this,
2871  * we make sure that we can open the underlying device, then try to read the
2872  * label, and make sure that the label is sane and that it hasn't been
2873  * repurposed to another pool.
2874  */
2875 int
2876 vdev_validate_aux(vdev_t *vd)
2877 {
2878         nvlist_t *label;
2879         uint64_t guid, version;
2880         uint64_t state;
2881
2882         if (!vdev_readable(vd))
2883                 return (0);
2884
2885         if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
2886                 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2887                     VDEV_AUX_CORRUPT_DATA);
2888                 return (-1);
2889         }
2890
2891         if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
2892             !SPA_VERSION_IS_SUPPORTED(version) ||
2893             nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
2894             guid != vd->vdev_guid ||
2895             nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
2896                 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2897                     VDEV_AUX_CORRUPT_DATA);
2898                 nvlist_free(label);
2899                 return (-1);
2900         }
2901
2902         /*
2903          * We don't actually check the pool state here.  If it's in fact in
2904          * use by another pool, we update this fact on the fly when requested.
2905          */
2906         nvlist_free(label);
2907         return (0);
2908 }
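
/*
 * A minimal usage sketch, assuming a caller that walks an aux vdev list
 * (the helper name check_aux_devices() is hypothetical; the real callers
 * are the spare/l2cache checks in spa.c).  A non-zero return means the
 * label was unreadable, unsupported, or repurposed to another pool.
 */
static void
check_aux_devices(spa_aux_vdev_t *sav)
{
	for (int i = 0; i < sav->sav_count; i++) {
		vdev_t *vd = sav->sav_vdevs[i];

		if (vdev_validate_aux(vd) != 0)
			vdev_dbgmsg(vd, "aux label validation failed");
	}
}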
2909
2910 /*
2911  * Free the objects used to store this vdev's spacemaps, and the array
2912  * that points to them.
2913  */
2914 void
2915 vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx)
2916 {
2917         if (vd->vdev_ms_array == 0)
2918                 return;
2919
2920         objset_t *mos = vd->vdev_spa->spa_meta_objset;
2921         uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift;
2922         size_t array_bytes = array_count * sizeof (uint64_t);
2923         uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP);
2924         VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0,
2925             array_bytes, smobj_array, 0));
2926
2927         for (uint64_t i = 0; i < array_count; i++) {
2928                 uint64_t smobj = smobj_array[i];
2929                 if (smobj == 0)
2930                         continue;
2931
2932                 space_map_free_obj(mos, smobj, tx);
2933         }
2934
2935         kmem_free(smobj_array, array_bytes);
2936         VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx));
2937         vd->vdev_ms_array = 0;
2938 }
2939
2940 static void
2941 vdev_remove_empty(vdev_t *vd, uint64_t txg)
2942 {
2943         spa_t *spa = vd->vdev_spa;
2944         dmu_tx_t *tx;
2945
2946         ASSERT(vd == vd->vdev_top);
2947         ASSERT3U(txg, ==, spa_syncing_txg(spa));
2948
2949         if (vd->vdev_ms != NULL) {
2950                 metaslab_group_t *mg = vd->vdev_mg;
2951
2952                 metaslab_group_histogram_verify(mg);
2953                 metaslab_class_histogram_verify(mg->mg_class);
2954
2955                 for (int m = 0; m < vd->vdev_ms_count; m++) {
2956                         metaslab_t *msp = vd->vdev_ms[m];
2957
2958                         if (msp == NULL || msp->ms_sm == NULL)
2959                                 continue;
2960
2961                         mutex_enter(&msp->ms_lock);
2962                         /*
2963                          * If the metaslab was not loaded when the vdev
2964                          * was removed then the histogram accounting may
2965                          * not be accurate. Update the histogram information
2966                          * here so that we ensure that the metaslab group
2967                          * and metaslab class are up-to-date.
2968                          */
2969                         metaslab_group_histogram_remove(mg, msp);
2970
2971                         VERIFY0(space_map_allocated(msp->ms_sm));
2972                         space_map_close(msp->ms_sm);
2973                         msp->ms_sm = NULL;
2974                         mutex_exit(&msp->ms_lock);
2975                 }
2976
2977                 if (vd->vdev_checkpoint_sm != NULL) {
2978                         ASSERT(spa_has_checkpoint(spa));
2979                         space_map_close(vd->vdev_checkpoint_sm);
2980                         vd->vdev_checkpoint_sm = NULL;
2981                 }
2982
2983                 metaslab_group_histogram_verify(mg);
2984                 metaslab_class_histogram_verify(mg->mg_class);
2985                 for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
2986                         ASSERT0(mg->mg_histogram[i]);
2987         }
2988
2989         tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
2990         vdev_destroy_spacemaps(vd, tx);
2991
2992         if (vd->vdev_islog && vd->vdev_top_zap != 0) {
2993                 vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
2994                 vd->vdev_top_zap = 0;
2995         }
2996         dmu_tx_commit(tx);
2997 }
2998
2999 void
3000 vdev_sync_done(vdev_t *vd, uint64_t txg)
3001 {
3002         metaslab_t *msp;
3003         boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
3004
3005         ASSERT(vdev_is_concrete(vd));
3006
3007         while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))) != NULL)
3008                 metaslab_sync_done(msp, txg);
3009
3010         if (reassess)
3011                 metaslab_sync_reassess(vd->vdev_mg);
3012 }
3013
3014 void
3015 vdev_sync(vdev_t *vd, uint64_t txg)
3016 {
3017         spa_t *spa = vd->vdev_spa;
3018         vdev_t *lvd;
3019         metaslab_t *msp;
3020         dmu_tx_t *tx;
3021
3022         if (range_tree_space(vd->vdev_obsolete_segments) > 0) {
3023                 dmu_tx_t *tx;
3024
3025                 ASSERT(vd->vdev_removing ||
3026                     vd->vdev_ops == &vdev_indirect_ops);
3027
3028                 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3029                 vdev_indirect_sync_obsolete(vd, tx);
3030                 dmu_tx_commit(tx);
3031
3032                 /*
3033                  * If the vdev is indirect, it can't have dirty
3034                  * metaslabs or DTLs.
3035                  */
3036                 if (vd->vdev_ops == &vdev_indirect_ops) {
3037                         ASSERT(txg_list_empty(&vd->vdev_ms_list, txg));
3038                         ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg));
3039                         return;
3040                 }
3041         }
3042
3043         ASSERT(vdev_is_concrete(vd));
3044
3045         if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 &&
3046             !vd->vdev_removing) {
3047                 ASSERT(vd == vd->vdev_top);
3048                 ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
3049                 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3050                 vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
3051                     DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
3052                 ASSERT(vd->vdev_ms_array != 0);
3053                 vdev_config_dirty(vd);
3054                 dmu_tx_commit(tx);
3055         }
3056
3057         while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
3058                 metaslab_sync(msp, txg);
3059                 (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
3060         }
3061
3062         while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
3063                 vdev_dtl_sync(lvd, txg);
3064
3065         /*
3066          * Remove the metadata associated with this vdev once it's empty.
3067          * Note that this is typically used for log/cache device removal;
3068          * we don't empty top-level vdevs when removing them.  But if
3069          * a top-level vdev happens to be emptied, this is not harmful.
3070          */
3071         if (vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing) {
3072                 vdev_remove_empty(vd, txg);
3073         }
3074
3075         (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
3076 }
3077
3078 uint64_t
3079 vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
3080 {
3081         return (vd->vdev_ops->vdev_op_asize(vd, psize));
3082 }
3083
3084 /*
3085  * Mark the given vdev faulted.  A faulted vdev behaves as if the device could
3086  * not be opened, and no I/O is attempted.
3087  */
3088 int
3089 vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
3090 {
3091         vdev_t *vd, *tvd;
3092
3093         spa_vdev_state_enter(spa, SCL_NONE);
3094
3095         if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
3096                 return (spa_vdev_state_exit(spa, NULL, ENODEV));
3097
3098         if (!vd->vdev_ops->vdev_op_leaf)
3099                 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
3100
3101         tvd = vd->vdev_top;
3102
3103         /*
3104          * We don't directly use the aux state here, but if we do a
3105          * vdev_reopen(), we need this value to be present to remember why we
3106          * were faulted.
3107          */
3108         vd->vdev_label_aux = aux;
3109
3110         /*
3111          * Faulted state takes precedence over degraded.
3112          */
3113         vd->vdev_delayed_close = B_FALSE;
3114         vd->vdev_faulted = 1ULL;
3115         vd->vdev_degraded = 0ULL;
3116         vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
3117
3118         /*
3119          * If this device has the only valid copy of the data, then
3120          * back off and simply mark the vdev as degraded instead.
3121          */
3122         if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
3123                 vd->vdev_degraded = 1ULL;
3124                 vd->vdev_faulted = 0ULL;
3125
3126                 /*
3127                  * If we reopen the device and it's not dead, only then do we
3128                  * mark it degraded.
3129                  */
3130                 vdev_reopen(tvd);
3131
3132                 if (vdev_readable(vd))
3133                         vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
3134         }
3135
3136         return (spa_vdev_state_exit(spa, vd, 0));
3137 }
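
/*
 * A minimal usage sketch (hypothetical helper): an FMA retire agent that
 * has diagnosed a leaf as failed can fault it by guid.  The aux value is
 * kept in vdev_label_aux above so a later vdev_reopen() remembers why the
 * device was faulted; VDEV_AUX_ERR_EXCEEDED is the value used when an
 * error threshold was exceeded.
 */
static void
fault_diagnosed_vdev(spa_t *spa, uint64_t guid)
{
	int error = vdev_fault(spa, guid, VDEV_AUX_ERR_EXCEEDED);

	if (error != 0)
		zfs_dbgmsg("vdev_fault(%llu) failed [error=%d]",
		    (u_longlong_t)guid, error);
}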
3138
3139 /*
3140  * Mark the given vdev degraded.  A degraded vdev is purely an indication to the
3141  * user that something is wrong.  The vdev continues to operate as normal as far
3142  * as I/O is concerned.
3143  */
3144 int
3145 vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
3146 {
3147         vdev_t *vd;
3148
3149         spa_vdev_state_enter(spa, SCL_NONE);
3150
3151         if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
3152                 return (spa_vdev_state_exit(spa, NULL, ENODEV));
3153
3154         if (!vd->vdev_ops->vdev_op_leaf)
3155                 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
3156
3157         /*
3158          * If the vdev is already faulted, then don't do anything.
3159          */
3160         if (vd->vdev_faulted || vd->vdev_degraded)
3161                 return (spa_vdev_state_exit(spa, NULL, 0));
3162
3163         vd->vdev_degraded = 1ULL;
3164         if (!vdev_is_dead(vd))
3165                 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
3166                     aux);
3167
3168         return (spa_vdev_state_exit(spa, vd, 0));
3169 }
3170
3171 /*
3172  * Online the given vdev.
3173  *
3174  * If 'ZFS_ONLINE_UNSPARE' is set, it implies two things.  First, any attached
3175  * spare device should be detached when the device finishes resilvering.
3176  * Second, the online should be treated like a 'test' online case, so no FMA
3177  * events are generated if the device fails to open.
3178  */
3179 int
3180 vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
3181 {
3182         vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
3183         boolean_t wasoffline;
3184         vdev_state_t oldstate;
3185
3186         spa_vdev_state_enter(spa, SCL_NONE);
3187
3188         if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
3189                 return (spa_vdev_state_exit(spa, NULL, ENODEV));
3190
3191         if (!vd->vdev_ops->vdev_op_leaf)
3192                 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
3193
3194         wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
3195         oldstate = vd->vdev_state;
3196
3197         tvd = vd->vdev_top;
3198         vd->vdev_offline = B_FALSE;
3199         vd->vdev_tmpoffline = B_FALSE;
3200         vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
3201         vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
3202
3203         /* XXX - L2ARC 1.0 does not support expansion */
3204         if (!vd->vdev_aux) {
3205                 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
3206                         pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND);
3207         }
3208
3209         vdev_reopen(tvd);
3210         vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
3211
3212         if (!vd->vdev_aux) {
3213                 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
3214                         pvd->vdev_expanding = B_FALSE;
3215         }
3216
3217         if (newstate)
3218                 *newstate = vd->vdev_state;
3219         if ((flags & ZFS_ONLINE_UNSPARE) &&
3220             !vdev_is_dead(vd) && vd->vdev_parent &&
3221             vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
3222             vd->vdev_parent->vdev_child[0] == vd)
3223                 vd->vdev_unspare = B_TRUE;
3224
3225         if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
3226
3227                 /* XXX - L2ARC 1.0 does not support expansion */
3228                 if (vd->vdev_aux)
3229                         return (spa_vdev_state_exit(spa, vd, ENOTSUP));
3230                 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
3231         }
3232
3233         if (wasoffline ||
3234             (oldstate < VDEV_STATE_DEGRADED &&
3235             vd->vdev_state >= VDEV_STATE_DEGRADED))
3236                 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE);
3237
3238         return (spa_vdev_state_exit(spa, vd, 0));
3239 }
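
/*
 * A minimal usage sketch (hypothetical helper; the real entry point is
 * the zfs_ioc_vdev_set_state ioctl): online a device and request
 * expansion so a grown LUN's extra capacity becomes usable.
 */
static int
online_and_expand(spa_t *spa, uint64_t guid)
{
	vdev_state_t newstate;
	int error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate);

	if (error == 0 && newstate != VDEV_STATE_HEALTHY)
		zfs_dbgmsg("vdev %llu onlined, state %d",
		    (u_longlong_t)guid, (int)newstate);
	return (error);
}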
3240
3241 static int
3242 vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
3243 {
3244         vdev_t *vd, *tvd;
3245         int error = 0;
3246         uint64_t generation;
3247         metaslab_group_t *mg;
3248
3249 top:
3250         spa_vdev_state_enter(spa, SCL_ALLOC);
3251
3252         if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
3253                 return (spa_vdev_state_exit(spa, NULL, ENODEV));
3254
3255         if (!vd->vdev_ops->vdev_op_leaf)
3256                 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
3257
3258         tvd = vd->vdev_top;
3259         mg = tvd->vdev_mg;
3260         generation = spa->spa_config_generation + 1;
3261
3262         /*
3263          * If the device isn't already offline, try to offline it.
3264          */
3265         if (!vd->vdev_offline) {
3266                 /*
3267                  * If this device has the only valid copy of some data,
3268                  * don't allow it to be offlined. Log devices are always
3269                  * expendable.
3270                  */
3271                 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
3272                     vdev_dtl_required(vd))
3273                         return (spa_vdev_state_exit(spa, NULL, EBUSY));
3274
3275                 /*
3276                  * If the top-level is a slog and it has had allocations
3277                  * then proceed.  We check that the vdev's metaslab group
3278                  * is not NULL since it's possible that we may have just
3279                  * added this vdev but not yet initialized its metaslabs.
3280                  */
3281                 if (tvd->vdev_islog && mg != NULL) {
3282                         /*
3283                          * Prevent any future allocations.
3284                          */
3285                         metaslab_group_passivate(mg);
3286                         (void) spa_vdev_state_exit(spa, vd, 0);
3287
3288                         error = spa_reset_logs(spa);
3289
3290                         /*
3291                          * If the log device was successfully reset but has
3292                          * checkpointed data, do not offline it.
3293                          */
3294                         if (error == 0 &&
3295                             tvd->vdev_checkpoint_sm != NULL) {
3296                                 ASSERT3U(tvd->vdev_checkpoint_sm->sm_alloc,
3297                                     !=, 0);
3298                                 error = ZFS_ERR_CHECKPOINT_EXISTS;
3299                         }
3300
3301                         spa_vdev_state_enter(spa, SCL_ALLOC);
3302
3303                         /*
3304                          * Check to see if the config has changed.
3305                          */
3306                         if (error || generation != spa->spa_config_generation) {
3307                                 metaslab_group_activate(mg);
3308                                 if (error)
3309                                         return (spa_vdev_state_exit(spa,
3310                                             vd, error));
3311                                 (void) spa_vdev_state_exit(spa, vd, 0);
3312                                 goto top;
3313                         }
3314                         ASSERT0(tvd->vdev_stat.vs_alloc);
3315                 }
3316
3317                 /*
3318                  * Offline this device and reopen its top-level vdev.
3319                  * If the top-level vdev is a log device then just offline
3320                  * it. Otherwise, if this action results in the top-level
3321                  * vdev becoming unusable, undo it and fail the request.
3322                  */
3323                 vd->vdev_offline = B_TRUE;
3324                 vdev_reopen(tvd);
3325
3326                 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
3327                     vdev_is_dead(tvd)) {
3328                         vd->vdev_offline = B_FALSE;
3329                         vdev_reopen(tvd);
3330                         return (spa_vdev_state_exit(spa, NULL, EBUSY));
3331                 }
3332
3333                 /*
3334                  * Add the device back into the metaslab rotor so that
3335                  * once we online the device it's open for business.
3336                  */
3337                 if (tvd->vdev_islog && mg != NULL)
3338                         metaslab_group_activate(mg);
3339         }
3340
3341         vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
3342
3343         return (spa_vdev_state_exit(spa, vd, 0));
3344 }
3345
3346 int
3347 vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
3348 {
3349         int error;
3350
3351         mutex_enter(&spa->spa_vdev_top_lock);
3352         error = vdev_offline_locked(spa, guid, flags);
3353         mutex_exit(&spa->spa_vdev_top_lock);
3354
3355         return (error);
3356 }
3357
3358 /*
3359  * Clear the error counts associated with this vdev.  Unlike vdev_online() and
3360  * vdev_offline(), we assume the spa config is locked.  We also clear all
3361  * children.  If 'vd' is NULL, then the user wants to clear all vdevs.
3362  */
3363 void
3364 vdev_clear(spa_t *spa, vdev_t *vd)
3365 {
3366         vdev_t *rvd = spa->spa_root_vdev;
3367
3368         ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
3369
3370         if (vd == NULL)
3371                 vd = rvd;
3372
3373         vd->vdev_stat.vs_read_errors = 0;
3374         vd->vdev_stat.vs_write_errors = 0;
3375         vd->vdev_stat.vs_checksum_errors = 0;
3376
3377         for (int c = 0; c < vd->vdev_children; c++)
3378                 vdev_clear(spa, vd->vdev_child[c]);
3379
3380         if (vd == rvd) {
3381                 for (int c = 0; c < spa->spa_l2cache.sav_count; c++)
3382                         vdev_clear(spa, spa->spa_l2cache.sav_vdevs[c]);
3383
3384                 for (int c = 0; c < spa->spa_spares.sav_count; c++)
3385                         vdev_clear(spa, spa->spa_spares.sav_vdevs[c]);
3386         }
3387
3388         /*
3389          * It makes no sense to "clear" an indirect vdev.
3390          */
3391         if (!vdev_is_concrete(vd))
3392                 return;
3393
3394         /*
3395          * If we're in the FAULTED state or have experienced failed I/O, then
3396          * clear the persistent state and attempt to reopen the device.  We
3397          * also mark the vdev config dirty, so that the new faulted state is
3398          * written out to disk.
3399          */
3400         if (vd->vdev_faulted || vd->vdev_degraded ||
3401             !vdev_readable(vd) || !vdev_writeable(vd)) {
3402
3403                 /*
3404                  * When reopening in response to a clear event, it may be due to
3405                  * a fmadm repair request.  In this case, if the device is
3406                  * still broken, we still want to post the ereport again.
3407                  */
3408                 vd->vdev_forcefault = B_TRUE;
3409
3410                 vd->vdev_faulted = vd->vdev_degraded = 0ULL;
3411                 vd->vdev_cant_read = B_FALSE;
3412                 vd->vdev_cant_write = B_FALSE;
3413
3414                 vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
3415
3416                 vd->vdev_forcefault = B_FALSE;
3417
3418                 if (vd != rvd && vdev_writeable(vd->vdev_top))
3419                         vdev_state_dirty(vd->vdev_top);
3420
3421                 if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
3422                         spa_async_request(spa, SPA_ASYNC_RESILVER);
3423
3424                 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
3425         }
3426
3427         /*
3428          * When clearing a FMA-diagnosed fault, we always want to
3429          * unspare the device, as we assume that the original spare was
3430          * done in response to the FMA fault.
3431          */
3432         if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
3433             vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
3434             vd->vdev_parent->vdev_child[0] == vd)
3435                 vd->vdev_unspare = B_TRUE;
3436 }
3437
3438 boolean_t
3439 vdev_is_dead(vdev_t *vd)
3440 {
3441         /*
3442          * Holes and missing devices are always considered "dead".
3443          * This simplifies the code since we don't have to check for
3444          * these types of devices in the various code paths.
3445          * Instead we rely on the fact that we skip over dead devices
3446          * before issuing I/O to them.
3447          */
3448         return (vd->vdev_state < VDEV_STATE_DEGRADED ||
3449             vd->vdev_ops == &vdev_hole_ops ||
3450             vd->vdev_ops == &vdev_missing_ops);
3451 }
3452
3453 boolean_t
3454 vdev_readable(vdev_t *vd)
3455 {
3456         return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
3457 }
3458
3459 boolean_t
3460 vdev_writeable(vdev_t *vd)
3461 {
3462         return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
3463             vdev_is_concrete(vd));
3464 }
3465
3466 boolean_t
3467 vdev_allocatable(vdev_t *vd)
3468 {
3469         uint64_t state = vd->vdev_state;
3470
3471         /*
3472          * We currently allow allocations from vdevs which may be in the
3473          * process of reopening (i.e. VDEV_STATE_CLOSED). If the device
3474          * fails to reopen then we'll catch it later when we're holding
3475          * the proper locks.  Note that we have to get the vdev state
3476          * in a local variable because although it changes atomically,
3477          * we're asking two separate questions about it.
3478          */
3479         return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
3480             !vd->vdev_cant_write && vdev_is_concrete(vd) &&
3481             vd->vdev_mg->mg_initialized);
3482 }
3483
3484 boolean_t
3485 vdev_accessible(vdev_t *vd, zio_t *zio)
3486 {
3487         ASSERT(zio->io_vd == vd);
3488
3489         if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
3490                 return (B_FALSE);
3491
3492         if (zio->io_type == ZIO_TYPE_READ)
3493                 return (!vd->vdev_cant_read);
3494
3495         if (zio->io_type == ZIO_TYPE_WRITE)
3496                 return (!vd->vdev_cant_write);
3497
3498         return (B_TRUE);
3499 }
3500
3501 boolean_t
3502 vdev_is_spacemap_addressable(vdev_t *vd)
3503 {
3504         /*
3505          * Assuming 47 bits of the space map entry are dedicated to the entry's
3506          * offset (see description in space_map.h), we calculate the maximum
3507          * address that can be described by a space map entry for the given
3508          * device.
3509          */
3510         uint64_t shift = vd->vdev_ashift + 47;
3511
3512         if (shift >= 63) /* detect potential overflow */
3513                 return (B_TRUE);
3514
3515         return (vd->vdev_asize < (1ULL << shift));
3516 }
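
/*
 * A worked example under assumed values (standalone sketch mirroring the
 * check above, not part of any call path): with ashift = 12 the 47-bit
 * offset field can describe up to 1 << (12 + 47) = 2^59 bytes, so a
 * 1 PiB (2^50) device is addressable; at ashift >= 16 the shift reaches
 * 63 and the check short-circuits to avoid shifting into the sign bit.
 */
static boolean_t
spacemap_addressable_example(void)
{
	uint64_t ashift = 12;		/* hypothetical 4K-sector device */
	uint64_t asize = 1ULL << 50;	/* hypothetical 1 PiB of space */
	uint64_t shift = ashift + 47;	/* 59 */

	if (shift >= 63)
		return (B_TRUE);
	return (asize < (1ULL << shift));	/* 2^50 < 2^59: B_TRUE */
}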
3517
3518 /*
3519  * Get statistics for the given vdev.
3520  */
3521 void
3522 vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
3523 {
3524         spa_t *spa = vd->vdev_spa;
3525         vdev_t *rvd = spa->spa_root_vdev;
3526         vdev_t *tvd = vd->vdev_top;
3527
3528         ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
3529
3530         mutex_enter(&vd->vdev_stat_lock);
3531         bcopy(&vd->vdev_stat, vs, sizeof (*vs));
3532         vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
3533         vs->vs_state = vd->vdev_state;
3534         vs->vs_rsize = vdev_get_min_asize(vd);
3535         if (vd->vdev_ops->vdev_op_leaf)
3536                 vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
3537         /*
3538          * Report expandable space on top-level, non-auxiliary devices only.
3539          * The expandable space is reported in terms of metaslab sized units
3540          * since that determines how much space the pool can expand.
3541          */
3542         if (vd->vdev_aux == NULL && tvd != NULL && vd->vdev_max_asize != 0) {
3543                 vs->vs_esize = P2ALIGN(vd->vdev_max_asize - vd->vdev_asize -
3544                     spa->spa_bootsize, 1ULL << tvd->vdev_ms_shift);
3545         }
3546         vs->vs_configured_ashift = vd->vdev_top != NULL
3547             ? vd->vdev_top->vdev_ashift : vd->vdev_ashift;
3548         vs->vs_logical_ashift = vd->vdev_logical_ashift;
3549         vs->vs_physical_ashift = vd->vdev_physical_ashift;
3550         if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
3551             vdev_is_concrete(vd)) {
3552                 vs->vs_fragmentation = vd->vdev_mg->mg_fragmentation;
3553         }
3554
3555         /*
3556          * If we're getting stats on the root vdev, aggregate the I/O counts
3557          * over all top-level vdevs (i.e. the direct children of the root).
3558          */
3559         if (vd == rvd) {
3560                 for (int c = 0; c < rvd->vdev_children; c++) {
3561                         vdev_t *cvd = rvd->vdev_child[c];
3562                         vdev_stat_t *cvs = &cvd->vdev_stat;
3563
3564                         for (int t = 0; t < ZIO_TYPES; t++) {
3565                                 vs->vs_ops[t] += cvs->vs_ops[t];
3566                                 vs->vs_bytes[t] += cvs->vs_bytes[t];
3567                         }
3568                         cvs->vs_scan_removing = cvd->vdev_removing;
3569                 }
3570         }
3571         mutex_exit(&vd->vdev_stat_lock);
3572 }
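
/*
 * A minimal usage sketch (hypothetical helper): vdev_get_stats() must be
 * called with at least one spa config lock held, per the ASSERT above, so
 * a caller snapshots the root vdev's stats under SCL_ALL as reader.
 */
static void
snapshot_root_stats(spa_t *spa, vdev_stat_t *vs)
{
	spa_config_enter(spa, SCL_ALL, FTAG, RW_READER);
	vdev_get_stats(spa->spa_root_vdev, vs);
	spa_config_exit(spa, SCL_ALL, FTAG);
}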
3573
3574 void
3575 vdev_clear_stats(vdev_t *vd)
3576 {
3577         mutex_enter(&vd->vdev_stat_lock);
3578         vd->vdev_stat.vs_space = 0;
3579         vd->vdev_stat.vs_dspace = 0;
3580         vd->vdev_stat.vs_alloc = 0;
3581         mutex_exit(&vd->vdev_stat_lock);
3582 }
3583
3584 void
3585 vdev_scan_stat_init(vdev_t *vd)
3586 {
3587         vdev_stat_t *vs = &vd->vdev_stat;
3588
3589         for (int c = 0; c < vd->vdev_children; c++)
3590                 vdev_scan_stat_init(vd->vdev_child[c]);
3591
3592         mutex_enter(&vd->vdev_stat_lock);
3593         vs->vs_scan_processed = 0;
3594         mutex_exit(&vd->vdev_stat_lock);
3595 }
3596
3597 void
3598 vdev_stat_update(zio_t *zio, uint64_t psize)
3599 {
3600         spa_t *spa = zio->io_spa;
3601         vdev_t *rvd = spa->spa_root_vdev;
3602         vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
3603         vdev_t *pvd;
3604         uint64_t txg = zio->io_txg;
3605         vdev_stat_t *vs = &vd->vdev_stat;
3606         zio_type_t type = zio->io_type;
3607         int flags = zio->io_flags;
3608
3609         /*
3610          * If this i/o is a gang leader, it didn't do any actual work.
3611          */
3612         if (zio->io_gang_tree)
3613                 return;
3614
3615         if (zio->io_error == 0) {
3616                 /*
3617                  * If this is a root i/o, don't count it -- we've already
3618                  * counted the top-level vdevs, and vdev_get_stats() will
3619                  * aggregate them when asked.  This reduces contention on
3620                  * the root vdev_stat_lock and implicitly handles blocks
3621                  * that compress away to holes, for which there is no i/o.
3622                  * (Holes never create vdev children, so all the counters
3623                  * remain zero, which is what we want.)
3624                  *
3625                  * Note: this only applies to successful i/o (io_error == 0)
3626                  * because unlike i/o counts, errors are not additive.
3627                  * When reading a ditto block, for example, failure of
3628                  * one top-level vdev does not imply a root-level error.
3629                  */
3630                 if (vd == rvd)
3631                         return;
3632
3633                 ASSERT(vd == zio->io_vd);
3634
3635                 if (flags & ZIO_FLAG_IO_BYPASS)
3636                         return;
3637
3638                 mutex_enter(&vd->vdev_stat_lock);
3639
3640                 if (flags & ZIO_FLAG_IO_REPAIR) {
3641                         if (flags & ZIO_FLAG_SCAN_THREAD) {
3642                                 dsl_scan_phys_t *scn_phys =
3643                                     &spa->spa_dsl_pool->dp_scan->scn_phys;
3644                                 uint64_t *processed = &scn_phys->scn_processed;
3645
3646                                 /* XXX cleanup? */
3647                                 if (vd->vdev_ops->vdev_op_leaf)
3648                                         atomic_add_64(processed, psize);
3649                                 vs->vs_scan_processed += psize;
3650                         }
3651
3652                         if (flags & ZIO_FLAG_SELF_HEAL)
3653                                 vs->vs_self_healed += psize;
3654                 }
3655
3656                 vs->vs_ops[type]++;
3657                 vs->vs_bytes[type] += psize;
3658
3659                 mutex_exit(&vd->vdev_stat_lock);
3660                 return;
3661         }
3662
3663         if (flags & ZIO_FLAG_SPECULATIVE)
3664                 return;
3665
3666         /*
3667          * If this is an I/O error that is going to be retried, then ignore the
3668          * error.  Otherwise, the user may interpret B_FAILFAST I/O errors as
3669          * hard errors, when in reality they can happen for any number of
3670          * innocuous reasons (bus resets, MPxIO link failure, etc).
3671          */
3672         if (zio->io_error == EIO &&
3673             !(zio->io_flags & ZIO_FLAG_IO_RETRY))
3674                 return;
3675
3676         /*
3677          * Intent log writes won't propagate their error to the root
3678          * I/O so don't mark these types of failures as pool-level
3679          * errors.
3680          */
3681         if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
3682                 return;
3683
3684         mutex_enter(&vd->vdev_stat_lock);
3685         if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) {
3686                 if (zio->io_error == ECKSUM)
3687                         vs->vs_checksum_errors++;
3688                 else
3689                         vs->vs_read_errors++;
3690         }
3691         if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd))
3692                 vs->vs_write_errors++;
3693         mutex_exit(&vd->vdev_stat_lock);
3694
3695         if (spa->spa_load_state == SPA_LOAD_NONE &&
3696             type == ZIO_TYPE_WRITE && txg != 0 &&
3697             (!(flags & ZIO_FLAG_IO_REPAIR) ||
3698             (flags & ZIO_FLAG_SCAN_THREAD) ||
3699             spa->spa_claiming)) {
3700                 /*
3701                  * This is either a normal write (not a repair), or it's
3702                  * a repair induced by the scrub thread, or it's a repair
3703                  * made by zil_claim() during spa_load() in the first txg.
3704                  * In the normal case, we commit the DTL change in the same
3705                  * txg as the block was born.  In the scrub-induced repair
3706                  * case, we know that scrubs run in first-pass syncing context,
3707                  * so we commit the DTL change in spa_syncing_txg(spa).
3708                  * In the zil_claim() case, we commit in spa_first_txg(spa).
3709                  *
3710                  * We currently do not make DTL entries for failed spontaneous
3711                  * self-healing writes triggered by normal (non-scrubbing)
3712                  * reads, because we have no transactional context in which to
3713                  * do so -- and it's not clear that it'd be desirable anyway.
3714                  */
3715                 if (vd->vdev_ops->vdev_op_leaf) {
3716                         uint64_t commit_txg = txg;
3717                         if (flags & ZIO_FLAG_SCAN_THREAD) {
3718                                 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
3719                                 ASSERT(spa_sync_pass(spa) == 1);
3720                                 vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
3721                                 commit_txg = spa_syncing_txg(spa);
3722                         } else if (spa->spa_claiming) {
3723                                 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
3724                                 commit_txg = spa_first_txg(spa);
3725                         }
3726                         ASSERT(commit_txg >= spa_syncing_txg(spa));
3727                         if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
3728                                 return;
3729                         for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
3730                                 vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
3731                         vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
3732                 }
3733                 if (vd != rvd)
3734                         vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
3735         }
3736 }
3737
3738 /*
3739  * Update the in-core space usage stats for this vdev, its metaslab class,
3740  * and the root vdev.
3741  */
3742 void
3743 vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
3744     int64_t space_delta)
3745 {
3746         int64_t dspace_delta = space_delta;
3747         spa_t *spa = vd->vdev_spa;
3748         vdev_t *rvd = spa->spa_root_vdev;
3749         metaslab_group_t *mg = vd->vdev_mg;
3750         metaslab_class_t *mc = mg ? mg->mg_class : NULL;
3751
3752         ASSERT(vd == vd->vdev_top);
3753
3754         /*
3755          * Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
3756          * factor.  We must calculate this here and not at the root vdev
3757          * because the root vdev's psize-to-asize is simply the max of its
3758          * children's, thus not accurate enough for us.
3759          */
3760         ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
3761         ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
3762         dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
3763             vd->vdev_deflate_ratio;
3764
3765         mutex_enter(&vd->vdev_stat_lock);
3766         vd->vdev_stat.vs_alloc += alloc_delta;
3767         vd->vdev_stat.vs_space += space_delta;
3768         vd->vdev_stat.vs_dspace += dspace_delta;
3769         mutex_exit(&vd->vdev_stat_lock);
3770
3771         if (mc == spa_normal_class(spa)) {
3772                 mutex_enter(&rvd->vdev_stat_lock);
3773                 rvd->vdev_stat.vs_alloc += alloc_delta;
3774                 rvd->vdev_stat.vs_space += space_delta;
3775                 rvd->vdev_stat.vs_dspace += dspace_delta;
3776                 mutex_exit(&rvd->vdev_stat_lock);
3777         }
3778
3779         if (mc != NULL) {
3780                 ASSERT(rvd == vd->vdev_parent);
3781                 ASSERT(vd->vdev_ms_count != 0);
3782
3783                 metaslab_class_space_update(mc,
3784                     alloc_delta, defer_delta, space_delta, dspace_delta);
3785         }
3786 }
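
/*
 * A worked example under assumed values (standalone sketch, not part of
 * any call path): with SPA_MINBLOCKSHIFT = 9, a vdev with no RAID-Z
 * expansion has a deflate ratio of 512, so the deflated delta equals the
 * raw delta: (1048576 >> 9) * 512 = 1048576.  A RAID-Z vdev's smaller
 * ratio shrinks the delta by the parity overhead.
 */
static int64_t
deflate_example(int64_t space_delta, uint64_t deflate_ratio)
{
	/* Mirrors the dspace_delta computation in vdev_space_update(). */
	return ((space_delta >> SPA_MINBLOCKSHIFT) *
	    (int64_t)deflate_ratio);
}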
3787
3788 /*
3789  * Mark a top-level vdev's config as dirty, placing it on the dirty list
3790  * so that it will be written out next time the vdev configuration is synced.
3791  * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
3792  */
3793 void
3794 vdev_config_dirty(vdev_t *vd)
3795 {
3796         spa_t *spa = vd->vdev_spa;
3797         vdev_t *rvd = spa->spa_root_vdev;
3798         int c;
3799
3800         ASSERT(spa_writeable(spa));
3801
3802         /*
3803          * If this is an aux vdev (as with l2cache and spare devices), then we
3804          * update the vdev config manually and set the sync flag.
3805          */
3806         if (vd->vdev_aux != NULL) {
3807                 spa_aux_vdev_t *sav = vd->vdev_aux;
3808                 nvlist_t **aux;
3809                 uint_t naux;
3810
3811                 for (c = 0; c < sav->sav_count; c++) {
3812                         if (sav->sav_vdevs[c] == vd)
3813                                 break;
3814                 }
3815
3816                 if (c == sav->sav_count) {
3817                         /*
3818                          * We're being removed.  There's nothing more to do.
3819                          */
3820                         ASSERT(sav->sav_sync == B_TRUE);
3821                         return;
3822                 }
3823
3824                 sav->sav_sync = B_TRUE;
3825
3826                 if (nvlist_lookup_nvlist_array(sav->sav_config,
3827                     ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
3828                         VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
3829                             ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
3830                 }
3831
3832                 ASSERT(c < naux);
3833
3834                 /*
3835                  * Setting the nvlist in the middle of the array is a little
3836                  * sketchy, but it will work.
3837                  */
3838                 nvlist_free(aux[c]);
3839                 aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
3840
3841                 return;
3842         }
3843
3844         /*
3845          * The dirty list is protected by the SCL_CONFIG lock.  The caller
3846          * must either hold SCL_CONFIG as writer, or must be the sync thread
3847          * (which holds SCL_CONFIG as reader).  There's only one sync thread,
3848          * so this is sufficient to ensure mutual exclusion.
3849          */
3850         ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
3851             (dsl_pool_sync_context(spa_get_dsl(spa)) &&
3852             spa_config_held(spa, SCL_CONFIG, RW_READER)));
3853
3854         if (vd == rvd) {
3855                 for (c = 0; c < rvd->vdev_children; c++)
3856                         vdev_config_dirty(rvd->vdev_child[c]);
3857         } else {
3858                 ASSERT(vd == vd->vdev_top);
3859
3860                 if (!list_link_active(&vd->vdev_config_dirty_node) &&
3861                     vdev_is_concrete(vd)) {
3862                         list_insert_head(&spa->spa_config_dirty_list, vd);
3863                 }
3864         }
3865 }
3866
3867 void
3868 vdev_config_clean(vdev_t *vd)
3869 {
3870         spa_t *spa = vd->vdev_spa;
3871
3872         ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
3873             (dsl_pool_sync_context(spa_get_dsl(spa)) &&
3874             spa_config_held(spa, SCL_CONFIG, RW_READER)));
3875
3876         ASSERT(list_link_active(&vd->vdev_config_dirty_node));
3877         list_remove(&spa->spa_config_dirty_list, vd);
3878 }
3879
3880 /*
3881  * Mark a top-level vdev's state as dirty, so that the next pass of
3882  * spa_sync() can convert this into vdev_config_dirty().  We distinguish
3883  * the state changes from larger config changes because they require
3884  * much less locking, and are often needed for administrative actions.
3885  */
3886 void
3887 vdev_state_dirty(vdev_t *vd)
3888 {
3889         spa_t *spa = vd->vdev_spa;
3890
3891         ASSERT(spa_writeable(spa));
3892         ASSERT(vd == vd->vdev_top);
3893
3894         /*
3895          * The state list is protected by the SCL_STATE lock.  The caller
3896          * must either hold SCL_STATE as writer, or must be the sync thread
3897          * (which holds SCL_STATE as reader).  There's only one sync thread,
3898          * so this is sufficient to ensure mutual exclusion.
3899          */
3900         ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
3901             (dsl_pool_sync_context(spa_get_dsl(spa)) &&
3902             spa_config_held(spa, SCL_STATE, RW_READER)));
3903
3904         if (!list_link_active(&vd->vdev_state_dirty_node) &&
3905             vdev_is_concrete(vd))
3906                 list_insert_head(&spa->spa_state_dirty_list, vd);
3907 }
3908
3909 void
3910 vdev_state_clean(vdev_t *vd)
3911 {
3912         spa_t *spa = vd->vdev_spa;
3913
3914         ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
3915             (dsl_pool_sync_context(spa_get_dsl(spa)) &&
3916             spa_config_held(spa, SCL_STATE, RW_READER)));
3917
3918         ASSERT(list_link_active(&vd->vdev_state_dirty_node));
3919         list_remove(&spa->spa_state_dirty_list, vd);
3920 }
3921
3922 /*
3923  * Propagate vdev state up from children to parent.
3924  */
3925 void
3926 vdev_propagate_state(vdev_t *vd)
3927 {
3928         spa_t *spa = vd->vdev_spa;
3929         vdev_t *rvd = spa->spa_root_vdev;
3930         int degraded = 0, faulted = 0;
3931         int corrupted = 0;
3932         vdev_t *child;
3933
3934         if (vd->vdev_children > 0) {
3935                 for (int c = 0; c < vd->vdev_children; c++) {
3936                         child = vd->vdev_child[c];
3937
3938                         /*
3939                          * Don't factor holes or indirect vdevs into the
3940                          * decision.
3941                          */
3942                         if (!vdev_is_concrete(child))
3943                                 continue;
3944
3945                         if (!vdev_readable(child) ||
3946                             (!vdev_writeable(child) && spa_writeable(spa))) {
3947                                 /*
3948                                  * Root special: if there is a top-level log
3949                                  * device, treat the root vdev as if it were
3950                                  * degraded.
3951                                  */
3952                                 if (child->vdev_islog && vd == rvd)
3953                                         degraded++;
3954                                 else
3955                                         faulted++;
3956                         } else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
3957                                 degraded++;
3958                         }
3959
3960                         if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
3961                                 corrupted++;
3962                 }
3963
3964                 vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
3965
3966                 /*
3967                  * Root special: if there is a top-level vdev that cannot be
3968                  * opened due to corrupted metadata, then propagate the root
3969                  * vdev's aux state as 'corrupt' rather than 'insufficient
3970                  * replicas'.
3971                  */
3972                 if (corrupted && vd == rvd &&
3973                     rvd->vdev_state == VDEV_STATE_CANT_OPEN)
3974                         vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
3975                             VDEV_AUX_CORRUPT_DATA);
3976         }
3977
3978         if (vd->vdev_parent)
3979                 vdev_propagate_state(vd->vdev_parent);
3980 }
3981
3982 /*
3983  * Set a vdev's state.  If this is during an open, we don't update the parent
3984  * state, because we're in the process of opening children depth-first.
3985  * Otherwise, we propagate the change to the parent.
3986  *
3987  * If this routine places a device in a faulted state, an appropriate ereport is
3988  * generated.
3989  */
3990 void
3991 vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
3992 {
3993         uint64_t save_state;
3994         spa_t *spa = vd->vdev_spa;
3995
3996         if (state == vd->vdev_state) {
3997                 vd->vdev_stat.vs_aux = aux;
3998                 return;
3999         }
4000
4001         save_state = vd->vdev_state;
4002
4003         vd->vdev_state = state;
4004         vd->vdev_stat.vs_aux = aux;
4005
4006         /*
4007          * If we are setting the vdev state to anything but an open state, then
4008          * always close the underlying device unless the device has requested
4009          * a delayed close (i.e. we're about to remove or fault the device).
4010          * Otherwise, we keep accessible but invalid devices open forever.
4011          * We don't call vdev_close() itself, because that implies some extra
4012          * checks (offline, etc) that we don't want here.  This is limited to
4013          * leaf devices, because otherwise closing the device will affect other
4014          * children.
4015          */
4016         if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
4017             vd->vdev_ops->vdev_op_leaf)
4018                 vd->vdev_ops->vdev_op_close(vd);
4019
4020         if (vd->vdev_removed &&
4021             state == VDEV_STATE_CANT_OPEN &&
4022             (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
4023                 /*
4024                  * If the previous state is set to VDEV_STATE_REMOVED, then this
4025                  * device was previously marked removed and someone attempted to
4026                  * reopen it.  If this failed due to a nonexistent device, then
4027                  * keep the device in the REMOVED state.  We also allow this if
4028                  * it is one of our special test online cases, which is only
4029                  * attempting to online the device and shouldn't generate an FMA
4030                  * fault.
4031                  */
4032                 vd->vdev_state = VDEV_STATE_REMOVED;
4033                 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
4034         } else if (state == VDEV_STATE_REMOVED) {
4035                 vd->vdev_removed = B_TRUE;
4036         } else if (state == VDEV_STATE_CANT_OPEN) {
4037                 /*
4038                  * If we fail to open a vdev during an import or recovery, we
4039                  * mark it as "not available", which signifies that it was
4040                  * never there to begin with.  Failure to open such a device
4041                  * is not considered an error.
4042                  */
4043                 if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
4044                     spa_load_state(spa) == SPA_LOAD_RECOVER) &&
4045                     vd->vdev_ops->vdev_op_leaf)
4046                         vd->vdev_not_present = 1;
4047
4048                 /*
4049                  * Post the appropriate ereport.  If the 'prevstate' field is
4050                  * set to something other than VDEV_STATE_UNKNOWN, it indicates
4051                  * that this is part of a vdev_reopen().  In this case, we don't
4052                  * want to post the ereport if the device was already in the
4053                  * CANT_OPEN state beforehand.
4054                  *
4055                  * If the 'checkremove' flag is set, then this is an attempt to
4056                  * online the device in response to an insertion event.  If we
4057                  * hit this case, then we have detected an insertion event for a
4058                  * faulted or offline device that wasn't in the removed state.
4059                  * In this scenario, we don't post an ereport because we are
4060                  * about to replace the device, or attempt an online with
4061                  * vdev_forcefault, which will generate the fault for us.
4062                  */
4063                 if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
4064                     !vd->vdev_not_present && !vd->vdev_checkremove &&
4065                     vd != spa->spa_root_vdev) {
4066                         const char *class;
4067
4068                         switch (aux) {
4069                         case VDEV_AUX_OPEN_FAILED:
4070                                 class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
4071                                 break;
4072                         case VDEV_AUX_CORRUPT_DATA:
4073                                 class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
4074                                 break;
4075                         case VDEV_AUX_NO_REPLICAS:
4076                                 class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
4077                                 break;
4078                         case VDEV_AUX_BAD_GUID_SUM:
4079                                 class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
4080                                 break;
4081                         case VDEV_AUX_TOO_SMALL:
4082                                 class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
4083                                 break;
4084                         case VDEV_AUX_BAD_LABEL:
4085                                 class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
4086                                 break;
4087                         default:
4088                                 class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
4089                         }
4090
4091                         zfs_ereport_post(class, spa, vd, NULL, save_state, 0);
4092                 }
4093
4094                 /* Erase any notion of persistent removed state */
4095                 vd->vdev_removed = B_FALSE;
4096         } else {
4097                 vd->vdev_removed = B_FALSE;
4098         }
4099
4100         /*
4101          * Notify the fmd of the state change.  Be verbose and post
4102          * notifications even for stuff that's not important; the fmd agent can
4103          * sort it out.  Don't emit state change events for non-leaf vdevs since
4104          * they can't change state on their own.  The FMD can check their state
4105          * if it wants to when it sees that a leaf vdev had a state change.
4106          */
4107         if (vd->vdev_ops->vdev_op_leaf)
4108                 zfs_post_state_change(spa, vd);
4109
4110         if (!isopen && vd->vdev_parent)
4111                 vdev_propagate_state(vd->vdev_parent);
4112 }
4113
4114 boolean_t
4115 vdev_children_are_offline(vdev_t *vd)
4116 {
4117         ASSERT(!vd->vdev_ops->vdev_op_leaf);
4118
4119         for (uint64_t i = 0; i < vd->vdev_children; i++) {
4120                 if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE)
4121                         return (B_FALSE);
4122         }
4123
4124         return (B_TRUE);
4125 }
4126
4127 /*
4128  * Check the vdev configuration to ensure that it's capable of supporting
4129  * a root pool. We do not support partial configuration.
4130  * In addition, only a single top-level vdev is allowed.
4131  *
4132  * FreeBSD does not have the above limitations.
4133  */
4134 boolean_t
4135 vdev_is_bootable(vdev_t *vd)
4136 {
4137 #ifdef illumos
4138         if (!vd->vdev_ops->vdev_op_leaf) {
4139                 char *vdev_type = vd->vdev_ops->vdev_op_type;
4140
4141                 if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 &&
4142                     vd->vdev_children > 1) {
4143                         return (B_FALSE);
4144                 } else if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0 ||
4145                     strcmp(vdev_type, VDEV_TYPE_INDIRECT) == 0) {
4146                         return (B_FALSE);
4147                 }
4148         }
4149
4150         for (int c = 0; c < vd->vdev_children; c++) {
4151                 if (!vdev_is_bootable(vd->vdev_child[c]))
4152                         return (B_FALSE);
4153         }
4154 #endif  /* illumos */
4155         return (B_TRUE);
4156 }
4157
4158 boolean_t
4159 vdev_is_concrete(vdev_t *vd)
4160 {
4161         vdev_ops_t *ops = vd->vdev_ops;
4162         if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops ||
4163             ops == &vdev_missing_ops || ops == &vdev_root_ops) {
4164                 return (B_FALSE);
4165         } else {
4166                 return (B_TRUE);
4167         }
4168 }
4169
4170 /*
4171  * Determine if a log device has valid content.  If the vdev was
4172  * removed or faulted in the MOS config then we know that
4173  * the content on the log device has already been written to the pool.
4174  */
4175 boolean_t
4176 vdev_log_state_valid(vdev_t *vd)
4177 {
4178         if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
4179             !vd->vdev_removed)
4180                 return (B_TRUE);
4181
4182         for (int c = 0; c < vd->vdev_children; c++)
4183                 if (vdev_log_state_valid(vd->vdev_child[c]))
4184                         return (B_TRUE);
4185
4186         return (B_FALSE);
4187 }
4188
4189 /*
4190  * Expand a vdev if possible.
4191  */
4192 void
4193 vdev_expand(vdev_t *vd, uint64_t txg)
4194 {
4195         ASSERT(vd->vdev_top == vd);
4196         ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
4197
4198         vdev_set_deflate_ratio(vd);
4199
4200         if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
4201             vdev_is_concrete(vd)) {
4202                 VERIFY(vdev_metaslab_init(vd, txg) == 0);
4203                 vdev_config_dirty(vd);
4204         }
4205 }
4206
4207 /*
4208  * Split a vdev.
4209  */
4210 void
4211 vdev_split(vdev_t *vd)
4212 {
4213         vdev_t *cvd, *pvd = vd->vdev_parent;
4214
4215         vdev_remove_child(pvd, vd);
4216         vdev_compact_children(pvd);
4217
4218         cvd = pvd->vdev_child[0];
4219         if (pvd->vdev_children == 1) {
4220                 vdev_remove_parent(cvd);
4221                 cvd->vdev_splitting = B_TRUE;
4222         }
4223         vdev_propagate_state(cvd);
4224 }
4225
4226 void
4227 vdev_deadman(vdev_t *vd)
4228 {
4229         for (int c = 0; c < vd->vdev_children; c++) {
4230                 vdev_t *cvd = vd->vdev_child[c];
4231
4232                 vdev_deadman(cvd);
4233         }
4234
4235         if (vd->vdev_ops->vdev_op_leaf) {
4236                 vdev_queue_t *vq = &vd->vdev_queue;
4237
4238                 mutex_enter(&vq->vq_lock);
4239                 if (avl_numnodes(&vq->vq_active_tree) > 0) {
4240                         spa_t *spa = vd->vdev_spa;
4241                         zio_t *fio;
4242                         uint64_t delta;
4243
4244                         /*
4245                          * Look at the head of all the pending queues;
4246                          * if any I/O has been outstanding for longer than
4247                          * the spa_deadman_synctime, we panic the system.
4248                          */
4249                         fio = avl_first(&vq->vq_active_tree);
4250                         delta = gethrtime() - fio->io_timestamp;
4251                         if (delta > spa_deadman_synctime(spa)) {
4252                                 vdev_dbgmsg(vd, "SLOW IO: zio timestamp "
4253                                     "%lluns, delta %lluns, last io %lluns",
4254                                     (u_longlong_t)fio->io_timestamp, (u_longlong_t)delta,
4255                                     (u_longlong_t)vq->vq_io_complete_ts);
4256                                 fm_panic("I/O to pool '%s' appears to be "
4257                                     "hung on vdev guid %llu at '%s'.",
4258                                     spa_name(spa),
4259                                     (u_longlong_t)vd->vdev_guid,
4260                                     vd->vdev_path);
4261                         }
4262                 }
4263                 mutex_exit(&vq->vq_lock);
4264         }
4265 }