FreeBSD/FreeBSD.git: sys/contrib/openzfs/module/zfs/vdev_initialize.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2016, 2019 by Delphix. All rights reserved.
 */

#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/txg.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_initialize.h>

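/*
 * Overview: this file implements the "zpool initialize" feature.  A
 * per-leaf-vdev kernel thread walks every metaslab of the top-level vdev,
 * collects the ranges that are currently free, and writes a fixed pattern
 * (zfs_initialize_value) over them.  State and progress are persisted in
 * the leaf vdev's ZAP object, so an initialization can be suspended,
 * resumed, canceled, or restarted across pool import.
 */
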
/*
 * Value that is written to disk during initialization.
 */
static uint64_t zfs_initialize_value = 0xdeadbeefdeadbeeeULL;

/* maximum number of I/Os outstanding per leaf vdev */
static const int zfs_initialize_limit = 1;

/* size of initializing writes; default 1MiB, see zfs_remove_max_segment */
static uint64_t zfs_initialize_chunk_size = 1024 * 1024;
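/*
 * Together, the two tunables above bound the amount of initializing I/O in
 * flight: at most zfs_initialize_limit writes of zfs_initialize_chunk_size
 * bytes each are outstanding per leaf vdev (a single 1 MiB write with the
 * defaults).
 */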

static boolean_t
vdev_initialize_should_stop(vdev_t *vd)
{
        return (vd->vdev_initialize_exit_wanted || !vdev_writeable(vd) ||
            vd->vdev_detached || vd->vdev_top->vdev_removing);
}

static void
vdev_initialize_zap_update_sync(void *arg, dmu_tx_t *tx)
{
        /*
         * We pass in the guid instead of the vdev_t since the vdev may
         * have been freed prior to the sync task being processed. This
         * happens when a vdev is detached as we call spa_config_vdev_exit(),
         * stop the initializing thread, schedule the sync task, and free
         * the vdev. Later when the scheduled sync task is invoked, it would
         * find that the vdev has been freed.
         */
        uint64_t guid = *(uint64_t *)arg;
        uint64_t txg = dmu_tx_get_txg(tx);
        kmem_free(arg, sizeof (uint64_t));

        vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
        if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
                return;

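        /*
         * vdev_initialize_offset[] is a per-txg slot (indexed by
         * txg & TXG_MASK) that the open-context write path sets to the end
         * of the last chunk issued in that txg.  In syncing context we
         * consume and clear the slot here so its value can be recorded in
         * the leaf ZAP below.
         */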
        uint64_t last_offset = vd->vdev_initialize_offset[txg & TXG_MASK];
        vd->vdev_initialize_offset[txg & TXG_MASK] = 0;

        VERIFY(vd->vdev_leaf_zap != 0);

        objset_t *mos = vd->vdev_spa->spa_meta_objset;

        if (last_offset > 0) {
                vd->vdev_initialize_last_offset = last_offset;
                VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
                    VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
                    sizeof (last_offset), 1, &last_offset, tx));
        }
        if (vd->vdev_initialize_action_time > 0) {
                uint64_t val = (uint64_t)vd->vdev_initialize_action_time;
                VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
                    VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME, sizeof (val),
                    1, &val, tx));
        }

        uint64_t initialize_state = vd->vdev_initialize_state;
        VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
            VDEV_LEAF_ZAP_INITIALIZE_STATE, sizeof (initialize_state), 1,
            &initialize_state, tx));
}

static void
vdev_initialize_zap_remove_sync(void *arg, dmu_tx_t *tx)
{
        uint64_t guid = *(uint64_t *)arg;

        kmem_free(arg, sizeof (uint64_t));

        vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
        if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
                return;

        ASSERT3S(vd->vdev_initialize_state, ==, VDEV_INITIALIZE_NONE);
        ASSERT3U(vd->vdev_leaf_zap, !=, 0);

        vd->vdev_initialize_last_offset = 0;
        vd->vdev_initialize_action_time = 0;

        objset_t *mos = vd->vdev_spa->spa_meta_objset;
        int error;

        error = zap_remove(mos, vd->vdev_leaf_zap,
            VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET, tx);
        VERIFY(error == 0 || error == ENOENT);

        error = zap_remove(mos, vd->vdev_leaf_zap,
            VDEV_LEAF_ZAP_INITIALIZE_STATE, tx);
        VERIFY(error == 0 || error == ENOENT);

        error = zap_remove(mos, vd->vdev_leaf_zap,
            VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME, tx);
        VERIFY(error == 0 || error == ENOENT);
}

static void
vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
{
        ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
        spa_t *spa = vd->vdev_spa;

        if (new_state == vd->vdev_initialize_state)
                return;

        /*
         * Copy the vd's guid; it will be freed by the sync task.
         */
        uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
        *guid = vd->vdev_guid;

        /*
         * If we're suspending, then preserve the original start time.
         */
        if (vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED) {
                vd->vdev_initialize_action_time = gethrestime_sec();
        }

        vdev_initializing_state_t old_state = vd->vdev_initialize_state;
        vd->vdev_initialize_state = new_state;

        dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
        VERIFY0(dmu_tx_assign(tx, TXG_WAIT));

        if (new_state != VDEV_INITIALIZE_NONE) {
                dsl_sync_task_nowait(spa_get_dsl(spa),
                    vdev_initialize_zap_update_sync, guid, tx);
        } else {
                dsl_sync_task_nowait(spa_get_dsl(spa),
                    vdev_initialize_zap_remove_sync, guid, tx);
        }

        switch (new_state) {
        case VDEV_INITIALIZE_ACTIVE:
                spa_history_log_internal(spa, "initialize", tx,
                    "vdev=%s activated", vd->vdev_path);
                break;
        case VDEV_INITIALIZE_SUSPENDED:
                spa_history_log_internal(spa, "initialize", tx,
                    "vdev=%s suspended", vd->vdev_path);
                break;
        case VDEV_INITIALIZE_CANCELED:
                if (old_state == VDEV_INITIALIZE_ACTIVE ||
                    old_state == VDEV_INITIALIZE_SUSPENDED)
                        spa_history_log_internal(spa, "initialize", tx,
                            "vdev=%s canceled", vd->vdev_path);
                break;
        case VDEV_INITIALIZE_COMPLETE:
                spa_history_log_internal(spa, "initialize", tx,
                    "vdev=%s complete", vd->vdev_path);
                break;
        case VDEV_INITIALIZE_NONE:
                spa_history_log_internal(spa, "uninitialize", tx,
                    "vdev=%s", vd->vdev_path);
                break;
        default:
                panic("invalid state %llu", (unsigned long long)new_state);
        }

        dmu_tx_commit(tx);

        if (new_state != VDEV_INITIALIZE_ACTIVE)
                spa_notify_waiters(spa);
}

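/*
 * Completion callback for initializing writes.  Rolls the per-txg offset
 * back if the device became unwriteable, otherwise updates the error and
 * progress counters, and wakes any thread waiting in vdev_initialize_write()
 * for an inflight slot.
 */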
static void
vdev_initialize_cb(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;
        mutex_enter(&vd->vdev_initialize_io_lock);
        if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
                /*
                 * The I/O failed because the vdev was unavailable; roll the
                 * last offset back. (This works because spa_sync waits on
                 * spa_txg_zio before it runs sync tasks.)
                 */
                uint64_t *off =
                    &vd->vdev_initialize_offset[zio->io_txg & TXG_MASK];
                *off = MIN(*off, zio->io_offset);
        } else {
                /*
                 * Since initializing is best-effort, we ignore I/O errors and
                 * rely on vdev_probe to determine if the errors are more
                 * critical.
                 */
                if (zio->io_error != 0)
                        vd->vdev_stat.vs_initialize_errors++;

                vd->vdev_initialize_bytes_done += zio->io_orig_size;
        }
        ASSERT3U(vd->vdev_initialize_inflight, >, 0);
        vd->vdev_initialize_inflight--;
        cv_broadcast(&vd->vdev_initialize_io_cv);
        mutex_exit(&vd->vdev_initialize_io_lock);

        spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}

/* Takes care of physical writing and limiting # of concurrent ZIOs. */
static int
vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data)
{
        spa_t *spa = vd->vdev_spa;

        /* Limit inflight initializing I/Os */
        mutex_enter(&vd->vdev_initialize_io_lock);
        while (vd->vdev_initialize_inflight >= zfs_initialize_limit) {
                cv_wait(&vd->vdev_initialize_io_cv,
                    &vd->vdev_initialize_io_lock);
        }
        vd->vdev_initialize_inflight++;
        mutex_exit(&vd->vdev_initialize_io_lock);

        dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
        VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
        uint64_t txg = dmu_tx_get_txg(tx);

        spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
        mutex_enter(&vd->vdev_initialize_lock);

        if (vd->vdev_initialize_offset[txg & TXG_MASK] == 0) {
                uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
                *guid = vd->vdev_guid;

                /* This is the first write of this txg. */
                dsl_sync_task_nowait(spa_get_dsl(spa),
                    vdev_initialize_zap_update_sync, guid, tx);
        }

        /*
         * We know the vdev struct will still be around since all
         * consumers of vdev_free must stop the initialization first.
         */
        if (vdev_initialize_should_stop(vd)) {
                mutex_enter(&vd->vdev_initialize_io_lock);
                ASSERT3U(vd->vdev_initialize_inflight, >, 0);
                vd->vdev_initialize_inflight--;
                mutex_exit(&vd->vdev_initialize_io_lock);
                spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
                mutex_exit(&vd->vdev_initialize_lock);
                dmu_tx_commit(tx);
                return (SET_ERROR(EINTR));
        }
        mutex_exit(&vd->vdev_initialize_lock);

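        /*
         * Record the end of this chunk in the per-txg slot.  When the txg
         * syncs, vdev_initialize_zap_update_sync() (scheduled above on the
         * first write of the txg) persists it as the last offset, which is
         * what lets a suspended or interrupted initialization resume where
         * it left off.
         */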
        vd->vdev_initialize_offset[txg & TXG_MASK] = start + size;
        zio_nowait(zio_write_phys(spa->spa_txg_zio[txg & TXG_MASK], vd, start,
            size, data, ZIO_CHECKSUM_OFF, vdev_initialize_cb, NULL,
            ZIO_PRIORITY_INITIALIZING, ZIO_FLAG_CANFAIL, B_FALSE));
        /* vdev_initialize_cb releases SCL_STATE_ALL */

        dmu_tx_commit(tx);

        return (0);
}

/*
 * Callback to fill each ABD chunk with zfs_initialize_value. len must be
 * divisible by sizeof (uint64_t), and buf must be 8-byte aligned. The ABD
 * allocation will guarantee these for us.
 */
static int
vdev_initialize_block_fill(void *buf, size_t len, void *unused)
{
        (void) unused;

        ASSERT0(len % sizeof (uint64_t));
        for (uint64_t i = 0; i < len; i += sizeof (uint64_t)) {
                *(uint64_t *)((char *)(buf) + i) = zfs_initialize_value;
        }
        return (0);
}

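/*
 * Allocate one chunk-sized ABD and prefill it with zfs_initialize_value;
 * the initializing thread reuses the same buffer for every write it issues.
 */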
static abd_t *
vdev_initialize_block_alloc(void)
{
        /* Allocate ABD for filler data */
        abd_t *data = abd_alloc_for_io(zfs_initialize_chunk_size, B_FALSE);

        ASSERT0(zfs_initialize_chunk_size % sizeof (uint64_t));
        (void) abd_iterate_func(data, 0, zfs_initialize_chunk_size,
            vdev_initialize_block_fill, NULL);

        return (data);
}

static void
vdev_initialize_block_free(abd_t *data)
{
        abd_free(data);
}

static int
vdev_initialize_ranges(vdev_t *vd, abd_t *data)
{
        range_tree_t *rt = vd->vdev_initialize_tree;
        zfs_btree_t *bt = &rt->rt_root;
        zfs_btree_index_t where;

        for (range_seg_t *rs = zfs_btree_first(bt, &where); rs != NULL;
            rs = zfs_btree_next(bt, &where, &where)) {
                uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);

                /* Split range into legally-sized physical chunks */
                uint64_t writes_required =
                    ((size - 1) / zfs_initialize_chunk_size) + 1;
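                /*
                 * writes_required is ceil(size / chunk size).  For example,
                 * a 2.5 MiB segment with the default 1 MiB chunk size needs
                 * three writes; the MIN() below trims the final write to
                 * the remaining 0.5 MiB.
                 */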

                for (uint64_t w = 0; w < writes_required; w++) {
                        int error;

                        error = vdev_initialize_write(vd,
                            VDEV_LABEL_START_SIZE + rs_get_start(rs, rt) +
                            (w * zfs_initialize_chunk_size),
                            MIN(size - (w * zfs_initialize_chunk_size),
                            zfs_initialize_chunk_size), data);
                        if (error != 0)
                                return (error);
                }
        }
        return (0);
}

static void
vdev_initialize_xlate_last_rs_end(void *arg, range_seg64_t *physical_rs)
{
        uint64_t *last_rs_end = (uint64_t *)arg;

        if (physical_rs->rs_end > *last_rs_end)
                *last_rs_end = physical_rs->rs_end;
}

static void
vdev_initialize_xlate_progress(void *arg, range_seg64_t *physical_rs)
{
        vdev_t *vd = (vdev_t *)arg;

        uint64_t size = physical_rs->rs_end - physical_rs->rs_start;
        vd->vdev_initialize_bytes_est += size;

        if (vd->vdev_initialize_last_offset > physical_rs->rs_end) {
                vd->vdev_initialize_bytes_done += size;
        } else if (vd->vdev_initialize_last_offset > physical_rs->rs_start &&
            vd->vdev_initialize_last_offset < physical_rs->rs_end) {
                vd->vdev_initialize_bytes_done +=
                    vd->vdev_initialize_last_offset - physical_rs->rs_start;
        }
}

static void
vdev_initialize_calculate_progress(vdev_t *vd)
{
        ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
            spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
        ASSERT(vd->vdev_leaf_zap != 0);

        vd->vdev_initialize_bytes_est = 0;
        vd->vdev_initialize_bytes_done = 0;

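        /*
         * Estimation strategy: a metaslab lying entirely below the last
         * initialized offset counts as fully done, and one lying entirely
         * above it as not started, using the metaslab's free space (split
         * across the top-level vdev's disks) without loading it.  Only the
         * metaslab that straddles the last offset is loaded so its free
         * ranges can be walked for an exact figure.
         */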
        for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) {
                metaslab_t *msp = vd->vdev_top->vdev_ms[i];
                mutex_enter(&msp->ms_lock);

                uint64_t ms_free = (msp->ms_size -
                    metaslab_allocated_space(msp)) /
                    vdev_get_ndisks(vd->vdev_top);

                /*
                 * Convert the metaslab range to a physical range
                 * on our vdev. We use this to determine if we are
                 * in the middle of this metaslab range.
                 */
                range_seg64_t logical_rs, physical_rs, remain_rs;
                logical_rs.rs_start = msp->ms_start;
                logical_rs.rs_end = msp->ms_start + msp->ms_size;

                /* Metaslab space after this offset has not been initialized */
                vdev_xlate(vd, &logical_rs, &physical_rs, &remain_rs);
                if (vd->vdev_initialize_last_offset <= physical_rs.rs_start) {
                        vd->vdev_initialize_bytes_est += ms_free;
                        mutex_exit(&msp->ms_lock);
                        continue;
                }

                /* Metaslab space before this offset has been initialized */
                uint64_t last_rs_end = physical_rs.rs_end;
                if (!vdev_xlate_is_empty(&remain_rs)) {
                        vdev_xlate_walk(vd, &remain_rs,
                            vdev_initialize_xlate_last_rs_end, &last_rs_end);
                }

                if (vd->vdev_initialize_last_offset > last_rs_end) {
                        vd->vdev_initialize_bytes_done += ms_free;
                        vd->vdev_initialize_bytes_est += ms_free;
                        mutex_exit(&msp->ms_lock);
                        continue;
                }

                /*
                 * If we get here, we're in the middle of initializing this
                 * metaslab. Load it and walk the free tree for more accurate
                 * progress estimation.
                 */
                VERIFY0(metaslab_load(msp));

                zfs_btree_index_t where;
                range_tree_t *rt = msp->ms_allocatable;
                for (range_seg_t *rs =
                    zfs_btree_first(&rt->rt_root, &where); rs;
                    rs = zfs_btree_next(&rt->rt_root, &where,
                    &where)) {
                        logical_rs.rs_start = rs_get_start(rs, rt);
                        logical_rs.rs_end = rs_get_end(rs, rt);

                        vdev_xlate_walk(vd, &logical_rs,
                            vdev_initialize_xlate_progress, vd);
                }
                mutex_exit(&msp->ms_lock);
        }
}

static int
vdev_initialize_load(vdev_t *vd)
{
        int err = 0;
        ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
            spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
        ASSERT(vd->vdev_leaf_zap != 0);

        if (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE ||
            vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED) {
                err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
                    sizeof (vd->vdev_initialize_last_offset), 1,
                    &vd->vdev_initialize_last_offset);
                if (err == ENOENT) {
                        vd->vdev_initialize_last_offset = 0;
                        err = 0;
                }
        }

        vdev_initialize_calculate_progress(vd);
        return (err);
}

static void
vdev_initialize_xlate_range_add(void *arg, range_seg64_t *physical_rs)
{
        vdev_t *vd = arg;

        /* Only add segments that we have not visited yet */
        if (physical_rs->rs_end <= vd->vdev_initialize_last_offset)
                return;

        /* Pick up where we left off mid-range. */
        if (vd->vdev_initialize_last_offset > physical_rs->rs_start) {
                zfs_dbgmsg("range write: vd %s changed (%llu, %llu) to "
                    "(%llu, %llu)", vd->vdev_path,
                    (u_longlong_t)physical_rs->rs_start,
                    (u_longlong_t)physical_rs->rs_end,
                    (u_longlong_t)vd->vdev_initialize_last_offset,
                    (u_longlong_t)physical_rs->rs_end);
                ASSERT3U(physical_rs->rs_end, >,
                    vd->vdev_initialize_last_offset);
                physical_rs->rs_start = vd->vdev_initialize_last_offset;
        }

        ASSERT3U(physical_rs->rs_end, >, physical_rs->rs_start);

        range_tree_add(vd->vdev_initialize_tree, physical_rs->rs_start,
            physical_rs->rs_end - physical_rs->rs_start);
}

/*
 * Convert the logical range into a physical range and add it to our
 * range tree.
 */
static void
vdev_initialize_range_add(void *arg, uint64_t start, uint64_t size)
{
        vdev_t *vd = arg;
        range_seg64_t logical_rs;
        logical_rs.rs_start = start;
        logical_rs.rs_end = start + size;

        ASSERT(vd->vdev_ops->vdev_op_leaf);
        vdev_xlate_walk(vd, &logical_rs, vdev_initialize_xlate_range_add, arg);
}

static __attribute__((noreturn)) void
vdev_initialize_thread(void *arg)
{
        vdev_t *vd = arg;
        spa_t *spa = vd->vdev_spa;
        int error = 0;
        uint64_t ms_count = 0;

        ASSERT(vdev_is_concrete(vd));
        spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

        vd->vdev_initialize_last_offset = 0;
        VERIFY0(vdev_initialize_load(vd));

        abd_t *deadbeef = vdev_initialize_block_alloc();

        vd->vdev_initialize_tree = range_tree_create(NULL, RANGE_SEG64, NULL,
            0, 0);

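        /*
         * Main loop: for each metaslab of the top-level vdev, disable and
         * load it, copy its currently-allocatable ranges (translated to
         * physical offsets) into vdev_initialize_tree, drop the metaslab
         * lock, and issue pattern writes over those ranges.  The metaslab
         * is then re-enabled so normal allocations can proceed while the
         * next one is processed.
         */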
        for (uint64_t i = 0; !vd->vdev_detached &&
            i < vd->vdev_top->vdev_ms_count; i++) {
                metaslab_t *msp = vd->vdev_top->vdev_ms[i];
                boolean_t unload_when_done = B_FALSE;

                /*
                 * If we've expanded the top-level vdev or it's our
                 * first pass, calculate our progress.
                 */
                if (vd->vdev_top->vdev_ms_count != ms_count) {
                        vdev_initialize_calculate_progress(vd);
                        ms_count = vd->vdev_top->vdev_ms_count;
                }

                spa_config_exit(spa, SCL_CONFIG, FTAG);
                metaslab_disable(msp);
                mutex_enter(&msp->ms_lock);
                if (!msp->ms_loaded && !msp->ms_loading)
                        unload_when_done = B_TRUE;
                VERIFY0(metaslab_load(msp));

                range_tree_walk(msp->ms_allocatable, vdev_initialize_range_add,
                    vd);
                mutex_exit(&msp->ms_lock);

                error = vdev_initialize_ranges(vd, deadbeef);
                metaslab_enable(msp, B_TRUE, unload_when_done);
                spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

                range_tree_vacate(vd->vdev_initialize_tree, NULL, NULL);
                if (error != 0)
                        break;
        }

        spa_config_exit(spa, SCL_CONFIG, FTAG);
        mutex_enter(&vd->vdev_initialize_io_lock);
        while (vd->vdev_initialize_inflight > 0) {
                cv_wait(&vd->vdev_initialize_io_cv,
                    &vd->vdev_initialize_io_lock);
        }
        mutex_exit(&vd->vdev_initialize_io_lock);

        range_tree_destroy(vd->vdev_initialize_tree);
        vdev_initialize_block_free(deadbeef);
        vd->vdev_initialize_tree = NULL;

        mutex_enter(&vd->vdev_initialize_lock);
        if (!vd->vdev_initialize_exit_wanted) {
                if (vdev_writeable(vd)) {
                        vdev_initialize_change_state(vd,
                            VDEV_INITIALIZE_COMPLETE);
                } else if (vd->vdev_faulted) {
                        vdev_initialize_change_state(vd,
                            VDEV_INITIALIZE_CANCELED);
                }
        }
        ASSERT(vd->vdev_initialize_thread != NULL ||
            vd->vdev_initialize_inflight == 0);

        /*
         * Drop the vdev_initialize_lock while we sync out the
         * txg since it's possible that a device might be trying to
         * come online and must check to see if it needs to restart an
         * initialization. That thread will be holding the spa_config_lock
         * which would prevent the txg_wait_synced from completing.
         */
        mutex_exit(&vd->vdev_initialize_lock);
        txg_wait_synced(spa_get_dsl(spa), 0);
        mutex_enter(&vd->vdev_initialize_lock);

        vd->vdev_initialize_thread = NULL;
        cv_broadcast(&vd->vdev_initialize_cv);
        mutex_exit(&vd->vdev_initialize_lock);

        thread_exit();
}

/*
 * Initiates a device. Caller must hold vdev_initialize_lock.
 * Device must be a leaf and not already be initializing.
 */
void
vdev_initialize(vdev_t *vd)
{
        ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
        ASSERT(vd->vdev_ops->vdev_op_leaf);
        ASSERT(vdev_is_concrete(vd));
        ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
        ASSERT(!vd->vdev_detached);
        ASSERT(!vd->vdev_initialize_exit_wanted);
        ASSERT(!vd->vdev_top->vdev_removing);

        vdev_initialize_change_state(vd, VDEV_INITIALIZE_ACTIVE);
        vd->vdev_initialize_thread = thread_create(NULL, 0,
            vdev_initialize_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
}

/*
 * Uninitializes a device. Caller must hold vdev_initialize_lock.
 * Device must be a leaf and not already be initializing.
 */
void
vdev_uninitialize(vdev_t *vd)
{
        ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
        ASSERT(vd->vdev_ops->vdev_op_leaf);
        ASSERT(vdev_is_concrete(vd));
        ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
        ASSERT(!vd->vdev_detached);
        ASSERT(!vd->vdev_initialize_exit_wanted);
        ASSERT(!vd->vdev_top->vdev_removing);

        vdev_initialize_change_state(vd, VDEV_INITIALIZE_NONE);
}

/*
 * Wait for the initialize thread to be terminated (cancelled or stopped).
 */
static void
vdev_initialize_stop_wait_impl(vdev_t *vd)
{
        ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));

        while (vd->vdev_initialize_thread != NULL)
                cv_wait(&vd->vdev_initialize_cv, &vd->vdev_initialize_lock);

        ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
        vd->vdev_initialize_exit_wanted = B_FALSE;
}

/*
 * Wait for vdev initialize threads which were listed to cleanly exit.
 */
void
vdev_initialize_stop_wait(spa_t *spa, list_t *vd_list)
{
        (void) spa;
        vdev_t *vd;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        while ((vd = list_remove_head(vd_list)) != NULL) {
                mutex_enter(&vd->vdev_initialize_lock);
                vdev_initialize_stop_wait_impl(vd);
                mutex_exit(&vd->vdev_initialize_lock);
        }
}

/*
 * Stop initializing a device, with the resultant initializing state being
 * tgt_state.  For blocking behavior pass NULL for vd_list.  Otherwise, when
 * a list_t is provided the stopping vdev is inserted into the list.  Callers
 * are then required to call vdev_initialize_stop_wait() to block for all the
 * initialization threads to exit.  The caller must hold vdev_initialize_lock
 * and must not be writing to the spa config, as the initializing thread may
 * try to enter the config as a reader before exiting.
 */
void
vdev_initialize_stop(vdev_t *vd, vdev_initializing_state_t tgt_state,
    list_t *vd_list)
{
        ASSERT(!spa_config_held(vd->vdev_spa, SCL_CONFIG|SCL_STATE, RW_WRITER));
        ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
        ASSERT(vd->vdev_ops->vdev_op_leaf);
        ASSERT(vdev_is_concrete(vd));

        /*
         * Allow cancel requests to proceed even if the initialize thread
         * has stopped.
         */
        if (vd->vdev_initialize_thread == NULL &&
            tgt_state != VDEV_INITIALIZE_CANCELED) {
                return;
        }

        vdev_initialize_change_state(vd, tgt_state);
        vd->vdev_initialize_exit_wanted = B_TRUE;

        if (vd_list == NULL) {
                vdev_initialize_stop_wait_impl(vd);
        } else {
                ASSERT(MUTEX_HELD(&spa_namespace_lock));
                list_insert_tail(vd_list, vd);
        }
}

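/*
 * Recursively walk the vdev tree, requesting that every concrete leaf stop
 * initializing; stopping vdevs are added to vd_list so the caller can wait
 * for their threads to exit.
 */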
static void
vdev_initialize_stop_all_impl(vdev_t *vd, vdev_initializing_state_t tgt_state,
    list_t *vd_list)
{
        if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) {
                mutex_enter(&vd->vdev_initialize_lock);
                vdev_initialize_stop(vd, tgt_state, vd_list);
                mutex_exit(&vd->vdev_initialize_lock);
                return;
        }

        for (uint64_t i = 0; i < vd->vdev_children; i++) {
                vdev_initialize_stop_all_impl(vd->vdev_child[i], tgt_state,
                    vd_list);
        }
}

/*
 * Convenience function to stop initializing of a vdev tree and set all
 * initialize thread pointers to NULL.
 */
void
vdev_initialize_stop_all(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
        spa_t *spa = vd->vdev_spa;
        list_t vd_list;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        list_create(&vd_list, sizeof (vdev_t),
            offsetof(vdev_t, vdev_initialize_node));

        vdev_initialize_stop_all_impl(vd, tgt_state, &vd_list);
        vdev_initialize_stop_wait(spa, &vd_list);

        if (vd->vdev_spa->spa_sync_on) {
                /* Make sure that our state has been synced to disk */
                txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
        }

        list_destroy(&vd_list);
}

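/*
 * Restore initializing state from the on-disk leaf ZAPs for vd and all of
 * its children.  An initialization that was ACTIVE when the pool last ran
 * is resumed by starting a new thread; a SUSPENDED one (or an offline vdev)
 * only has its progress loaded for reporting.
 */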
void
vdev_initialize_restart(vdev_t *vd)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));
        ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

        if (vd->vdev_leaf_zap != 0) {
                mutex_enter(&vd->vdev_initialize_lock);
                uint64_t initialize_state = VDEV_INITIALIZE_NONE;
                int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_STATE,
                    sizeof (initialize_state), 1, &initialize_state);
                ASSERT(err == 0 || err == ENOENT);
                vd->vdev_initialize_state = initialize_state;

                uint64_t timestamp = 0;
                err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME,
                    sizeof (timestamp), 1, &timestamp);
                ASSERT(err == 0 || err == ENOENT);
                vd->vdev_initialize_action_time = timestamp;

                if (vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
                    vd->vdev_offline) {
                        /* load progress for reporting, but don't resume */
                        VERIFY0(vdev_initialize_load(vd));
                } else if (vd->vdev_initialize_state ==
                    VDEV_INITIALIZE_ACTIVE && vdev_writeable(vd) &&
                    !vd->vdev_top->vdev_removing &&
                    vd->vdev_initialize_thread == NULL) {
                        vdev_initialize(vd);
                }

                mutex_exit(&vd->vdev_initialize_lock);
        }

        for (uint64_t i = 0; i < vd->vdev_children; i++) {
                vdev_initialize_restart(vd->vdev_child[i]);
        }
}

EXPORT_SYMBOL(vdev_initialize);
EXPORT_SYMBOL(vdev_uninitialize);
EXPORT_SYMBOL(vdev_initialize_stop);
EXPORT_SYMBOL(vdev_initialize_stop_all);
EXPORT_SYMBOL(vdev_initialize_stop_wait);
EXPORT_SYMBOL(vdev_initialize_restart);

ZFS_MODULE_PARAM(zfs, zfs_, initialize_value, U64, ZMOD_RW,
        "Value written during zpool initialize");

ZFS_MODULE_PARAM(zfs, zfs_, initialize_chunk_size, U64, ZMOD_RW,
        "Size in bytes of writes by zpool initialize");