1 /*
2  * CDDL HEADER START
3  *
4  * This file and its contents are supplied under the terms of the
5  * Common Development and Distribution License ("CDDL"), version 1.0.
6  * You may only use this file in accordance with the terms of version
7  * 1.0 of the CDDL.
8  *
9  * A full copy of the text of the CDDL should have accompanied this
10  * source.  A copy of the CDDL is also available via the Internet at
11  * http://www.illumos.org/license/CDDL.
12  *
13  * CDDL HEADER END
14  */
15
16 /*
17  * Copyright (c) 2014, 2017 by Delphix. All rights reserved.
18  */
19
20 #include <sys/zfs_context.h>
21 #include <sys/spa.h>
22 #include <sys/spa_impl.h>
23 #include <sys/vdev_impl.h>
24 #include <sys/fs/zfs.h>
25 #include <sys/zio.h>
26 #include <sys/zio_checksum.h>
27 #include <sys/metaslab.h>
28 #include <sys/refcount.h>
29 #include <sys/dmu.h>
30 #include <sys/vdev_indirect_mapping.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/dsl_synctask.h>
33 #include <sys/zap.h>
34 #include <sys/abd.h>
35 #include <sys/zthr.h>
36
37 /*
38  * An indirect vdev corresponds to a vdev that has been removed.  Since
39  * we cannot rewrite block pointers of snapshots, etc., we keep a
40  * mapping from old location on the removed device to the new location
41  * on another device in the pool and use this mapping whenever we need
42  * to access the DVA.  Unfortunately, this mapping did not respect
43  * logical block boundaries when it was first created, and so a DVA on
44  * this indirect vdev may be "split" into multiple sections that each
45  * map to a different location.  As a consequence, not all DVAs can be
46  * translated to an equivalent new DVA.  Instead we must provide a
47  * "vdev_remap" operation that executes a callback on each contiguous
48  * segment of the new location.  This function is used in multiple ways:
49  *
50  *  - i/os to this vdev use the callback to determine where the
51  *    data is now located, and issue child i/os for each segment's new
52  *    location.
53  *
54  *  - frees and claims to this vdev use the callback to free or claim
55  *    each mapped segment.  (Note that we don't actually need to claim
56  *    log blocks on indirect vdevs, because we don't allocate to
57  *    removing vdevs.  However, zdb uses zio_claim() for its leak
58  *    detection.)
59  */
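/*
 * Illustrative sketch (not part of the build): a minimal callback of the
 * shape vdev_indirect_remap() expects.  It is invoked once for each
 * contiguous segment of the new location, with the offset into the original
 * i/o, the destination vdev, the offset on that vdev, and the segment size.
 * The name example_remap_cb and the dbgmsg text are hypothetical.
 *
 *	static void
 *	example_remap_cb(uint64_t split_offset, vdev_t *vd, uint64_t offset,
 *	    uint64_t size, void *arg)
 *	{
 *		zfs_dbgmsg("segment: io+%llu -> vdev %llu offset %llu size %llu",
 *		    (u_longlong_t)split_offset, (u_longlong_t)vd->vdev_id,
 *		    (u_longlong_t)offset, (u_longlong_t)size);
 *	}
 *
 *	vdev_indirect_remap(vd, offset, asize, example_remap_cb, NULL);
 */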
60
61 /*
62  * "Big theory statement" for how we mark blocks obsolete.
63  *
64  * When a block on an indirect vdev is freed or remapped, a section of
65  * that vdev's mapping may no longer be referenced (aka "obsolete").  We
66  * keep track of how much of each mapping entry is obsolete.  When
67  * an entry becomes completely obsolete, we can remove it, thus reducing
68  * the memory used by the mapping.  The complete picture of obsolescence
69  * is given by the following data structures, described below:
70  *  - the entry-specific obsolete count
71  *  - the vdev-specific obsolete spacemap
72  *  - the pool-specific obsolete bpobj
73  *
74  * == On disk data structures used ==
75  *
76  * We track the obsolete space for the pool using several objects.  Each
77  * of these objects is created on demand and freed when no longer
78  * needed, and is assumed to be empty if it does not exist.
79  * SPA_FEATURE_OBSOLETE_COUNTS includes the count of these objects.
80  *
81  *  - Each vic_mapping_object (associated with an indirect vdev) can
82  *    have a vimp_counts_object.  This is an array of uint32_t's
83  *    with the same number of entries as the vic_mapping_object.  When
84  *    the mapping is condensed, entries from the vic_obsolete_sm_object
85  *    (see below) are folded into the counts.  Therefore, each
86  *    obsolete_counts entry tells us the number of bytes in the
87  *    corresponding mapping entry that were not referenced when the
88  *    mapping was last condensed.
89  *
90  *  - Each indirect or removing vdev can have a vic_obsolete_sm_object.
91  *    This is a space map containing an alloc entry for every DVA that
92  *    has been obsoleted since the last time this indirect vdev was
93  *    condensed.  We use this object in order to improve performance
94  *    when marking a DVA as obsolete.  Instead of modifying an arbitrary
95  *    offset of the vimp_counts_object, we only need to append an entry
96  *    to the end of this object.  When a DVA becomes obsolete, it is
97  *    added to the obsolete space map.  This happens when the DVA is
98  *    freed, remapped and not referenced by a snapshot, or the last
99  *    snapshot referencing it is destroyed.
100  *
101  *  - Each dataset can have a ds_remap_deadlist object.  This is a
102  *    deadlist object containing all blocks that were remapped in this
103  *    dataset but referenced in a previous snapshot.  Blocks can *only*
104  *    appear on this list if they were remapped (dsl_dataset_block_remapped);
105  *    blocks that were killed in a head dataset are put on the normal
106  *    ds_deadlist and marked obsolete when they are freed.
107  *
108  *  - The pool can have a dp_obsolete_bpobj.  This is a list of blocks
109  *    in the pool that need to be marked obsolete.  When a snapshot is
110  *    destroyed, we move some of the ds_remap_deadlist to the obsolete
111  *    bpobj (see dsl_destroy_snapshot_handle_remaps()).  We then
112  *    asynchronously process the obsolete bpobj, moving its entries to
113  *    the specific vdevs' obsolete space maps.
114  *
115  * == Summary of how we mark blocks as obsolete ==
116  *
117  * - When freeing a block: if any DVA is on an indirect vdev, append to
118  *   vic_obsolete_sm_object.
119  * - When remapping a block, add dva to ds_remap_deadlist (if prev snap
120  *   references it; otherwise append to vic_obsolete_sm_object).
121  * - When freeing a snapshot: move parts of ds_remap_deadlist to
122  *   dp_obsolete_bpobj (same algorithm as ds_deadlist).
123  * - When syncing the spa: process dp_obsolete_bpobj, moving ranges to
124  *   individual vdev's vic_obsolete_sm_object.
125  */
126
127 /*
128  * "Big theory statement" for how we condense indirect vdevs.
129  *
130  * Condensing an indirect vdev's mapping is the process of determining
131  * the precise counts of obsolete space for each mapping entry (by
132  * integrating the obsolete spacemap into the obsolete counts) and
133  * writing out a new mapping that contains only referenced entries.
134  *
135  * We condense a vdev when we expect the mapping to shrink (see
136  * vdev_indirect_should_condense()), but only perform one condense at a
137  * time to limit the memory usage.  In addition, we use a separate
138  * open-context thread (spa_condense_indirect_thread) to incrementally
139  * create the new mapping object in a way that minimizes the impact on
140  * the rest of the system.
141  *
142  * == Generating a new mapping ==
143  *
144  * To generate a new mapping, we follow these steps:
145  *
146  * 1. Save the old obsolete space map and create a new mapping object
147  *    (see spa_condense_indirect_start_sync()).  This initializes the
148  *    spa_condensing_indirect_phys with the "previous obsolete space map",
149  *    which is now read only.  Newly obsolete DVAs will be added to a
150  *    new (initially empty) obsolete space map, and will not be
151  *    considered as part of this condense operation.
152  *
153  * 2. Construct in memory the precise counts of obsolete space for each
154  *    mapping entry, by incorporating the obsolete space map into the
155  *    counts.  (See vdev_indirect_mapping_load_obsolete_{counts,spacemap}().)
156  *
157  * 3. Iterate through each mapping entry, writing to the new mapping any
158  *    entries that are not completely obsolete (i.e. which don't have
159  *    obsolete count == mapping length).  (See
160  *    spa_condense_indirect_generate_new_mapping().)
161  *
162  * 4. Destroy the old mapping object and switch over to the new one
163  *    (spa_condense_indirect_complete_sync).
164  *
165  * == Restarting from failure ==
166  *
167  * To restart the condense when we import/open the pool, we must start
168  * at the 2nd step above: reconstruct the precise counts in memory,
169  * based on the space map + counts.  Then in the 3rd step, we start
170  * iterating where we left off: at vimp_max_offset of the new mapping
171  * object.
172  */
173
174 boolean_t zfs_condense_indirect_vdevs_enable = B_TRUE;
175
176 /*
177  * Condense if at least this percent of the bytes in the mapping is
178  * obsolete.  With the default of 25%, the amount of space mapped
179  * will be reduced to 1% of its original size after at most 16
180  * condenses.  Higher values will condense less often (causing less
181  * i/o); lower values will reduce the mapping size more quickly.
182  */
183 int zfs_indirect_condense_obsolete_pct = 25;
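/*
 * Worked arithmetic for the "1% after 16 condenses" claim above: a condense
 * is triggered only once at least 25% of the mapped bytes are obsolete, so
 * at most 75% of the mapping survives each pass (assuming the obsolete bytes
 * fall in entries that can be dropped), and 0.75^16 ~= 0.01.
 */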
184
185 /*
186  * Condense if the obsolete space map takes up more than this amount of
187  * space on disk (logically).  This limits the amount of disk space
188  * consumed by the obsolete space map; the default of 1GB is small enough
189  * that we typically don't mind "wasting" it.
190  */
191 uint64_t zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024;
192
193 /*
194  * Don't bother condensing if the mapping uses less than this amount of
195  * memory.  The default of 128KB is considered a "trivial" amount of
196  * memory and not worth reducing.
197  */
198 uint64_t zfs_condense_min_mapping_bytes = 128 * 1024;
199
200 /*
201  * This is used by the test suite so that it can ensure that certain
202  * actions happen while in the middle of a condense (which might otherwise
203  * complete too quickly).  If used to reduce the performance impact of
204  * condensing in production, a maximum value of 1 should be sufficient.
205  */
206 int zfs_condense_indirect_commit_entry_delay_ticks = 0;
207
208 /*
209  * If a split block contains more than this many segments, consider it too
210  * computationally expensive to check all (2^num_segments) possible
211  * combinations. Instead, try at most 2^_segments_max randomly-selected
212  * combinations.
213  *
214  * This is reasonable if only a few segment copies are damaged and the
215  * majority of segment copies are good. This allows all the segment copies to
216  * participate fairly in the reconstruction and prevents the repeated use of
217  * one bad copy.
218  */
219 int zfs_reconstruct_indirect_segments_max = 10;
220
221 /*
222  * The indirect_child_t represents the vdev that we will read from, when we
223  * need to read all copies of the data (e.g. for scrub or reconstruction).
224  * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
225  * ic_vdev is the same as is_vdev.  However, for mirror top-level vdevs,
226  * ic_vdev is a child of the mirror.
227  */
228 typedef struct indirect_child {
229         abd_t *ic_data;
230         vdev_t *ic_vdev;
231 } indirect_child_t;
232
233 /*
234  * The indirect_split_t represents one mapped segment of an i/o to the
235  * indirect vdev. For non-split (contiguously-mapped) blocks, there will be
236  * only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
237  * For split blocks, there will be several of these.
238  */
239 typedef struct indirect_split {
240         list_node_t is_node; /* link on iv_splits */
241
242         /*
243          * is_split_offset is the offset into the i/o.
244          * This is the sum of the previous splits' is_size's.
245          */
246         uint64_t is_split_offset;
247
248         vdev_t *is_vdev; /* top-level vdev */
249         uint64_t is_target_offset; /* offset on is_vdev */
250         uint64_t is_size;
251         int is_children; /* number of entries in is_child[] */
252
253         /*
254          * is_good_child is the child that we are currently using to
255          * attempt reconstruction.
256          */
257         int is_good_child;
258
259         indirect_child_t is_child[1]; /* variable-length */
260 } indirect_split_t;
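/*
 * Illustrative layout (the numbers are hypothetical): a 0x18000-byte read
 * whose mapping was split across two destinations would be described by two
 * indirect_split_t's on iv_splits:
 *
 *	split 0: is_split_offset = 0x0000, is_size = 0x08000,
 *	    is_vdev/is_target_offset = first destination
 *	split 1: is_split_offset = 0x8000, is_size = 0x10000,
 *	    is_vdev/is_target_offset = second destination
 *
 * Each is_split_offset is the running sum of the earlier splits' is_size's,
 * and the is_size's add up to the original io_size.
 */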
261
262 /*
263  * The indirect_vsd_t is associated with each i/o to the indirect vdev.
264  * It is the "Vdev-Specific Data" in the zio_t's io_vsd.
265  */
266 typedef struct indirect_vsd {
267         boolean_t iv_split_block;
268         boolean_t iv_reconstruct;
269
270         list_t iv_splits; /* list of indirect_split_t's */
271 } indirect_vsd_t;
272
273 static void
274 vdev_indirect_map_free(zio_t *zio)
275 {
276         indirect_vsd_t *iv = zio->io_vsd;
277
278         indirect_split_t *is;
279         while ((is = list_head(&iv->iv_splits)) != NULL) {
280                 for (int c = 0; c < is->is_children; c++) {
281                         indirect_child_t *ic = &is->is_child[c];
282                         if (ic->ic_data != NULL)
283                                 abd_free(ic->ic_data);
284                 }
285                 list_remove(&iv->iv_splits, is);
286                 kmem_free(is,
287                     offsetof(indirect_split_t, is_child[is->is_children]));
288         }
289         kmem_free(iv, sizeof (*iv));
290 }
291
292 static const zio_vsd_ops_t vdev_indirect_vsd_ops = {
293         vdev_indirect_map_free,
294         zio_vsd_default_cksum_report
295 };
296 /*
297  * Mark the given offset and size as being obsolete.
298  */
299 void
300 vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size)
301 {
302         spa_t *spa = vd->vdev_spa;
303
304         ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, !=, 0);
305         ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
306         ASSERT(size > 0);
307         VERIFY(vdev_indirect_mapping_entry_for_offset(
308             vd->vdev_indirect_mapping, offset) != NULL);
309
310         if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
311                 mutex_enter(&vd->vdev_obsolete_lock);
312                 range_tree_add(vd->vdev_obsolete_segments, offset, size);
313                 mutex_exit(&vd->vdev_obsolete_lock);
314                 vdev_dirty(vd, 0, NULL, spa_syncing_txg(spa));
315         }
316 }
317
318 /*
319  * Mark the DVA vdev_id:offset:size as being obsolete in the given tx. This
320  * wrapper is provided because the DMU does not know about vdev_t's and
321  * cannot directly call vdev_indirect_mark_obsolete.
322  */
323 void
324 spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset,
325     uint64_t size, dmu_tx_t *tx)
326 {
327         vdev_t *vd = vdev_lookup_top(spa, vdev_id);
328         ASSERT(dmu_tx_is_syncing(tx));
329
330         /* The DMU can only remap indirect vdevs. */
331         ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
332         vdev_indirect_mark_obsolete(vd, offset, size);
333 }
334
335 static spa_condensing_indirect_t *
336 spa_condensing_indirect_create(spa_t *spa)
337 {
338         spa_condensing_indirect_phys_t *scip =
339             &spa->spa_condensing_indirect_phys;
340         spa_condensing_indirect_t *sci = kmem_zalloc(sizeof (*sci), KM_SLEEP);
341         objset_t *mos = spa->spa_meta_objset;
342
343         for (int i = 0; i < TXG_SIZE; i++) {
344                 list_create(&sci->sci_new_mapping_entries[i],
345                     sizeof (vdev_indirect_mapping_entry_t),
346                     offsetof(vdev_indirect_mapping_entry_t, vime_node));
347         }
348
349         sci->sci_new_mapping =
350             vdev_indirect_mapping_open(mos, scip->scip_next_mapping_object);
351
352         return (sci);
353 }
354
355 static void
356 spa_condensing_indirect_destroy(spa_condensing_indirect_t *sci)
357 {
358         for (int i = 0; i < TXG_SIZE; i++)
359                 list_destroy(&sci->sci_new_mapping_entries[i]);
360
361         if (sci->sci_new_mapping != NULL)
362                 vdev_indirect_mapping_close(sci->sci_new_mapping);
363
364         kmem_free(sci, sizeof (*sci));
365 }
366
367 boolean_t
368 vdev_indirect_should_condense(vdev_t *vd)
369 {
370         vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
371         spa_t *spa = vd->vdev_spa;
372
373         ASSERT(dsl_pool_sync_context(spa->spa_dsl_pool));
374
375         if (!zfs_condense_indirect_vdevs_enable)
376                 return (B_FALSE);
377
378         /*
379          * We can only condense one indirect vdev at a time.
380          */
381         if (spa->spa_condensing_indirect != NULL)
382                 return (B_FALSE);
383
384         if (spa_shutting_down(spa))
385                 return (B_FALSE);
386
387         /*
388          * The mapping object size must not change while we are
389          * condensing, so we can only condense indirect vdevs
390          * (not vdevs that are still in the middle of being removed).
391          */
392         if (vd->vdev_ops != &vdev_indirect_ops)
393                 return (B_FALSE);
394
395         /*
396          * If nothing new has been marked obsolete, there is no
397          * point in condensing.
398          */
399         if (vd->vdev_obsolete_sm == NULL) {
400                 ASSERT0(vdev_obsolete_sm_object(vd));
401                 return (B_FALSE);
402         }
403
404         ASSERT(vd->vdev_obsolete_sm != NULL);
405
406         ASSERT3U(vdev_obsolete_sm_object(vd), ==,
407             space_map_object(vd->vdev_obsolete_sm));
408
409         uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim);
410         uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm);
411         uint64_t mapping_size = vdev_indirect_mapping_size(vim);
412         uint64_t obsolete_sm_size = space_map_length(vd->vdev_obsolete_sm);
413
414         ASSERT3U(bytes_obsolete, <=, bytes_mapped);
415
416         /*
417          * If a high percentage of the bytes that are mapped have become
418          * obsolete, condense (unless the mapping is already small enough).
419          * This has a good chance of reducing the amount of memory used
420          * by the mapping.
421          */
422         if (bytes_obsolete * 100 / bytes_mapped >=
423             zfs_indirect_condense_obsolete_pct &&
424             mapping_size > zfs_condense_min_mapping_bytes) {
425                 zfs_dbgmsg("should condense vdev %llu because obsolete "
426                     "spacemap covers %d%% of %lluMB mapping",
427                     (u_longlong_t)vd->vdev_id,
428                     (int)(bytes_obsolete * 100 / bytes_mapped),
429                     (u_longlong_t)bytes_mapped / 1024 / 1024);
430                 return (B_TRUE);
431         }
432
433         /*
434          * If the obsolete space map takes up too much space on disk,
435          * condense in order to free up this disk space.
436          */
437         if (obsolete_sm_size >= zfs_condense_max_obsolete_bytes) {
438                 zfs_dbgmsg("should condense vdev %llu because obsolete sm "
439                     "length %lluMB >= max size %lluMB",
440                     (u_longlong_t)vd->vdev_id,
441                     (u_longlong_t)obsolete_sm_size / 1024 / 1024,
442                     (u_longlong_t)zfs_condense_max_obsolete_bytes /
443                     1024 / 1024);
444                 return (B_TRUE);
445         }
446
447         return (B_FALSE);
448 }
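/*
 * Worked example of the two triggers above (hypothetical numbers): a vdev
 * whose mapping covers 4GB with 1.2GB obsolete condenses via the percentage
 * rule (30% >= zfs_indirect_condense_obsolete_pct), provided its in-core
 * mapping is larger than zfs_condense_min_mapping_bytes.  A vdev with only
 * 5% obsolete, but whose obsolete space map has grown past
 * zfs_condense_max_obsolete_bytes on disk, condenses via the size rule.
 */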
449
450 /*
451  * This sync task completes (finishes) a condense, deleting the old
452  * mapping and replacing it with the new one.
453  */
454 static void
455 spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx)
456 {
457         spa_condensing_indirect_t *sci = arg;
458         spa_t *spa = dmu_tx_pool(tx)->dp_spa;
459         spa_condensing_indirect_phys_t *scip =
460             &spa->spa_condensing_indirect_phys;
461         vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev);
462         vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
463         objset_t *mos = spa->spa_meta_objset;
464         vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
465         uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping);
466         uint64_t new_count =
467             vdev_indirect_mapping_num_entries(sci->sci_new_mapping);
468
469         ASSERT(dmu_tx_is_syncing(tx));
470         ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
471         ASSERT3P(sci, ==, spa->spa_condensing_indirect);
472         for (int i = 0; i < TXG_SIZE; i++) {
473                 ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
474         }
475         ASSERT(vic->vic_mapping_object != 0);
476         ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
477         ASSERT(scip->scip_next_mapping_object != 0);
478         ASSERT(scip->scip_prev_obsolete_sm_object != 0);
479
480         /*
481          * Reset vdev_indirect_mapping to refer to the new object.
482          */
483         rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER);
484         vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
485         vd->vdev_indirect_mapping = sci->sci_new_mapping;
486         rw_exit(&vd->vdev_indirect_rwlock);
487
488         sci->sci_new_mapping = NULL;
489         vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
490         vic->vic_mapping_object = scip->scip_next_mapping_object;
491         scip->scip_next_mapping_object = 0;
492
493         space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx);
494         spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
495         scip->scip_prev_obsolete_sm_object = 0;
496
497         scip->scip_vdev = 0;
498
499         VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
500             DMU_POOL_CONDENSING_INDIRECT, tx));
501         spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
502         spa->spa_condensing_indirect = NULL;
503
504         zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
505             "new mapping object %llu has %llu entries "
506             "(was %llu entries)",
507             vd->vdev_id, dmu_tx_get_txg(tx), vic->vic_mapping_object,
508             new_count, old_count);
509
510         vdev_config_dirty(spa->spa_root_vdev);
511 }
512
513 /*
514  * This sync task appends entries to the new mapping object.
515  */
516 static void
517 spa_condense_indirect_commit_sync(void *arg, dmu_tx_t *tx)
518 {
519         spa_condensing_indirect_t *sci = arg;
520         uint64_t txg = dmu_tx_get_txg(tx);
521         spa_t *spa = dmu_tx_pool(tx)->dp_spa;
522
523         ASSERT(dmu_tx_is_syncing(tx));
524         ASSERT3P(sci, ==, spa->spa_condensing_indirect);
525
526         vdev_indirect_mapping_add_entries(sci->sci_new_mapping,
527             &sci->sci_new_mapping_entries[txg & TXG_MASK], tx);
528         ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));
529 }
530
531 /*
532  * Open-context function to add one entry to the new mapping.  The new
533  * entry will be remembered and written from syncing context.
534  */
535 static void
536 spa_condense_indirect_commit_entry(spa_t *spa,
537     vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count)
538 {
539         spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
540
541         ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst));
542
543         dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
544         dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
545         VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
546         int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
547
548         /*
549          * If we are the first entry committed this txg, kick off the sync
550          * task to write to the MOS on our behalf.
551          */
552         if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) {
553                 dsl_sync_task_nowait(dmu_tx_pool(tx),
554                     spa_condense_indirect_commit_sync, sci,
555                     0, ZFS_SPACE_CHECK_NONE, tx);
556         }
557
558         vdev_indirect_mapping_entry_t *vime =
559             kmem_alloc(sizeof (*vime), KM_SLEEP);
560         vime->vime_mapping = *vimep;
561         vime->vime_obsolete_count = count;
562         list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime);
563
564         dmu_tx_commit(tx);
565 }
566
567 static void
568 spa_condense_indirect_generate_new_mapping(vdev_t *vd,
569     uint32_t *obsolete_counts, uint64_t start_index, zthr_t *zthr)
570 {
571         spa_t *spa = vd->vdev_spa;
572         uint64_t mapi = start_index;
573         vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
574         uint64_t old_num_entries =
575             vdev_indirect_mapping_num_entries(old_mapping);
576
577         ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
578         ASSERT3U(vd->vdev_id, ==, spa->spa_condensing_indirect_phys.scip_vdev);
579
580         zfs_dbgmsg("starting condense of vdev %llu from index %llu",
581             (u_longlong_t)vd->vdev_id,
582             (u_longlong_t)mapi);
583
584         while (mapi < old_num_entries) {
585
586                 if (zthr_iscancelled(zthr)) {
587                         zfs_dbgmsg("pausing condense of vdev %llu "
588                             "at index %llu", (u_longlong_t)vd->vdev_id,
589                             (u_longlong_t)mapi);
590                         break;
591                 }
592
593                 vdev_indirect_mapping_entry_phys_t *entry =
594                     &old_mapping->vim_entries[mapi];
595                 uint64_t entry_size = DVA_GET_ASIZE(&entry->vimep_dst);
596                 ASSERT3U(obsolete_counts[mapi], <=, entry_size);
597                 if (obsolete_counts[mapi] < entry_size) {
598                         spa_condense_indirect_commit_entry(spa, entry,
599                             obsolete_counts[mapi]);
600
601                         /*
602                          * This delay may be requested for testing, debugging,
603                          * or performance reasons.
604                          */
605                         delay(zfs_condense_indirect_commit_entry_delay_ticks);
606                 }
607
608                 mapi++;
609         }
610 }
611
612 /* ARGSUSED */
613 static boolean_t
614 spa_condense_indirect_thread_check(void *arg, zthr_t *zthr)
615 {
616         spa_t *spa = arg;
617
618         return (spa->spa_condensing_indirect != NULL);
619 }
620
621 /* ARGSUSED */
622 static int
623 spa_condense_indirect_thread(void *arg, zthr_t *zthr)
624 {
625         spa_t *spa = arg;
626         vdev_t *vd;
627
628         ASSERT3P(spa->spa_condensing_indirect, !=, NULL);
629         spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
630         vd = vdev_lookup_top(spa, spa->spa_condensing_indirect_phys.scip_vdev);
631         ASSERT3P(vd, !=, NULL);
632         spa_config_exit(spa, SCL_VDEV, FTAG);
633
634         spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
635         spa_condensing_indirect_phys_t *scip =
636             &spa->spa_condensing_indirect_phys;
637         uint32_t *counts;
638         uint64_t start_index;
639         vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
640         space_map_t *prev_obsolete_sm = NULL;
641
642         ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
643         ASSERT(scip->scip_next_mapping_object != 0);
644         ASSERT(scip->scip_prev_obsolete_sm_object != 0);
645         ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
646
647         for (int i = 0; i < TXG_SIZE; i++) {
648                 /*
649                  * The list must start out empty in order for the
650                  * _commit_sync() sync task to be properly registered
651                  * on the first call to _commit_entry(); so it's wise
652                  * to double check and ensure we actually are starting
653                  * with empty lists.
654                  */
655                 ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
656         }
657
658         VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
659             scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
660         space_map_update(prev_obsolete_sm);
661         counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping);
662         if (prev_obsolete_sm != NULL) {
663                 vdev_indirect_mapping_load_obsolete_spacemap(old_mapping,
664                     counts, prev_obsolete_sm);
665         }
666         space_map_close(prev_obsolete_sm);
667
668         /*
669          * Generate new mapping.  Determine what index to continue from
670          * based on the max offset that we've already written in the
671          * new mapping.
672          */
673         uint64_t max_offset =
674             vdev_indirect_mapping_max_offset(sci->sci_new_mapping);
675         if (max_offset == 0) {
676                 /* We haven't written anything to the new mapping yet. */
677                 start_index = 0;
678         } else {
679                 /*
680                  * Pick up from where we left off. _entry_for_offset()
681                  * returns a pointer into the vim_entries array. If
682                  * max_offset is greater than any of the mappings
683                  * contained in the table, NULL will be returned and
684                  * that indicates we've exhausted our iteration of the
685                  * old_mapping.
686                  */
687
688                 vdev_indirect_mapping_entry_phys_t *entry =
689                     vdev_indirect_mapping_entry_for_offset_or_next(old_mapping,
690                     max_offset);
691
692                 if (entry == NULL) {
693                         /*
694                          * We've already written the whole new mapping.
695                          * This special value will cause us to skip the
696                          * generate_new_mapping step and just do the sync
697                          * task to complete the condense.
698                          */
699                         start_index = UINT64_MAX;
700                 } else {
701                         start_index = entry - old_mapping->vim_entries;
702                         ASSERT3U(start_index, <,
703                             vdev_indirect_mapping_num_entries(old_mapping));
704                 }
705         }
706
707         spa_condense_indirect_generate_new_mapping(vd, counts,
708             start_index, zthr);
709
710         vdev_indirect_mapping_free_obsolete_counts(old_mapping, counts);
711
712         /*
713          * If the zthr has received a cancellation signal while running
714          * in generate_new_mapping() or at any point after that, then bail
715          * early. We don't want to complete the condense if the spa is
716          * shutting down.
717          */
718         if (zthr_iscancelled(zthr))
719                 return (0);
720
721         VERIFY0(dsl_sync_task(spa_name(spa), NULL,
722             spa_condense_indirect_complete_sync, sci, 0,
723             ZFS_SPACE_CHECK_EXTRA_RESERVED));
724
725         return (0);
726         thread_exit();
727 }
728
729 /*
730  * Sync task to begin the condensing process.
731  */
732 void
733 spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
734 {
735         spa_t *spa = vd->vdev_spa;
736         spa_condensing_indirect_phys_t *scip =
737             &spa->spa_condensing_indirect_phys;
738
739         ASSERT0(scip->scip_next_mapping_object);
740         ASSERT0(scip->scip_prev_obsolete_sm_object);
741         ASSERT0(scip->scip_vdev);
742         ASSERT(dmu_tx_is_syncing(tx));
743         ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
744         ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS));
745         ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping));
746
747         uint64_t obsolete_sm_obj = vdev_obsolete_sm_object(vd);
748         ASSERT(obsolete_sm_obj != 0);
749
750         scip->scip_vdev = vd->vdev_id;
751         scip->scip_next_mapping_object =
752             vdev_indirect_mapping_alloc(spa->spa_meta_objset, tx);
753
754         scip->scip_prev_obsolete_sm_object = obsolete_sm_obj;
755
756         /*
757          * We don't need to allocate a new space map object, since
758          * vdev_indirect_sync_obsolete will allocate one when needed.
759          */
760         space_map_close(vd->vdev_obsolete_sm);
761         vd->vdev_obsolete_sm = NULL;
762         VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
763             VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
764
765         VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
766             DMU_POOL_DIRECTORY_OBJECT,
767             DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
768             sizeof (*scip) / sizeof (uint64_t), scip, tx));
769
770         ASSERT3P(spa->spa_condensing_indirect, ==, NULL);
771         spa->spa_condensing_indirect = spa_condensing_indirect_create(spa);
772
773         zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
774             "posm=%llu nm=%llu",
775             vd->vdev_id, dmu_tx_get_txg(tx),
776             (u_longlong_t)scip->scip_prev_obsolete_sm_object,
777             (u_longlong_t)scip->scip_next_mapping_object);
778
779         zthr_wakeup(spa->spa_condense_zthr);
780 }
781
782 /*
783  * Sync to the given vdev's obsolete space map any segments that are no longer
784  * referenced as of the given txg.
785  *
786  * If the obsolete space map doesn't exist yet, create and open it.
787  */
788 void
789 vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx)
790 {
791         spa_t *spa = vd->vdev_spa;
792         vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
793
794         ASSERT3U(vic->vic_mapping_object, !=, 0);
795         ASSERT(range_tree_space(vd->vdev_obsolete_segments) > 0);
796         ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
797         ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS));
798
799         if (vdev_obsolete_sm_object(vd) == 0) {
800                 uint64_t obsolete_sm_object =
801                     space_map_alloc(spa->spa_meta_objset,
802                     vdev_standard_sm_blksz, tx);
803
804                 ASSERT(vd->vdev_top_zap != 0);
805                 VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
806                     VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM,
807                     sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx));
808                 ASSERT3U(vdev_obsolete_sm_object(vd), !=, 0);
809
810                 spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
811                 VERIFY0(space_map_open(&vd->vdev_obsolete_sm,
812                     spa->spa_meta_objset, obsolete_sm_object,
813                     0, vd->vdev_asize, 0));
814                 space_map_update(vd->vdev_obsolete_sm);
815         }
816
817         ASSERT(vd->vdev_obsolete_sm != NULL);
818         ASSERT3U(vdev_obsolete_sm_object(vd), ==,
819             space_map_object(vd->vdev_obsolete_sm));
820
821         space_map_write(vd->vdev_obsolete_sm,
822             vd->vdev_obsolete_segments, SM_ALLOC, SM_NO_VDEVID, tx);
823         space_map_update(vd->vdev_obsolete_sm);
824         range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
825 }
826
827 int
828 spa_condense_init(spa_t *spa)
829 {
830         int error = zap_lookup(spa->spa_meta_objset,
831             DMU_POOL_DIRECTORY_OBJECT,
832             DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
833             sizeof (spa->spa_condensing_indirect_phys) / sizeof (uint64_t),
834             &spa->spa_condensing_indirect_phys);
835         if (error == 0) {
836                 if (spa_writeable(spa)) {
837                         spa->spa_condensing_indirect =
838                             spa_condensing_indirect_create(spa);
839                 }
840                 return (0);
841         } else if (error == ENOENT) {
842                 return (0);
843         } else {
844                 return (error);
845         }
846 }
847
848 void
849 spa_condense_fini(spa_t *spa)
850 {
851         if (spa->spa_condensing_indirect != NULL) {
852                 spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
853                 spa->spa_condensing_indirect = NULL;
854         }
855 }
856
857 void
858 spa_start_indirect_condensing_thread(spa_t *spa)
859 {
860         ASSERT3P(spa->spa_condense_zthr, ==, NULL);
861         spa->spa_condense_zthr = zthr_create(spa_condense_indirect_thread_check,
862             spa_condense_indirect_thread, spa);
863 }
864
865 /*
866  * Gets the obsolete spacemap object from the vdev's ZAP.
867  * Returns the spacemap object, or 0 if it wasn't in the ZAP or the ZAP doesn't
868  * exist yet.
869  */
870 int
871 vdev_obsolete_sm_object(vdev_t *vd)
872 {
873         ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
874         if (vd->vdev_top_zap == 0) {
875                 return (0);
876         }
877
878         uint64_t sm_obj = 0;
879         int err = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
880             VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (sm_obj), 1, &sm_obj);
881
882         ASSERT(err == 0 || err == ENOENT);
883
884         return (sm_obj);
885 }
886
887 boolean_t
888 vdev_obsolete_counts_are_precise(vdev_t *vd)
889 {
890         ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
891         if (vd->vdev_top_zap == 0) {
892                 return (B_FALSE);
893         }
894
895         uint64_t val = 0;
896         int err = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
897             VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (val), 1, &val);
898
899         ASSERT(err == 0 || err == ENOENT);
900
901         return (val != 0);
902 }
903
904 /* ARGSUSED */
905 static void
906 vdev_indirect_close(vdev_t *vd)
907 {
908 }
909
910 /* ARGSUSED */
911 static int
912 vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
913     uint64_t *logical_ashift, uint64_t *physical_ashift)
914 {
915         *psize = *max_psize = vd->vdev_asize +
916             VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
917         *logical_ashift = vd->vdev_ashift;
918         *physical_ashift = vd->vdev_physical_ashift;
919         return (0);
920 }
921
922 typedef struct remap_segment {
923         vdev_t *rs_vd;
924         uint64_t rs_offset;
925         uint64_t rs_asize;
926         uint64_t rs_split_offset;
927         list_node_t rs_node;
928 } remap_segment_t;
929
930 remap_segment_t *
931 rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
932 {
933         remap_segment_t *rs = kmem_alloc(sizeof (remap_segment_t), KM_SLEEP);
934         rs->rs_vd = vd;
935         rs->rs_offset = offset;
936         rs->rs_asize = asize;
937         rs->rs_split_offset = split_offset;
938         return (rs);
939 }
940
941 /*
942  * Given an indirect vdev and an extent on that vdev, this function duplicates
943  * physical entries of the indirect mapping that correspond to the extent
944  * to a new array and returns a pointer to it. In addition, copied_entries
945  * is populated with the number of mapping entries that were duplicated.
946  *
947  * Note that the function assumes that the caller holds vdev_indirect_rwlock.
948  * This ensures that the mapping won't change due to condensing as we
949  * copy over its contents.
950  *
951  * Finally, since we are doing an allocation, it is up to the caller to
952  * free the array allocated in this function.
953  */
954 vdev_indirect_mapping_entry_phys_t *
955 vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
956     uint64_t asize, uint64_t *copied_entries)
957 {
958         vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
959         vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
960         uint64_t entries = 0;
961
962         ASSERT(RW_READ_HELD(&vd->vdev_indirect_rwlock));
963
964         vdev_indirect_mapping_entry_phys_t *first_mapping =
965             vdev_indirect_mapping_entry_for_offset(vim, offset);
966         ASSERT3P(first_mapping, !=, NULL);
967
968         vdev_indirect_mapping_entry_phys_t *m = first_mapping;
969         while (asize > 0) {
970                 uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
971
972                 ASSERT3U(offset, >=, DVA_MAPPING_GET_SRC_OFFSET(m));
973                 ASSERT3U(offset, <, DVA_MAPPING_GET_SRC_OFFSET(m) + size);
974
975                 uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
976                 uint64_t inner_size = MIN(asize, size - inner_offset);
977
978                 offset += inner_size;
979                 asize -= inner_size;
980                 entries++;
981                 m++;
982         }
983
984         size_t copy_length = entries * sizeof (*first_mapping);
985         duplicate_mappings = kmem_alloc(copy_length, KM_SLEEP);
986         bcopy(first_mapping, duplicate_mappings, copy_length);
987         *copied_entries = entries;
988
989         return (duplicate_mappings);
990 }
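/*
 * Illustrative usage of the contract above (a sketch; the variable names are
 * hypothetical), matching the pattern used by vdev_indirect_remap() below:
 * take vdev_indirect_rwlock as reader, copy the relevant entries, drop the
 * lock, and free the copy when done with it.
 *
 *	uint64_t num_entries = 0;
 *	rw_enter(&vd->vdev_indirect_rwlock, RW_READER);
 *	vdev_indirect_mapping_entry_phys_t *mapping =
 *	    vdev_indirect_mapping_duplicate_adjacent_entries(vd, offset,
 *	    asize, &num_entries);
 *	rw_exit(&vd->vdev_indirect_rwlock);
 *	...
 *	kmem_free(mapping, num_entries * sizeof (*mapping));
 */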
991
992 /*
993  * Goes through the relevant indirect mappings until it hits a concrete vdev
994  * and issues the callback. On the way to the concrete vdev, if any other
995  * indirect vdevs are encountered, then the callback will also be called on
996  * each of those indirect vdevs. For example, if the segment is mapped to
997  * segment A on indirect vdev 1, and then segment A on indirect vdev 1 is
998  * mapped to segment B on concrete vdev 2, then the callback will be called on
999  * both vdev 1 and vdev 2.
1000  *
1001  * While the callback passed to vdev_indirect_remap() is called on every vdev
1002  * the function encounters, certain callbacks only care about concrete vdevs.
1003  * These types of callbacks should return immediately and explicitly when they
1004  * are called on an indirect vdev.
1005  *
1006  * Because there is a possibility that a DVA section in the indirect device
1007  * has been split into multiple sections in our mapping, we keep track
1008  * of the relevant contiguous segments of the new location (remap_segment_t)
1009  * in a stack. This way we can call the callback for each of the new sections
1010  * created by a single section of the indirect device. Note, though, that in
1011  * this scenario the callbacks in each split block won't occur in-order in
1012  * terms of offset, so callers should not make any assumptions about that.
1013  *
1014  * For callbacks that don't handle split blocks and immediately return when
1015  * they encounter them (as is the case for remap_blkptr_cb), the caller can
1016  * assume that its callback will be applied from the first indirect vdev
1017  * encountered to the last one and then the concrete vdev, in that order.
1018  */
1019 static void
1020 vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize,
1021     void (*func)(uint64_t, vdev_t *, uint64_t, uint64_t, void *), void *arg)
1022 {
1023         list_t stack;
1024         spa_t *spa = vd->vdev_spa;
1025
1026         list_create(&stack, sizeof (remap_segment_t),
1027             offsetof(remap_segment_t, rs_node));
1028
1029         for (remap_segment_t *rs = rs_alloc(vd, offset, asize, 0);
1030             rs != NULL; rs = list_remove_head(&stack)) {
1031                 vdev_t *v = rs->rs_vd;
1032                 uint64_t num_entries = 0;
1033
1034                 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1035                 ASSERT(rs->rs_asize > 0);
1036
1037                 /*
1038                  * Note: As this function can be called from open context
1039                  * (e.g. zio_read()), we need the following rwlock to
1040                  * prevent the mapping from being changed by condensing.
1041                  *
1042                  * So we grab the lock and we make a copy of the entries
1043                  * that are relevant to the extent that we are working on.
1044                  * Once that is done, we drop the lock and iterate over
1045                  * our copy of the mapping. Once we are done with
1046                  * the remap segment and free it, we also free our copy
1047                  * of the indirect mapping entries that are relevant to it.
1048                  *
1049                  * This way we don't need to wait until the function is
1050                  * finished with a segment, to condense it. In addition, we
1051                  * don't need a recursive rwlock for the case that a call to
1052                  * vdev_indirect_remap() needs to call itself (through the
1053                  * codepath of its callback) for the same vdev in the middle
1054                  * of its execution.
1055                  */
1056                 rw_enter(&v->vdev_indirect_rwlock, RW_READER);
1057                 vdev_indirect_mapping_t *vim = v->vdev_indirect_mapping;
1058                 ASSERT3P(vim, !=, NULL);
1059
1060                 vdev_indirect_mapping_entry_phys_t *mapping =
1061                     vdev_indirect_mapping_duplicate_adjacent_entries(v,
1062                     rs->rs_offset, rs->rs_asize, &num_entries);
1063                 ASSERT3P(mapping, !=, NULL);
1064                 ASSERT3U(num_entries, >, 0);
1065                 rw_exit(&v->vdev_indirect_rwlock);
1066
1067                 for (uint64_t i = 0; i < num_entries; i++) {
1068                         /*
1069                          * Note: the vdev_indirect_mapping can not change
1070                          * while we are running.  It only changes while the
1071                          * removal is in progress, and then only from syncing
1072                          * context. While a removal is in progress, this
1073                          * function is only called for frees, which also only
1074                          * happen from syncing context.
1075                          */
1076                         vdev_indirect_mapping_entry_phys_t *m = &mapping[i];
1077
1078                         ASSERT3P(m, !=, NULL);
1079                         ASSERT3U(rs->rs_asize, >, 0);
1080
1081                         uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
1082                         uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
1083                         uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);
1084
1085                         ASSERT3U(rs->rs_offset, >=,
1086                             DVA_MAPPING_GET_SRC_OFFSET(m));
1087                         ASSERT3U(rs->rs_offset, <,
1088                             DVA_MAPPING_GET_SRC_OFFSET(m) + size);
1089                         ASSERT3U(dst_vdev, !=, v->vdev_id);
1090
1091                         uint64_t inner_offset = rs->rs_offset -
1092                             DVA_MAPPING_GET_SRC_OFFSET(m);
1093                         uint64_t inner_size =
1094                             MIN(rs->rs_asize, size - inner_offset);
1095
1096                         vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);
1097                         ASSERT3P(dst_v, !=, NULL);
1098
1099                         if (dst_v->vdev_ops == &vdev_indirect_ops) {
1100                                 list_insert_head(&stack,
1101                                     rs_alloc(dst_v, dst_offset + inner_offset,
1102                                     inner_size, rs->rs_split_offset));
1103
1104                         }
1105
1106                         if ((zfs_flags & ZFS_DEBUG_INDIRECT_REMAP) &&
1107                             IS_P2ALIGNED(inner_size, 2 * SPA_MINBLOCKSIZE)) {
1108                                 /*
1109                          * Note: This clause exists solely for
1110                                  * testing purposes. We use it to ensure that
1111                                  * split blocks work and that the callbacks
1112                                  * using them yield the same result if issued
1113                                  * in reverse order.
1114                                  */
1115                                 uint64_t inner_half = inner_size / 2;
1116
1117                                 func(rs->rs_split_offset + inner_half, dst_v,
1118                                     dst_offset + inner_offset + inner_half,
1119                                     inner_half, arg);
1120
1121                                 func(rs->rs_split_offset, dst_v,
1122                                     dst_offset + inner_offset,
1123                                     inner_half, arg);
1124                         } else {
1125                                 func(rs->rs_split_offset, dst_v,
1126                                     dst_offset + inner_offset,
1127                                     inner_size, arg);
1128                         }
1129
1130                         rs->rs_offset += inner_size;
1131                         rs->rs_asize -= inner_size;
1132                         rs->rs_split_offset += inner_size;
1133                 }
1134                 VERIFY0(rs->rs_asize);
1135
1136                 kmem_free(mapping, num_entries * sizeof (*mapping));
1137                 kmem_free(rs, sizeof (remap_segment_t));
1138         }
1139         list_destroy(&stack);
1140 }
1141
1142 static void
1143 vdev_indirect_child_io_done(zio_t *zio)
1144 {
1145         zio_t *pio = zio->io_private;
1146
1147         mutex_enter(&pio->io_lock);
1148         pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
1149         mutex_exit(&pio->io_lock);
1150
1151         abd_put(zio->io_abd);
1152 }
1153
1154 /*
1155  * This is a callback for vdev_indirect_remap() which allocates an
1156  * indirect_split_t for each split segment and adds it to iv_splits.
1157  */
1158 static void
1159 vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
1160     uint64_t size, void *arg)
1161 {
1162         zio_t *zio = arg;
1163         indirect_vsd_t *iv = zio->io_vsd;
1164
1165         ASSERT3P(vd, !=, NULL);
1166
1167         if (vd->vdev_ops == &vdev_indirect_ops)
1168                 return;
1169
1170         int n = 1;
1171         if (vd->vdev_ops == &vdev_mirror_ops)
1172                 n = vd->vdev_children;
1173
1174         indirect_split_t *is =
1175             kmem_zalloc(offsetof(indirect_split_t, is_child[n]), KM_SLEEP);
1176
1177         is->is_children = n;
1178         is->is_size = size;
1179         is->is_split_offset = split_offset;
1180         is->is_target_offset = offset;
1181         is->is_vdev = vd;
1182
1183         /*
1184          * Note that we only consider multiple copies of the data for
1185          * *mirror* vdevs.  We don't for "replacing" or "spare" vdevs, even
1186          * though they use the same ops as mirror, because there's only one
1187          * "good" copy under the replacing/spare.
1188          */
1189         if (vd->vdev_ops == &vdev_mirror_ops) {
1190                 for (int i = 0; i < n; i++) {
1191                         is->is_child[i].ic_vdev = vd->vdev_child[i];
1192                 }
1193         } else {
1194                 is->is_child[0].ic_vdev = vd;
1195         }
1196
1197         list_insert_tail(&iv->iv_splits, is);
1198 }
1199
1200 static void
1201 vdev_indirect_read_split_done(zio_t *zio)
1202 {
1203         indirect_child_t *ic = zio->io_private;
1204
1205         if (zio->io_error != 0) {
1206                 /*
1207                  * Clear ic_data to indicate that we do not have data for this
1208                  * child.
1209                  */
1210                 abd_free(ic->ic_data);
1211                 ic->ic_data = NULL;
1212         }
1213 }
1214
1215 /*
1216  * Issue reads for all copies (mirror children) of all splits.
1217  */
1218 static void
1219 vdev_indirect_read_all(zio_t *zio)
1220 {
1221         indirect_vsd_t *iv = zio->io_vsd;
1222
1223         for (indirect_split_t *is = list_head(&iv->iv_splits);
1224             is != NULL; is = list_next(&iv->iv_splits, is)) {
1225                 for (int i = 0; i < is->is_children; i++) {
1226                         indirect_child_t *ic = &is->is_child[i];
1227
1228                         if (!vdev_readable(ic->ic_vdev))
1229                                 continue;
1230
1231                         /*
1232                          * Note, we may read from a child whose DTL
1233                          * indicates that the data may not be present here.
1234                          * While this might result in a few i/os that will
1235                          * likely return incorrect data, it simplifies the
1236                          * code since we can treat scrub and resilver
1237                          * identically.  (The incorrect data will be
1238                          * detected and ignored when we verify the
1239                          * checksum.)
1240                          */
1241
1242                         ic->ic_data = abd_alloc_sametype(zio->io_abd,
1243                             is->is_size);
1244
1245                         zio_nowait(zio_vdev_child_io(zio, NULL,
1246                             ic->ic_vdev, is->is_target_offset, ic->ic_data,
1247                             is->is_size, zio->io_type, zio->io_priority, 0,
1248                             vdev_indirect_read_split_done, ic));
1249                 }
1250         }
1251         iv->iv_reconstruct = B_TRUE;
1252 }
1253
1254 static void
1255 vdev_indirect_io_start(zio_t *zio)
1256 {
1257         spa_t *spa = zio->io_spa;
1258         indirect_vsd_t *iv = kmem_zalloc(sizeof (*iv), KM_SLEEP);
1259         list_create(&iv->iv_splits,
1260             sizeof (indirect_split_t), offsetof(indirect_split_t, is_node));
1261
1262         zio->io_vsd = iv;
1263         zio->io_vsd_ops = &vdev_indirect_vsd_ops;
1264
1265         ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1266         if (zio->io_type != ZIO_TYPE_READ) {
1267                 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
1268                 /*
1269                  * Note: this code can handle other kinds of writes,
1270                  * but we don't expect them.
1271                  */
1272                 ASSERT((zio->io_flags & (ZIO_FLAG_SELF_HEAL |
1273                     ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)) != 0);
1274         }
1275
1276         vdev_indirect_remap(zio->io_vd, zio->io_offset, zio->io_size,
1277             vdev_indirect_gather_splits, zio);
1278
1279         indirect_split_t *first = list_head(&iv->iv_splits);
1280         if (first->is_size == zio->io_size) {
1281                 /*
1282                  * This is not a split block; we are pointing to the entire
1283                  * data, which will checksum the same as the original data.
1284                  * Pass the BP down so that the child i/o can verify the
1285                  * checksum, and try a different location if available
1286                  * (e.g. on a mirror).
1287                  *
1288                  * While this special case could be handled the same as the
1289                  * general (split block) case, doing it this way ensures
1290                  * that the vast majority of blocks on indirect vdevs
1291                  * (which are not split) are handled identically to blocks
1292                  * on non-indirect vdevs.  This allows us to be less strict
1293                  * about performance in the general (but rare) case.
1294                  */
1295                 ASSERT0(first->is_split_offset);
1296                 ASSERT3P(list_next(&iv->iv_splits, first), ==, NULL);
1297                 zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
1298                     first->is_vdev, first->is_target_offset,
1299                     abd_get_offset(zio->io_abd, 0),
1300                     zio->io_size, zio->io_type, zio->io_priority, 0,
1301                     vdev_indirect_child_io_done, zio));
1302         } else {
1303                 iv->iv_split_block = B_TRUE;
1304                 if (zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) {
1305                         /*
1306                          * Read all copies.  Note that for simplicity,
1307                          * we don't bother consulting the DTL in the
1308                          * resilver case.
1309                          */
1310                         vdev_indirect_read_all(zio);
1311                 } else {
1312                         /*
1313                          * Read one copy of each split segment, from the
1314                          * top-level vdev.  Since we don't know the
1315                          * checksum of each split individually, the child
1316                          * zio can't ensure that we get the right data.
1317                          * E.g. if it's a mirror, it will just read from a
1318                          * random (healthy) leaf vdev.  We have to verify
1319                          * the checksum in vdev_indirect_io_done().
1320                          */
1321                         for (indirect_split_t *is = list_head(&iv->iv_splits);
1322                             is != NULL; is = list_next(&iv->iv_splits, is)) {
1323                                 zio_nowait(zio_vdev_child_io(zio, NULL,
1324                                     is->is_vdev, is->is_target_offset,
1325                                     abd_get_offset(zio->io_abd,
1326                                     is->is_split_offset),
1327                                     is->is_size, zio->io_type,
1328                                     zio->io_priority, 0,
1329                                     vdev_indirect_child_io_done, zio));
1330                         }
1331                 }
1332         }
1333
1334         zio_execute(zio);
1335 }
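
/*
 * A hypothetical worked example of the split case handled above (the
 * sizes and vdev names below are made up for illustration): suppose a
 * 128KB read is issued to this indirect vdev and the removal mapping
 * translates the first 80KB to vdev A and the remaining 48KB to
 * vdev B.  vdev_indirect_remap() then gathers two splits:
 *
 *   split | is_split_offset | is_size | is_vdev
 *   ======|=================|=========|========
 *     1   |       0KB       |  80KB   |    A
 *     2   |      80KB       |  48KB   |    B
 *
 * and the loop above issues one child read per split, each targeting
 * is_target_offset on its vdev and filling the matching slice of the
 * parent's buffer via abd_get_offset(zio->io_abd, is_split_offset).
 */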
1336
1337 /*
1338  * Report a checksum error for a child.
1339  */
1340 static void
1341 vdev_indirect_checksum_error(zio_t *zio,
1342     indirect_split_t *is, indirect_child_t *ic)
1343 {
1344         vdev_t *vd = ic->ic_vdev;
1345
1346         if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
1347                 return;
1348
1349         mutex_enter(&vd->vdev_stat_lock);
1350         vd->vdev_stat.vs_checksum_errors++;
1351         mutex_exit(&vd->vdev_stat_lock);
1352
1353         zio_bad_cksum_t zbc = { 0 };
1354         void *bad_buf = abd_borrow_buf_copy(ic->ic_data, is->is_size);
1355         abd_t *good_abd = is->is_child[is->is_good_child].ic_data;
1356         void *good_buf = abd_borrow_buf_copy(good_abd, is->is_size);
1357         zfs_ereport_post_checksum(zio->io_spa, vd, zio,
1358             is->is_target_offset, is->is_size, good_buf, bad_buf, &zbc);
1359         abd_return_buf(ic->ic_data, bad_buf, is->is_size);
1360         abd_return_buf(good_abd, good_buf, is->is_size);
1361 }
1362
1363 /*
1364  * Issue repair i/os for any incorrect copies.  We do this by comparing
1365  * each split segment's correct data (is_good_child's ic_data) with each
1366  * other copy of the data.  If they differ, then we overwrite the bad data
1367  * with the good copy.  Note that we do this without regard for the DTLs,
1368  * which simplifies this code and also issues the optimal number of writes
1369  * (based on which copies actually read bad data, as opposed to which we
1370  * think might be wrong).  For the same reason, we always use
1371  * ZIO_FLAG_SELF_HEAL, to bypass the DTL check in zio_vdev_io_start().
1372  */
1373 static void
1374 vdev_indirect_repair(zio_t *zio)
1375 {
1376         indirect_vsd_t *iv = zio->io_vsd;
1377
1383         if (!spa_writeable(zio->io_spa))
1384                 return;
1385
1386         for (indirect_split_t *is = list_head(&iv->iv_splits);
1387             is != NULL; is = list_next(&iv->iv_splits, is)) {
1388                 indirect_child_t *good_child = &is->is_child[is->is_good_child];
1389
1390                 for (int c = 0; c < is->is_children; c++) {
1391                         indirect_child_t *ic = &is->is_child[c];
1392                         if (ic == good_child)
1393                                 continue;
1394                         if (ic->ic_data == NULL)
1395                                 continue;
1396                         if (abd_cmp(good_child->ic_data, ic->ic_data,
1397                             is->is_size) == 0)
1398                                 continue;
1399
1400                         zio_nowait(zio_vdev_child_io(zio, NULL,
1401                             ic->ic_vdev, is->is_target_offset,
1402                             good_child->ic_data, is->is_size,
1403                             ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
1404                             ZIO_FLAG_IO_REPAIR | ZIO_FLAG_SELF_HEAL,
1405                             NULL, NULL));
1406
1407                         vdev_indirect_checksum_error(zio, is, ic);
1408                 }
1409         }
1410 }
1411
1412 /*
1413  * Report checksum errors on all children that we read from.
1414  */
1415 static void
1416 vdev_indirect_all_checksum_errors(zio_t *zio)
1417 {
1418         indirect_vsd_t *iv = zio->io_vsd;
1419
1420         if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
1421                 return;
1422
1423         for (indirect_split_t *is = list_head(&iv->iv_splits);
1424             is != NULL; is = list_next(&iv->iv_splits, is)) {
1425                 for (int c = 0; c < is->is_children; c++) {
1426                         indirect_child_t *ic = &is->is_child[c];
1427
1428                         if (ic->ic_data == NULL)
1429                                 continue;
1430
1431                         vdev_t *vd = ic->ic_vdev;
1432
1433                         mutex_enter(&vd->vdev_stat_lock);
1434                         vd->vdev_stat.vs_checksum_errors++;
1435                         mutex_exit(&vd->vdev_stat_lock);
1436
1437                         zfs_ereport_post_checksum(zio->io_spa, vd, zio,
1438                             is->is_target_offset, is->is_size,
1439                             NULL, NULL, NULL);
1440                 }
1441         }
1442 }
1443
1444 /*
1445  * This function is called when we have read all copies of the data and need
1446  * to try to find a combination of copies that gives us the right checksum.
1447  *
1448  * If we pointed to any mirror vdevs, this effectively does the job of the
1449  * mirror.  The mirror vdev code can't do its own job because we don't know
1450  * the checksum of each split segment individually.  We have to try every
1451  * combination of copies of split segments, until we find one that checksums
1452  * correctly.  (Or until we have tried all combinations, or have tried
1453  * 2^zfs_reconstruct_indirect_segments_max combinations.  In these cases we
1454  * set io_error to ECKSUM to propagate the error up to the user.)
1455  *
1456  * For example, if we have 3 segments in the split,
1457  * and each points to a 2-way mirror, we will have the following pieces of
1458  * data:
1459  *
1460  *       |     mirror child
1461  * split |     [0]        [1]
1462  * ======|=====================
1463  *   A   |  data_A_0   data_A_1
1464  *   B   |  data_B_0   data_B_1
1465  *   C   |  data_C_0   data_C_1
1466  *
1467  * We will try the following (mirror children)^(number of splits) (2^3=8)
1468  * combinations, which is similar to bitwise-little-endian counting in
1469  * binary.  In general each "digit" corresponds to a split segment, and the
1470  * base of each digit is is_children, which can be different for each
1471  * digit.
1472  *
1473  * "low bit"        "high bit"
1474  *        v                 v
1475  * data_A_0 data_B_0 data_C_0
1476  * data_A_1 data_B_0 data_C_0
1477  * data_A_0 data_B_1 data_C_0
1478  * data_A_1 data_B_1 data_C_0
1479  * data_A_0 data_B_0 data_C_1
1480  * data_A_1 data_B_0 data_C_1
1481  * data_A_0 data_B_1 data_C_1
1482  * data_A_1 data_B_1 data_C_1
1483  *
1484  * Note that the split segments may be on the same or different top-level
1485  * vdevs. In either case, we try lots of combinations (see
1486  * zfs_reconstruct_indirect_segments_max).  This ensures that if a mirror has
1487  * small silent errors on all of its children, we can still reconstruct the
1488  * correct data, as long as those errors are at sufficiently-separated
1489  * offsets (specifically, separated by the largest block size - default of
1490  * 128KB, but up to 16MB).
1491  */
1492 static void
1493 vdev_indirect_reconstruct_io_done(zio_t *zio)
1494 {
1495         indirect_vsd_t *iv = zio->io_vsd;
1496         uint64_t attempts = 0;
1497         uint64_t attempts_max = 1ULL << zfs_reconstruct_indirect_segments_max;
1498         int segments = 0;
1499
1500         for (indirect_split_t *is = list_head(&iv->iv_splits);
1501             is != NULL; is = list_next(&iv->iv_splits, is))
1502                 segments++;
1503
1504         for (;;) {
1505                 /* copy data from splits to main zio */
1506                 int ret;
1507                 for (indirect_split_t *is = list_head(&iv->iv_splits);
1508                     is != NULL; is = list_next(&iv->iv_splits, is)) {
1509
1510                         /*
1511                          * If this child failed, its ic_data will be NULL.
1512                          * Skip this combination.
1513                          */
1514                         if (is->is_child[is->is_good_child].ic_data == NULL) {
1515                                 ret = EIO;
1516                                 goto next;
1517                         }
1518
1519                         abd_copy_off(zio->io_abd,
1520                             is->is_child[is->is_good_child].ic_data,
1521                             is->is_split_offset, 0, is->is_size);
1522                 }
1523
1524                 /* See if this checksum matches. */
1525                 zio_bad_cksum_t zbc;
1526                 ret = zio_checksum_error(zio, &zbc);
1527                 if (ret == 0) {
1528                         /* Found a matching checksum.  Issue repair i/os. */
1529                         vdev_indirect_repair(zio);
1530                         zio_checksum_verified(zio);
1531                         return;
1532                 }
1533
1534                 /*
1535                  * Checksum failed; try a different combination of split
1536                  * children.
1537                  */
1538                 boolean_t more;
1539 next:
1540                 more = B_FALSE;
1541                 if (segments <= zfs_reconstruct_indirect_segments_max) {
1542                         /*
1543                          * There are relatively few segments, so
1544                          * deterministically check all combinations.  We do
1545                          * this by adding one to the first split's
1546                          * good_child.  If it overflows, then "carry over" to
1547                          * the next split (like counting in base is_children,
1548                          * but each digit can have a different base).
1549                          */
1550                         for (indirect_split_t *is = list_head(&iv->iv_splits);
1551                             is != NULL; is = list_next(&iv->iv_splits, is)) {
1552                                 is->is_good_child++;
1553                                 if (is->is_good_child < is->is_children) {
1554                                         more = B_TRUE;
1555                                         break;
1556                                 }
1557                                 is->is_good_child = 0;
1558                         }
1559                 } else if (++attempts < attempts_max) {
1560                         /*
1561                          * There are too many combinations to try all of them
1562                          * in a reasonable amount of time, so try a fixed
1563                          * number of random combinations, after which we'll
1564                          * consider the block unrecoverable.
1565                          */
1566                         for (indirect_split_t *is = list_head(&iv->iv_splits);
1567                             is != NULL; is = list_next(&iv->iv_splits, is)) {
1568                                 is->is_good_child =
1569                                     spa_get_random(is->is_children);
1570                         }
1571                         more = B_TRUE;
1572                 }
1573                 if (!more) {
1574                         /* All combinations failed. */
1575                         zio->io_error = ret;
1576                         vdev_indirect_all_checksum_errors(zio);
1577                         zio_checksum_verified(zio);
1578                         return;
1579                 }
1580         }
1581 }
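
/*
 * Illustrative sketch (a standalone helper with hypothetical names, not
 * used by this file): the deterministic walk above is equivalent to
 * counting in a mixed-radix number in which digit i has base bases[i]
 * (the is_children of split i):
 *
 *	static boolean_t
 *	next_combination(int *digit, const int *bases, int ndigits)
 *	{
 *		for (int i = 0; i < ndigits; i++) {
 *			if (++digit[i] < bases[i])
 *				return (B_TRUE);	// new combination
 *			digit[i] = 0;			// overflow; carry to next digit
 *		}
 *		return (B_FALSE);			// every combination visited
 *	}
 *
 * With bases {2, 2, 2} this visits the eight rows of the table in the
 * comment above, least-significant digit (split A) first.
 */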
1582
1583 static void
1584 vdev_indirect_io_done(zio_t *zio)
1585 {
1586         indirect_vsd_t *iv = zio->io_vsd;
1587
1588         if (iv->iv_reconstruct) {
1589                 /*
1590                  * We have read all copies of the data (e.g. from mirrors),
1591                  * either because this was a scrub/resilver, or because the
1592                  * one-copy read didn't checksum correctly.
1593                  */
1594                 vdev_indirect_reconstruct_io_done(zio);
1595                 return;
1596         }
1597
1598         if (!iv->iv_split_block) {
1599                 /*
1600                  * This was not a split block, so we passed the BP down,
1601                  * and the checksum was handled by the (one) child zio.
1602                  */
1603                 return;
1604         }
1605
1606         zio_bad_cksum_t zbc;
1607         int ret = zio_checksum_error(zio, &zbc);
1608         if (ret == 0) {
1609                 zio_checksum_verified(zio);
1610                 return;
1611         }
1612
1613         /*
1614          * The checksum didn't match.  Read all copies of all splits, and
1615          * then we will try to reconstruct.  The next time
1616          * vdev_indirect_io_done() is called, iv_reconstruct will be set.
1617          */
1618         vdev_indirect_read_all(zio);
1619
1620         zio_vdev_io_redone(zio);
1621 }
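
/*
 * Summary of the read path implemented by the functions above:
 *
 *   vdev_indirect_io_start()
 *     - not split: one child i/o with the BP; the child verifies the
 *       checksum and can itself retry other copies (e.g. on a mirror).
 *     - split + scrub/resilver: vdev_indirect_read_all() reads every
 *       copy of every split up front.
 *     - split, normal read: one child i/o per split segment.
 *   vdev_indirect_io_done()
 *     - iv_reconstruct set: vdev_indirect_reconstruct_io_done() searches
 *       combinations of copies and issues repair writes.
 *     - split block, checksum ok: done.
 *     - split block, checksum bad: vdev_indirect_read_all() and
 *       zio_vdev_io_redone(); the next io_done pass reconstructs.
 */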
1622
1623 vdev_ops_t vdev_indirect_ops = {
1624         vdev_indirect_open,
1625         vdev_indirect_close,
1626         vdev_default_asize,
1627         vdev_indirect_io_start,
1628         vdev_indirect_io_done,
1629         NULL,                   /* vdev_op_state_change */
1630         NULL,                   /* vdev_op_need_resilver */
1631         NULL,                   /* vdev_op_hold */
1632         NULL,                   /* vdev_op_rele */
1633         vdev_indirect_remap,
1634         VDEV_TYPE_INDIRECT,     /* name of this vdev type */
1635         B_FALSE                 /* leaf vdev */
1636 };