/*
 * CDDL HEADER START
 *
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2014, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 * Copyright (c) 2014, 2019 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/metaslab.h>
#include <sys/dmu.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/abd.h>
#include <sys/zthr.h>

/*
 * An indirect vdev corresponds to a vdev that has been removed.  Since
 * we cannot rewrite block pointers of snapshots, etc., we keep a
 * mapping from old location on the removed device to the new location
 * on another device in the pool and use this mapping whenever we need
 * to access the DVA.  Unfortunately, this mapping did not respect
 * logical block boundaries when it was first created, and so a DVA on
 * this indirect vdev may be "split" into multiple sections that each
 * map to a different location.  As a consequence, not all DVAs can be
 * translated to an equivalent new DVA.  Instead we must provide a
 * "vdev_remap" operation that executes a callback on each contiguous
 * segment of the new location.  This function is used in multiple ways:
 *
 *  - i/os to this vdev use the callback to determine where the
 *    data is now located, and issue child i/os for each segment's new
 *    location.
 *
 *  - frees and claims to this vdev use the callback to free or claim
 *    each mapped segment.  (Note that we don't actually need to claim
 *    log blocks on indirect vdevs, because we don't allocate to
 *    removing vdevs.  However, zdb uses zio_claim() for its leak
 *    detection.)
 */
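
/*
 * Illustrative sketch of a split DVA (hypothetical numbers, not taken
 * from a real pool): suppose a 48KB allocation at offset 0x1000 of the
 * removed vdev was copied in two pieces during removal, so the mapping
 * contains:
 *
 *   src offset 0x1000, size 32KB -> new vdev 3, offset 0x8000
 *   src offset 0x9000, size 16KB -> new vdev 5, offset 0x2000
 *
 * A vdev_remap of <this vdev, 0x1000, 48KB> would then invoke the
 * callback once per contiguous segment of the new location:
 *
 *   func(split_offset 0,    vdev 3, offset 0x8000, size 32KB, arg);
 *   func(split_offset 32KB, vdev 5, offset 0x2000, size 16KB, arg);
 */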

/*
 * "Big theory statement" for how we mark blocks obsolete.
 *
 * When a block on an indirect vdev is freed or remapped, a section of
 * that vdev's mapping may no longer be referenced (aka "obsolete").  We
 * keep track of how much of each mapping entry is obsolete.  When
 * an entry becomes completely obsolete, we can remove it, thus reducing
 * the memory used by the mapping.  The complete picture of obsolescence
 * is given by the following data structures, described below:
 *  - the entry-specific obsolete count
 *  - the vdev-specific obsolete spacemap
 *  - the pool-specific obsolete bpobj
 *
 * == On disk data structures used ==
 *
 * We track the obsolete space for the pool using several objects.  Each
 * of these objects is created on demand and freed when no longer
 * needed, and is assumed to be empty if it does not exist.
 * SPA_FEATURE_OBSOLETE_COUNTS includes the count of these objects.
 *
 *  - Each vic_mapping_object (associated with an indirect vdev) can
 *    have a vimp_counts_object.  This is an array of uint32_t's
 *    with the same number of entries as the vic_mapping_object.  When
 *    the mapping is condensed, entries from the vic_obsolete_sm_object
 *    (see below) are folded into the counts.  Therefore, each
 *    obsolete_counts entry tells us the number of bytes in the
 *    corresponding mapping entry that were not referenced when the
 *    mapping was last condensed.
 *
 *  - Each indirect or removing vdev can have a vic_obsolete_sm_object.
 *    This is a space map containing an alloc entry for every DVA that
 *    has been obsoleted since the last time this indirect vdev was
 *    condensed.  We use this object in order to improve performance
 *    when marking a DVA as obsolete.  Instead of modifying an arbitrary
 *    offset of the vimp_counts_object, we only need to append an entry
 *    to the end of this object.  When a DVA becomes obsolete, it is
 *    added to the obsolete space map.  This happens when the DVA is
 *    freed, remapped and not referenced by a snapshot, or the last
 *    snapshot referencing it is destroyed.
 *
 *  - Each dataset can have a ds_remap_deadlist object.  This is a
 *    deadlist object containing all blocks that were remapped in this
 *    dataset but referenced in a previous snapshot.  Blocks can *only*
 *    appear on this list if they were remapped (dsl_dataset_block_remapped);
 *    blocks that were killed in a head dataset are put on the normal
 *    ds_deadlist and marked obsolete when they are freed.
 *
 *  - The pool can have a dp_obsolete_bpobj.  This is a list of blocks
 *    in the pool that need to be marked obsolete.  When a snapshot is
 *    destroyed, we move some of the ds_remap_deadlist to the obsolete
 *    bpobj (see dsl_destroy_snapshot_handle_remaps()).  We then
 *    asynchronously process the obsolete bpobj, moving its entries to
 *    the specific vdevs' obsolete space maps.
 *
 * == Summary of how we mark blocks as obsolete ==
 *
 * - When freeing a block: if any DVA is on an indirect vdev, append to
 *   vic_obsolete_sm_object.
 * - When remapping a block, add dva to ds_remap_deadlist (if prev snap
 *   references; otherwise append to vic_obsolete_sm_object).
 * - When freeing a snapshot: move parts of ds_remap_deadlist to
 *   dp_obsolete_bpobj (same algorithm as ds_deadlist).
 * - When syncing the spa: process dp_obsolete_bpobj, moving ranges to
 *   individual vdev's vic_obsolete_sm_object.
 */
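
/*
 * Hypothetical walk-through of the above: a block with a DVA on
 * indirect vdev 2 is freed while no snapshot references it, so the
 * free simply appends an alloc record to vdev 2's
 * vic_obsolete_sm_object.  Had the block instead been remapped while
 * an earlier snapshot still referenced it, it would first sit on the
 * dataset's ds_remap_deadlist; destroying that snapshot would move it
 * to dp_obsolete_bpobj, and a later spa sync would migrate it from
 * there into vdev 2's obsolete space map.
 */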

/*
 * "Big theory statement" for how we condense indirect vdevs.
 *
 * Condensing an indirect vdev's mapping is the process of determining
 * the precise counts of obsolete space for each mapping entry (by
 * integrating the obsolete spacemap into the obsolete counts) and
 * writing out a new mapping that contains only referenced entries.
 *
 * We condense a vdev when we expect the mapping to shrink (see
 * vdev_indirect_should_condense()), but only perform one condense at a
 * time to limit the memory usage.  In addition, we use a separate
 * open-context thread (spa_condense_indirect_thread) to incrementally
 * create the new mapping object in a way that minimizes the impact on
 * the rest of the system.
 *
 * == Generating a new mapping ==
 *
 * To generate a new mapping, we follow these steps:
 *
 * 1. Save the old obsolete space map and create a new mapping object
 *    (see spa_condense_indirect_start_sync()).  This initializes the
 *    spa_condensing_indirect_phys with the "previous obsolete space map",
 *    which is now read only.  Newly obsolete DVAs will be added to a
 *    new (initially empty) obsolete space map, and will not be
 *    considered as part of this condense operation.
 *
 * 2. Construct in memory the precise counts of obsolete space for each
 *    mapping entry, by incorporating the obsolete space map into the
 *    counts.  (See vdev_indirect_mapping_load_obsolete_{counts,spacemap}().)
 *
 * 3. Iterate through each mapping entry, writing to the new mapping any
 *    entries that are not completely obsolete (i.e. which don't have
 *    obsolete count == mapping length).  (See
 *    spa_condense_indirect_generate_new_mapping().)
 *
 * 4. Destroy the old mapping object and switch over to the new one
 *    (spa_condense_indirect_complete_sync).
 *
 * == Restarting from failure ==
 *
 * To restart the condense when we import/open the pool, we must start
 * at the 2nd step above: reconstruct the precise counts in memory,
 * based on the space map + counts.  Then in the 3rd step, we start
 * iterating where we left off: at vimp_max_offset of the new mapping
 * object.
 */
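
/*
 * For example (hypothetical), if a condense is interrupted after the
 * new mapping has been populated with entries covering source offsets
 * [0, 10GB), then on reopen vimp_max_offset of the new mapping is
 * 10GB: step 2 rebuilds the counts from the saved space map, and step
 * 3 resumes at the first old-mapping entry at or beyond offset 10GB.
 */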

int zfs_condense_indirect_vdevs_enable = B_TRUE;

/*
 * Condense if at least this percent of the bytes in the mapping is
 * obsolete.  With the default of 25%, the amount of space mapped
 * will be reduced to 1% of its original size after at most 16
 * condenses.  Higher values will condense less often (causing less
 * i/o); lower values will reduce the mapping size more quickly.
 */
int zfs_indirect_condense_obsolete_pct = 25;
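
/*
 * The "at most 16 condenses" figure above follows from each condense
 * removing at least 25% of the mapped bytes: 0.75^16 ~= 0.01, so
 * sixteen such reductions shrink the mapping to roughly 1% of its
 * original size.
 */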

/*
 * Condense if the obsolete space map takes up more than this amount of
 * space on disk (logically).  This limits the amount of disk space
 * consumed by the obsolete space map; the default of 1GB is small enough
 * that we typically don't mind "wasting" it.
 */
unsigned long zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024;

/*
 * Don't bother condensing if the mapping uses less than this amount of
 * memory.  The default of 128KB is considered a "trivial" amount of
 * memory and not worth reducing.
 */
unsigned long zfs_condense_min_mapping_bytes = 128 * 1024;

/*
 * This is used by the test suite so that it can ensure that certain
 * actions happen while in the middle of a condense (which might otherwise
 * complete too quickly).  If used to reduce the performance impact of
 * condensing in production, a maximum value of 1 should be sufficient.
 */
int zfs_condense_indirect_commit_entry_delay_ms = 0;

/*
 * If an indirect split block contains more than this many possible unique
 * combinations when being reconstructed, consider it too computationally
 * expensive to check them all. Instead, try at most 100 randomly-selected
 * combinations each time the block is accessed.  This allows all segment
 * copies to participate fairly in the reconstruction when all combinations
 * cannot be checked and prevents repeated use of one bad copy.
 */
int zfs_reconstruct_indirect_combinations_max = 4096;
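
/*
 * The combination count grows as (copies per segment) ^ (number of
 * segments): e.g. a block split into 12 segments, each with 2 mirror
 * copies, has 2^12 == 4096 reconstruction candidates, exactly the
 * default cap above; one more split segment would exceed the limit and
 * trigger the randomized sampling described above.
 */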

/*
 * Enable to simulate damaged segments and validate reconstruction.  This
 * is intentionally not exposed as a module parameter.
 */
unsigned long zfs_reconstruct_indirect_damage_fraction = 0;

/*
 * The indirect_child_t represents the vdev that we will read from, when we
 * need to read all copies of the data (e.g. for scrub or reconstruction).
 * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
 * ic_vdev is the same as is_vdev.  However, for mirror top-level vdevs,
 * ic_vdev is a child of the mirror.
 */
typedef struct indirect_child {
	abd_t *ic_data;
	vdev_t *ic_vdev;

	/*
	 * ic_duplicate is NULL when the ic_data contents are unique; once it
	 * is determined to be a duplicate, it references the primary child.
	 */
	struct indirect_child *ic_duplicate;
	list_node_t ic_node; /* node on is_unique_child */
} indirect_child_t;

/*
 * The indirect_split_t represents one mapped segment of an i/o to the
 * indirect vdev. For non-split (contiguously-mapped) blocks, there will be
 * only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
 * For split blocks, there will be several of these.
 */
typedef struct indirect_split {
	list_node_t is_node; /* link on iv_splits */

	/*
	 * is_split_offset is the offset into the i/o.
	 * This is the sum of the previous splits' is_size's.
	 */
	uint64_t is_split_offset;

	vdev_t *is_vdev; /* top-level vdev */
	uint64_t is_target_offset; /* offset on is_vdev */
	uint64_t is_size;
	int is_children; /* number of entries in is_child[] */
	int is_unique_children; /* number of entries in is_unique_child */
	list_t is_unique_child;

	/*
	 * is_good_child is the child that we are currently using to
	 * attempt reconstruction.
	 */
	indirect_child_t *is_good_child;

	indirect_child_t is_child[1]; /* variable-length */
} indirect_split_t;
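
/*
 * For instance (hypothetical sizes), a 128KB i/o split into segments
 * of 48KB, 48KB and 32KB yields three indirect_split_t's with
 * is_split_offset 0, 48KB and 96KB respectively; the final segment's
 * is_split_offset + is_size equals the i/o size (128KB here).
 */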

/*
 * The indirect_vsd_t is associated with each i/o to the indirect vdev.
 * It is the "Vdev-Specific Data" in the zio_t's io_vsd.
 */
typedef struct indirect_vsd {
	boolean_t iv_split_block;
	boolean_t iv_reconstruct;
	uint64_t iv_unique_combinations;
	uint64_t iv_attempts;
	uint64_t iv_attempts_max;

	list_t iv_splits; /* list of indirect_split_t's */
} indirect_vsd_t;

static void
vdev_indirect_map_free(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;

	indirect_split_t *is;
	while ((is = list_head(&iv->iv_splits)) != NULL) {
		for (int c = 0; c < is->is_children; c++) {
			indirect_child_t *ic = &is->is_child[c];
			if (ic->ic_data != NULL)
				abd_free(ic->ic_data);
		}
		list_remove(&iv->iv_splits, is);

		indirect_child_t *ic;
		while ((ic = list_head(&is->is_unique_child)) != NULL)
			list_remove(&is->is_unique_child, ic);

		list_destroy(&is->is_unique_child);

		kmem_free(is,
		    offsetof(indirect_split_t, is_child[is->is_children]));
	}
	kmem_free(iv, sizeof (*iv));
}

static const zio_vsd_ops_t vdev_indirect_vsd_ops = {
	.vsd_free = vdev_indirect_map_free,
	.vsd_cksum_report = zio_vsd_default_cksum_report
};

/*
 * Mark the given offset and size as being obsolete.
 */
void
vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, !=, 0);
	ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
	ASSERT(size > 0);
	VERIFY(vdev_indirect_mapping_entry_for_offset(
	    vd->vdev_indirect_mapping, offset) != NULL);

	if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		mutex_enter(&vd->vdev_obsolete_lock);
		range_tree_add(vd->vdev_obsolete_segments, offset, size);
		mutex_exit(&vd->vdev_obsolete_lock);
		vdev_dirty(vd, 0, NULL, spa_syncing_txg(spa));
	}
}

/*
 * Mark the DVA vdev_id:offset:size as being obsolete in the given tx. This
 * wrapper is provided because the DMU does not know about vdev_t's and
 * cannot directly call vdev_indirect_mark_obsolete.
 */
void
spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	ASSERT(dmu_tx_is_syncing(tx));

	/* The DMU can only remap indirect vdevs. */
	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
	vdev_indirect_mark_obsolete(vd, offset, size);
}

static spa_condensing_indirect_t *
spa_condensing_indirect_create(spa_t *spa)
{
	spa_condensing_indirect_phys_t *scip =
	    &spa->spa_condensing_indirect_phys;
	spa_condensing_indirect_t *sci = kmem_zalloc(sizeof (*sci), KM_SLEEP);
	objset_t *mos = spa->spa_meta_objset;

	for (int i = 0; i < TXG_SIZE; i++) {
		list_create(&sci->sci_new_mapping_entries[i],
		    sizeof (vdev_indirect_mapping_entry_t),
		    offsetof(vdev_indirect_mapping_entry_t, vime_node));
	}

	sci->sci_new_mapping =
	    vdev_indirect_mapping_open(mos, scip->scip_next_mapping_object);

	return (sci);
}

static void
spa_condensing_indirect_destroy(spa_condensing_indirect_t *sci)
{
	for (int i = 0; i < TXG_SIZE; i++)
		list_destroy(&sci->sci_new_mapping_entries[i]);

	if (sci->sci_new_mapping != NULL)
		vdev_indirect_mapping_close(sci->sci_new_mapping);

	kmem_free(sci, sizeof (*sci));
}

boolean_t
vdev_indirect_should_condense(vdev_t *vd)
{
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	spa_t *spa = vd->vdev_spa;

	ASSERT(dsl_pool_sync_context(spa->spa_dsl_pool));

	if (!zfs_condense_indirect_vdevs_enable)
		return (B_FALSE);

	/*
	 * We can only condense one indirect vdev at a time.
	 */
	if (spa->spa_condensing_indirect != NULL)
		return (B_FALSE);

	if (spa_shutting_down(spa))
		return (B_FALSE);

	/*
	 * The mapping object size must not change while we are
	 * condensing, so we can only condense indirect vdevs
	 * (not vdevs that are still in the middle of being removed).
	 */
	if (vd->vdev_ops != &vdev_indirect_ops)
		return (B_FALSE);

	/*
	 * If nothing new has been marked obsolete, there is no
	 * point in condensing.
	 */
	uint64_t obsolete_sm_obj __maybe_unused;
	ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
	if (vd->vdev_obsolete_sm == NULL) {
		ASSERT0(obsolete_sm_obj);
		return (B_FALSE);
	}

	ASSERT(vd->vdev_obsolete_sm != NULL);

	ASSERT3U(obsolete_sm_obj, ==, space_map_object(vd->vdev_obsolete_sm));

	uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim);
	uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm);
	uint64_t mapping_size = vdev_indirect_mapping_size(vim);
	uint64_t obsolete_sm_size = space_map_length(vd->vdev_obsolete_sm);

	ASSERT3U(bytes_obsolete, <=, bytes_mapped);

	/*
	 * If a high percentage of the bytes that are mapped have become
	 * obsolete, condense (unless the mapping is already small enough).
	 * This has a good chance of reducing the amount of memory used
	 * by the mapping.
	 */
	if (bytes_obsolete * 100 / bytes_mapped >=
	    zfs_indirect_condense_obsolete_pct &&
	    mapping_size > zfs_condense_min_mapping_bytes) {
		zfs_dbgmsg("should condense vdev %llu because obsolete "
		    "spacemap covers %d%% of %lluMB mapping",
		    (u_longlong_t)vd->vdev_id,
		    (int)(bytes_obsolete * 100 / bytes_mapped),
		    (u_longlong_t)bytes_mapped / 1024 / 1024);
		return (B_TRUE);
	}

	/*
	 * If the obsolete space map takes up too much space on disk,
	 * condense in order to free up this disk space.
	 */
	if (obsolete_sm_size >= zfs_condense_max_obsolete_bytes) {
		zfs_dbgmsg("should condense vdev %llu because obsolete sm "
		    "length %lluMB >= max size %lluMB",
		    (u_longlong_t)vd->vdev_id,
		    (u_longlong_t)obsolete_sm_size / 1024 / 1024,
		    (u_longlong_t)zfs_condense_max_obsolete_bytes /
		    1024 / 1024);
		return (B_TRUE);
	}

	return (B_FALSE);
}
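
/*
 * Concretely (hypothetical numbers): with the defaults above, a vdev
 * mapping 10GB of which 3GB (30% >= 25%) is obsolete, and whose
 * in-core mapping exceeds 128KB, condenses via the first test; a vdev
 * whose obsolete space map has grown to 1GB on disk condenses via the
 * second test even if its obsolete percentage is still low.
 */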

/*
 * This sync task completes (finishes) a condense, deleting the old
 * mapping and replacing it with the new one.
 */
static void
spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx)
{
	spa_condensing_indirect_t *sci = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	spa_condensing_indirect_phys_t *scip =
	    &spa->spa_condensing_indirect_phys;
	vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev);
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
	objset_t *mos = spa->spa_meta_objset;
	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
	uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping);
	uint64_t new_count =
	    vdev_indirect_mapping_num_entries(sci->sci_new_mapping);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
	ASSERT3P(sci, ==, spa->spa_condensing_indirect);
	for (int i = 0; i < TXG_SIZE; i++) {
		ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
	}
	ASSERT(vic->vic_mapping_object != 0);
	ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
	ASSERT(scip->scip_next_mapping_object != 0);
	ASSERT(scip->scip_prev_obsolete_sm_object != 0);

	/*
	 * Reset vdev_indirect_mapping to refer to the new object.
	 */
	rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER);
	vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
	vd->vdev_indirect_mapping = sci->sci_new_mapping;
	rw_exit(&vd->vdev_indirect_rwlock);

	sci->sci_new_mapping = NULL;
	vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
	vic->vic_mapping_object = scip->scip_next_mapping_object;
	scip->scip_next_mapping_object = 0;

	space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx);
	spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
	scip->scip_prev_obsolete_sm_object = 0;

	scip->scip_vdev = 0;

	VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_CONDENSING_INDIRECT, tx));
	spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
	spa->spa_condensing_indirect = NULL;

	zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
	    "new mapping object %llu has %llu entries "
	    "(was %llu entries)",
	    vd->vdev_id, dmu_tx_get_txg(tx), vic->vic_mapping_object,
	    new_count, old_count);

	vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * This sync task appends entries to the new mapping object.
 */
static void
spa_condense_indirect_commit_sync(void *arg, dmu_tx_t *tx)
{
	spa_condensing_indirect_t *sci = arg;
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa __maybe_unused = dmu_tx_pool(tx)->dp_spa;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT3P(sci, ==, spa->spa_condensing_indirect);

	vdev_indirect_mapping_add_entries(sci->sci_new_mapping,
	    &sci->sci_new_mapping_entries[txg & TXG_MASK], tx);
	ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));
}

/*
 * Open-context function to add one entry to the new mapping.  The new
 * entry will be remembered and written from syncing context.
 */
static void
spa_condense_indirect_commit_entry(spa_t *spa,
    vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count)
{
	spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;

	ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst));

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	/*
	 * If we are the first entry committed this txg, kick off the sync
	 * task to write to the MOS on our behalf.
	 */
	if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) {
		dsl_sync_task_nowait(dmu_tx_pool(tx),
		    spa_condense_indirect_commit_sync, sci,
		    0, ZFS_SPACE_CHECK_NONE, tx);
	}

	vdev_indirect_mapping_entry_t *vime =
	    kmem_alloc(sizeof (*vime), KM_SLEEP);
	vime->vime_mapping = *vimep;
	vime->vime_obsolete_count = count;
	list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime);

	dmu_tx_commit(tx);
}

static void
spa_condense_indirect_generate_new_mapping(vdev_t *vd,
    uint32_t *obsolete_counts, uint64_t start_index, zthr_t *zthr)
{
	spa_t *spa = vd->vdev_spa;
	uint64_t mapi = start_index;
	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
	uint64_t old_num_entries =
	    vdev_indirect_mapping_num_entries(old_mapping);

	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
	ASSERT3U(vd->vdev_id, ==, spa->spa_condensing_indirect_phys.scip_vdev);

	zfs_dbgmsg("starting condense of vdev %llu from index %llu",
	    (u_longlong_t)vd->vdev_id,
	    (u_longlong_t)mapi);

	while (mapi < old_num_entries) {

		if (zthr_iscancelled(zthr)) {
			zfs_dbgmsg("pausing condense of vdev %llu "
			    "at index %llu", (u_longlong_t)vd->vdev_id,
			    (u_longlong_t)mapi);
			break;
		}

		vdev_indirect_mapping_entry_phys_t *entry =
		    &old_mapping->vim_entries[mapi];
		uint64_t entry_size = DVA_GET_ASIZE(&entry->vimep_dst);
		ASSERT3U(obsolete_counts[mapi], <=, entry_size);
		if (obsolete_counts[mapi] < entry_size) {
			spa_condense_indirect_commit_entry(spa, entry,
			    obsolete_counts[mapi]);

			/*
			 * This delay may be requested for testing, debugging,
			 * or performance reasons.
			 */
			hrtime_t now = gethrtime();
			hrtime_t sleep_until = now + MSEC2NSEC(
			    zfs_condense_indirect_commit_entry_delay_ms);
			zfs_sleep_until(sleep_until);
		}

		mapi++;
	}
}

/* ARGSUSED */
static boolean_t
spa_condense_indirect_thread_check(void *arg, zthr_t *zthr)
{
	spa_t *spa = arg;

	return (spa->spa_condensing_indirect != NULL);
}

/* ARGSUSED */
static void
spa_condense_indirect_thread(void *arg, zthr_t *zthr)
{
	spa_t *spa = arg;
	vdev_t *vd;

	ASSERT3P(spa->spa_condensing_indirect, !=, NULL);
	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	vd = vdev_lookup_top(spa, spa->spa_condensing_indirect_phys.scip_vdev);
	ASSERT3P(vd, !=, NULL);
	spa_config_exit(spa, SCL_VDEV, FTAG);

	spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
	spa_condensing_indirect_phys_t *scip =
	    &spa->spa_condensing_indirect_phys;
	uint32_t *counts;
	uint64_t start_index;
	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
	space_map_t *prev_obsolete_sm = NULL;

	ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
	ASSERT(scip->scip_next_mapping_object != 0);
	ASSERT(scip->scip_prev_obsolete_sm_object != 0);
	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

	for (int i = 0; i < TXG_SIZE; i++) {
		/*
		 * The list must start out empty in order for the
		 * _commit_sync() sync task to be properly registered
		 * on the first call to _commit_entry(); so it's wise
		 * to double check and ensure we actually are starting
		 * with empty lists.
		 */
		ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
	}

	VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
	    scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
	counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping);
	if (prev_obsolete_sm != NULL) {
		vdev_indirect_mapping_load_obsolete_spacemap(old_mapping,
		    counts, prev_obsolete_sm);
	}
	space_map_close(prev_obsolete_sm);

	/*
	 * Generate new mapping.  Determine what index to continue from
	 * based on the max offset that we've already written in the
	 * new mapping.
	 */
	uint64_t max_offset =
	    vdev_indirect_mapping_max_offset(sci->sci_new_mapping);
	if (max_offset == 0) {
		/* We haven't written anything to the new mapping yet. */
		start_index = 0;
	} else {
		/*
		 * Pick up from where we left off. _entry_for_offset()
		 * returns a pointer into the vim_entries array. If
		 * max_offset is greater than any of the mappings
		 * contained in the table, NULL will be returned and
		 * that indicates we've exhausted our iteration of the
		 * old_mapping.
		 */

		vdev_indirect_mapping_entry_phys_t *entry =
		    vdev_indirect_mapping_entry_for_offset_or_next(old_mapping,
		    max_offset);

		if (entry == NULL) {
			/*
			 * We've already written the whole new mapping.
			 * This special value will cause us to skip the
			 * generate_new_mapping step and just do the sync
			 * task to complete the condense.
			 */
			start_index = UINT64_MAX;
		} else {
			start_index = entry - old_mapping->vim_entries;
			ASSERT3U(start_index, <,
			    vdev_indirect_mapping_num_entries(old_mapping));
		}
	}

	spa_condense_indirect_generate_new_mapping(vd, counts,
	    start_index, zthr);

	vdev_indirect_mapping_free_obsolete_counts(old_mapping, counts);

	/*
	 * If the zthr has received a cancellation signal while running
	 * in generate_new_mapping() or at any point after that, then bail
	 * early. We don't want to complete the condense if the spa is
	 * shutting down.
	 */
	if (zthr_iscancelled(zthr))
		return;

	VERIFY0(dsl_sync_task(spa_name(spa), NULL,
	    spa_condense_indirect_complete_sync, sci, 0,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED));
}

/*
 * Sync task to begin the condensing process.
 */
void
spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
{
	spa_t *spa = vd->vdev_spa;
	spa_condensing_indirect_phys_t *scip =
	    &spa->spa_condensing_indirect_phys;

	ASSERT0(scip->scip_next_mapping_object);
	ASSERT0(scip->scip_prev_obsolete_sm_object);
	ASSERT0(scip->scip_vdev);
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS));
	ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping));

	uint64_t obsolete_sm_obj;
	VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
	ASSERT3U(obsolete_sm_obj, !=, 0);

	scip->scip_vdev = vd->vdev_id;
	scip->scip_next_mapping_object =
	    vdev_indirect_mapping_alloc(spa->spa_meta_objset, tx);

	scip->scip_prev_obsolete_sm_object = obsolete_sm_obj;

	/*
	 * We don't need to allocate a new space map object, since
	 * vdev_indirect_sync_obsolete will allocate one when needed.
	 */
	space_map_close(vd->vdev_obsolete_sm);
	vd->vdev_obsolete_sm = NULL;
	VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));

	VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
	    sizeof (*scip) / sizeof (uint64_t), scip, tx));

	ASSERT3P(spa->spa_condensing_indirect, ==, NULL);
	spa->spa_condensing_indirect = spa_condensing_indirect_create(spa);

	zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
	    "posm=%llu nm=%llu",
	    vd->vdev_id, dmu_tx_get_txg(tx),
	    (u_longlong_t)scip->scip_prev_obsolete_sm_object,
	    (u_longlong_t)scip->scip_next_mapping_object);

	zthr_wakeup(spa->spa_condense_zthr);
}

/*
 * Sync to the given vdev's obsolete space map any segments that are no longer
 * referenced as of the given txg.
 *
 * If the obsolete space map doesn't exist yet, create and open it.
 */
void
vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx)
{
	spa_t *spa = vd->vdev_spa;
	vdev_indirect_config_t *vic __maybe_unused = &vd->vdev_indirect_config;

	ASSERT3U(vic->vic_mapping_object, !=, 0);
	ASSERT(range_tree_space(vd->vdev_obsolete_segments) > 0);
	ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS));

	uint64_t obsolete_sm_object;
	VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
	if (obsolete_sm_object == 0) {
		obsolete_sm_object = space_map_alloc(spa->spa_meta_objset,
		    zfs_vdev_standard_sm_blksz, tx);

		ASSERT(vd->vdev_top_zap != 0);
		VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM,
		    sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx));
		ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
		ASSERT3U(obsolete_sm_object, !=, 0);

		spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
		VERIFY0(space_map_open(&vd->vdev_obsolete_sm,
		    spa->spa_meta_objset, obsolete_sm_object,
		    0, vd->vdev_asize, 0));
	}

	ASSERT(vd->vdev_obsolete_sm != NULL);
	ASSERT3U(obsolete_sm_object, ==,
	    space_map_object(vd->vdev_obsolete_sm));

	space_map_write(vd->vdev_obsolete_sm,
	    vd->vdev_obsolete_segments, SM_ALLOC, SM_NO_VDEVID, tx);
	range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
}

int
spa_condense_init(spa_t *spa)
{
	int error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
	    sizeof (spa->spa_condensing_indirect_phys) / sizeof (uint64_t),
	    &spa->spa_condensing_indirect_phys);
	if (error == 0) {
		if (spa_writeable(spa)) {
			spa->spa_condensing_indirect =
			    spa_condensing_indirect_create(spa);
		}
		return (0);
	} else if (error == ENOENT) {
		return (0);
	} else {
		return (error);
	}
}

void
spa_condense_fini(spa_t *spa)
{
	if (spa->spa_condensing_indirect != NULL) {
		spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
		spa->spa_condensing_indirect = NULL;
	}
}

void
spa_start_indirect_condensing_thread(spa_t *spa)
{
	ASSERT3P(spa->spa_condense_zthr, ==, NULL);
	spa->spa_condense_zthr = zthr_create("z_indirect_condense",
	    spa_condense_indirect_thread_check,
	    spa_condense_indirect_thread, spa);
}

/*
 * Gets the obsolete spacemap object from the vdev's ZAP.  On success sm_obj
 * will contain either the obsolete spacemap object or zero if none exists
 * (a missing ZAP entry is not treated as an error).  All other errors are
 * returned to the caller.
 */
int
vdev_obsolete_sm_object(vdev_t *vd, uint64_t *sm_obj)
{
	ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

	if (vd->vdev_top_zap == 0) {
		*sm_obj = 0;
		return (0);
	}

	int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (uint64_t), 1, sm_obj);
	if (error == ENOENT) {
		*sm_obj = 0;
		error = 0;
	}

	return (error);
}

/*
 * Gets the "obsolete counts are precise" entry from the vdev's ZAP.
 * On success are_precise will be set to reflect if the counts are precise.
 * All other errors are returned to the caller.
 */
int
vdev_obsolete_counts_are_precise(vdev_t *vd, boolean_t *are_precise)
{
	ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

	if (vd->vdev_top_zap == 0) {
		*are_precise = B_FALSE;
		return (0);
	}

	uint64_t val = 0;
	int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (val), 1, &val);
	if (error == 0) {
		*are_precise = (val != 0);
	} else if (error == ENOENT) {
		*are_precise = B_FALSE;
		error = 0;
	}

	return (error);
}

/* ARGSUSED */
static void
vdev_indirect_close(vdev_t *vd)
{
}

/* ARGSUSED */
static int
vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	*psize = *max_psize = vd->vdev_asize +
	    VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
	*logical_ashift = vd->vdev_ashift;
	*physical_ashift = vd->vdev_physical_ashift;
	return (0);
}

typedef struct remap_segment {
	vdev_t *rs_vd;
	uint64_t rs_offset;
	uint64_t rs_asize;
	uint64_t rs_split_offset;
	list_node_t rs_node;
} remap_segment_t;

static remap_segment_t *
rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
{
	remap_segment_t *rs = kmem_alloc(sizeof (remap_segment_t), KM_SLEEP);
	rs->rs_vd = vd;
	rs->rs_offset = offset;
	rs->rs_asize = asize;
	rs->rs_split_offset = split_offset;
	return (rs);
}

/*
 * Given an indirect vdev and an extent on that vdev, it duplicates the
 * physical entries of the indirect mapping that correspond to the extent
 * to a new array and returns a pointer to it. In addition, copied_entries
 * is populated with the number of mapping entries that were duplicated.
 *
 * Note that the function assumes that the caller holds vdev_indirect_rwlock.
 * This ensures that the mapping won't change due to condensing as we
 * copy over its contents.
 *
 * Finally, since we are doing an allocation, it is up to the caller to
 * free the array allocated in this function.
 */
static vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
    uint64_t asize, uint64_t *copied_entries)
{
	vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	uint64_t entries = 0;

	ASSERT(RW_READ_HELD(&vd->vdev_indirect_rwlock));

	vdev_indirect_mapping_entry_phys_t *first_mapping =
	    vdev_indirect_mapping_entry_for_offset(vim, offset);
	ASSERT3P(first_mapping, !=, NULL);

	vdev_indirect_mapping_entry_phys_t *m = first_mapping;
	while (asize > 0) {
		uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);

		ASSERT3U(offset, >=, DVA_MAPPING_GET_SRC_OFFSET(m));
		ASSERT3U(offset, <, DVA_MAPPING_GET_SRC_OFFSET(m) + size);

		uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
		uint64_t inner_size = MIN(asize, size - inner_offset);

		offset += inner_size;
		asize -= inner_size;
		entries++;
		m++;
	}

	size_t copy_length = entries * sizeof (*first_mapping);
	duplicate_mappings = kmem_alloc(copy_length, KM_SLEEP);
	bcopy(first_mapping, duplicate_mappings, copy_length);
	*copied_entries = entries;

	return (duplicate_mappings);
}
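
/*
 * E.g. (hypothetical): for offset 0x5000 and asize 0x6000, if the
 * first matching entry covers source range [0x4000, 0x8000) and the
 * next covers [0x8000, 0x10000), the extent touches both, so two
 * entries are copied out and *copied_entries is set to 2.
 */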

/*
 * Goes through the relevant indirect mappings until it hits a concrete vdev
 * and issues the callback. On the way to the concrete vdev, if any other
 * indirect vdevs are encountered, then the callback will also be called on
 * each of those indirect vdevs. For example, if the segment is mapped to
 * segment A on indirect vdev 1, and then segment A on indirect vdev 1 is
 * mapped to segment B on concrete vdev 2, then the callback will be called on
 * both vdev 1 and vdev 2.
 *
 * While the callback passed to vdev_indirect_remap() is called on every vdev
 * the function encounters, certain callbacks only care about concrete vdevs.
 * These types of callbacks should return immediately and explicitly when they
 * are called on an indirect vdev.
 *
 * Because there is a possibility that a DVA section in the indirect device
 * has been split into multiple sections in our mapping, we keep track
 * of the relevant contiguous segments of the new location (remap_segment_t)
 * in a stack. This way we can call the callback for each of the new sections
 * created by a single section of the indirect device. Note though, that in
 * this scenario the callbacks in each split block won't occur in-order in
 * terms of offset, so callers should not make any assumptions about that.
 *
 * For callbacks that don't handle split blocks and immediately return when
 * they encounter them (as is the case for remap_blkptr_cb), the caller can
 * assume that its callback will be applied from the first indirect vdev
 * encountered to the last one and then the concrete vdev, in that order.
 */
static void
vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize,
    void (*func)(uint64_t, vdev_t *, uint64_t, uint64_t, void *), void *arg)
{
	list_t stack;
	spa_t *spa = vd->vdev_spa;

	list_create(&stack, sizeof (remap_segment_t),
	    offsetof(remap_segment_t, rs_node));

	for (remap_segment_t *rs = rs_alloc(vd, offset, asize, 0);
	    rs != NULL; rs = list_remove_head(&stack)) {
		vdev_t *v = rs->rs_vd;
		uint64_t num_entries = 0;

		ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
		ASSERT(rs->rs_asize > 0);

		/*
		 * Note: As this function can be called from open context
		 * (e.g. zio_read()), we need the following rwlock to
		 * prevent the mapping from being changed by condensing.
		 *
		 * So we grab the lock and we make a copy of the entries
		 * that are relevant to the extent that we are working on.
		 * Once that is done, we drop the lock and iterate over
		 * our copy of the mapping. Once we are done with the
		 * remap segment and we free it, we also free our copy
		 * of the indirect mapping entries that are relevant to it.
		 *
		 * This way we don't need to wait until the function is
		 * finished with a segment, to condense it. In addition, we
		 * don't need a recursive rwlock for the case that a call to
		 * vdev_indirect_remap() needs to call itself (through the
		 * codepath of its callback) for the same vdev in the middle
		 * of its execution.
		 */
		rw_enter(&v->vdev_indirect_rwlock, RW_READER);
		ASSERT3P(v->vdev_indirect_mapping, !=, NULL);

		vdev_indirect_mapping_entry_phys_t *mapping =
		    vdev_indirect_mapping_duplicate_adjacent_entries(v,
		    rs->rs_offset, rs->rs_asize, &num_entries);
		ASSERT3P(mapping, !=, NULL);
		ASSERT3U(num_entries, >, 0);
		rw_exit(&v->vdev_indirect_rwlock);

		for (uint64_t i = 0; i < num_entries; i++) {
			/*
			 * Note: the vdev_indirect_mapping can not change
			 * while we are running.  It only changes while the
			 * removal is in progress, and then only from syncing
			 * context. While a removal is in progress, this
			 * function is only called for frees, which also only
			 * happen from syncing context.
			 */
			vdev_indirect_mapping_entry_phys_t *m = &mapping[i];

			ASSERT3P(m, !=, NULL);
			ASSERT3U(rs->rs_asize, >, 0);

			uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
			uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
			uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);

			ASSERT3U(rs->rs_offset, >=,
			    DVA_MAPPING_GET_SRC_OFFSET(m));
			ASSERT3U(rs->rs_offset, <,
			    DVA_MAPPING_GET_SRC_OFFSET(m) + size);
			ASSERT3U(dst_vdev, !=, v->vdev_id);

			uint64_t inner_offset = rs->rs_offset -
			    DVA_MAPPING_GET_SRC_OFFSET(m);
			uint64_t inner_size =
			    MIN(rs->rs_asize, size - inner_offset);

			vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);
			ASSERT3P(dst_v, !=, NULL);

			if (dst_v->vdev_ops == &vdev_indirect_ops) {
				list_insert_head(&stack,
				    rs_alloc(dst_v, dst_offset + inner_offset,
				    inner_size, rs->rs_split_offset));
			}

			if ((zfs_flags & ZFS_DEBUG_INDIRECT_REMAP) &&
			    IS_P2ALIGNED(inner_size, 2 * SPA_MINBLOCKSIZE)) {
				/*
				 * Note: This clause exists solely for
				 * testing purposes. We use it to ensure that
				 * split blocks work and that the callbacks
				 * using them yield the same result if issued
				 * in reverse order.
				 */
				uint64_t inner_half = inner_size / 2;

				func(rs->rs_split_offset + inner_half, dst_v,
				    dst_offset + inner_offset + inner_half,
				    inner_half, arg);

				func(rs->rs_split_offset, dst_v,
				    dst_offset + inner_offset,
				    inner_half, arg);
			} else {
				func(rs->rs_split_offset, dst_v,
				    dst_offset + inner_offset,
				    inner_size, arg);
			}

			rs->rs_offset += inner_size;
			rs->rs_asize -= inner_size;
			rs->rs_split_offset += inner_size;
		}
		VERIFY0(rs->rs_asize);

		kmem_free(mapping, num_entries * sizeof (*mapping));
		kmem_free(rs, sizeof (remap_segment_t));
	}
	list_destroy(&stack);
}

static void
vdev_indirect_child_io_done(zio_t *zio)
{
	zio_t *pio = zio->io_private;

	mutex_enter(&pio->io_lock);
	pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
	mutex_exit(&pio->io_lock);

	abd_put(zio->io_abd);
}

/*
 * This is a callback for vdev_indirect_remap() which allocates an
 * indirect_split_t for each split segment and adds it to iv_splits.
 */
static void
vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	zio_t *zio = arg;
	indirect_vsd_t *iv = zio->io_vsd;

	ASSERT3P(vd, !=, NULL);

	if (vd->vdev_ops == &vdev_indirect_ops)
		return;

	int n = 1;
	if (vd->vdev_ops == &vdev_mirror_ops)
		n = vd->vdev_children;

	indirect_split_t *is =
	    kmem_zalloc(offsetof(indirect_split_t, is_child[n]), KM_SLEEP);

	is->is_children = n;
	is->is_size = size;
	is->is_split_offset = split_offset;
	is->is_target_offset = offset;
	is->is_vdev = vd;
	list_create(&is->is_unique_child, sizeof (indirect_child_t),
	    offsetof(indirect_child_t, ic_node));

	/*
	 * Note that we only consider multiple copies of the data for
	 * *mirror* vdevs.  We don't for "replacing" or "spare" vdevs, even
	 * though they use the same ops as mirror, because there's only one
	 * "good" copy under the replacing/spare.
	 */
	if (vd->vdev_ops == &vdev_mirror_ops) {
		for (int i = 0; i < n; i++) {
			is->is_child[i].ic_vdev = vd->vdev_child[i];
			list_link_init(&is->is_child[i].ic_node);
		}
	} else {
		is->is_child[0].ic_vdev = vd;
	}

	list_insert_tail(&iv->iv_splits, is);
}

static void
vdev_indirect_read_split_done(zio_t *zio)
{
	indirect_child_t *ic = zio->io_private;

	if (zio->io_error != 0) {
		/*
		 * Clear ic_data to indicate that we do not have data for this
		 * child.
		 */
		abd_free(ic->ic_data);
		ic->ic_data = NULL;
	}
}

/*
 * Issue reads for all copies (mirror children) of all splits.
 */
static void
vdev_indirect_read_all(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);

	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		for (int i = 0; i < is->is_children; i++) {
			indirect_child_t *ic = &is->is_child[i];

			if (!vdev_readable(ic->ic_vdev))
				continue;

			/*
			 * Note, we may read from a child whose DTL
			 * indicates that the data may not be present here.
			 * While this might result in a few i/os that will
			 * likely return incorrect data, it simplifies the
			 * code since we can treat scrub and resilver
			 * identically.  (The incorrect data will be
			 * detected and ignored when we verify the
			 * checksum.)
			 */

			ic->ic_data = abd_alloc_sametype(zio->io_abd,
			    is->is_size);
			ic->ic_duplicate = NULL;

			zio_nowait(zio_vdev_child_io(zio, NULL,
			    ic->ic_vdev, is->is_target_offset, ic->ic_data,
			    is->is_size, zio->io_type, zio->io_priority, 0,
			    vdev_indirect_read_split_done, ic));
		}
	}
	iv->iv_reconstruct = B_TRUE;
}

static void
vdev_indirect_io_start(zio_t *zio)
{
	spa_t *spa __maybe_unused = zio->io_spa;
	indirect_vsd_t *iv = kmem_zalloc(sizeof (*iv), KM_SLEEP);
	list_create(&iv->iv_splits,
	    sizeof (indirect_split_t), offsetof(indirect_split_t, is_node));

	zio->io_vsd = iv;
	zio->io_vsd_ops = &vdev_indirect_vsd_ops;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
	if (zio->io_type != ZIO_TYPE_READ) {
		ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
		/*
		 * Note: this code can handle other kinds of writes,
		 * but we don't expect them.
		 */
		ASSERT((zio->io_flags & (ZIO_FLAG_SELF_HEAL |
		    ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)) != 0);
	}

	vdev_indirect_remap(zio->io_vd, zio->io_offset, zio->io_size,
	    vdev_indirect_gather_splits, zio);

	indirect_split_t *first = list_head(&iv->iv_splits);
	if (first->is_size == zio->io_size) {
		/*
		 * This is not a split block; we are pointing to the entire
		 * data, which will checksum the same as the original data.
		 * Pass the BP down so that the child i/o can verify the
		 * checksum, and try a different location if available
		 * (e.g. on a mirror).
		 *
		 * While this special case could be handled the same as the
		 * general (split block) case, doing it this way ensures
		 * that the vast majority of blocks on indirect vdevs
		 * (which are not split) are handled identically to blocks
		 * on non-indirect vdevs.  This allows us to be less strict
		 * about performance in the general (but rare) case.
		 */
		ASSERT0(first->is_split_offset);
		ASSERT3P(list_next(&iv->iv_splits, first), ==, NULL);
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    first->is_vdev, first->is_target_offset,
		    abd_get_offset(zio->io_abd, 0),
		    zio->io_size, zio->io_type, zio->io_priority, 0,
		    vdev_indirect_child_io_done, zio));
	} else {
		iv->iv_split_block = B_TRUE;
		if (zio->io_type == ZIO_TYPE_READ &&
		    zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) {
			/*
			 * Read all copies.  Note that for simplicity,
			 * we don't bother consulting the DTL in the
			 * resilver case.
			 */
			vdev_indirect_read_all(zio);
		} else {
			/*
			 * If this is a read zio, we read one copy of each
			 * split segment, from the top-level vdev.  Since
			 * we don't know the checksum of each split
			 * individually, the child zio can't ensure that
			 * we get the right data. E.g. if it's a mirror,
			 * it will just read from a random (healthy) leaf
			 * vdev. We have to verify the checksum in
			 * vdev_indirect_io_done().
			 *
			 * For write zios, the vdev code will ensure we write
			 * to all children.
			 */
			for (indirect_split_t *is = list_head(&iv->iv_splits);
			    is != NULL; is = list_next(&iv->iv_splits, is)) {
				zio_nowait(zio_vdev_child_io(zio, NULL,
				    is->is_vdev, is->is_target_offset,
				    abd_get_offset(zio->io_abd,
				    is->is_split_offset), is->is_size,
				    zio->io_type, zio->io_priority, 0,
				    vdev_indirect_child_io_done, zio));
			}
1380
1381                 }
1382         }
1383
1384         zio_execute(zio);
1385 }
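/*
 * Editorial note (illustrative numbers only, not taken from the code
 * above): if a 128K block's original DVA straddles two mapping entries,
 * the remap callback produces two splits, e.g. 80K at one target offset
 * and 48K at another, possibly on different top-level vdevs.  Each split
 * is read by its own child i/o into its is_split_offset within io_abd,
 * and only the reassembled whole can be checksummed.
 */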
1386
/*
 * Report a checksum error for a child.
 */
static void
vdev_indirect_checksum_error(zio_t *zio,
    indirect_split_t *is, indirect_child_t *ic)
{
	vdev_t *vd = ic->ic_vdev;

	if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
		return;

	mutex_enter(&vd->vdev_stat_lock);
	vd->vdev_stat.vs_checksum_errors++;
	mutex_exit(&vd->vdev_stat_lock);

	zio_bad_cksum_t zbc = {{{ 0 }}};
	abd_t *bad_abd = ic->ic_data;
	abd_t *good_abd = is->is_good_child->ic_data;
	(void) zfs_ereport_post_checksum(zio->io_spa, vd, NULL, zio,
	    is->is_target_offset, is->is_size, good_abd, bad_abd, &zbc);
}

/*
 * Issue repair i/os for any incorrect copies.  We do this by comparing
 * each split segment's correct data (is_good_child's ic_data) with each
 * other copy of the data.  If they differ, then we overwrite the bad data
 * with the good copy.  Note that we do this without regard for the DTLs,
 * which simplifies this code and also issues the optimal number of writes
 * (based on which copies actually read bad data, as opposed to which we
 * think might be wrong).  For the same reason, we always use
 * ZIO_FLAG_SELF_HEAL, to bypass the DTL check in zio_vdev_io_start().
 */
static void
vdev_indirect_repair(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;

	if (!spa_writeable(zio->io_spa))
		return;

	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		for (int c = 0; c < is->is_children; c++) {
			indirect_child_t *ic = &is->is_child[c];
			if (ic == is->is_good_child)
				continue;
			if (ic->ic_data == NULL)
				continue;
			if (ic->ic_duplicate == is->is_good_child)
				continue;

			zio_nowait(zio_vdev_child_io(zio, NULL,
			    ic->ic_vdev, is->is_target_offset,
			    is->is_good_child->ic_data, is->is_size,
			    ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_IO_REPAIR | ZIO_FLAG_SELF_HEAL,
			    NULL, NULL));

			vdev_indirect_checksum_error(zio, is, ic);
		}
	}
}
1455
/*
 * Report checksum errors on all children that we read from.
 */
static void
vdev_indirect_all_checksum_errors(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;

	if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
		return;

	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		for (int c = 0; c < is->is_children; c++) {
			indirect_child_t *ic = &is->is_child[c];

			if (ic->ic_data == NULL)
				continue;

			vdev_t *vd = ic->ic_vdev;

			mutex_enter(&vd->vdev_stat_lock);
			vd->vdev_stat.vs_checksum_errors++;
			mutex_exit(&vd->vdev_stat_lock);

			(void) zfs_ereport_post_checksum(zio->io_spa, vd,
			    NULL, zio, is->is_target_offset, is->is_size,
			    NULL, NULL, NULL);
		}
	}
}

/*
 * Copy the data from each split's currently-selected copy (is_good_child)
 * into the main zio, then validate the checksum.  Returns 0 if the
 * checksum validates successfully.
 */
static int
vdev_indirect_splits_checksum_validate(indirect_vsd_t *iv, zio_t *zio)
{
	zio_bad_cksum_t zbc;

	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {

		ASSERT3P(is->is_good_child->ic_data, !=, NULL);
		ASSERT3P(is->is_good_child->ic_duplicate, ==, NULL);

		abd_copy_off(zio->io_abd, is->is_good_child->ic_data,
		    is->is_split_offset, 0, is->is_size);
	}

	return (zio_checksum_error(zio, &zbc));
}
1509
/*
 * There are relatively few possible combinations making it feasible to
 * deterministically check them all.  We do this by advancing each split's
 * good_child to its next unique version; when a split's list is exhausted
 * we wrap it around and "carry over" into the next split segment (like
 * counting where each digit's base is that split's is_unique_children,
 * so the base can differ per digit).
 */
static int
vdev_indirect_splits_enumerate_all(indirect_vsd_t *iv, zio_t *zio)
{
	boolean_t more = B_TRUE;

	iv->iv_attempts = 0;

	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is))
		is->is_good_child = list_head(&is->is_unique_child);

	while (more == B_TRUE) {
		iv->iv_attempts++;
		more = B_FALSE;

		if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
			return (0);

		for (indirect_split_t *is = list_head(&iv->iv_splits);
		    is != NULL; is = list_next(&iv->iv_splits, is)) {
			is->is_good_child = list_next(&is->is_unique_child,
			    is->is_good_child);
			if (is->is_good_child != NULL) {
				more = B_TRUE;
				break;
			}

			is->is_good_child = list_head(&is->is_unique_child);
		}
	}

	ASSERT3S(iv->iv_attempts, <=, iv->iv_unique_combinations);

	return (SET_ERROR(ECKSUM));
}
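/*
 * Editorial sketch (not part of the build): the enumeration above is
 * mixed-radix ("odometer") counting with one digit per split segment.
 * The self-contained demo below reproduces the same carry logic over
 * plain arrays; every name in it is hypothetical and exists only for
 * illustration.
 */
#if 0
#include <stdio.h>

static void
mixed_radix_demo(const int *bases, int ndigits)
{
	int digits[16] = { 0 };	/* one "digit" per split segment */
	int more = 1;

	while (more) {
		/* Visit one combination (one candidate set of copies). */
		for (int d = 0; d < ndigits; d++)
			printf("%d ", digits[d]);
		printf("\n");

		/* Increment with carry, like enumerate_all() above. */
		more = 0;
		for (int d = 0; d < ndigits; d++) {
			if (++digits[d] < bases[d]) {
				more = 1;
				break;
			}
			digits[d] = 0;	/* wrap and carry into next digit */
		}
	}
}

int
main(void)
{
	/* Three splits with 2, 3, and 2 unique copies: 12 combinations. */
	int bases[] = { 2, 3, 2 };
	mixed_radix_demo(bases, 3);
	return (0);
}
#endif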
1552
/*
 * There are too many combinations to try all of them in a reasonable amount
 * of time.  So try a fixed number of random combinations from the unique
 * split versions, after which we'll consider the block unrecoverable.
 */
static int
vdev_indirect_splits_enumerate_randomly(indirect_vsd_t *iv, zio_t *zio)
{
	iv->iv_attempts = 0;

	while (iv->iv_attempts < iv->iv_attempts_max) {
		iv->iv_attempts++;

		for (indirect_split_t *is = list_head(&iv->iv_splits);
		    is != NULL; is = list_next(&iv->iv_splits, is)) {
			indirect_child_t *ic = list_head(&is->is_unique_child);
			int children = is->is_unique_children;

			for (int i = spa_get_random(children); i > 0; i--)
				ic = list_next(&is->is_unique_child, ic);

			ASSERT3P(ic, !=, NULL);
			is->is_good_child = ic;
		}

		if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
			return (0);
	}

	return (SET_ERROR(ECKSUM));
}
1584
/*
 * This is a validation function for reconstruction.  It randomly selects
 * a good combination, if one can be found, and then it intentionally
 * damages all other segment copies by zeroing them.  This forces the
 * reconstruction algorithm to locate the one remaining known good copy.
 */
static int
vdev_indirect_splits_damage(indirect_vsd_t *iv, zio_t *zio)
{
	int error;

	/* Presume all the copies are unique for initial selection. */
	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		is->is_unique_children = 0;

		for (int i = 0; i < is->is_children; i++) {
			indirect_child_t *ic = &is->is_child[i];
			if (ic->ic_data != NULL) {
				is->is_unique_children++;
				list_insert_tail(&is->is_unique_child, ic);
			}
		}

		if (list_is_empty(&is->is_unique_child)) {
			error = SET_ERROR(EIO);
			goto out;
		}
	}

	/*
	 * Set each is_good_child to a randomly-selected child which
	 * is known to contain validated data.
	 */
	error = vdev_indirect_splits_enumerate_randomly(iv, zio);
	if (error)
		goto out;

	/*
	 * Damage all but the known good copy of each split by zeroing
	 * the others.  This leaves at most two unique copies per
	 * indirect_split_t (the good data and zeros), both of which may
	 * need to be checked in order to reconstruct the block.  Set
	 * iv->iv_attempts_max such that all unique combinations will be
	 * enumerated, but limit the damage to at most 12 indirect splits.
	 */
	iv->iv_attempts_max = 1;

	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		for (int c = 0; c < is->is_children; c++) {
			indirect_child_t *ic = &is->is_child[c];

			if (ic == is->is_good_child)
				continue;
			if (ic->ic_data == NULL)
				continue;

			abd_zero(ic->ic_data, abd_get_size(ic->ic_data));
		}

		iv->iv_attempts_max *= 2;
		if (iv->iv_attempts_max >= (1ULL << 12)) {
			iv->iv_attempts_max = UINT64_MAX;
			break;
		}
	}

out:
	/* Empty the unique children lists so they can be reconstructed. */
	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		indirect_child_t *ic;
		while ((ic = list_head(&is->is_unique_child)) != NULL)
			list_remove(&is->is_unique_child, ic);

		is->is_unique_children = 0;
	}

	return (error);
}
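/*
 * Editorial note on the arithmetic above: after zeroing, each damaged
 * split has at most two unique versions (the good data and all-zeros),
 * so exhausting every combination takes at most 2^n attempts for n
 * damaged splits.  Doubling iv_attempts_max per split tracks exactly
 * that bound; once it reaches 2^12 the cap is lifted to UINT64_MAX and
 * no further splits are damaged.
 */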
1665
/*
 * This function is called when we have read all copies of the data and need
 * to try to find a combination of copies that gives us the right checksum.
 *
 * If we pointed to any mirror vdevs, this effectively does the job of the
 * mirror.  The mirror vdev code can't do its own job because we don't know
 * the checksum of each split segment individually.
 *
 * We have to try every unique combination of copies of split segments, until
 * we find one that checksums correctly.  Duplicate segment copies are first
 * identified and later skipped during reconstruction.  This optimization
 * reduces the search space and ensures that of the remaining combinations
 * at most one is correct.
 *
 * When the total number of combinations is small they can all be checked.
 * For example, if we have 3 segments in the split, and each points to a
 * 2-way mirror with unique copies, we will have the following pieces of data:
 *
 *       |     mirror child
 * split |     [0]        [1]
 * ======|=====================
 *   A   |  data_A_0   data_A_1
 *   B   |  data_B_0   data_B_1
 *   C   |  data_C_0   data_C_1
 *
 * We will try the following (mirror children)^(number of splits) (2^3=8)
 * combinations, which is similar to bitwise-little-endian counting in
 * binary.  In general each "digit" corresponds to a split segment, and the
 * base of each digit is is_unique_children, which can be different for each
 * digit.
 *
 * "low bit"        "high bit"
 *        v                 v
 * data_A_0 data_B_0 data_C_0
 * data_A_1 data_B_0 data_C_0
 * data_A_0 data_B_1 data_C_0
 * data_A_1 data_B_1 data_C_0
 * data_A_0 data_B_0 data_C_1
 * data_A_1 data_B_0 data_C_1
 * data_A_0 data_B_1 data_C_1
 * data_A_1 data_B_1 data_C_1
 *
 * Note that the split segments may be on the same or different top-level
 * vdevs. In either case, we may need to try lots of combinations (see
 * zfs_reconstruct_indirect_combinations_max).  This ensures that if a mirror
 * has small silent errors on all of its children, we can still reconstruct
 * the correct data, as long as those errors are at sufficiently-separated
 * offsets (specifically, separated by the largest block size - default of
 * 128KB, but up to 16MB).
 */
static void
vdev_indirect_reconstruct_io_done(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;
	boolean_t known_good = B_FALSE;
	int error;

	iv->iv_unique_combinations = 1;
	iv->iv_attempts_max = UINT64_MAX;

	if (zfs_reconstruct_indirect_combinations_max > 0)
		iv->iv_attempts_max = zfs_reconstruct_indirect_combinations_max;

	/*
	 * If nonzero, roughly one in every
	 * zfs_reconstruct_indirect_damage_fraction blocks will be damaged,
	 * in order to validate reconstruction when there are split segments
	 * with damaged copies.  known_good will be B_TRUE when
	 * reconstruction is known to be possible.
	 */
	if (zfs_reconstruct_indirect_damage_fraction != 0 &&
	    spa_get_random(zfs_reconstruct_indirect_damage_fraction) == 0)
		known_good = (vdev_indirect_splits_damage(iv, zio) == 0);

	/*
	 * Determine the unique children for a split segment and add them
	 * to the is_unique_child list.  By restricting reconstruction
	 * to these children, only unique combinations will be considered.
	 * This can vastly reduce the search space when there are a large
	 * number of indirect splits.
	 */
	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		is->is_unique_children = 0;

		for (int i = 0; i < is->is_children; i++) {
			indirect_child_t *ic_i = &is->is_child[i];

			if (ic_i->ic_data == NULL ||
			    ic_i->ic_duplicate != NULL)
				continue;

			for (int j = i + 1; j < is->is_children; j++) {
				indirect_child_t *ic_j = &is->is_child[j];

				if (ic_j->ic_data == NULL ||
				    ic_j->ic_duplicate != NULL)
					continue;

				if (abd_cmp(ic_i->ic_data, ic_j->ic_data) == 0)
					ic_j->ic_duplicate = ic_i;
			}

			is->is_unique_children++;
			list_insert_tail(&is->is_unique_child, ic_i);
		}

		/* Reconstruction is impossible, no valid children */
		EQUIV(list_is_empty(&is->is_unique_child),
		    is->is_unique_children == 0);
		if (list_is_empty(&is->is_unique_child)) {
			zio->io_error = EIO;
			vdev_indirect_all_checksum_errors(zio);
			zio_checksum_verified(zio);
			return;
		}

		iv->iv_unique_combinations *= is->is_unique_children;
	}

	if (iv->iv_unique_combinations <= iv->iv_attempts_max)
		error = vdev_indirect_splits_enumerate_all(iv, zio);
	else
		error = vdev_indirect_splits_enumerate_randomly(iv, zio);

	if (error != 0) {
		/* All attempted combinations failed. */
		ASSERT3B(known_good, ==, B_FALSE);
		zio->io_error = error;
		vdev_indirect_all_checksum_errors(zio);
	} else {
		/*
		 * The checksum has been successfully validated.  Issue
		 * repair I/Os to any copies of splits which don't match
		 * the validated version.
		 */
		ASSERT0(vdev_indirect_splits_checksum_validate(iv, zio));
		vdev_indirect_repair(zio);
		zio_checksum_verified(zio);
	}
}
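/*
 * Editorial sketch (not part of the build): the duplicate collapsing
 * above, reduced to plain buffers.  With 3 splits on 2-way mirrors there
 * are 2*2*2 = 8 raw combinations; if one split's two copies compare
 * equal, its unique count drops to 1 and only 2*1*2 = 4 combinations
 * remain to be checksummed.  All names below are hypothetical.
 */
#if 0
#include <string.h>
#include <stdint.h>
#include <stddef.h>

/* Count unique copies among 'n' equally sized buffers. */
static int
count_unique(const uint8_t *copies[], int n, size_t size)
{
	int unique = 0;

	for (int i = 0; i < n; i++) {
		int dup = 0;
		for (int j = 0; j < i; j++) {
			if (memcmp(copies[i], copies[j], size) == 0) {
				dup = 1;	/* same bytes as an earlier copy */
				break;
			}
		}
		if (!dup)
			unique++;
	}
	return (unique);
}
#endif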
1805
static void
vdev_indirect_io_done(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;

	if (iv->iv_reconstruct) {
		/*
		 * We have read all copies of the data (e.g. from mirrors),
		 * either because this was a scrub/resilver, or because the
		 * one-copy read didn't checksum correctly.
		 */
		vdev_indirect_reconstruct_io_done(zio);
		return;
	}

	if (!iv->iv_split_block) {
		/*
		 * This was not a split block, so we passed the BP down,
		 * and the checksum was handled by the (one) child zio.
		 */
		return;
	}

	zio_bad_cksum_t zbc;
	int ret = zio_checksum_error(zio, &zbc);
	if (ret == 0) {
		zio_checksum_verified(zio);
		return;
	}

	/*
	 * The checksum didn't match.  Read all copies of all splits, and
	 * then we will try to reconstruct.  The next time
	 * vdev_indirect_io_done() is called, iv_reconstruct will be set.
	 */
	vdev_indirect_read_all(zio);

	zio_vdev_io_redone(zio);
}
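/*
 * Editorial recap of the split-block read path: vdev_indirect_io_start()
 * reads one copy per split; if the assembled block fails its checksum,
 * vdev_indirect_io_done() calls vdev_indirect_read_all() and redoes the
 * i/o, so the second pass lands in vdev_indirect_reconstruct_io_done()
 * with iv_reconstruct set.
 */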
1845
vdev_ops_t vdev_indirect_ops = {
	.vdev_op_open = vdev_indirect_open,
	.vdev_op_close = vdev_indirect_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_io_start = vdev_indirect_io_start,
	.vdev_op_io_done = vdev_indirect_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = vdev_indirect_remap,
	.vdev_op_xlate = NULL,
	.vdev_op_type = VDEV_TYPE_INDIRECT,	/* name of this vdev type */
	.vdev_op_leaf = B_FALSE			/* not a leaf vdev */
};

EXPORT_SYMBOL(spa_condense_fini);
EXPORT_SYMBOL(spa_start_indirect_condensing_thread);
EXPORT_SYMBOL(spa_condense_indirect_start_sync);
EXPORT_SYMBOL(spa_condense_init);
EXPORT_SYMBOL(spa_vdev_indirect_mark_obsolete);
EXPORT_SYMBOL(vdev_indirect_mark_obsolete);
EXPORT_SYMBOL(vdev_indirect_should_condense);
EXPORT_SYMBOL(vdev_indirect_sync_obsolete);
EXPORT_SYMBOL(vdev_obsolete_counts_are_precise);
EXPORT_SYMBOL(vdev_obsolete_sm_object);

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_vdevs_enable, INT, ZMOD_RW,
	"Whether to attempt condensing indirect vdev mappings");

ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, min_mapping_bytes, ULONG, ZMOD_RW,
	"Don't bother condensing if the mapping uses less than this amount of "
	"memory");

ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, max_obsolete_bytes, ULONG, ZMOD_RW,
	"Minimum size obsolete spacemap to attempt condensing");

ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_commit_entry_delay_ms, INT, ZMOD_RW,
	"Used by tests to ensure certain actions happen in the middle of a "
	"condense. A maximum value of 1 should be sufficient.");

ZFS_MODULE_PARAM(zfs_reconstruct, zfs_reconstruct_, indirect_combinations_max, INT, ZMOD_RW,
	"Maximum number of combinations when reconstructing split segments");
/* END CSTYLED */