/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2019 by Delphix. All rights reserved.
 */
/*
 * ARC buffer data (ABD).
 *
 * ABDs are an abstract data structure for the ARC which can use two
 * different ways of storing the underlying data:
 *
 * (a) Linear buffer. In this case, all the data in the ABD is stored in one
 *     contiguous buffer in memory (from a zio_[data_]buf_* kmem cache).
 *
 *         +-------------------+
 *         | ABD (linear)      |
 *         |   abd_flags = ... |
 *         |   abd_size = ...  |     +--------------------------------+
 *         |   abd_buf ------------->| raw buffer of size abd_size    |
 *         +-------------------+     +--------------------------------+
 *               no abd_chunks
 *
 * (b) Scattered buffer. In this case, the data in the ABD is split into
 *     equal-sized chunks (from the abd_chunk_cache kmem_cache), with pointers
 *     to the chunks recorded in an array at the end of the ABD structure.
 *
 *         +-------------------+
 *         | ABD (scattered)   |
 *         |   abd_flags = ... |
 *         |   abd_size = ...  |
 *         |   abd_offset = 0  |                           +-----------+
 *         |   abd_chunks[0] ----------------------------->| chunk 0   |
 *         |   abd_chunks[1] ---------------------+        +-----------+
 *         |   ...             |                  |        +-----------+
 *         |   abd_chunks[N-1] ---------+         +------->| chunk 1   |
 *         +-------------------+        |                  +-----------+
 *                                      |                      ...
 *                                      |                  +-----------+
 *                                      +----------------->| chunk N-1 |
 *                                                         +-----------+
 *
 * Linear buffers act exactly like normal buffers and are always mapped into the
 * kernel's virtual memory space, while scattered ABD data chunks are allocated
 * as physical pages and then mapped in only while they are actually being
 * accessed through one of the abd_* library functions. Using scattered ABDs
 * provides several benefits:
 *
 * (1) They avoid use of kmem_*, preventing performance problems where running
 *     kmem_reap on very large memory systems never finishes and causes
 *     constant TLB shootdowns.
 *
 * (2) Fragmentation is less of an issue since when we are at the limit of
 *     allocatable space, we won't have to search around for a long free
 *     hole in the VA space for large ARC allocations. Each chunk is mapped in
 *     individually, so even if we weren't using segkpm (see next point) we
 *     wouldn't need to worry about finding a contiguous address range.
 *
 * (3) Use of segkpm will avoid the need for map / unmap / TLB shootdown costs
 *     on each ABD access. (If segkpm isn't available then we use all linear
 *     ABDs to avoid this penalty.) See seg_kpm.c for more details.
 *
 * It is possible to make all ABDs linear by setting zfs_abd_scatter_enabled to
 * B_FALSE. However, it is not possible to use scattered ABDs if segkpm is not
 * available, which is the case on all 32-bit systems and any 64-bit systems
 * where kpm_enable is turned off.
 *
 * In addition to directly allocating a linear or scattered ABD, it is also
 * possible to create an ABD by requesting the "sub-ABD" starting at an offset
 * within an existing ABD. In linear buffers this is simple (set abd_buf of
 * the new ABD to the starting point within the original raw buffer), but
 * scattered ABDs are a little more complex. The new ABD makes a copy of the
 * relevant abd_chunks pointers (but not the underlying data). However, to
 * provide arbitrary rather than only chunk-aligned starting offsets, it also
 * tracks an abd_offset field which represents the starting point of the data
 * within the first chunk in abd_chunks. For both linear and scattered ABDs,
 * creating an offset ABD marks the original ABD as the offset's parent, and the
 * original ABD's abd_children refcount is incremented. This data allows us to
 * ensure the root ABD isn't deleted before its children.
 *
 * Most consumers should never need to know what type of ABD they're using --
 * the ABD public API ensures that it's possible to transparently switch from
 * using a linear ABD to a scattered one when doing so would be beneficial.
 *
 * If you need to use the data within an ABD directly, if you know it's linear
 * (because you allocated it) you can use abd_to_buf() to access the underlying
 * raw buffer. Otherwise, you should use one of the abd_borrow_buf* functions
 * which will allocate a raw buffer if necessary. Use the abd_return_buf*
 * functions to return any raw buffers that are no longer necessary when you're
 * done using them.
 *
 * There are a variety of ABD APIs that implement basic buffer operations:
 * compare, copy, read, write, and fill with zeroes. If you need a custom
 * function which progressively accesses the whole ABD, use the abd_iterate_*
 * functions.
 */
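
/*
 * A short usage sketch (illustrative only, not code from this file):
 * allocate an ABD, fill it from a raw buffer, read it back out, and free
 * it. The 4K size and the src/dst names are arbitrary.
 *
 *	abd_t *abd = abd_alloc(4096, B_FALSE);
 *	abd_copy_from_buf_off(abd, src, 0, 4096);
 *	...
 *	abd_copy_to_buf_off(dst, abd, 0, 4096);
 *	abd_free(abd);
 */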
#include <sys/abd.h>
#include <sys/param.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
#ifdef _KERNEL
#include <linux/scatterlist.h>
#include <linux/kmap_compat.h>
#else
#define	MAX_ORDER	1
#endif

typedef struct abd_stats {
	kstat_named_t abdstat_struct_size;
	kstat_named_t abdstat_linear_cnt;
	kstat_named_t abdstat_linear_data_size;
	kstat_named_t abdstat_scatter_cnt;
	kstat_named_t abdstat_scatter_data_size;
	kstat_named_t abdstat_scatter_chunk_waste;
	kstat_named_t abdstat_scatter_orders[MAX_ORDER];
	kstat_named_t abdstat_scatter_page_multi_chunk;
	kstat_named_t abdstat_scatter_page_multi_zone;
	kstat_named_t abdstat_scatter_page_alloc_retry;
	kstat_named_t abdstat_scatter_sg_table_retry;
} abd_stats_t;

static abd_stats_t abd_stats = {
	/* Amount of memory occupied by all of the abd_t struct allocations */
	{ "struct_size",			KSTAT_DATA_UINT64 },
	/*
	 * The number of linear ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset() and abd_get_from_buf()). If an
	 * ABD takes ownership of its buf then it will become tracked.
	 */
	{ "linear_cnt",				KSTAT_DATA_UINT64 },
	/* Amount of data stored in all linear ABDs tracked by linear_cnt */
	{ "linear_data_size",			KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset()).
	 */
	{ "scatter_cnt",			KSTAT_DATA_UINT64 },
	/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
	{ "scatter_data_size",			KSTAT_DATA_UINT64 },
	/*
	 * The amount of space wasted at the end of the last chunk across all
	 * scatter ABDs tracked by scatter_cnt.
	 */
	{ "scatter_chunk_waste",		KSTAT_DATA_UINT64 },
	/*
	 * The number of compound allocations of a given order. These
	 * allocations are spread over all currently allocated ABDs, and
	 * act as a measure of memory fragmentation.
	 */
	{ { "scatter_order_N",			KSTAT_DATA_UINT64 } },
	/*
	 * The number of scatter ABDs which contain multiple chunks.
	 * ABDs are preferentially allocated from the minimum number of
	 * contiguous multi-page chunks; a single chunk is optimal.
	 */
	{ "scatter_page_multi_chunk",		KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are split across memory zones.
	 * ABDs are preferentially allocated using pages from a single zone.
	 */
	{ "scatter_page_multi_zone",		KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the pages to populate the scatter ABD.
	 */
	{ "scatter_page_alloc_retry",		KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the sg table for an ABD.
	 */
	{ "scatter_sg_table_retry",		KSTAT_DATA_UINT64 },
};

#define	ABDSTAT(stat)		(abd_stats.stat.value.ui64)
#define	ABDSTAT_INCR(stat, val) \
	atomic_add_64(&abd_stats.stat.value.ui64, (val))
#define	ABDSTAT_BUMP(stat)	ABDSTAT_INCR(stat, 1)
#define	ABDSTAT_BUMPDOWN(stat)	ABDSTAT_INCR(stat, -1)

#define	ABD_SCATTER(abd)	(abd->abd_u.abd_scatter)
#define	ABD_BUF(abd)		(abd->abd_u.abd_linear.abd_buf)
#define	abd_for_each_sg(abd, sg, n, i)	\
	for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)

/* see block comment above for description */
int zfs_abd_scatter_enabled = B_TRUE;
unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;
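
/*
 * Both tunables above are exported as module parameters (see the bottom of
 * this file), so they can be inspected or changed at runtime, e.g. via the
 * standard module parameter path (a sketch; path assumed):
 *
 *	echo 0 > /sys/module/zfs/parameters/zfs_abd_scatter_enabled
 */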
/*
 * zfs_abd_scatter_min_size is the minimum allocation size to use scatter
 * ABD's. Smaller allocations will use linear ABD's, which use
 * zio_[data_]buf_alloc().
 *
 * Scatter ABD's use at least one page each, so sub-page allocations waste
 * some space when allocated as scatter (e.g. 2KB scatter allocation wastes
 * half of each page). Using linear ABD's for small allocations means that
 * they will be put on slabs which contain many allocations. This can
 * improve memory efficiency, but it also makes it much harder for ARC
 * evictions to actually free pages, because all the buffers on one slab need
 * to be freed in order for the slab (and underlying pages) to be freed.
 * Typically, 512B and 1KB kmem caches have 16 buffers per slab, so it's
 * possible for them to actually waste more memory than scatter (one page per
 * buf = wasting 3/4 or 7/8th; one buf per slab = wasting 15/16th).
 *
 * Spill blocks are typically 512B and are heavily used on systems running
 * selinux with the default dnode size and the `xattr=sa` property set.
 *
 * By default we use linear allocations for 512B and 1KB, and scatter
 * allocations for larger (1.5KB and up).
 */
int zfs_abd_scatter_min_size = 512 * 3;

static kmem_cache_t *abd_cache = NULL;
static kstat_t *abd_ksp;

static inline size_t
abd_chunkcnt_for_bytes(size_t size)
{
	return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
}
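
/*
 * E.g. with 4K pages, abd_chunkcnt_for_bytes(6 * 1024) rounds 6K up to 8K
 * and returns 2 chunks, while a 512B request still consumes a full page
 * (1 chunk). This is the per-allocation waste tracked by
 * scatter_chunk_waste above.
 */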
#ifdef _KERNEL
#ifndef CONFIG_HIGHMEM

#ifndef __GFP_RECLAIM
#define	__GFP_RECLAIM		__GFP_WAIT
#endif

static unsigned long
abd_alloc_chunk(int nid, gfp_t gfp, unsigned int order)
{
	struct page *page;

	page = alloc_pages_node(nid, gfp, order);
	if (!page)
		return (0);

	return ((unsigned long) page_address(page));
}

/*
 * The goal is to minimize fragmentation by preferentially populating ABDs
 * with higher order compound pages from a single zone. Allocation size is
 * progressively decreased until it can be satisfied without performing
 * reclaim or compaction. When necessary this function will degenerate to
 * allocating individual pages and allowing reclaim to satisfy allocations.
 */
static void
abd_alloc_pages(abd_t *abd, size_t size)
{
	struct list_head pages;
	struct sg_table table;
	struct scatterlist *sg;
	struct page *page, *tmp_page = NULL;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	gfp_t gfp_comp = (gfp | __GFP_NORETRY | __GFP_COMP) & ~__GFP_RECLAIM;
	int max_order = MIN(zfs_abd_scatter_max_order, MAX_ORDER - 1);
	int nr_pages = abd_chunkcnt_for_bytes(size);
	int chunks = 0, zones = 0;
	size_t remaining_size;
	int nid = NUMA_NO_NODE;
	int alloc_pages = 0;

	INIT_LIST_HEAD(&pages);

	while (alloc_pages < nr_pages) {
		unsigned long paddr;
		unsigned chunk_pages;
		int order;

		order = MIN(highbit64(nr_pages - alloc_pages) - 1, max_order);
		chunk_pages = (1U << order);

		paddr = abd_alloc_chunk(nid, order ? gfp_comp : gfp, order);
		if (paddr == 0) {
			ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
			schedule_timeout_interruptible(1);

			max_order = MAX(0, order - 1);
			continue;
		}

		page = virt_to_page(paddr);
		list_add_tail(&page->lru, &pages);

		if ((nid != NUMA_NO_NODE) && (page_to_nid(page) != nid))
			zones++;

		nid = page_to_nid(page);
		ABDSTAT_BUMP(abdstat_scatter_orders[order]);
		chunks++;
		alloc_pages += chunk_pages;
	}

	ASSERT3S(alloc_pages, ==, nr_pages);

	while (sg_alloc_table(&table, chunks, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}

	sg = table.sgl;
	remaining_size = size;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		size_t sg_size = MIN(PAGESIZE << compound_order(page),
		    remaining_size);
		sg_set_page(sg, page, sg_size, 0);
		remaining_size -= sg_size;

		sg = sg_next(sg);
		list_del(&page->lru);
	}

	if (chunks > 1) {
		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;

		if (zones) {
			ABDSTAT_BUMP(abdstat_scatter_page_multi_zone);
			abd->abd_flags |= ABD_FLAG_MULTI_ZONE;
		}
	}

	ABD_SCATTER(abd).abd_sgl = table.sgl;
	ABD_SCATTER(abd).abd_nents = table.nents;
}
#else
/*
 * Allocate N individual pages to construct a scatter ABD. This function
 * makes no attempt to request contiguous pages and requires the minimal
 * number of kernel interfaces. It's designed for maximum compatibility.
 */
static void
abd_alloc_pages(abd_t *abd, size_t size)
{
	struct scatterlist *sg = NULL;
	struct sg_table table;
	struct page *page;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	int nr_pages = abd_chunkcnt_for_bytes(size);
	int i = 0;

	while (sg_alloc_table(&table, nr_pages, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}

	ASSERT3U(table.nents, ==, nr_pages);
	ABD_SCATTER(abd).abd_sgl = table.sgl;
	ABD_SCATTER(abd).abd_nents = nr_pages;

	abd_for_each_sg(abd, sg, nr_pages, i) {
		while ((page = __page_cache_alloc(gfp)) == NULL) {
			ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
			schedule_timeout_interruptible(1);
		}

		ABDSTAT_BUMP(abdstat_scatter_orders[0]);
		sg_set_page(sg, page, PAGESIZE, 0);
	}

	if (nr_pages > 1) {
		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
	}
}
#endif /* !CONFIG_HIGHMEM */
static void
abd_free_pages(abd_t *abd)
{
	struct scatterlist *sg = NULL;
	struct sg_table table;
	struct page *page;
	int nr_pages = ABD_SCATTER(abd).abd_nents;
	int order, i = 0;

	if (abd->abd_flags & ABD_FLAG_MULTI_ZONE)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_zone);

	if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		page = sg_page(sg);
		order = compound_order(page);
		__free_pages(page, order);
		ASSERT3U(sg->length, <=, PAGE_SIZE << order);
		ABDSTAT_BUMPDOWN(abdstat_scatter_orders[order]);
	}

	table.sgl = ABD_SCATTER(abd).abd_sgl;
	table.nents = table.orig_nents = nr_pages;
	sg_free_table(&table);
}
#else /* _KERNEL */

#ifndef PAGE_SHIFT
#define	PAGE_SHIFT (highbit64(PAGESIZE)-1)
#endif

struct page;

#define	abd_alloc_chunk(o) \
	((struct page *)umem_alloc_aligned(PAGESIZE << (o), 64, KM_SLEEP))
#define	abd_free_chunk(chunk, o)	umem_free(chunk, PAGESIZE << (o))
#define	zfs_kmap_atomic(chunk, km)	((void *)chunk)
#define	zfs_kunmap_atomic(addr, km)	do { (void)(addr); } while (0)
#define	local_irq_save(flags)		do { (void)(flags); } while (0)
#define	local_irq_restore(flags)	do { (void)(flags); } while (0)
#define	nth_page(pg, i) \
	((struct page *)((void *)(pg) + (i) * PAGESIZE))

struct scatterlist {
	struct page *page;
	int length;
	int end;
};

static void
sg_init_table(struct scatterlist *sg, int nr)
{
	memset(sg, 0, nr * sizeof (struct scatterlist));
	sg[nr - 1].end = 1;
}

#define	for_each_sg(sgl, sg, nr, i)	\
	for ((i) = 0, (sg) = (sgl); (i) < (nr); (i)++, (sg) = sg_next(sg))

static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
	/* currently we don't use offset */
	ASSERT(offset == 0);
	sg->page = page;
	sg->length = len;
}

static inline struct page *
sg_page(struct scatterlist *sg)
{
	return (sg->page);
}

static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg->end)
		return (NULL);

	return (sg + 1);
}

static void
abd_alloc_pages(abd_t *abd, size_t size)
{
	unsigned nr_pages = abd_chunkcnt_for_bytes(size);
	struct scatterlist *sg;
	int i;

	ABD_SCATTER(abd).abd_sgl = vmem_alloc(nr_pages *
	    sizeof (struct scatterlist), KM_SLEEP);
	sg_init_table(ABD_SCATTER(abd).abd_sgl, nr_pages);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		struct page *p = abd_alloc_chunk(0);
		sg_set_page(sg, p, PAGESIZE, 0);
	}
	ABD_SCATTER(abd).abd_nents = nr_pages;
}

static void
abd_free_pages(abd_t *abd)
{
	int i, n = ABD_SCATTER(abd).abd_nents;
	struct scatterlist *sg;
	int j;

	abd_for_each_sg(abd, sg, n, i) {
		for (j = 0; j < sg->length; j += PAGESIZE) {
			struct page *p = nth_page(sg_page(sg), j >> PAGE_SHIFT);
			abd_free_chunk(p, 0);
		}
	}

	vmem_free(ABD_SCATTER(abd).abd_sgl, n * sizeof (struct scatterlist));
}

#endif /* _KERNEL */
void
abd_init(void)
{
	int i;

	abd_cache = kmem_cache_create("abd_t", sizeof (abd_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (abd_ksp != NULL) {
		abd_ksp->ks_data = &abd_stats;
		kstat_install(abd_ksp);

		for (i = 0; i < MAX_ORDER; i++) {
			snprintf(abd_stats.abdstat_scatter_orders[i].name,
			    KSTAT_STRLEN, "scatter_order_%d", i);
			abd_stats.abdstat_scatter_orders[i].data_type =
			    KSTAT_DATA_UINT64;
		}
	}
}

void
abd_fini(void)
{
	if (abd_ksp != NULL) {
		kstat_delete(abd_ksp);
		abd_ksp = NULL;
	}

	if (abd_cache) {
		kmem_cache_destroy(abd_cache);
		abd_cache = NULL;
	}
}

static void
abd_verify(abd_t *abd)
{
	ASSERT3U(abd->abd_size, >, 0);
	ASSERT3U(abd->abd_size, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(abd->abd_flags, ==, abd->abd_flags & (ABD_FLAG_LINEAR |
	    ABD_FLAG_OWNER | ABD_FLAG_META | ABD_FLAG_MULTI_ZONE |
	    ABD_FLAG_MULTI_CHUNK));
	IMPLY(abd->abd_parent != NULL, !(abd->abd_flags & ABD_FLAG_OWNER));
	IMPLY(abd->abd_flags & ABD_FLAG_META, abd->abd_flags & ABD_FLAG_OWNER);
	if (abd_is_linear(abd)) {
		ASSERT3P(abd->abd_u.abd_linear.abd_buf, !=, NULL);
	} else {
		size_t n;
		int i = 0;
		struct scatterlist *sg = NULL;

		ASSERT3U(ABD_SCATTER(abd).abd_nents, >, 0);
		ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
		    ABD_SCATTER(abd).abd_sgl->length);
		n = ABD_SCATTER(abd).abd_nents;
		abd_for_each_sg(abd, sg, n, i) {
			ASSERT3P(sg_page(sg), !=, NULL);
		}
	}
}

static inline abd_t *
abd_alloc_struct(void)
{
	abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);

	ASSERT3P(abd, !=, NULL);
	ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t));

	return (abd);
}

static inline void
abd_free_struct(abd_t *abd)
{
	kmem_cache_free(abd_cache, abd);
	ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t));
}
/*
 * Allocate an ABD, along with its own underlying data buffers. Use this if you
 * don't care whether the ABD is linear or not.
 */
abd_t *
abd_alloc(size_t size, boolean_t is_metadata)
{
	/* see the comment above zfs_abd_scatter_min_size */
	if (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size)
		return (abd_alloc_linear(size, is_metadata));

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	abd_t *abd = abd_alloc_struct();
	abd->abd_flags = ABD_FLAG_OWNER;
	abd_alloc_pages(abd, size);

	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}
	abd->abd_size = size;
	abd->abd_parent = NULL;
	zfs_refcount_create(&abd->abd_children);

	abd->abd_u.abd_scatter.abd_offset = 0;

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, size);
	ABDSTAT_INCR(abdstat_scatter_chunk_waste,
	    P2ROUNDUP(size, PAGESIZE) - size);

	return (abd);
}

static void
abd_free_scatter(abd_t *abd)
{
	abd_free_pages(abd);

	zfs_refcount_destroy(&abd->abd_children);
	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
	ABDSTAT_INCR(abdstat_scatter_chunk_waste,
	    (int)abd->abd_size - (int)P2ROUNDUP(abd->abd_size, PAGESIZE));

	abd_free_struct(abd);
}

/*
 * Allocate an ABD that must be linear, along with its own underlying data
 * buffer. Only use this when it would be very annoying to write your ABD
 * consumer with a scattered ABD.
 */
abd_t *
abd_alloc_linear(size_t size, boolean_t is_metadata)
{
	abd_t *abd = abd_alloc_struct();

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	abd->abd_flags = ABD_FLAG_LINEAR | ABD_FLAG_OWNER;
	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}
	abd->abd_size = size;
	abd->abd_parent = NULL;
	zfs_refcount_create(&abd->abd_children);

	if (is_metadata) {
		abd->abd_u.abd_linear.abd_buf = zio_buf_alloc(size);
	} else {
		abd->abd_u.abd_linear.abd_buf = zio_data_buf_alloc(size);
	}

	ABDSTAT_BUMP(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, size);

	return (abd);
}

static void
abd_free_linear(abd_t *abd)
{
	if (abd->abd_flags & ABD_FLAG_META) {
		zio_buf_free(abd->abd_u.abd_linear.abd_buf, abd->abd_size);
	} else {
		zio_data_buf_free(abd->abd_u.abd_linear.abd_buf, abd->abd_size);
	}

	zfs_refcount_destroy(&abd->abd_children);
	ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);

	abd_free_struct(abd);
}

/*
 * Free an ABD. Only use this on ABDs allocated with abd_alloc() or
 * abd_alloc_linear().
 */
void
abd_free(abd_t *abd)
{
	abd_verify(abd);
	ASSERT3P(abd->abd_parent, ==, NULL);
	ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
	if (abd_is_linear(abd))
		abd_free_linear(abd);
	else
		abd_free_scatter(abd);
}

/*
 * Allocate an ABD of the same format (same metadata flag, same scatterize
 * setting) as another ABD.
 */
abd_t *
abd_alloc_sametype(abd_t *sabd, size_t size)
{
	boolean_t is_metadata = (sabd->abd_flags & ABD_FLAG_META) != 0;
	if (abd_is_linear(sabd)) {
		return (abd_alloc_linear(size, is_metadata));
	} else {
		return (abd_alloc(size, is_metadata));
	}
}

/*
 * If we're going to use this ABD for doing I/O using the block layer, the
 * consumer of the ABD data doesn't care if it's scattered or not, and we don't
 * plan to store this ABD in memory for a long period of time, we should
 * allocate the ABD type that requires the least data copying to do the I/O.
 *
 * On Illumos this is linear ABDs; however, if ldi_strategy() can ever issue
 * I/Os using a scatter/gather list we should switch to that and replace this
 * call with vanilla abd_alloc().
 *
 * On Linux the optimal thing to do would be to use abd_get_offset() and
 * construct a new ABD which shares the original pages thereby eliminating
 * the copy. But for the moment a new linear ABD is allocated until this
 * performance optimization can be implemented.
 */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
	return (abd_alloc(size, is_metadata));
}
/*
 * Allocate a new ABD to point to offset off of sabd. It shares the underlying
 * buffer data with sabd. Use abd_put() to free. sabd must not be freed while
 * any derived ABDs exist.
 */
static inline abd_t *
abd_get_offset_impl(abd_t *sabd, size_t off, size_t size)
{
	abd_t *abd;

	abd_verify(sabd);
	ASSERT3U(off, <=, sabd->abd_size);

	if (abd_is_linear(sabd)) {
		abd = abd_alloc_struct();

		/*
		 * Even if this buf is filesystem metadata, we only track that
		 * if we own the underlying data buffer, which is not true in
		 * this case. Therefore, we don't ever use ABD_FLAG_META here.
		 */
		abd->abd_flags = ABD_FLAG_LINEAR;

		abd->abd_u.abd_linear.abd_buf =
		    (char *)sabd->abd_u.abd_linear.abd_buf + off;
	} else {
		int i = 0;
		struct scatterlist *sg = NULL;
		size_t new_offset = sabd->abd_u.abd_scatter.abd_offset + off;

		abd = abd_alloc_struct();

		/*
		 * Even if this buf is filesystem metadata, we only track that
		 * if we own the underlying data buffer, which is not true in
		 * this case. Therefore, we don't ever use ABD_FLAG_META here.
		 */
		abd->abd_flags = 0;

		abd_for_each_sg(sabd, sg, ABD_SCATTER(sabd).abd_nents, i) {
			if (new_offset < sg->length)
				break;
			new_offset -= sg->length;
		}

		ABD_SCATTER(abd).abd_sgl = sg;
		ABD_SCATTER(abd).abd_offset = new_offset;
		ABD_SCATTER(abd).abd_nents = ABD_SCATTER(sabd).abd_nents - i;
	}

	abd->abd_size = size;
	abd->abd_parent = sabd;
	zfs_refcount_create(&abd->abd_children);
	(void) zfs_refcount_add_many(&sabd->abd_children, abd->abd_size, abd);

	return (abd);
}

abd_t *
abd_get_offset(abd_t *sabd, size_t off)
{
	size_t size = sabd->abd_size > off ? sabd->abd_size - off : 0;

	VERIFY3U(size, >, 0);

	return (abd_get_offset_impl(sabd, off, size));
}

abd_t *
abd_get_offset_size(abd_t *sabd, size_t off, size_t size)
{
	ASSERT3U(off + size, <=, sabd->abd_size);

	return (abd_get_offset_impl(sabd, off, size));
}
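
/*
 * Example (sketch; sabd and the 8K size are assumed): to view the second
 * half of an 8K ABD without copying any data, a consumer could do the
 * following. abd_put() releases the view and drops the parent's
 * abd_children reference.
 *
 *	abd_t *tail = abd_get_offset_size(sabd, 4096, 4096);
 *	...
 *	abd_put(tail);
 */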
/*
 * Allocate a linear ABD structure for buf. You must free this with abd_put()
 * since the resulting ABD doesn't own its own buffer.
 */
abd_t *
abd_get_from_buf(void *buf, size_t size)
{
	abd_t *abd = abd_alloc_struct();

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	/*
	 * Even if this buf is filesystem metadata, we only track that if we
	 * own the underlying data buffer, which is not true in this case.
	 * Therefore, we don't ever use ABD_FLAG_META here.
	 */
	abd->abd_flags = ABD_FLAG_LINEAR;
	abd->abd_size = size;
	abd->abd_parent = NULL;
	zfs_refcount_create(&abd->abd_children);

	abd->abd_u.abd_linear.abd_buf = buf;

	return (abd);
}

/*
 * Free an ABD allocated from abd_get_offset() or abd_get_from_buf(). Will not
 * free the underlying scatterlist or buffer.
 */
void
abd_put(abd_t *abd)
{
	abd_verify(abd);
	ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));

	if (abd->abd_parent != NULL) {
		(void) zfs_refcount_remove_many(&abd->abd_parent->abd_children,
		    abd->abd_size, abd);
	}

	zfs_refcount_destroy(&abd->abd_children);
	abd_free_struct(abd);
}

/*
 * Get the raw buffer associated with a linear ABD.
 */
void *
abd_to_buf(abd_t *abd)
{
	ASSERT(abd_is_linear(abd));
	abd_verify(abd);
	return (abd->abd_u.abd_linear.abd_buf);
}

/*
 * Borrow a raw buffer from an ABD without copying the contents of the ABD
 * into the buffer. If the ABD is scattered, this will allocate a raw buffer
 * whose contents are undefined. To copy over the existing data in the ABD, use
 * abd_borrow_buf_copy() instead.
 */
void *
abd_borrow_buf(abd_t *abd, size_t n)
{
	void *buf;
	abd_verify(abd);
	ASSERT3U(abd->abd_size, >=, n);
	if (abd_is_linear(abd)) {
		buf = abd_to_buf(abd);
	} else {
		buf = zio_buf_alloc(n);
	}
	(void) zfs_refcount_add_many(&abd->abd_children, n, buf);

	return (buf);
}

void *
abd_borrow_buf_copy(abd_t *abd, size_t n)
{
	void *buf = abd_borrow_buf(abd, n);
	if (!abd_is_linear(abd)) {
		abd_copy_to_buf(buf, abd, n);
	}
	return (buf);
}

/*
 * Return a borrowed raw buffer to an ABD. If the ABD is scattered, this will
 * not change the contents of the ABD and will ASSERT that you didn't modify
 * the buffer since it was borrowed. If you want any changes you made to buf to
 * be copied back to abd, use abd_return_buf_copy() instead.
 */
void
abd_return_buf(abd_t *abd, void *buf, size_t n)
{
	abd_verify(abd);
	ASSERT3U(abd->abd_size, >=, n);
	if (abd_is_linear(abd)) {
		ASSERT3P(buf, ==, abd_to_buf(abd));
	} else {
		ASSERT0(abd_cmp_buf(abd, buf, n));
		zio_buf_free(buf, n);
	}
	(void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
}

void
abd_return_buf_copy(abd_t *abd, void *buf, size_t n)
{
	if (!abd_is_linear(abd)) {
		abd_copy_from_buf(abd, buf, n);
	}
	abd_return_buf(abd, buf, n);
}
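
/*
 * Borrow/return usage sketch (illustrative only): for a linear ABD the
 * borrowed pointer is the ABD's own buffer, so borrowing is free; for a
 * scattered ABD it costs a zio_buf allocation plus, with the _copy
 * variants, a copy in each direction.
 *
 *	void *buf = abd_borrow_buf_copy(abd, abd->abd_size);
 *	... modify buf in place ...
 *	abd_return_buf_copy(abd, buf, abd->abd_size);
 */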
/*
 * Give this ABD ownership of the buffer that it's storing. Can only be used on
 * linear ABDs which were allocated via abd_get_from_buf(), or ones allocated
 * with abd_alloc_linear() which subsequently released ownership of their buf
 * with abd_release_ownership_of_buf().
 */
void
abd_take_ownership_of_buf(abd_t *abd, boolean_t is_metadata)
{
	ASSERT(abd_is_linear(abd));
	ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));
	abd_verify(abd);

	abd->abd_flags |= ABD_FLAG_OWNER;
	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}

	ABDSTAT_BUMP(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
}

void
abd_release_ownership_of_buf(abd_t *abd)
{
	ASSERT(abd_is_linear(abd));
	ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
	abd_verify(abd);

	abd->abd_flags &= ~ABD_FLAG_OWNER;
	/* Disable this flag since we no longer own the data buffer */
	abd->abd_flags &= ~ABD_FLAG_META;

	ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
}

#ifndef HAVE_1ARG_KMAP_ATOMIC
#define	NR_KM_TYPE (6)
#ifdef _KERNEL
int km_table[NR_KM_TYPE] = {
	KM_USER0,
	KM_USER1,
	KM_BIO_SRC_IRQ,
	KM_BIO_DST_IRQ,
	KM_PTE0,
	KM_PTE1,
};
#endif
#endif

struct abd_iter {
	/* public interface */
	void		*iter_mapaddr;	/* addr corresponding to iter_pos */
	size_t		iter_mapsize;	/* length of data valid at mapaddr */

	/* private */
	abd_t		*iter_abd;	/* ABD being iterated through */
	size_t		iter_pos;
	size_t		iter_offset;	/* offset in current sg/abd_buf, */
					/* abd_offset included */
	struct scatterlist *iter_sg;	/* current sg */
#ifndef HAVE_1ARG_KMAP_ATOMIC
	int		iter_km;	/* KM_* for kmap_atomic */
#endif
};
/*
 * Initialize the abd_iter.
 */
static void
abd_iter_init(struct abd_iter *aiter, abd_t *abd, int km_type)
{
	abd_verify(abd);
	aiter->iter_abd = abd;
	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
	aiter->iter_pos = 0;
	if (abd_is_linear(abd)) {
		aiter->iter_offset = 0;
		aiter->iter_sg = NULL;
	} else {
		aiter->iter_offset = ABD_SCATTER(abd).abd_offset;
		aiter->iter_sg = ABD_SCATTER(abd).abd_sgl;
	}
#ifndef HAVE_1ARG_KMAP_ATOMIC
	ASSERT3U(km_type, <, NR_KM_TYPE);
	aiter->iter_km = km_type;
#endif
}

/*
 * Advance the iterator by a certain amount. Cannot be called when a chunk is
 * in use. This can be safely called when the aiter has already exhausted, in
 * which case this does nothing.
 */
static void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to advance to, so do nothing */
	if (aiter->iter_pos == aiter->iter_abd->abd_size)
		return;

	aiter->iter_pos += amount;
	aiter->iter_offset += amount;
	if (!abd_is_linear(aiter->iter_abd)) {
		while (aiter->iter_offset >= aiter->iter_sg->length) {
			aiter->iter_offset -= aiter->iter_sg->length;
			aiter->iter_sg = sg_next(aiter->iter_sg);
			if (aiter->iter_sg == NULL) {
				ASSERT0(aiter->iter_offset);
				break;
			}
		}
	}
}

/*
 * Map the current chunk into aiter. This can be safely called when the aiter
 * has already exhausted, in which case this does nothing.
 */
static void
abd_iter_map(struct abd_iter *aiter)
{
	void *paddr;
	size_t offset = 0;

	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to iterate over, so do nothing */
	if (aiter->iter_pos == aiter->iter_abd->abd_size)
		return;

	if (abd_is_linear(aiter->iter_abd)) {
		ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);
		offset = aiter->iter_offset;
		aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
		paddr = aiter->iter_abd->abd_u.abd_linear.abd_buf;
	} else {
		offset = aiter->iter_offset;
		aiter->iter_mapsize = MIN(aiter->iter_sg->length - offset,
		    aiter->iter_abd->abd_size - aiter->iter_pos);

		paddr = zfs_kmap_atomic(sg_page(aiter->iter_sg),
		    km_table[aiter->iter_km]);
	}

	aiter->iter_mapaddr = (char *)paddr + offset;
}

/*
 * Unmap the current chunk from aiter. This can be safely called when the aiter
 * has already exhausted, in which case this does nothing.
 */
static void
abd_iter_unmap(struct abd_iter *aiter)
{
	/* There's nothing left to unmap, so do nothing */
	if (aiter->iter_pos == aiter->iter_abd->abd_size)
		return;

	if (!abd_is_linear(aiter->iter_abd)) {
		/* LINTED E_FUNC_SET_NOT_USED */
		zfs_kunmap_atomic(aiter->iter_mapaddr - aiter->iter_offset,
		    km_table[aiter->iter_km]);
	}

	ASSERT3P(aiter->iter_mapaddr, !=, NULL);
	ASSERT3U(aiter->iter_mapsize, >, 0);

	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
}

int
abd_iterate_func(abd_t *abd, size_t off, size_t size,
    abd_iter_func_t *func, void *private)
{
	int ret = 0;
	struct abd_iter aiter;

	abd_verify(abd);
	ASSERT3U(off + size, <=, abd->abd_size);

	abd_iter_init(&aiter, abd, 0);
	abd_iter_advance(&aiter, off);

	while (size > 0) {
		abd_iter_map(&aiter);

		size_t len = MIN(aiter.iter_mapsize, size);
		ASSERT3U(len, >, 0);

		ret = func(aiter.iter_mapaddr, len, private);

		abd_iter_unmap(&aiter);

		if (ret != 0)
			break;

		size -= len;
		abd_iter_advance(&aiter, len);
	}

	return (ret);
}
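
/*
 * Example iterator callback (a sketch; count_zeroes_cb is a hypothetical
 * consumer function, not part of this file): count the zero bytes in an
 * ABD without ever materializing it as one flat buffer. The callback is
 * invoked once per mapped chunk and must return 0 to continue.
 *
 *	static int
 *	count_zeroes_cb(void *buf, size_t size, void *private)
 *	{
 *		uint64_t *cnt = private;
 *		for (size_t i = 0; i < size; i++)
 *			if (((char *)buf)[i] == 0)
 *				(*cnt)++;
 *		return (0);
 *	}
 *
 *	uint64_t zeroes = 0;
 *	VERIFY0(abd_iterate_func(abd, 0, abd->abd_size,
 *	    count_zeroes_cb, &zeroes));
 */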
struct buf_arg {
	void *arg_buf;
};

static int
abd_copy_to_buf_off_cb(void *buf, size_t size, void *private)
{
	struct buf_arg *ba_ptr = private;

	(void) memcpy(ba_ptr->arg_buf, buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (0);
}

/*
 * Copy abd to buf. (off is the offset in abd.)
 */
void
abd_copy_to_buf_off(void *buf, abd_t *abd, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { buf };

	(void) abd_iterate_func(abd, off, size, abd_copy_to_buf_off_cb,
	    &ba_ptr);
}

static int
abd_cmp_buf_off_cb(void *buf, size_t size, void *private)
{
	int ret;
	struct buf_arg *ba_ptr = private;

	ret = memcmp(buf, ba_ptr->arg_buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (ret);
}

/*
 * Compare the contents of abd to buf. (off is the offset in abd.)
 */
int
abd_cmp_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { (void *) buf };

	return (abd_iterate_func(abd, off, size, abd_cmp_buf_off_cb, &ba_ptr));
}

static int
abd_copy_from_buf_off_cb(void *buf, size_t size, void *private)
{
	struct buf_arg *ba_ptr = private;

	(void) memcpy(buf, ba_ptr->arg_buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (0);
}

/*
 * Copy from buf to abd. (off is the offset in abd.)
 */
void
abd_copy_from_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { (void *) buf };

	(void) abd_iterate_func(abd, off, size, abd_copy_from_buf_off_cb,
	    &ba_ptr);
}

/*ARGSUSED*/
static int
abd_zero_off_cb(void *buf, size_t size, void *private)
{
	(void) memset(buf, 0, size);
	return (0);
}

/*
 * Zero out the abd from a particular offset to the end.
 */
void
abd_zero_off(abd_t *abd, size_t off, size_t size)
{
	(void) abd_iterate_func(abd, off, size, abd_zero_off_cb, NULL);
}

/*
 * Iterate over two ABDs and call func incrementally on the two ABDs' data in
 * equal-sized chunks (passed to func as raw buffers). func could be called many
 * times during this iteration.
 */
int
abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff,
    size_t size, abd_iter_func2_t *func, void *private)
{
	int ret = 0;
	struct abd_iter daiter, saiter;

	abd_verify(dabd);
	abd_verify(sabd);

	ASSERT3U(doff + size, <=, dabd->abd_size);
	ASSERT3U(soff + size, <=, sabd->abd_size);

	abd_iter_init(&daiter, dabd, 0);
	abd_iter_init(&saiter, sabd, 1);
	abd_iter_advance(&daiter, doff);
	abd_iter_advance(&saiter, soff);

	while (size > 0) {
		abd_iter_map(&daiter);
		abd_iter_map(&saiter);

		size_t dlen = MIN(daiter.iter_mapsize, size);
		size_t slen = MIN(saiter.iter_mapsize, size);
		size_t len = MIN(dlen, slen);
		ASSERT(dlen > 0 || slen > 0);

		ret = func(daiter.iter_mapaddr, saiter.iter_mapaddr, len,
		    private);

		abd_iter_unmap(&saiter);
		abd_iter_unmap(&daiter);

		if (ret != 0)
			break;

		size -= len;
		abd_iter_advance(&daiter, len);
		abd_iter_advance(&saiter, len);
	}

	return (ret);
}

/*ARGSUSED*/
static int
abd_copy_off_cb(void *dbuf, void *sbuf, size_t size, void *private)
{
	(void) memcpy(dbuf, sbuf, size);
	return (0);
}

/*
 * Copy from sabd to dabd starting from soff and doff.
 */
void
abd_copy_off(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, size_t size)
{
	(void) abd_iterate_func2(dabd, sabd, doff, soff, size,
	    abd_copy_off_cb, NULL);
}

/*ARGSUSED*/
static int
abd_cmp_cb(void *bufa, void *bufb, size_t size, void *private)
{
	return (memcmp(bufa, bufb, size));
}

/*
 * Compares the contents of two ABDs.
 */
int
abd_cmp(abd_t *dabd, abd_t *sabd)
{
	ASSERT3U(dabd->abd_size, ==, sabd->abd_size);
	return (abd_iterate_func2(dabd, sabd, 0, 0, dabd->abd_size,
	    abd_cmp_cb, NULL));
}
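
/*
 * Example (sketch; orig_abd and rebuilt_abd are hypothetical names):
 * abd_cmp() makes it cheap to verify that two equal-sized ABDs hold
 * identical data, regardless of whether either one is linear or scattered:
 *
 *	if (abd_cmp(orig_abd, rebuilt_abd) != 0)
 *		... data mismatch, handle it ...
 */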
1329 * Iterate over code ABDs and a data ABD and call @func_raidz_gen.
1331 * @cabds parity ABDs, must have equal size
1332 * @dabd data ABD. Can be NULL (in this case @dsize = 0)
1333 * @func_raidz_gen should be implemented so that its behaviour
1334 * is the same when taking linear and when taking scatter
1337 abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd,
1338 ssize_t csize, ssize_t dsize, const unsigned parity,
1339 void (*func_raidz_gen)(void **, const void *, size_t, size_t))
1343 struct abd_iter caiters[3];
1344 struct abd_iter daiter = {0};
1346 unsigned long flags;
1348 ASSERT3U(parity, <=, 3);
1350 for (i = 0; i < parity; i++)
1351 abd_iter_init(&caiters[i], cabds[i], i);
1354 abd_iter_init(&daiter, dabd, i);
1356 ASSERT3S(dsize, >=, 0);
1358 local_irq_save(flags);
1362 if (dabd && dsize > 0)
1363 abd_iter_map(&daiter);
1365 for (i = 0; i < parity; i++) {
1366 abd_iter_map(&caiters[i]);
1367 caddrs[i] = caiters[i].iter_mapaddr;
1372 len = MIN(caiters[2].iter_mapsize, len);
1374 len = MIN(caiters[1].iter_mapsize, len);
1376 len = MIN(caiters[0].iter_mapsize, len);
1379 /* must be progressive */
1380 ASSERT3S(len, >, 0);
1382 if (dabd && dsize > 0) {
1383 /* this needs precise iter.length */
1384 len = MIN(daiter.iter_mapsize, len);
1389 /* must be progressive */
1390 ASSERT3S(len, >, 0);
1392 * The iterated function likely will not do well if each
1393 * segment except the last one is not multiple of 512 (raidz).
1395 ASSERT3U(((uint64_t)len & 511ULL), ==, 0);
1397 func_raidz_gen(caddrs, daiter.iter_mapaddr, len, dlen);
1399 for (i = parity-1; i >= 0; i--) {
1400 abd_iter_unmap(&caiters[i]);
1401 abd_iter_advance(&caiters[i], len);
1404 if (dabd && dsize > 0) {
1405 abd_iter_unmap(&daiter);
1406 abd_iter_advance(&daiter, dlen);
1412 ASSERT3S(dsize, >=, 0);
1413 ASSERT3S(csize, >=, 0);
1415 local_irq_restore(flags);
1419 * Iterate over code ABDs and data reconstruction target ABDs and call
1420 * @func_raidz_rec. Function maps at most 6 pages atomically.
1422 * @cabds parity ABDs, must have equal size
1423 * @tabds rec target ABDs, at most 3
1424 * @tsize size of data target columns
1425 * @func_raidz_rec expects syndrome data in target columns. Function
1426 * reconstructs data and overwrites target columns.
1429 abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds,
1430 ssize_t tsize, const unsigned parity,
1431 void (*func_raidz_rec)(void **t, const size_t tsize, void **c,
1432 const unsigned *mul),
1433 const unsigned *mul)
1437 struct abd_iter citers[3];
1438 struct abd_iter xiters[3];
1439 void *caddrs[3], *xaddrs[3];
1440 unsigned long flags;
1442 ASSERT3U(parity, <=, 3);
1444 for (i = 0; i < parity; i++) {
1445 abd_iter_init(&citers[i], cabds[i], 2*i);
1446 abd_iter_init(&xiters[i], tabds[i], 2*i+1);
1449 local_irq_save(flags);
1452 for (i = 0; i < parity; i++) {
1453 abd_iter_map(&citers[i]);
1454 abd_iter_map(&xiters[i]);
1455 caddrs[i] = citers[i].iter_mapaddr;
1456 xaddrs[i] = xiters[i].iter_mapaddr;
1462 len = MIN(xiters[2].iter_mapsize, len);
1463 len = MIN(citers[2].iter_mapsize, len);
1465 len = MIN(xiters[1].iter_mapsize, len);
1466 len = MIN(citers[1].iter_mapsize, len);
1468 len = MIN(xiters[0].iter_mapsize, len);
1469 len = MIN(citers[0].iter_mapsize, len);
1471 /* must be progressive */
1472 ASSERT3S(len, >, 0);
1474 * The iterated function likely will not do well if each
1475 * segment except the last one is not multiple of 512 (raidz).
1477 ASSERT3U(((uint64_t)len & 511ULL), ==, 0);
1479 func_raidz_rec(xaddrs, len, caddrs, mul);
1481 for (i = parity-1; i >= 0; i--) {
1482 abd_iter_unmap(&xiters[i]);
1483 abd_iter_unmap(&citers[i]);
1484 abd_iter_advance(&xiters[i], len);
1485 abd_iter_advance(&citers[i], len);
1489 ASSERT3S(tsize, >=, 0);
1491 local_irq_restore(flags);
#if defined(_KERNEL)
/*
 * bio_nr_pages for ABD.
 * @off is the offset in @abd
 */
unsigned long
abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
{
	unsigned long pos;

	if (abd_is_linear(abd))
		pos = (unsigned long)abd_to_buf(abd) + off;
	else
		pos = abd->abd_u.abd_scatter.abd_offset + off;

	return ((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
	    (pos >> PAGE_SHIFT);
}

/*
 * bio_map for scatter ABD.
 * @off is the offset in @abd
 * Remaining IO size is returned
 */
unsigned int
abd_scatter_bio_map_off(struct bio *bio, abd_t *abd,
    unsigned int io_size, size_t off)
{
	int i;
	struct abd_iter aiter;

	ASSERT(!abd_is_linear(abd));
	ASSERT3U(io_size, <=, abd->abd_size - off);

	abd_iter_init(&aiter, abd, 0);
	abd_iter_advance(&aiter, off);

	for (i = 0; i < bio->bi_max_vecs; i++) {
		struct page *pg;
		size_t len, sgoff, pgoff;
		struct scatterlist *sg;

		if (io_size <= 0)
			break;

		sg = aiter.iter_sg;
		sgoff = aiter.iter_offset;
		pgoff = sgoff & (PAGESIZE - 1);
		len = MIN(io_size, PAGESIZE - pgoff);
		ASSERT(len > 0);

		pg = nth_page(sg_page(sg), sgoff >> PAGE_SHIFT);
		if (bio_add_page(bio, pg, len, pgoff) != len)
			break;

		io_size -= len;
		abd_iter_advance(&aiter, len);
	}

	return (io_size);
}

/* Tunable Parameters */
module_param(zfs_abd_scatter_enabled, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_enabled,
	"Toggle whether ABD allocations must be linear.");
module_param(zfs_abd_scatter_min_size, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_min_size,
	"Minimum size of scatter allocations.");
/* CSTYLED */
module_param(zfs_abd_scatter_max_order, uint, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_max_order,
	"Maximum order allocation used for a scatter ABD.");

#endif /* _KERNEL */