4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2018, Joyent, Inc.
24 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
25 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
26 * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
30 * DVA-based Adjustable Replacement Cache
32 * While much of the theory of operation used here is
33 * based on the self-tuning, low overhead replacement cache
34 * presented by Megiddo and Modha at FAST 2003, there are some
35 * significant differences:
37 * 1. The Megiddo and Modha model assumes any page is evictable.
38 * Pages in its cache cannot be "locked" into memory. This makes
39 * the eviction algorithm simple: evict the last page in the list.
40 * This also makes the performance characteristics easy to reason
41 * about. Our cache is not so simple. At any given moment, some
42 * subset of the blocks in the cache are un-evictable because we
43 * have handed out a reference to them. Blocks are only evictable
44 * when there are no external references active. This makes
45 * eviction far more problematic: we choose to evict the evictable
46 * blocks that are the "lowest" in the list.
48 * There are times when it is not possible to evict the requested
49 * space. In these circumstances we are unable to adjust the cache
50 * size. To prevent the cache from growing unbounded at these times, we
51 * implement a "cache throttle" that slows the flow of new data
52 * into the cache until we can make space available.
54 * 2. The Megiddo and Modha model assumes a fixed cache size.
55 * Pages are evicted when the cache is full and there is a cache
56 * miss. Our model has a variable sized cache. It grows with
57 * high use, but also tries to react to memory pressure from the
58 * operating system: decreasing its size when system memory is tight.
61 * 3. The Megiddo and Modha model assumes a fixed page size. All
62 * elements of the cache are therefore exactly the same size. So
63 * when adjusting the cache size following a cache miss, it's simply
64 * a matter of choosing a single page to evict. In our model, we
65 * have variable sized cache blocks (ranging from 512 bytes to
66 * 128K bytes). We therefore choose a set of blocks to evict to make
67 * space for a cache miss that approximates as closely as possible
68 * the space used by the new block.
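 *
 * A minimal sketch of that selection loop (illustrative only; the real
 * eviction code must also handle locking, multilist sublists, and
 * skipped headers; pick_lowest_evictable() and evict_hdr() are
 * hypothetical helpers, not functions in this file):
 *
 *	uint64_t evicted = 0;
 *	while (evicted < bytes_needed) {
 *		arc_buf_hdr_t *hdr = pick_lowest_evictable();
 *		if (hdr == NULL)
 *			break;
 *		evicted += evict_hdr(hdr);
 *	}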
70 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
71 * by N. Megiddo & D. Modha, FAST 2003
77 * A new reference to a cache buffer can be obtained in two
78 * ways: 1) via a hash table lookup using the DVA as a key,
79 * or 2) via one of the ARC lists. The arc_read() interface
80 * uses method 1, while the internal ARC algorithms for
81 * adjusting the cache use method 2. We therefore provide two
82 * types of locks: 1) the hash table lock array, and 2) the ARC list locks.
85 * Buffers do not have their own mutexes; rather, they rely on the
86 * hash table mutexes for the bulk of their protection (i.e. most
87 * fields in the arc_buf_hdr_t are protected by these mutexes).
89 * buf_hash_find() returns the appropriate mutex (held) when it
90 * locates the requested buffer in the hash table. It returns
91 * NULL for the mutex if the buffer was not in the table.
93 * buf_hash_remove() expects the appropriate hash mutex to be
94 * already held before it is invoked.
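 *
 * For example, a lookup via method 1 follows this pattern (a sketch,
 * with error handling omitted; buf_hash_find() returns with the hash
 * lock held on success):
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr = buf_hash_find(spa, bp, &hash_lock);
 *	if (hdr != NULL) {
 *		... inspect or update hdr ...
 *		mutex_exit(hash_lock);
 *	}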
96 * Each ARC state also has a mutex which is used to protect the
97 * buffer list associated with the state. When attempting to
98 * obtain a hash table lock while holding an ARC list lock you
99 * must use mutex_tryenter() to avoid deadlock. Also note that
100 * the active state mutex must be held before the ghost state mutex.
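 *
 * A sketch of the required pattern when an ARC list lock is already
 * held:
 *
 *	if (mutex_tryenter(hash_lock)) {
 *		... safe: we never blocked holding the list lock ...
 *		mutex_exit(hash_lock);
 *	} else {
 *		... back off and retry rather than risk a deadlock ...
 *	}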
102 * It is also possible to register a callback which is run when the
103 * arc_meta_limit is reached and no buffers can be safely evicted. In
104 * this case the arc user should drop a reference on some arc buffers so
105 * they can be reclaimed and the arc_meta_limit honored. For example,
106 * when using the ZPL each dentry holds a reference on a znode. These
107 * dentries must be pruned before the arc buffer holding the znode can be safely evicted.
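 *
 * A hedged sketch of such a registration, assuming a ZFS-on-Linux style
 * arc_add_prune_callback() interface (my_prune_cb is a hypothetical
 * consumer callback, not code from this file):
 *
 *	static void
 *	my_prune_cb(int64_t nr_to_scan, void *arg)
 *	{
 *		... drop up to nr_to_scan cached references ...
 *	}
 *
 *	arc_prune_t *p = arc_add_prune_callback(my_prune_cb, NULL);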
110 * Note that the majority of the performance stats are manipulated
111 * with atomic operations.
113 * The L2ARC uses the l2ad_mtx on each vdev for the following:
115 * - L2ARC buflist creation
116 * - L2ARC buflist eviction
117 * - L2ARC write completion, which walks L2ARC buflists
118 * - ARC header destruction, as it removes from L2ARC buflists
119 * - ARC header release, as it removes from L2ARC buflists
125 * Every block that is in the ARC is tracked by an arc_buf_hdr_t structure.
126 * This structure can point either to a block that is still in the cache or to
127 * one that is only accessible in an L2 ARC device, or it can provide
128 * information about a block that was recently evicted. If a block is
129 * only accessible in the L2ARC, then the arc_buf_hdr_t only has enough
130 * information to retrieve it from the L2ARC device. This information is
131 * stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. The data
132 * of a block in this state cannot be accessed directly.
134 * Blocks that are actively being referenced or have not been evicted
135 * are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within
136 * the arc_buf_hdr_t that will point to the data block in memory. A block can
137 * only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC
138 * caches data in two ways -- in a list of ARC buffers (arc_buf_t) and
139 * also in the arc_buf_hdr_t's private physical data block pointer (b_pabd).
141 * The L1ARC's data pointer may or may not be uncompressed. The ARC has the
142 * ability to store the physical data (b_pabd) associated with the DVA of the
143 * arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block,
144 * it will match its on-disk compression characteristics. This behavior can be
145 * disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the
146 * compressed ARC functionality is disabled, the b_pabd will point to an
147 * uncompressed version of the on-disk data.
149 * Data in the L1ARC is not accessed by consumers of the ARC directly. Each
150 * arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it.
151 * Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC
152 * consumer. The ARC will provide references to this data and will keep it
153 * cached until it is no longer in use. The ARC caches only the L1ARC's physical
154 * data block and will evict any arc_buf_t that is no longer referenced. The
155 * amount of memory consumed by the arc_buf_ts' data buffers can be seen via the
156 * "overhead_size" kstat.
158 * Depending on the consumer, an arc_buf_t can be requested in uncompressed or
159 * compressed form. The typical case is that consumers will want uncompressed
160 * data, and when that happens a new data buffer is allocated where the data is
161 * decompressed for them to use. Currently the only consumer who wants
162 * compressed arc_buf_t's is "zfs send", when it streams data exactly as it
163 * exists on disk. When this happens, the arc_buf_t's data buffer is shared
164 * with the arc_buf_hdr_t.
166 * Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's. The
167 * first one is owned by a compressed send consumer (and therefore references
168 * the same compressed data buffer as the arc_buf_hdr_t) and the second could be
169 * used by any other consumer (and has its own uncompressed copy of the data
184 * (Diagram: the arc_buf_hdr_t's b_pabd points at the compressed on-disk
185 * bytes; the first arc_buf_t in the b_buf list (b_comp = T) shares that
186 * compressed buffer through b_data, while the second arc_buf_t
187 * (b_comp = F) points at its own uncompressed copy of the data.)
199 * When a consumer reads a block, the ARC must first look to see if the
200 * arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new
201 * arc_buf_t and either copies uncompressed data into a new data buffer from an
202 * existing uncompressed arc_buf_t, decompresses the hdr's b_pabd buffer into a
203 * new data buffer, or shares the hdr's b_pabd buffer, depending on whether the
204 * hdr is compressed and the desired compression characteristics of the
205 * arc_buf_t consumer. If the arc_buf_t ends up sharing data with the
206 * arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be
207 * the last buffer in the hdr's b_buf list; however, a shared compressed buf can
208 * be anywhere in the hdr's list.
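 *
 * That invariant can be stated with the buffer macros defined later in
 * this file (an illustrative assertion, not a quote of the real code):
 *
 *	ASSERT(!ARC_BUF_SHARED(buf) || ARC_BUF_COMPRESSED(buf) ||
 *	    ARC_BUF_LAST(buf));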
210 * The diagram below shows an example of an uncompressed ARC hdr that is
211 * sharing its data with an arc_buf_t (note that the shared uncompressed buf is
212 * the last element in the buf list):
224 * (Diagram: an uncompressed arc_buf_hdr_t whose b_pabd is shared with an
225 * arc_buf_t; the shared buffer is the last element of the hdr's b_buf
226 * list, and its b_data points at the same memory as the hdr's b_pabd.)
241 * Writing to the ARC requires that the ARC first discard the hdr's b_pabd
242 * since the physical block is about to be rewritten. The new data contents
243 * will be contained in the arc_buf_t. As the I/O pipeline performs the write,
244 * it may compress the data before writing it to disk. The ARC will be called
245 * with the transformed data and will bcopy the transformed on-disk block into
246 * a newly allocated b_pabd. Writes are always done into buffers which have
247 * either been loaned (and hence are new and don't have other readers) or
248 * buffers which have been released (and hence have their own hdr, if there
249 * were originally other readers of the buf's original hdr). This ensures that
250 * the ARC only needs to update a single buf and its hdr after a write occurs.
252 * When the L2ARC is in use, it will also take advantage of the b_pabd. The
253 * L2ARC will always write the contents of b_pabd to the L2ARC. This means
254 * that when compressed ARC is enabled, the L2ARC blocks are identical
255 * to the on-disk block in the main data pool. This provides a significant
256 * advantage since the ARC can leverage the bp's checksum when reading from the
257 * L2ARC to determine if the contents are valid. However, if the compressed
258 * ARC is disabled, then the L2ARC's block must be transformed to look
259 * like the physical block in the main data pool before comparing the
260 * checksum and determining its validity.
265 #include <sys/spa_impl.h>
266 #include <sys/zio_compress.h>
267 #include <sys/zio_checksum.h>
268 #include <sys/zfs_context.h>
270 #include <sys/refcount.h>
271 #include <sys/vdev.h>
272 #include <sys/vdev_impl.h>
273 #include <sys/dsl_pool.h>
274 #include <sys/zio_checksum.h>
275 #include <sys/multilist.h>
278 #include <sys/dnlc.h>
279 #include <sys/racct.h>
281 #include <sys/callb.h>
282 #include <sys/kstat.h>
283 #include <sys/trim_map.h>
284 #include <sys/zthr.h>
285 #include <zfs_fletcher.h>
287 #include <sys/aggsum.h>
288 #include <sys/cityhash.h>
290 #include <machine/vmparam.h>
294 /* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
295 boolean_t arc_watch = B_FALSE;
301 * This thread's job is to keep enough free memory in the system, by
302 * calling arc_kmem_reap_soon() plus arc_shrink(), which improves
303 * arc_available_memory().
305 static zthr_t *arc_reap_zthr;
308 * This thread's job is to keep arc_size under arc_c, by calling
309 * arc_adjust(), which improves arc_is_overflowing().
311 static zthr_t *arc_adjust_zthr;
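/*
 * An illustrative sketch of how such a zthr is created; the check and
 * work callback names here are assumptions, not necessarily the ones
 * used later in this file:
 *
 *	arc_adjust_zthr = zthr_create(arc_adjust_cb_check,
 *	    arc_adjust_cb, NULL);
 */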
313 static kmutex_t arc_adjust_lock;
314 static kcondvar_t arc_adjust_waiters_cv;
315 static boolean_t arc_adjust_needed = B_FALSE;
317 static kmutex_t arc_dnlc_evicts_lock;
318 static kcondvar_t arc_dnlc_evicts_cv;
319 static boolean_t arc_dnlc_evicts_thread_exit;
321 uint_t arc_reduce_dnlc_percent = 3;
324 * The number of headers to evict in arc_evict_state_impl() before
325 * dropping the sublist lock and evicting from another sublist. A lower
326 * value means we're more likely to evict the "correct" header (i.e. the
327 * oldest header in the arc state), but comes with higher overhead
328 * (i.e. more invocations of arc_evict_state_impl()).
330 int zfs_arc_evict_batch_limit = 10;
332 /* number of seconds before growing cache again */
333 int arc_grow_retry = 60;
336 * Minimum time between calls to arc_kmem_reap_soon(). Note that this will
337 * be converted to ticks, so with the default hz=100, a setting of 15 ms
338 * will actually wait 2 ticks, or 20ms.
340 int arc_kmem_cache_reap_retry_ms = 1000;
342 /* shift of arc_c for calculating overflow limit in arc_get_data_impl */
343 int zfs_arc_overflow_shift = 8;
345 /* shift of arc_c for calculating both min and max arc_p */
346 int arc_p_min_shift = 4;
348 /* log2(fraction of arc to reclaim) */
349 int arc_shrink_shift = 7;
352 * log2(fraction of ARC which must be free to allow growing).
353 * I.e. If there is less than arc_c >> arc_no_grow_shift free memory,
354 * when reading a new block into the ARC, we will evict an equal-sized block from the ARC.
357 * This must be less than arc_shrink_shift, so that when we shrink the ARC,
358 * we will still not allow it to grow.
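 *
 * For example, with arc_c = 4GB and the default arc_no_grow_shift = 5,
 * the ARC may grow only while at least 128MB (4GB >> 5) of memory
 * remains free.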
360 int arc_no_grow_shift = 5;
364 * minimum lifespan of a prefetch block, in milliseconds
365 * (initialized in arc_init())
367 static int zfs_arc_min_prefetch_ms = 1;
368 static int zfs_arc_min_prescient_prefetch_ms = 6;
371 * If this percent of memory is free, don't throttle.
373 int arc_lotsfree_percent = 10;
375 static boolean_t arc_initialized;
376 extern boolean_t zfs_prefetch_disable;
379 * The arc has filled available memory and has now warmed up.
381 static boolean_t arc_warm;
384 * log2 fraction of the zio arena to keep free.
386 int arc_zio_arena_free_shift = 2;
389 * These tunables are for performance analysis.
391 uint64_t zfs_arc_max;
392 uint64_t zfs_arc_min;
393 uint64_t zfs_arc_meta_limit = 0;
394 uint64_t zfs_arc_meta_min = 0;
395 uint64_t zfs_arc_dnode_limit = 0;
396 uint64_t zfs_arc_dnode_reduce_percent = 10;
397 int zfs_arc_grow_retry = 0;
398 int zfs_arc_shrink_shift = 0;
399 int zfs_arc_no_grow_shift = 0;
400 int zfs_arc_p_min_shift = 0;
401 uint64_t zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
402 u_int zfs_arc_free_target = 0;
404 /* Absolute min for arc min / max is 16MB. */
405 static uint64_t arc_abs_min = 16 << 20;
408 * ARC dirty data constraints for arc_tempreserve_space() throttle
410 uint_t zfs_arc_dirty_limit_percent = 50; /* total dirty data limit */
411 uint_t zfs_arc_anon_limit_percent = 25; /* anon block dirty limit */
412 uint_t zfs_arc_pool_dirty_percent = 20; /* each pool's anon allowance */
414 boolean_t zfs_compressed_arc_enabled = B_TRUE;
416 static int sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS);
417 static int sysctl_vfs_zfs_arc_meta_limit(SYSCTL_HANDLER_ARGS);
418 static int sysctl_vfs_zfs_arc_max(SYSCTL_HANDLER_ARGS);
419 static int sysctl_vfs_zfs_arc_min(SYSCTL_HANDLER_ARGS);
420 static int sysctl_vfs_zfs_arc_no_grow_shift(SYSCTL_HANDLER_ARGS);
422 #if defined(__FreeBSD__) && defined(_KERNEL)
424 arc_free_target_init(void *unused __unused)
427 zfs_arc_free_target = vm_cnt.v_free_target;
429 SYSINIT(arc_free_target_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY,
430 arc_free_target_init, NULL);
432 TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
433 TUNABLE_QUAD("vfs.zfs.arc_meta_min", &zfs_arc_meta_min);
434 TUNABLE_INT("vfs.zfs.arc_shrink_shift", &zfs_arc_shrink_shift);
435 TUNABLE_INT("vfs.zfs.arc_grow_retry", &zfs_arc_grow_retry);
436 TUNABLE_INT("vfs.zfs.arc_no_grow_shift", &zfs_arc_no_grow_shift);
437 SYSCTL_DECL(_vfs_zfs);
438 SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_max, CTLTYPE_U64 | CTLFLAG_RWTUN,
439 0, sizeof(uint64_t), sysctl_vfs_zfs_arc_max, "QU", "Maximum ARC size");
440 SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_min, CTLTYPE_U64 | CTLFLAG_RWTUN,
441 0, sizeof(uint64_t), sysctl_vfs_zfs_arc_min, "QU", "Minimum ARC size");
442 SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_no_grow_shift, CTLTYPE_U32 | CTLFLAG_RWTUN,
443 0, sizeof(uint32_t), sysctl_vfs_zfs_arc_no_grow_shift, "U",
444 "log2(fraction of ARC which must be free to allow growing)");
445 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_average_blocksize, CTLFLAG_RDTUN,
446 &zfs_arc_average_blocksize, 0,
447 "ARC average blocksize");
448 SYSCTL_INT(_vfs_zfs, OID_AUTO, arc_shrink_shift, CTLFLAG_RW,
449 &arc_shrink_shift, 0,
450 "log2(fraction of arc to reclaim)");
451 SYSCTL_INT(_vfs_zfs, OID_AUTO, arc_grow_retry, CTLFLAG_RW,
453 "Wait in seconds before considering growing ARC");
454 SYSCTL_INT(_vfs_zfs, OID_AUTO, compressed_arc_enabled, CTLFLAG_RDTUN,
455 &zfs_compressed_arc_enabled, 0,
456 "Enable compressed ARC");
457 SYSCTL_INT(_vfs_zfs, OID_AUTO, arc_kmem_cache_reap_retry_ms, CTLFLAG_RWTUN,
458 &arc_kmem_cache_reap_retry_ms, 0,
459 "Interval between ARC kmem_cache reapings");
462 * We don't have a tunable for arc_free_target due to the dependency on
463 * pagedaemon initialisation.
465 SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
466 CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(u_int),
467 sysctl_vfs_zfs_arc_free_target, "IU",
468 "Desired number of free pages below which ARC triggers reclaim");
471 sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS)
476 val = zfs_arc_free_target;
477 err = sysctl_handle_int(oidp, &val, 0, req);
478 if (err != 0 || req->newptr == NULL)
483 if (val > vm_cnt.v_page_count)
486 zfs_arc_free_target = val;
492 * This must be declared here, before the definition of the corresponding
493 * kstat macro, which uses the same names and would otherwise confuse the compiler.
495 SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_meta_limit,
496 CTLTYPE_U64 | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(uint64_t),
497 sysctl_vfs_zfs_arc_meta_limit, "QU",
498 "ARC metadata limit");
502 * Note that buffers can be in one of 6 states:
503 * ARC_anon - anonymous (discussed below)
504 * ARC_mru - recently used, currently cached
505 * ARC_mru_ghost - recently used, no longer in cache
506 * ARC_mfu - frequently used, currently cached
507 * ARC_mfu_ghost - frequently used, no longer in cache
508 * ARC_l2c_only - exists in L2ARC but not other states
509 * When there are no active references to a buffer, it is
510 * linked onto a list in one of these arc states. These are
511 * the only buffers that can be evicted or deleted. Within each
512 * state there are multiple lists, one for meta-data and one for
513 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
514 * etc.) is tracked separately so that it can be managed more
515 * explicitly: favored over data, limited explicitly.
517 * Anonymous buffers are buffers that are not associated with
518 * a DVA. These are buffers that hold dirty block copies
519 * before they are written to stable storage. By definition,
520 * they are "ref'd" and are considered part of arc_mru
521 * that cannot be freed. Generally, they will acquire a DVA
522 * as they are written and migrate onto the arc_mru list.
524 * The ARC_l2c_only state is for buffers that are in the second
525 * level ARC but no longer in any of the ARC_m* lists. The second
526 * level ARC itself may also contain buffers that are in any of
527 * the ARC_m* states - meaning that a buffer can exist in two
528 * places. The reason for the ARC_l2c_only state is to keep the
529 * buffer header in the hash table, so that reads that hit the
530 * second level ARC benefit from these fast lookups.
533 typedef struct arc_state {
535 * list of evictable buffers
537 multilist_t *arcs_list[ARC_BUFC_NUMTYPES];
539 * total amount of evictable data in this state
541 refcount_t arcs_esize[ARC_BUFC_NUMTYPES];
543 * total amount of data in this state; this includes: evictable,
544 * non-evictable, ARC_BUFC_DATA, and ARC_BUFC_METADATA.
546 refcount_t arcs_size;
548 * supports the "dbufs" kstat
550 arc_state_type_t arcs_state;
554 * Percentage of ARC meta buffers that dnodes are allowed to consume.
556 int zfs_arc_meta_prune = 10000;
557 unsigned long zfs_arc_dnode_limit_percent = 10;
558 int zfs_arc_meta_strategy = ARC_STRATEGY_META_ONLY;
559 int zfs_arc_meta_adjust_restarts = 4096;
561 SYSCTL_INT(_vfs_zfs, OID_AUTO, arc_meta_strategy, CTLFLAG_RWTUN,
562 &zfs_arc_meta_strategy, 0,
563 "ARC metadata reclamation strategy "
564 "(0 = metadata only, 1 = balance data and metadata)");
567 static arc_state_t ARC_anon;
568 static arc_state_t ARC_mru;
569 static arc_state_t ARC_mru_ghost;
570 static arc_state_t ARC_mfu;
571 static arc_state_t ARC_mfu_ghost;
572 static arc_state_t ARC_l2c_only;
574 typedef struct arc_stats {
575 kstat_named_t arcstat_hits;
576 kstat_named_t arcstat_misses;
577 kstat_named_t arcstat_demand_data_hits;
578 kstat_named_t arcstat_demand_data_misses;
579 kstat_named_t arcstat_demand_metadata_hits;
580 kstat_named_t arcstat_demand_metadata_misses;
581 kstat_named_t arcstat_prefetch_data_hits;
582 kstat_named_t arcstat_prefetch_data_misses;
583 kstat_named_t arcstat_prefetch_metadata_hits;
584 kstat_named_t arcstat_prefetch_metadata_misses;
585 kstat_named_t arcstat_mru_hits;
586 kstat_named_t arcstat_mru_ghost_hits;
587 kstat_named_t arcstat_mfu_hits;
588 kstat_named_t arcstat_mfu_ghost_hits;
589 kstat_named_t arcstat_allocated;
590 kstat_named_t arcstat_deleted;
592 * Number of buffers that could not be evicted because the hash lock
593 * was held by another thread. The lock may not necessarily be held
594 * by something using the same buffer, since hash locks are shared
595 * by multiple buffers.
597 kstat_named_t arcstat_mutex_miss;
599 * Number of buffers skipped when updating the access state due to the
600 * header having already been released after acquiring the hash lock.
602 kstat_named_t arcstat_access_skip;
604 * Number of buffers skipped because they have I/O in progress, are
605 * indirect prefetch buffers that have not lived long enough, or are
606 * not from the spa we're trying to evict from.
608 kstat_named_t arcstat_evict_skip;
610 * Number of times arc_evict_state() was unable to evict enough
611 * buffers to reach its target amount.
613 kstat_named_t arcstat_evict_not_enough;
614 kstat_named_t arcstat_evict_l2_cached;
615 kstat_named_t arcstat_evict_l2_eligible;
616 kstat_named_t arcstat_evict_l2_ineligible;
617 kstat_named_t arcstat_evict_l2_skip;
618 kstat_named_t arcstat_hash_elements;
619 kstat_named_t arcstat_hash_elements_max;
620 kstat_named_t arcstat_hash_collisions;
621 kstat_named_t arcstat_hash_chains;
622 kstat_named_t arcstat_hash_chain_max;
623 kstat_named_t arcstat_p;
624 kstat_named_t arcstat_c;
625 kstat_named_t arcstat_c_min;
626 kstat_named_t arcstat_c_max;
627 /* Not updated directly; only synced in arc_kstat_update. */
628 kstat_named_t arcstat_size;
630 * Number of compressed bytes stored in the arc_buf_hdr_t's b_pabd.
631 * Note that the compressed bytes may match the uncompressed bytes
632 * if the block is either not compressed or compressed arc is disabled.
634 kstat_named_t arcstat_compressed_size;
636 * Uncompressed size of the data stored in b_pabd. If compressed
637 * arc is disabled then this value will be identical to the stat
640 kstat_named_t arcstat_uncompressed_size;
642 * Number of bytes stored in all the arc_buf_t's. This is classified
643 * as "overhead" since this data is typically short-lived and will
644 * be evicted from the arc when it becomes unreferenced unless the
645 * zfs_keep_uncompressed_metadata or zfs_keep_uncompressed_level
646 * values have been set (see comment in dbuf.c for more information).
648 kstat_named_t arcstat_overhead_size;
650 * Number of bytes consumed by internal ARC structures necessary
651 * for tracking purposes; these structures are not actually
652 * backed by ARC buffers. This includes arc_buf_hdr_t structures
653 * (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only
654 * caches), and arc_buf_t structures (allocated via the arc_buf_t cache).
656 * Not updated directly; only synced in arc_kstat_update.
658 kstat_named_t arcstat_hdr_size;
660 * Number of bytes consumed by ARC buffers of type equal to
661 * ARC_BUFC_DATA. This is generally consumed by buffers backing
662 * on disk user data (e.g. plain file contents).
663 * Not updated directly; only synced in arc_kstat_update.
665 kstat_named_t arcstat_data_size;
667 * Number of bytes consumed by ARC buffers of type equal to
668 * ARC_BUFC_METADATA. This is generally consumed by buffers
669 * backing on disk data that is used for internal ZFS
670 * structures (e.g. ZAP, dnode, indirect blocks, etc).
671 * Not updated directly; only synced in arc_kstat_update.
673 kstat_named_t arcstat_metadata_size;
675 * Number of bytes consumed by dmu_buf_impl_t objects.
677 kstat_named_t arcstat_dbuf_size;
679 * Number of bytes consumed by dnode_t objects.
681 kstat_named_t arcstat_dnode_size;
683 * Number of bytes consumed by bonus buffers.
685 kstat_named_t arcstat_bonus_size;
686 #if defined(__FreeBSD__) && defined(COMPAT_FREEBSD11)
688 * Sum of the previous three counters, provided for compatibility.
690 kstat_named_t arcstat_other_size;
693 * Total number of bytes consumed by ARC buffers residing in the
694 * arc_anon state. This includes *all* buffers in the arc_anon
695 * state; e.g. data, metadata, evictable, and unevictable buffers
696 * are all included in this value.
697 * Not updated directly; only synced in arc_kstat_update.
699 kstat_named_t arcstat_anon_size;
701 * Number of bytes consumed by ARC buffers that meet the
702 * following criteria: backing buffers of type ARC_BUFC_DATA,
703 * residing in the arc_anon state, and are eligible for eviction
704 * (e.g. have no outstanding holds on the buffer).
705 * Not updated directly; only synced in arc_kstat_update.
707 kstat_named_t arcstat_anon_evictable_data;
709 * Number of bytes consumed by ARC buffers that meet the
710 * following criteria: backing buffers of type ARC_BUFC_METADATA,
711 * residing in the arc_anon state, and are eligible for eviction
712 * (e.g. have no outstanding holds on the buffer).
713 * Not updated directly; only synced in arc_kstat_update.
715 kstat_named_t arcstat_anon_evictable_metadata;
717 * Total number of bytes consumed by ARC buffers residing in the
718 * arc_mru state. This includes *all* buffers in the arc_mru
719 * state; e.g. data, metadata, evictable, and unevictable buffers
720 * are all included in this value.
721 * Not updated directly; only synced in arc_kstat_update.
723 kstat_named_t arcstat_mru_size;
725 * Number of bytes consumed by ARC buffers that meet the
726 * following criteria: backing buffers of type ARC_BUFC_DATA,
727 * residing in the arc_mru state, and are eligible for eviction
728 * (e.g. have no outstanding holds on the buffer).
729 * Not updated directly; only synced in arc_kstat_update.
731 kstat_named_t arcstat_mru_evictable_data;
733 * Number of bytes consumed by ARC buffers that meet the
734 * following criteria: backing buffers of type ARC_BUFC_METADATA,
735 * residing in the arc_mru state, and are eligible for eviction
736 * (e.g. have no outstanding holds on the buffer).
737 * Not updated directly; only synced in arc_kstat_update.
739 kstat_named_t arcstat_mru_evictable_metadata;
741 * Total number of bytes that *would have been* consumed by ARC
742 * buffers in the arc_mru_ghost state. The key thing to note
743 * here, is the fact that this size doesn't actually indicate
744 * RAM consumption. The ghost lists only consist of headers and
745 * don't actually have ARC buffers linked off of these headers.
746 * Thus, *if* the headers had associated ARC buffers, these
747 * buffers *would have* consumed this number of bytes.
748 * Not updated directly; only synced in arc_kstat_update.
750 kstat_named_t arcstat_mru_ghost_size;
752 * Number of bytes that *would have been* consumed by ARC
753 * buffers that are eligible for eviction, of type
754 * ARC_BUFC_DATA, and linked off the arc_mru_ghost state.
755 * Not updated directly; only synced in arc_kstat_update.
757 kstat_named_t arcstat_mru_ghost_evictable_data;
759 * Number of bytes that *would have been* consumed by ARC
760 * buffers that are eligible for eviction, of type
761 * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
762 * Not updated directly; only synced in arc_kstat_update.
764 kstat_named_t arcstat_mru_ghost_evictable_metadata;
766 * Total number of bytes consumed by ARC buffers residing in the
767 * arc_mfu state. This includes *all* buffers in the arc_mfu
768 * state; e.g. data, metadata, evictable, and unevictable buffers
769 * are all included in this value.
770 * Not updated directly; only synced in arc_kstat_update.
772 kstat_named_t arcstat_mfu_size;
774 * Number of bytes consumed by ARC buffers that are eligible for
775 * eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu state.
777 * Not updated directly; only synced in arc_kstat_update.
779 kstat_named_t arcstat_mfu_evictable_data;
781 * Number of bytes consumed by ARC buffers that are eligible for
782 * eviction, of type ARC_BUFC_METADATA, and reside in the arc_mfu state.
784 * Not updated directly; only synced in arc_kstat_update.
786 kstat_named_t arcstat_mfu_evictable_metadata;
788 * Total number of bytes that *would have been* consumed by ARC
789 * buffers in the arc_mfu_ghost state. See the comment above
790 * arcstat_mru_ghost_size for more details.
791 * Not updated directly; only synced in arc_kstat_update.
793 kstat_named_t arcstat_mfu_ghost_size;
795 * Number of bytes that *would have been* consumed by ARC
796 * buffers that are eligible for eviction, of type
797 * ARC_BUFC_DATA, and linked off the arc_mfu_ghost state.
798 * Not updated directly; only synced in arc_kstat_update.
800 kstat_named_t arcstat_mfu_ghost_evictable_data;
802 * Number of bytes that *would have been* consumed by ARC
803 * buffers that are eligible for eviction, of type
804 * ARC_BUFC_METADATA, and linked off the arc_mfu_ghost state.
805 * Not updated directly; only synced in arc_kstat_update.
807 kstat_named_t arcstat_mfu_ghost_evictable_metadata;
808 kstat_named_t arcstat_l2_hits;
809 kstat_named_t arcstat_l2_misses;
810 kstat_named_t arcstat_l2_feeds;
811 kstat_named_t arcstat_l2_rw_clash;
812 kstat_named_t arcstat_l2_read_bytes;
813 kstat_named_t arcstat_l2_write_bytes;
814 kstat_named_t arcstat_l2_writes_sent;
815 kstat_named_t arcstat_l2_writes_done;
816 kstat_named_t arcstat_l2_writes_error;
817 kstat_named_t arcstat_l2_writes_lock_retry;
818 kstat_named_t arcstat_l2_evict_lock_retry;
819 kstat_named_t arcstat_l2_evict_reading;
820 kstat_named_t arcstat_l2_evict_l1cached;
821 kstat_named_t arcstat_l2_free_on_write;
822 kstat_named_t arcstat_l2_abort_lowmem;
823 kstat_named_t arcstat_l2_cksum_bad;
824 kstat_named_t arcstat_l2_io_error;
825 kstat_named_t arcstat_l2_lsize;
826 kstat_named_t arcstat_l2_psize;
827 /* Not updated directly; only synced in arc_kstat_update. */
828 kstat_named_t arcstat_l2_hdr_size;
829 kstat_named_t arcstat_l2_write_trylock_fail;
830 kstat_named_t arcstat_l2_write_passed_headroom;
831 kstat_named_t arcstat_l2_write_spa_mismatch;
832 kstat_named_t arcstat_l2_write_in_l2;
833 kstat_named_t arcstat_l2_write_hdr_io_in_progress;
834 kstat_named_t arcstat_l2_write_not_cacheable;
835 kstat_named_t arcstat_l2_write_full;
836 kstat_named_t arcstat_l2_write_buffer_iter;
837 kstat_named_t arcstat_l2_write_pios;
838 kstat_named_t arcstat_l2_write_buffer_bytes_scanned;
839 kstat_named_t arcstat_l2_write_buffer_list_iter;
840 kstat_named_t arcstat_l2_write_buffer_list_null_iter;
841 kstat_named_t arcstat_memory_throttle_count;
842 kstat_named_t arcstat_memory_direct_count;
843 kstat_named_t arcstat_memory_indirect_count;
844 kstat_named_t arcstat_memory_all_bytes;
845 kstat_named_t arcstat_memory_free_bytes;
846 kstat_named_t arcstat_memory_available_bytes;
847 kstat_named_t arcstat_no_grow;
848 kstat_named_t arcstat_tempreserve;
849 kstat_named_t arcstat_loaned_bytes;
850 kstat_named_t arcstat_prune;
851 /* Not updated directly; only synced in arc_kstat_update. */
852 kstat_named_t arcstat_meta_used;
853 kstat_named_t arcstat_meta_limit;
854 kstat_named_t arcstat_dnode_limit;
855 kstat_named_t arcstat_meta_max;
856 kstat_named_t arcstat_meta_min;
857 kstat_named_t arcstat_async_upgrade_sync;
858 kstat_named_t arcstat_demand_hit_predictive_prefetch;
859 kstat_named_t arcstat_demand_hit_prescient_prefetch;
862 static arc_stats_t arc_stats = {
863 { "hits", KSTAT_DATA_UINT64 },
864 { "misses", KSTAT_DATA_UINT64 },
865 { "demand_data_hits", KSTAT_DATA_UINT64 },
866 { "demand_data_misses", KSTAT_DATA_UINT64 },
867 { "demand_metadata_hits", KSTAT_DATA_UINT64 },
868 { "demand_metadata_misses", KSTAT_DATA_UINT64 },
869 { "prefetch_data_hits", KSTAT_DATA_UINT64 },
870 { "prefetch_data_misses", KSTAT_DATA_UINT64 },
871 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
872 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
873 { "mru_hits", KSTAT_DATA_UINT64 },
874 { "mru_ghost_hits", KSTAT_DATA_UINT64 },
875 { "mfu_hits", KSTAT_DATA_UINT64 },
876 { "mfu_ghost_hits", KSTAT_DATA_UINT64 },
877 { "allocated", KSTAT_DATA_UINT64 },
878 { "deleted", KSTAT_DATA_UINT64 },
879 { "mutex_miss", KSTAT_DATA_UINT64 },
880 { "access_skip", KSTAT_DATA_UINT64 },
881 { "evict_skip", KSTAT_DATA_UINT64 },
882 { "evict_not_enough", KSTAT_DATA_UINT64 },
883 { "evict_l2_cached", KSTAT_DATA_UINT64 },
884 { "evict_l2_eligible", KSTAT_DATA_UINT64 },
885 { "evict_l2_ineligible", KSTAT_DATA_UINT64 },
886 { "evict_l2_skip", KSTAT_DATA_UINT64 },
887 { "hash_elements", KSTAT_DATA_UINT64 },
888 { "hash_elements_max", KSTAT_DATA_UINT64 },
889 { "hash_collisions", KSTAT_DATA_UINT64 },
890 { "hash_chains", KSTAT_DATA_UINT64 },
891 { "hash_chain_max", KSTAT_DATA_UINT64 },
892 { "p", KSTAT_DATA_UINT64 },
893 { "c", KSTAT_DATA_UINT64 },
894 { "c_min", KSTAT_DATA_UINT64 },
895 { "c_max", KSTAT_DATA_UINT64 },
896 { "size", KSTAT_DATA_UINT64 },
897 { "compressed_size", KSTAT_DATA_UINT64 },
898 { "uncompressed_size", KSTAT_DATA_UINT64 },
899 { "overhead_size", KSTAT_DATA_UINT64 },
900 { "hdr_size", KSTAT_DATA_UINT64 },
901 { "data_size", KSTAT_DATA_UINT64 },
902 { "metadata_size", KSTAT_DATA_UINT64 },
903 { "dbuf_size", KSTAT_DATA_UINT64 },
904 { "dnode_size", KSTAT_DATA_UINT64 },
905 { "bonus_size", KSTAT_DATA_UINT64 },
906 #if defined(__FreeBSD__) && defined(COMPAT_FREEBSD11)
907 { "other_size", KSTAT_DATA_UINT64 },
909 { "anon_size", KSTAT_DATA_UINT64 },
910 { "anon_evictable_data", KSTAT_DATA_UINT64 },
911 { "anon_evictable_metadata", KSTAT_DATA_UINT64 },
912 { "mru_size", KSTAT_DATA_UINT64 },
913 { "mru_evictable_data", KSTAT_DATA_UINT64 },
914 { "mru_evictable_metadata", KSTAT_DATA_UINT64 },
915 { "mru_ghost_size", KSTAT_DATA_UINT64 },
916 { "mru_ghost_evictable_data", KSTAT_DATA_UINT64 },
917 { "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
918 { "mfu_size", KSTAT_DATA_UINT64 },
919 { "mfu_evictable_data", KSTAT_DATA_UINT64 },
920 { "mfu_evictable_metadata", KSTAT_DATA_UINT64 },
921 { "mfu_ghost_size", KSTAT_DATA_UINT64 },
922 { "mfu_ghost_evictable_data", KSTAT_DATA_UINT64 },
923 { "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
924 { "l2_hits", KSTAT_DATA_UINT64 },
925 { "l2_misses", KSTAT_DATA_UINT64 },
926 { "l2_feeds", KSTAT_DATA_UINT64 },
927 { "l2_rw_clash", KSTAT_DATA_UINT64 },
928 { "l2_read_bytes", KSTAT_DATA_UINT64 },
929 { "l2_write_bytes", KSTAT_DATA_UINT64 },
930 { "l2_writes_sent", KSTAT_DATA_UINT64 },
931 { "l2_writes_done", KSTAT_DATA_UINT64 },
932 { "l2_writes_error", KSTAT_DATA_UINT64 },
933 { "l2_writes_lock_retry", KSTAT_DATA_UINT64 },
934 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
935 { "l2_evict_reading", KSTAT_DATA_UINT64 },
936 { "l2_evict_l1cached", KSTAT_DATA_UINT64 },
937 { "l2_free_on_write", KSTAT_DATA_UINT64 },
938 { "l2_abort_lowmem", KSTAT_DATA_UINT64 },
939 { "l2_cksum_bad", KSTAT_DATA_UINT64 },
940 { "l2_io_error", KSTAT_DATA_UINT64 },
941 { "l2_size", KSTAT_DATA_UINT64 },
942 { "l2_asize", KSTAT_DATA_UINT64 },
943 { "l2_hdr_size", KSTAT_DATA_UINT64 },
944 { "l2_write_trylock_fail", KSTAT_DATA_UINT64 },
945 { "l2_write_passed_headroom", KSTAT_DATA_UINT64 },
946 { "l2_write_spa_mismatch", KSTAT_DATA_UINT64 },
947 { "l2_write_in_l2", KSTAT_DATA_UINT64 },
948 { "l2_write_io_in_progress", KSTAT_DATA_UINT64 },
949 { "l2_write_not_cacheable", KSTAT_DATA_UINT64 },
950 { "l2_write_full", KSTAT_DATA_UINT64 },
951 { "l2_write_buffer_iter", KSTAT_DATA_UINT64 },
952 { "l2_write_pios", KSTAT_DATA_UINT64 },
953 { "l2_write_buffer_bytes_scanned", KSTAT_DATA_UINT64 },
954 { "l2_write_buffer_list_iter", KSTAT_DATA_UINT64 },
955 { "l2_write_buffer_list_null_iter", KSTAT_DATA_UINT64 },
956 { "memory_throttle_count", KSTAT_DATA_UINT64 },
957 { "memory_direct_count", KSTAT_DATA_UINT64 },
958 { "memory_indirect_count", KSTAT_DATA_UINT64 },
959 { "memory_all_bytes", KSTAT_DATA_UINT64 },
960 { "memory_free_bytes", KSTAT_DATA_UINT64 },
961 { "memory_available_bytes", KSTAT_DATA_UINT64 },
962 { "arc_no_grow", KSTAT_DATA_UINT64 },
963 { "arc_tempreserve", KSTAT_DATA_UINT64 },
964 { "arc_loaned_bytes", KSTAT_DATA_UINT64 },
965 { "arc_prune", KSTAT_DATA_UINT64 },
966 { "arc_meta_used", KSTAT_DATA_UINT64 },
967 { "arc_meta_limit", KSTAT_DATA_UINT64 },
968 { "arc_dnode_limit", KSTAT_DATA_UINT64 },
969 { "arc_meta_max", KSTAT_DATA_UINT64 },
970 { "arc_meta_min", KSTAT_DATA_UINT64 },
971 { "async_upgrade_sync", KSTAT_DATA_UINT64 },
972 { "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 },
973 { "demand_hit_prescient_prefetch", KSTAT_DATA_UINT64 },
976 #define ARCSTAT(stat) (arc_stats.stat.value.ui64)
978 #define ARCSTAT_INCR(stat, val) \
979 atomic_add_64(&arc_stats.stat.value.ui64, (val))
981 #define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1)
982 #define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1)
984 #define ARCSTAT_MAX(stat, val) { \
986 while ((val) > (m = arc_stats.stat.value.ui64) && \
987 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
991 #define ARCSTAT_MAXSTAT(stat) \
992 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
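/*
 * Example usage of the macros above (illustrative):
 *
 *	ARCSTAT_BUMP(arcstat_hits);			counts one hit
 *	ARCSTAT_INCR(arcstat_evict_l2_cached, size);	adds a byte count
 *	ARCSTAT_MAXSTAT(arcstat_hash_elements);		updates hash_elements_max
 */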
995 * We define a macro to allow ARC hits/misses to be easily broken down by
996 * two separate conditions, giving a total of four different subtypes for
997 * each of hits and misses (so eight statistics total).
999 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
1002 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
1004 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
1008 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
1010 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
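/*
 * For example, a hit can be classified along both dimensions in a
 * single call (an illustrative sketch):
 *
 *	ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), demand, prefetch,
 *	    !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
 *
 * which bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits, or
 * arcstat_prefetch_metadata_hits.
 */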
1015 static arc_state_t *arc_anon;
1016 static arc_state_t *arc_mru;
1017 static arc_state_t *arc_mru_ghost;
1018 static arc_state_t *arc_mfu;
1019 static arc_state_t *arc_mfu_ghost;
1020 static arc_state_t *arc_l2c_only;
1023 * There are several ARC variables that are critical to export as kstats --
1024 * but we don't want to have to grovel around in the kstat whenever we wish to
1025 * manipulate them. For these variables, we therefore define them to be in
1026 * terms of the statistic variable. This assures that we are not introducing
1027 * the possibility of inconsistency by having shadow copies of the variables,
1028 * while still allowing the code to be readable.
1030 #define arc_p ARCSTAT(arcstat_p) /* target size of MRU */
1031 #define arc_c ARCSTAT(arcstat_c) /* target size of cache */
1032 #define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */
1033 #define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */
1034 #define arc_meta_limit ARCSTAT(arcstat_meta_limit) /* max size for metadata */
1035 #define arc_dnode_limit ARCSTAT(arcstat_dnode_limit) /* max size for dnodes */
1036 #define arc_meta_min ARCSTAT(arcstat_meta_min) /* min size for metadata */
1037 #define arc_meta_max ARCSTAT(arcstat_meta_max) /* max size of metadata */
1038 #define arc_dbuf_size ARCSTAT(arcstat_dbuf_size) /* dbuf metadata */
1039 #define arc_dnode_size ARCSTAT(arcstat_dnode_size) /* dnode metadata */
1040 #define arc_bonus_size ARCSTAT(arcstat_bonus_size) /* bonus buffer metadata */
1042 /* compressed size of entire arc */
1043 #define arc_compressed_size ARCSTAT(arcstat_compressed_size)
1044 /* uncompressed size of entire arc */
1045 #define arc_uncompressed_size ARCSTAT(arcstat_uncompressed_size)
1046 /* number of bytes in the arc from arc_buf_t's */
1047 #define arc_overhead_size ARCSTAT(arcstat_overhead_size)
1050 * There are also some ARC variables that we want to export, but that are
1051 * updated so often that having the canonical representation be the statistic
1052 * variable causes a performance bottleneck. We want to use aggsum_t's for these
1053 * instead, but still be able to export the kstat in the same way as before.
1054 * The solution is to always use the aggsum version, except in the kstat update callback.
1058 aggsum_t arc_meta_used;
1059 aggsum_t astat_data_size;
1060 aggsum_t astat_metadata_size;
1061 aggsum_t astat_hdr_size;
1062 aggsum_t astat_bonus_size;
1063 aggsum_t astat_dnode_size;
1064 aggsum_t astat_dbuf_size;
1065 aggsum_t astat_l2_hdr_size;
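/*
 * A sketch of the intended pattern (illustrative): hot paths update the
 * aggsum, and only the kstat update callback folds it back into the
 * named stat:
 *
 *	aggsum_add(&astat_data_size, space);
 *	...
 *	ARCSTAT(arcstat_data_size) = aggsum_value(&astat_data_size);
 */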
1067 static list_t arc_prune_list;
1068 static kmutex_t arc_prune_mtx;
1069 static taskq_t *arc_prune_taskq;
1071 static int arc_no_grow; /* Don't try to grow cache size */
1072 static hrtime_t arc_growtime;
1073 static uint64_t arc_tempreserve;
1074 static uint64_t arc_loaned_bytes;
1076 typedef struct arc_callback arc_callback_t;
1078 struct arc_callback {
1080 arc_read_done_func_t *acb_done;
1082 boolean_t acb_compressed;
1083 zio_t *acb_zio_dummy;
1084 zio_t *acb_zio_head;
1085 arc_callback_t *acb_next;
1088 typedef struct arc_write_callback arc_write_callback_t;
1090 struct arc_write_callback {
1092 arc_write_done_func_t *awcb_ready;
1093 arc_write_done_func_t *awcb_children_ready;
1094 arc_write_done_func_t *awcb_physdone;
1095 arc_write_done_func_t *awcb_done;
1096 arc_buf_t *awcb_buf;
1100 * ARC buffers are separated into multiple structs as a memory saving measure:
1101 * - Common fields struct, always defined, and embedded within it:
1102 * - L2-only fields, always allocated but undefined when not in L2ARC
1103 * - L1-only fields, only allocated when in L1ARC
1105 *          Buffer in L1                     Buffer only in L2
1106 *    +------------------------+          +------------------------+
1107 *    | arc_buf_hdr_t          |          | arc_buf_hdr_t          |
1108 *    |                        |          |                        |
1109 *    |                        |          |                        |
1110 *    |                        |          |                        |
1111 *    +------------------------+          +------------------------+
1112 *    | l2arc_buf_hdr_t        |          | l2arc_buf_hdr_t        |
1113 *    | (undefined if L1-only) |          |                        |
1114 *    +------------------------+          +------------------------+
1115 *    | l1arc_buf_hdr_t        |
1116 *    |                        |
1117 *    |                        |
1118 *    |                        |
1119 *    |                        |
1120 *    +------------------------+
1122 * Because it's possible for the L2ARC to become extremely large, we can wind
1123 * up eating a lot of memory in L2ARC buffer headers, so the size of a header
1124 * is minimized by only allocating the fields necessary for an L1-cached buffer
1125 * when a header is actually in the L1 cache. The sub-headers (l1arc_buf_hdr and
1126 * l2arc_buf_hdr) are embedded rather than allocated separately, saving a
1127 * couple of pointer-sized words. arc_hdr_realloc() is used to switch a header between
1128 * these two allocation states.
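 *
 * A hedged sketch of that transition (the kmem cache names are
 * assumptions based on the description above):
 *
 *	hdr = arc_hdr_realloc(hdr, hdr_full_cache, hdr_l2only_cache);
 *
 * which demotes a header to the cheaper L2-only representation.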
1130 typedef struct l1arc_buf_hdr {
1131 kmutex_t b_freeze_lock;
1132 zio_cksum_t *b_freeze_cksum;
1135 * Used for debugging with kmem_flags - by allocating and freeing
1136 * b_thawed when the buffer is thawed, we get a record of the stack
1137 * trace that thawed it.
1144 /* for waiting on writes to complete */
1148 /* protected by arc state mutex */
1149 arc_state_t *b_state;
1150 multilist_node_t b_arc_node;
1152 /* updated atomically */
1153 clock_t b_arc_access;
1154 uint32_t b_mru_hits;
1155 uint32_t b_mru_ghost_hits;
1156 uint32_t b_mfu_hits;
1157 uint32_t b_mfu_ghost_hits;
1160 /* self protecting */
1161 refcount_t b_refcnt;
1163 arc_callback_t *b_acb;
1167 typedef struct l2arc_dev l2arc_dev_t;
1169 typedef struct l2arc_buf_hdr {
1170 /* protected by arc_buf_hdr mutex */
1171 l2arc_dev_t *b_dev; /* L2ARC device */
1172 uint64_t b_daddr; /* disk address, offset byte */
1175 list_node_t b_l2node;
1178 struct arc_buf_hdr {
1179 /* protected by hash lock */
1183 arc_buf_contents_t b_type;
1184 arc_buf_hdr_t *b_hash_next;
1185 arc_flags_t b_flags;
1188 * This field stores the size of the data buffer after
1189 * compression, and is set in the arc's zio completion handlers.
1190 * It is in units of SPA_MINBLOCKSIZE (e.g. 1 == 512 bytes).
1192 * While the block pointers can store up to 32MB in their psize
1193 * field, we can only store up to 32MB minus 512B. This is due
1194 * to the bp using a bias of 1, whereas we use a bias of 0 (i.e.
1195 * a field of zeros represents 512B in the bp). We can't use a
1196 * bias of 1 since we need to reserve a psize of zero, here, to
1197 * represent holes and embedded blocks.
1199 * This isn't a problem in practice, since the maximum size of a
1200 * buffer is limited to 16MB, so we never need to store 32MB in
1201 * this field. Even in the upstream illumos code base, the
1202 * maximum size of a buffer is limited to 16MB.
1207 * This field stores the size of the data buffer before
1208 * compression, and cannot change once set. It is in units
1209 * of SPA_MINBLOCKSIZE (e.g. 2 == 1024 bytes)
1211 uint16_t b_lsize; /* immutable */
1212 uint64_t b_spa; /* immutable */
1214 /* L2ARC fields. Undefined when not in L2ARC. */
1215 l2arc_buf_hdr_t b_l2hdr;
1216 /* L1ARC fields. Undefined when in l2arc_only state */
1217 l1arc_buf_hdr_t b_l1hdr;
1220 #if defined(__FreeBSD__) && defined(_KERNEL)
1222 sysctl_vfs_zfs_arc_meta_limit(SYSCTL_HANDLER_ARGS)
1227 val = arc_meta_limit;
1228 err = sysctl_handle_64(oidp, &val, 0, req);
1229 if (err != 0 || req->newptr == NULL)
1232 if (val <= 0 || val > arc_c_max)
1235 arc_meta_limit = val;
1240 sysctl_vfs_zfs_arc_no_grow_shift(SYSCTL_HANDLER_ARGS)
1245 val = arc_no_grow_shift;
1246 err = sysctl_handle_32(oidp, &val, 0, req);
1247 if (err != 0 || req->newptr == NULL)
1250 if (val >= arc_shrink_shift)
1253 arc_no_grow_shift = val;
1258 sysctl_vfs_zfs_arc_max(SYSCTL_HANDLER_ARGS)
1264 err = sysctl_handle_64(oidp, &val, 0, req);
1265 if (err != 0 || req->newptr == NULL)
1268 if (zfs_arc_max == 0) {
1269 /* Loader tunable so blindly set */
1274 if (val < arc_abs_min || val > kmem_size())
1276 if (val < arc_c_min)
1278 if (zfs_arc_meta_limit > 0 && val < zfs_arc_meta_limit)
1284 arc_p = (arc_c >> 1);
1286 if (zfs_arc_meta_limit == 0) {
1287 /* limit meta-data to 1/4 of the arc capacity */
1288 arc_meta_limit = arc_c_max / 4;
1291 /* if kmem_flags are set, lets try to use less memory */
1292 if (kmem_debugging())
1295 zfs_arc_max = arc_c;
1301 sysctl_vfs_zfs_arc_min(SYSCTL_HANDLER_ARGS)
1307 err = sysctl_handle_64(oidp, &val, 0, req);
1308 if (err != 0 || req->newptr == NULL)
1311 if (zfs_arc_min == 0) {
1312 /* Loader tunable so blindly set */
1317 if (val < arc_abs_min || val > arc_c_max)
1322 if (zfs_arc_meta_min == 0)
1323 arc_meta_min = arc_c_min / 2;
1325 if (arc_c < arc_c_min)
1328 zfs_arc_min = arc_c_min;
1334 #define GHOST_STATE(state) \
1335 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
1336 (state) == arc_l2c_only)
1338 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
1339 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
1340 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_FLAG_IO_ERROR)
1341 #define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_FLAG_PREFETCH)
1342 #define HDR_PRESCIENT_PREFETCH(hdr) \
1343 ((hdr)->b_flags & ARC_FLAG_PRESCIENT_PREFETCH)
1344 #define HDR_COMPRESSION_ENABLED(hdr) \
1345 ((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)
1347 #define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_FLAG_L2CACHE)
1348 #define HDR_L2_READING(hdr) \
1349 (((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) && \
1350 ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
1351 #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITING)
1352 #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
1353 #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
1354 #define HDR_SHARED_DATA(hdr) ((hdr)->b_flags & ARC_FLAG_SHARED_DATA)
1356 #define HDR_ISTYPE_METADATA(hdr) \
1357 ((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
1358 #define HDR_ISTYPE_DATA(hdr) (!HDR_ISTYPE_METADATA(hdr))
1360 #define HDR_HAS_L1HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
1361 #define HDR_HAS_L2HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
1363 /* For storing compression mode in b_flags */
1364 #define HDR_COMPRESS_OFFSET (highbit64(ARC_FLAG_COMPRESS_0) - 1)
1366 #define HDR_GET_COMPRESS(hdr) ((enum zio_compress)BF32_GET((hdr)->b_flags, \
1367 HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS))
1368 #define HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \
1369 HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp));
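/*
 * Example (illustrative): deciding whether a header's b_pabd currently
 * holds compressed bytes:
 *
 *	if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
 *	    HDR_COMPRESSION_ENABLED(hdr)) {
 *		... b_pabd matches the compressed on-disk block ...
 *	}
 */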
1371 #define ARC_BUF_LAST(buf) ((buf)->b_next == NULL)
1372 #define ARC_BUF_SHARED(buf) ((buf)->b_flags & ARC_BUF_FLAG_SHARED)
1373 #define ARC_BUF_COMPRESSED(buf) ((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED)
1379 #define HDR_FULL_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
1380 #define HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))
1383 * Hash table routines
1386 #define HT_LOCK_PAD CACHE_LINE_SIZE
1391 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
1395 #define BUF_LOCKS 256
1396 typedef struct buf_hash_table {
1398 arc_buf_hdr_t **ht_table;
1399 struct ht_lock ht_locks[BUF_LOCKS] __aligned(CACHE_LINE_SIZE);
1402 static buf_hash_table_t buf_hash_table;
1404 #define BUF_HASH_INDEX(spa, dva, birth) \
1405 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
1406 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
1407 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
1408 #define HDR_LOCK(hdr) \
1409 (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
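/*
 * Example (illustrative): finding and taking the hash lock that
 * protects the header for a given block pointer:
 *
 *	uint64_t idx = BUF_HASH_INDEX(spa,
 *	    BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp));
 *	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
 *	mutex_enter(hash_lock);
 */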
1411 uint64_t zfs_crc64_table[256];
1417 #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
1418 #define L2ARC_HEADROOM 2 /* num of writes */
1420 * If we discover during ARC scan any buffers to be compressed, we boost
1421 * our headroom for the next scanning cycle by this percentage multiple.
1423 #define L2ARC_HEADROOM_BOOST 200
1424 #define L2ARC_FEED_SECS 1 /* caching interval secs */
1425 #define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
1427 #define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent)
1428 #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done)
1430 /* L2ARC Performance Tunables */
1431 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */
1432 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */
1433 uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */
1434 uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
1435 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
1436 uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
1437 boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
1438 boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */
1439 boolean_t l2arc_norw = B_TRUE; /* no reads during writes */
1441 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RWTUN,
1442 &l2arc_write_max, 0, "max write size");
1443 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost, CTLFLAG_RWTUN,
1444 &l2arc_write_boost, 0, "extra write during warmup");
1445 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom, CTLFLAG_RWTUN,
1446 &l2arc_headroom, 0, "number of dev writes");
1447 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs, CTLFLAG_RWTUN,
1448 &l2arc_feed_secs, 0, "interval seconds");
1449 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms, CTLFLAG_RWTUN,
1450 &l2arc_feed_min_ms, 0, "min interval milliseconds");
1452 SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch, CTLFLAG_RWTUN,
1453 &l2arc_noprefetch, 0, "don't cache prefetch bufs");
1454 SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again, CTLFLAG_RWTUN,
1455 &l2arc_feed_again, 0, "turbo warmup");
1456 SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, CTLFLAG_RWTUN,
1457 &l2arc_norw, 0, "no reads during writes");
1459 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD,
1460 &ARC_anon.arcs_size.rc_count, 0, "size of anonymous state");
1461 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_esize, CTLFLAG_RD,
1462 &ARC_anon.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
1463 "size of anonymous state");
1464 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_esize, CTLFLAG_RD,
1465 &ARC_anon.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
1466 "size of anonymous state");
1468 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD,
1469 &ARC_mru.arcs_size.rc_count, 0, "size of mru state");
1470 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_esize, CTLFLAG_RD,
1471 &ARC_mru.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
1472 "size of metadata in mru state");
1473 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_esize, CTLFLAG_RD,
1474 &ARC_mru.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
1475 "size of data in mru state");
1477 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD,
1478 &ARC_mru_ghost.arcs_size.rc_count, 0, "size of mru ghost state");
1479 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_esize, CTLFLAG_RD,
1480 &ARC_mru_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
1481 "size of metadata in mru ghost state");
1482 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_esize, CTLFLAG_RD,
1483 &ARC_mru_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
1484 "size of data in mru ghost state");
1486 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD,
1487 &ARC_mfu.arcs_size.rc_count, 0, "size of mfu state");
1488 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_esize, CTLFLAG_RD,
1489 &ARC_mfu.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
1490 "size of metadata in mfu state");
1491 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_esize, CTLFLAG_RD,
1492 &ARC_mfu.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
1493 "size of data in mfu state");
1495 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD,
1496 &ARC_mfu_ghost.arcs_size.rc_count, 0, "size of mfu ghost state");
1497 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_esize, CTLFLAG_RD,
1498 &ARC_mfu_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
1499 "size of metadata in mfu ghost state");
1500 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_esize, CTLFLAG_RD,
1501 &ARC_mfu_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
1502 "size of data in mfu ghost state");
1504 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD,
1505 &ARC_l2c_only.arcs_size.rc_count, 0, "size of l2c_only state");
1507 SYSCTL_UINT(_vfs_zfs, OID_AUTO, arc_min_prefetch_ms, CTLFLAG_RW,
1508 &zfs_arc_min_prefetch_ms, 0, "Min life of prefetch block in ms");
1509 SYSCTL_UINT(_vfs_zfs, OID_AUTO, arc_min_prescient_prefetch_ms, CTLFLAG_RW,
1510 &zfs_arc_min_prescient_prefetch_ms, 0, "Min life of prescient prefetched block in ms");
1516 vdev_t *l2ad_vdev; /* vdev */
1517 spa_t *l2ad_spa; /* spa */
1518 uint64_t l2ad_hand; /* next write location */
1519 uint64_t l2ad_start; /* first addr on device */
1520 uint64_t l2ad_end; /* last addr on device */
1521 boolean_t l2ad_first; /* first sweep through */
1522 boolean_t l2ad_writing; /* currently writing */
1523 kmutex_t l2ad_mtx; /* lock for buffer list */
1524 list_t l2ad_buflist; /* buffer list */
1525 list_node_t l2ad_node; /* device list node */
1526 refcount_t l2ad_alloc; /* allocated bytes */
1529 static list_t L2ARC_dev_list; /* device list */
1530 static list_t *l2arc_dev_list; /* device list pointer */
1531 static kmutex_t l2arc_dev_mtx; /* device list mutex */
1532 static l2arc_dev_t *l2arc_dev_last; /* last device used */
1533 static list_t L2ARC_free_on_write; /* free after write buf list */
1534 static list_t *l2arc_free_on_write; /* free after write list ptr */
1535 static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
1536 static uint64_t l2arc_ndev; /* number of devices */
1538 typedef struct l2arc_read_callback {
1539 arc_buf_hdr_t *l2rcb_hdr; /* read header */
1540 blkptr_t l2rcb_bp; /* original blkptr */
1541 zbookmark_phys_t l2rcb_zb; /* original bookmark */
1542 int l2rcb_flags; /* original flags */
1543 abd_t *l2rcb_abd; /* temporary buffer */
1544 } l2arc_read_callback_t;
1546 typedef struct l2arc_write_callback {
1547 l2arc_dev_t *l2wcb_dev; /* device info */
1548 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */
1549 } l2arc_write_callback_t;
1551 typedef struct l2arc_data_free {
1552 /* protected by l2arc_free_on_write_mtx */
1555 arc_buf_contents_t l2df_type;
1556 list_node_t l2df_list_node;
1557 } l2arc_data_free_t;
1559 static kmutex_t l2arc_feed_thr_lock;
1560 static kcondvar_t l2arc_feed_thr_cv;
1561 static uint8_t l2arc_thread_exit;
1563 static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, void *);
1564 static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, void *);
1565 static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, void *);
1566 static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, void *);
1567 static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, void *);
1568 static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag);
1569 static void arc_hdr_free_pabd(arc_buf_hdr_t *);
1570 static void arc_hdr_alloc_pabd(arc_buf_hdr_t *);
1571 static void arc_access(arc_buf_hdr_t *, kmutex_t *);
1572 static boolean_t arc_is_overflowing(void);
1573 static void arc_buf_watch(arc_buf_t *);
1574 static void arc_prune_async(int64_t);
1576 static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
1577 static uint32_t arc_bufc_to_flags(arc_buf_contents_t);
1578 static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
1579 static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
1581 static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
1582 static void l2arc_read_done(zio_t *);
1585 l2arc_trim(const arc_buf_hdr_t *hdr)
1587 l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
1589 ASSERT(HDR_HAS_L2HDR(hdr));
1590 ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
1592 if (HDR_GET_PSIZE(hdr) != 0) {
1593 trim_map_free(dev->l2ad_vdev, hdr->b_l2hdr.b_daddr,
1594 HDR_GET_PSIZE(hdr), 0);
1599 * We use Cityhash for this. It's fast, and has good hash properties without
1600 * requiring any large static buffers.
1603 buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
1605 return (cityhash4(spa, dva->dva_word[0], dva->dva_word[1], birth));
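/*
 * For reference, the hash is consumed roughly as follows (a sketch of
 * how the BUF_HASH_INDEX/BUF_HASH_LOCK macros used throughout this
 * file are built on top of buf_hash()):
 *
 *	idx  = buf_hash(spa, dva, birth) & buf_hash_table.ht_mask;
 *	lock = BUF_HASH_LOCK(idx);
 *
 * ht_mask is always a power of two minus one, so the AND reduces the
 * 64-bit hash to a table slot without a division.
 */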
1608 #define HDR_EMPTY(hdr) \
1609 ((hdr)->b_dva.dva_word[0] == 0 && \
1610 (hdr)->b_dva.dva_word[1] == 0)
1612 #define HDR_EQUAL(spa, dva, birth, hdr) \
1613 ((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
1614 ((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
1615 ((hdr)->b_birth == birth) && ((hdr)->b_spa == spa)
1618 buf_discard_identity(arc_buf_hdr_t *hdr)
1620 hdr->b_dva.dva_word[0] = 0;
1621 hdr->b_dva.dva_word[1] = 0;
1625 static arc_buf_hdr_t *
1626 buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
1628 const dva_t *dva = BP_IDENTITY(bp);
1629 uint64_t birth = BP_PHYSICAL_BIRTH(bp);
1630 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
1631 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
1634 mutex_enter(hash_lock);
1635 for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL;
1636 hdr = hdr->b_hash_next) {
1637 if (HDR_EQUAL(spa, dva, birth, hdr)) {
1642 mutex_exit(hash_lock);
1648 * Insert an entry into the hash table. If there is already an element
1649 * equal to hdr in the hash table, then the already existing element
1650 * will be returned and the new element will not be inserted.
1651 * Otherwise returns NULL.
1652 * If lockp == NULL, the caller is assumed to already hold the hash lock.
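 *
 * The typical caller pattern is (a sketch; error handling elided):
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *exists = buf_hash_insert(hdr, &hash_lock);
 *	if (exists != NULL)
 *		hdr = exists;		(another thread won the insert race)
 *	mutex_exit(hash_lock);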
1654 static arc_buf_hdr_t *
1655 buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp)
1657 uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
1658 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
1659 arc_buf_hdr_t *fhdr;
1662 ASSERT(!DVA_IS_EMPTY(&hdr->b_dva));
1663 ASSERT(hdr->b_birth != 0);
1664 ASSERT(!HDR_IN_HASH_TABLE(hdr));
1666 if (lockp != NULL) {
1668 mutex_enter(hash_lock);
1670 ASSERT(MUTEX_HELD(hash_lock));
1673 for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL;
1674 fhdr = fhdr->b_hash_next, i++) {
1675 if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
1679 hdr->b_hash_next = buf_hash_table.ht_table[idx];
1680 buf_hash_table.ht_table[idx] = hdr;
1681 arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
1683 /* collect some hash table performance data */
1685 ARCSTAT_BUMP(arcstat_hash_collisions);
1687 ARCSTAT_BUMP(arcstat_hash_chains);
1689 ARCSTAT_MAX(arcstat_hash_chain_max, i);
1692 ARCSTAT_BUMP(arcstat_hash_elements);
1693 ARCSTAT_MAXSTAT(arcstat_hash_elements);
1699 buf_hash_remove(arc_buf_hdr_t *hdr)
1701 arc_buf_hdr_t *fhdr, **hdrp;
1702 uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
1704 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
1705 ASSERT(HDR_IN_HASH_TABLE(hdr));
1707 hdrp = &buf_hash_table.ht_table[idx];
1708 while ((fhdr = *hdrp) != hdr) {
1709 ASSERT3P(fhdr, !=, NULL);
1710 hdrp = &fhdr->b_hash_next;
1712 *hdrp = hdr->b_hash_next;
1713 hdr->b_hash_next = NULL;
1714 arc_hdr_clear_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
1716 /* collect some hash table performance data */
1717 ARCSTAT_BUMPDOWN(arcstat_hash_elements);
1719 if (buf_hash_table.ht_table[idx] &&
1720 buf_hash_table.ht_table[idx]->b_hash_next == NULL)
1721 ARCSTAT_BUMPDOWN(arcstat_hash_chains);
1725 * Global data structures and functions for the buf kmem cache.
1727 static kmem_cache_t *hdr_full_cache;
1728 static kmem_cache_t *hdr_l2only_cache;
1729 static kmem_cache_t *buf_cache;
1736 kmem_free(buf_hash_table.ht_table,
1737 (buf_hash_table.ht_mask + 1) * sizeof (void *));
1738 for (i = 0; i < BUF_LOCKS; i++)
1739 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
1740 kmem_cache_destroy(hdr_full_cache);
1741 kmem_cache_destroy(hdr_l2only_cache);
1742 kmem_cache_destroy(buf_cache);
1746 * Constructor callback - called when the cache is empty
1747 * and a new buf is requested.
1751 hdr_full_cons(void *vbuf, void *unused, int kmflag)
1753 arc_buf_hdr_t *hdr = vbuf;
1755 bzero(hdr, HDR_FULL_SIZE);
1756 cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
1757 refcount_create(&hdr->b_l1hdr.b_refcnt);
1758 mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
1759 multilist_link_init(&hdr->b_l1hdr.b_arc_node);
1760 arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);
1767 hdr_l2only_cons(void *vbuf, void *unused, int kmflag)
1769 arc_buf_hdr_t *hdr = vbuf;
1771 bzero(hdr, HDR_L2ONLY_SIZE);
1772 arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
1779 buf_cons(void *vbuf, void *unused, int kmflag)
1781 arc_buf_t *buf = vbuf;
1783 bzero(buf, sizeof (arc_buf_t));
1784 mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
1785 arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1791 * Destructor callback - called when a cached buf is
1792 * no longer required.
1796 hdr_full_dest(void *vbuf, void *unused)
1798 arc_buf_hdr_t *hdr = vbuf;
1800 ASSERT(HDR_EMPTY(hdr));
1801 cv_destroy(&hdr->b_l1hdr.b_cv);
1802 refcount_destroy(&hdr->b_l1hdr.b_refcnt);
1803 mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
1804 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
1805 arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
1810 hdr_l2only_dest(void *vbuf, void *unused)
1812 arc_buf_hdr_t *hdr = vbuf;
1814 ASSERT(HDR_EMPTY(hdr));
1815 arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
1820 buf_dest(void *vbuf, void *unused)
1822 arc_buf_t *buf = vbuf;
1824 mutex_destroy(&buf->b_evict_lock);
1825 arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1829 * Reclaim callback -- invoked when memory is low.
1833 hdr_recl(void *unused)
1835 dprintf("hdr_recl called\n");
1837 * umem calls the reclaim func when we destroy the buf cache,
1838 * which is after we do arc_fini().
1840 if (arc_initialized)
1841 zthr_wakeup(arc_reap_zthr);
1848 uint64_t hsize = 1ULL << 12;
1852 * The hash table is big enough to fill all of physical memory
1853 * with an average block size of zfs_arc_average_blocksize (default 8K).
1854 * By default, the table will take up
1855 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
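 *
 * For example, on a machine with 16 GiB of physical memory and the
 * default 8K average block size, the loop below doubles hsize until
 * 2^21 * 8192 >= 2^34, so the table holds 2^21 pointer slots and
 * occupies 2^21 * 8 = 16 MB.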
1857 while (hsize * zfs_arc_average_blocksize < (uint64_t)physmem * PAGESIZE)
1860 buf_hash_table.ht_mask = hsize - 1;
1861 buf_hash_table.ht_table =
1862 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
1863 if (buf_hash_table.ht_table == NULL) {
1864 ASSERT(hsize > (1ULL << 8));
1869 hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE,
1870 0, hdr_full_cons, hdr_full_dest, hdr_recl, NULL, NULL, 0);
1871 hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only",
1872 HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, hdr_recl,
1874 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
1875 0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
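	/*
	 * Pre-compute the 256-entry reflected CRC-64 lookup table for
	 * ZFS_CRC64_POLY: each of the eight inner iterations folds one
	 * bit of the byte value into the running remainder.
	 */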
1877 for (i = 0; i < 256; i++)
1878 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
1879 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
1881 for (i = 0; i < BUF_LOCKS; i++) {
1882 mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
1883 NULL, MUTEX_DEFAULT, NULL);
1888 * This is the size that the buf occupies in memory. If the buf is compressed,
1889 * it will correspond to the compressed size. You should use this method of
1890 * getting the buf size unless you explicitly need the logical size.
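 *
 * For example, a 128K block that compressed down to 16K reports 16K
 * here while the buf still holds the compressed bytes, whereas
 * arc_buf_lsize() reports 128K either way.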
1893 arc_buf_size(arc_buf_t *buf)
1895 return (ARC_BUF_COMPRESSED(buf) ?
1896 HDR_GET_PSIZE(buf->b_hdr) : HDR_GET_LSIZE(buf->b_hdr));
1900 arc_buf_lsize(arc_buf_t *buf)
1902 return (HDR_GET_LSIZE(buf->b_hdr));
1906 arc_get_compression(arc_buf_t *buf)
1908 return (ARC_BUF_COMPRESSED(buf) ?
1909 HDR_GET_COMPRESS(buf->b_hdr) : ZIO_COMPRESS_OFF);
1912 #define ARC_MINTIME (hz>>4) /* 62 ms */
1914 static inline boolean_t
1915 arc_buf_is_shared(arc_buf_t *buf)
1917 boolean_t shared = (buf->b_data != NULL &&
1918 buf->b_hdr->b_l1hdr.b_pabd != NULL &&
1919 abd_is_linear(buf->b_hdr->b_l1hdr.b_pabd) &&
1920 buf->b_data == abd_to_buf(buf->b_hdr->b_l1hdr.b_pabd));
1921 IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr));
1922 IMPLY(shared, ARC_BUF_SHARED(buf));
1923 IMPLY(shared, ARC_BUF_COMPRESSED(buf) || ARC_BUF_LAST(buf));
1926 * It would be nice to assert arc_can_share() too, but the "hdr isn't
1927 * already being shared" requirement prevents us from doing that.
1934 * Free the checksum associated with this header. If there is no checksum, this
1938 arc_cksum_free(arc_buf_hdr_t *hdr)
1940 ASSERT(HDR_HAS_L1HDR(hdr));
1941 mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
1942 if (hdr->b_l1hdr.b_freeze_cksum != NULL) {
1943 kmem_free(hdr->b_l1hdr.b_freeze_cksum, sizeof (zio_cksum_t));
1944 hdr->b_l1hdr.b_freeze_cksum = NULL;
1946 mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
1950 * Return true iff at least one of the bufs on hdr is not compressed.
1953 arc_hdr_has_uncompressed_buf(arc_buf_hdr_t *hdr)
1955 for (arc_buf_t *b = hdr->b_l1hdr.b_buf; b != NULL; b = b->b_next) {
1956 if (!ARC_BUF_COMPRESSED(b)) {
1964 * If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data
1965 * matches the checksum that is stored in the hdr. If there is no checksum,
1966 * or if the buf is compressed, this is a no-op.
1969 arc_cksum_verify(arc_buf_t *buf)
1971 arc_buf_hdr_t *hdr = buf->b_hdr;
1974 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1977 if (ARC_BUF_COMPRESSED(buf)) {
1978 ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL ||
1979 arc_hdr_has_uncompressed_buf(hdr));
1983 ASSERT(HDR_HAS_L1HDR(hdr));
1985 mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
1986 if (hdr->b_l1hdr.b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) {
1987 mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
1991 fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, &zc);
1992 if (!ZIO_CHECKSUM_EQUAL(*hdr->b_l1hdr.b_freeze_cksum, zc))
1993 panic("buffer modified while frozen!");
1994 mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
1998 arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio)
2000 enum zio_compress compress = BP_GET_COMPRESS(zio->io_bp);
2001 boolean_t valid_cksum;
2003 ASSERT(!BP_IS_EMBEDDED(zio->io_bp));
2004 VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr));
2007 * We rely on the blkptr's checksum to determine if the block
2008 * is valid or not. When compressed arc is enabled, the l2arc
2009 * writes the block to the l2arc just as it appears in the pool.
2010 * This allows us to use the blkptr's checksum to validate the
2011 * data that we just read off of the l2arc without having to store
2012 * a separate checksum in the arc_buf_hdr_t. However, if compressed
2013 * arc is disabled, then the data written to the l2arc is always
2014 * uncompressed and won't match the block as it exists in the main
2015 * pool. When this is the case, we must first compress it if it is
2016 * compressed on the main pool before we can validate the checksum.
2018 if (!HDR_COMPRESSION_ENABLED(hdr) && compress != ZIO_COMPRESS_OFF) {
2019 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
2020 uint64_t lsize = HDR_GET_LSIZE(hdr);
2023 abd_t *cdata = abd_alloc_linear(HDR_GET_PSIZE(hdr), B_TRUE);
2024 csize = zio_compress_data(compress, zio->io_abd,
2025 abd_to_buf(cdata), lsize);
2027 ASSERT3U(csize, <=, HDR_GET_PSIZE(hdr));
2028 if (csize < HDR_GET_PSIZE(hdr)) {
2030 * Compressed blocks are always a multiple of the
2031 * smallest ashift in the pool. Ideally, we would
2032 * like to round up the csize to the next
2033 * spa_min_ashift but that value may have changed
2034 * since the block was last written. Instead,
2035 * we rely on the fact that the hdr's psize
2036 * was set to the psize of the block when it was
2037 * last written. We set the csize to that value
2038 * and zero out any part that should not contain
2041 abd_zero_off(cdata, csize, HDR_GET_PSIZE(hdr) - csize);
2042 csize = HDR_GET_PSIZE(hdr);
2044 zio_push_transform(zio, cdata, csize, HDR_GET_PSIZE(hdr), NULL);
2048 * Block pointers always store the checksum for the logical data.
2049 * If the block pointer has the gang bit set, then the checksum
2050 * it represents is for the reconstituted data and not for an
2051 * individual gang member. The zio pipeline, however, must be able to
2052 * determine the checksum of each of the gang constituents so it
2053 * treats the checksum comparison differently than what we need
2054 * for l2arc blocks. This prevents us from using the
2055 * zio_checksum_error() interface directly. Instead we must call the
2056 * zio_checksum_error_impl() so that we can ensure the checksum is
2057 * generated using the correct checksum algorithm and accounts for the
2058 * logical I/O size and not just a gang fragment.
2060 valid_cksum = (zio_checksum_error_impl(zio->io_spa, zio->io_bp,
2061 BP_GET_CHECKSUM(zio->io_bp), zio->io_abd, zio->io_size,
2062 zio->io_offset, NULL) == 0);
2063 zio_pop_transforms(zio);
2064 return (valid_cksum);
2068 * Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a
2069 * checksum and attaches it to the buf's hdr so that we can ensure that the buf
2070 * isn't modified later on. If buf is compressed or there is already a checksum
2071 * on the hdr, this is a no-op (we only checksum uncompressed bufs).
2074 arc_cksum_compute(arc_buf_t *buf)
2076 arc_buf_hdr_t *hdr = buf->b_hdr;
2078 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
2081 ASSERT(HDR_HAS_L1HDR(hdr));
2083 mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
2084 if (hdr->b_l1hdr.b_freeze_cksum != NULL) {
2085 ASSERT(arc_hdr_has_uncompressed_buf(hdr));
2086 mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
2088 } else if (ARC_BUF_COMPRESSED(buf)) {
2089 mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
2093 ASSERT(!ARC_BUF_COMPRESSED(buf));
2094 hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t),
2096 fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL,
2097 hdr->b_l1hdr.b_freeze_cksum);
2098 mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
2106 typedef struct procctl {
2114 arc_buf_unwatch(arc_buf_t *buf)
2121 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
2122 ctl.prwatch.pr_size = 0;
2123 ctl.prwatch.pr_wflags = 0;
2124 result = write(arc_procfd, &ctl, sizeof (ctl));
2125 ASSERT3U(result, ==, sizeof (ctl));
2132 arc_buf_watch(arc_buf_t *buf)
2139 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
2140 ctl.prwatch.pr_size = arc_buf_size(buf);
2141 ctl.prwatch.pr_wflags = WA_WRITE;
2142 result = write(arc_procfd, &ctl, sizeof (ctl));
2143 ASSERT3U(result, ==, sizeof (ctl));
2147 #endif /* illumos */
2149 static arc_buf_contents_t
2150 arc_buf_type(arc_buf_hdr_t *hdr)
2152 arc_buf_contents_t type;
2153 if (HDR_ISTYPE_METADATA(hdr)) {
2154 type = ARC_BUFC_METADATA;
2156 type = ARC_BUFC_DATA;
2158 VERIFY3U(hdr->b_type, ==, type);
2163 arc_is_metadata(arc_buf_t *buf)
2165 return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0);
2169 arc_bufc_to_flags(arc_buf_contents_t type)
2173 /* metadata field is 0 if buffer contains normal data */
2175 case ARC_BUFC_METADATA:
2176 return (ARC_FLAG_BUFC_METADATA);
2180 panic("undefined ARC buffer type!");
2181 return ((uint32_t)-1);
2185 arc_buf_thaw(arc_buf_t *buf)
2187 arc_buf_hdr_t *hdr = buf->b_hdr;
2189 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
2190 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2192 arc_cksum_verify(buf);
2195 * Compressed buffers do not manipulate the b_freeze_cksum or
2196 * allocate b_thawed.
2198 if (ARC_BUF_COMPRESSED(buf)) {
2199 ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL ||
2200 arc_hdr_has_uncompressed_buf(hdr));
2204 ASSERT(HDR_HAS_L1HDR(hdr));
2205 arc_cksum_free(hdr);
2207 mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
2209 if (zfs_flags & ZFS_DEBUG_MODIFY) {
2210 if (hdr->b_l1hdr.b_thawed != NULL)
2211 kmem_free(hdr->b_l1hdr.b_thawed, 1);
2212 hdr->b_l1hdr.b_thawed = kmem_alloc(1, KM_SLEEP);
2216 mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
2219 arc_buf_unwatch(buf);
2224 arc_buf_freeze(arc_buf_t *buf)
2226 arc_buf_hdr_t *hdr = buf->b_hdr;
2227 kmutex_t *hash_lock;
2229 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
2232 if (ARC_BUF_COMPRESSED(buf)) {
2233 ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL ||
2234 arc_hdr_has_uncompressed_buf(hdr));
2238 hash_lock = HDR_LOCK(hdr);
2239 mutex_enter(hash_lock);
2241 ASSERT(HDR_HAS_L1HDR(hdr));
2242 ASSERT(hdr->b_l1hdr.b_freeze_cksum != NULL ||
2243 hdr->b_l1hdr.b_state == arc_anon);
2244 arc_cksum_compute(buf);
2245 mutex_exit(hash_lock);
2249 * The arc_buf_hdr_t's b_flags should never be modified directly. Instead,
2250 * the following functions should be used to ensure that the flags are
2251 * updated in a thread-safe way. When manipulating the flags either
2252 * the hash_lock must be held or the hdr must be undiscoverable. This
2253 * ensures that we're not racing with any other threads when updating
2257 arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
2259 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
2260 hdr->b_flags |= flags;
2264 arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
2266 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
2267 hdr->b_flags &= ~flags;
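/*
 * For example, a caller holding the hash lock may safely flip flags
 * (a sketch):
 *
 *	mutex_enter(HDR_LOCK(hdr));
 *	arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
 *	arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE);
 *	mutex_exit(HDR_LOCK(hdr));
 *
 * The ASSERTs above catch callers that hold neither the lock nor an
 * undiscoverable (HDR_EMPTY) header.
 */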
2271 * Setting the compression bits in the arc_buf_hdr_t's b_flags is
2272 * done in a special way since we have to clear and set bits
2273 * at the same time. Consumers that wish to set the compression bits
2274 * must use this function to ensure that the flags are updated in
2275 * a thread-safe manner.
2278 arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp)
2280 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
2283 * Holes and embedded blocks will always have a psize = 0 so
2284 * we ignore the compression of the blkptr and set the
2285 * arc_buf_hdr_t's compression to ZIO_COMPRESS_OFF.
2286 * Holes and embedded blocks remain anonymous so we don't
2287 * want to uncompress them. Mark them as uncompressed.
2289 if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) {
2290 arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
2291 HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF);
2292 ASSERT(!HDR_COMPRESSION_ENABLED(hdr));
2293 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
2295 arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
2296 HDR_SET_COMPRESS(hdr, cmp);
2297 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp);
2298 ASSERT(HDR_COMPRESSION_ENABLED(hdr));
2303 * Looks for another buf on the same hdr which has the data decompressed, copies
2304 * from it, and returns true. If no such buf exists, returns false.
2307 arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
2309 arc_buf_hdr_t *hdr = buf->b_hdr;
2310 boolean_t copied = B_FALSE;
2312 ASSERT(HDR_HAS_L1HDR(hdr));
2313 ASSERT3P(buf->b_data, !=, NULL);
2314 ASSERT(!ARC_BUF_COMPRESSED(buf));
2316 for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL;
2317 from = from->b_next) {
2318 /* can't use our own data buffer */
2323 if (!ARC_BUF_COMPRESSED(from)) {
2324 bcopy(from->b_data, buf->b_data, arc_buf_size(buf));
2331 * There were no decompressed bufs, so there should not be a
2332 * checksum on the hdr either.
2334 EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL);
2340 * Given a buf that has a data buffer attached to it, this function will
2341 * efficiently fill the buf with data of the specified compression setting from
2342 * the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr
2343 * are already sharing a data buf, no copy is performed.
2345 * If the buf is marked as compressed but uncompressed data was requested, this
2346 * will allocate a new data buffer for the buf, remove that flag, and fill the
2347 * buf with uncompressed data. You can't request a compressed buf on a hdr with
2348 * uncompressed data, and (since we haven't added support for it yet) if you
2349 * want compressed data your buf must already be marked as compressed and have
2350 * the correct-sized data buffer.
2353 arc_buf_fill(arc_buf_t *buf, boolean_t compressed)
2355 arc_buf_hdr_t *hdr = buf->b_hdr;
2356 boolean_t hdr_compressed = (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF);
2357 dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap;
2359 ASSERT3P(buf->b_data, !=, NULL);
2360 IMPLY(compressed, hdr_compressed);
2361 IMPLY(compressed, ARC_BUF_COMPRESSED(buf));
2363 if (hdr_compressed == compressed) {
2364 if (!arc_buf_is_shared(buf)) {
2365 abd_copy_to_buf(buf->b_data, hdr->b_l1hdr.b_pabd,
2369 ASSERT(hdr_compressed);
2370 ASSERT(!compressed);
2371 ASSERT3U(HDR_GET_LSIZE(hdr), !=, HDR_GET_PSIZE(hdr));
2374 * If the buf is sharing its data with the hdr, unlink it and
2375 * allocate a new data buffer for the buf.
2377 if (arc_buf_is_shared(buf)) {
2378 ASSERT(ARC_BUF_COMPRESSED(buf));
2380 /* We need to give the buf its own b_data */
2381 buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
2383 arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
2384 arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
2386 /* Previously overhead was 0; just add new overhead */
2387 ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr));
2388 } else if (ARC_BUF_COMPRESSED(buf)) {
2389 /* We need to reallocate the buf's b_data */
2390 arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr),
2393 arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
2395 /* We increased the size of b_data; update overhead */
2396 ARCSTAT_INCR(arcstat_overhead_size,
2397 HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr));
2401 * Regardless of the buf's previous compression settings, it
2402 * should not be compressed at the end of this function.
2404 buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
2407 * Try copying the data from another buf which already has a
2408 * decompressed version. If that's not possible, it's time to
2409 * bite the bullet and decompress the data from the hdr.
2411 if (arc_buf_try_copy_decompressed_data(buf)) {
2412 /* Skip byteswapping and checksumming (already done) */
2413 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, !=, NULL);
2416 int error = zio_decompress_data(HDR_GET_COMPRESS(hdr),
2417 hdr->b_l1hdr.b_pabd, buf->b_data,
2418 HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));
2421 * Absent hardware errors or software bugs, this should
2422 * be impossible, but log it anyway so we can debug it.
2426 "hdr %p, compress %d, psize %d, lsize %d",
2427 hdr, HDR_GET_COMPRESS(hdr),
2428 HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));
2429 return (SET_ERROR(EIO));
2434 /* Byteswap the buf's data if necessary */
2435 if (bswap != DMU_BSWAP_NUMFUNCS) {
2436 ASSERT(!HDR_SHARED_DATA(hdr));
2437 ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS);
2438 dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr));
2441 /* Compute the hdr's checksum if necessary */
2442 arc_cksum_compute(buf);
2448 arc_decompress(arc_buf_t *buf)
2450 return (arc_buf_fill(buf, B_FALSE));
2454 * Return the size of the block, b_pabd, that is stored in the arc_buf_hdr_t.
2457 arc_hdr_size(arc_buf_hdr_t *hdr)
2461 if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
2462 HDR_GET_PSIZE(hdr) > 0) {
2463 size = HDR_GET_PSIZE(hdr);
2465 ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0);
2466 size = HDR_GET_LSIZE(hdr);
2472 * Increment the amount of evictable space in the arc_state_t's refcount.
2473 * We account for the space used by the hdr and the arc buf individually
2474 * so that we can add and remove them from the refcount individually.
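 *
 * For example, a non-ghost header whose b_pabd holds 16K of compressed
 * data and that carries two unshared 128K bufs contributes
 * 16K + 128K + 128K to arcs_esize, while a ghost header contributes
 * just its HDR_GET_LSIZE().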
2477 arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
2479 arc_buf_contents_t type = arc_buf_type(hdr);
2481 ASSERT(HDR_HAS_L1HDR(hdr));
2483 if (GHOST_STATE(state)) {
2484 ASSERT0(hdr->b_l1hdr.b_bufcnt);
2485 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
2486 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
2487 (void) refcount_add_many(&state->arcs_esize[type],
2488 HDR_GET_LSIZE(hdr), hdr);
2492 ASSERT(!GHOST_STATE(state));
2493 if (hdr->b_l1hdr.b_pabd != NULL) {
2494 (void) refcount_add_many(&state->arcs_esize[type],
2495 arc_hdr_size(hdr), hdr);
2497 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
2498 buf = buf->b_next) {
2499 if (arc_buf_is_shared(buf))
2501 (void) refcount_add_many(&state->arcs_esize[type],
2502 arc_buf_size(buf), buf);
2507 * Decrement the amount of evictable space in the arc_state_t's refcount.
2508 * We account for the space used by the hdr and the arc buf individually
2509 * so that we can add and remove them from the refcount individually.
2512 arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
2514 arc_buf_contents_t type = arc_buf_type(hdr);
2516 ASSERT(HDR_HAS_L1HDR(hdr));
2518 if (GHOST_STATE(state)) {
2519 ASSERT0(hdr->b_l1hdr.b_bufcnt);
2520 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
2521 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
2522 (void) refcount_remove_many(&state->arcs_esize[type],
2523 HDR_GET_LSIZE(hdr), hdr);
2527 ASSERT(!GHOST_STATE(state));
2528 if (hdr->b_l1hdr.b_pabd != NULL) {
2529 (void) refcount_remove_many(&state->arcs_esize[type],
2530 arc_hdr_size(hdr), hdr);
2532 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
2533 buf = buf->b_next) {
2534 if (arc_buf_is_shared(buf))
2536 (void) refcount_remove_many(&state->arcs_esize[type],
2537 arc_buf_size(buf), buf);
2542 * Add a reference to this hdr indicating that someone is actively
2543 * referencing that memory. When the refcount transitions from 0 to 1,
2544 * we remove it from the respective arc_state_t list to indicate that
2545 * it is not evictable.
2548 add_reference(arc_buf_hdr_t *hdr, void *tag)
2550 ASSERT(HDR_HAS_L1HDR(hdr));
2551 if (!MUTEX_HELD(HDR_LOCK(hdr))) {
2552 ASSERT(hdr->b_l1hdr.b_state == arc_anon);
2553 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
2554 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
2557 arc_state_t *state = hdr->b_l1hdr.b_state;
2559 if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
2560 (state != arc_anon)) {
2561 /* We don't use the L2-only state list. */
2562 if (state != arc_l2c_only) {
2563 multilist_remove(state->arcs_list[arc_buf_type(hdr)],
2565 arc_evictable_space_decrement(hdr, state);
2567 /* remove the prefetch flag if we get a reference */
2568 arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
2573 * Remove a reference from this hdr. When the reference transitions from
2574 * 1 to 0 and we're not anonymous, then we add this hdr to the arc_state_t's
2575 * list, making it eligible for eviction.
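 *
 * Together with add_reference() this yields the usual hold lifecycle
 * (a sketch; "tag" is whatever cookie the consumer passed in):
 *
 *	add_reference(hdr, tag);		0 -> 1: hdr leaves the lists
 *	... consumer reads or writes the buffer ...
 *	remove_reference(hdr, hash_lock, tag);	1 -> 0: hdr re-inserted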
2578 remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
2581 arc_state_t *state = hdr->b_l1hdr.b_state;
2583 ASSERT(HDR_HAS_L1HDR(hdr));
2584 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
2585 ASSERT(!GHOST_STATE(state));
2588 * arc_l2c_only counts as a ghost state so we don't need to explicitly
2589 * check to prevent usage of the arc_l2c_only list.
2591 if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
2592 (state != arc_anon)) {
2593 multilist_insert(state->arcs_list[arc_buf_type(hdr)], hdr);
2594 ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
2595 arc_evictable_space_increment(hdr, state);
2601 * Returns detailed information about a specific arc buffer. When the
2602 * state_index argument is set, the function will calculate the arc header
2603 * list position for its arc state. Since this requires a linear traversal,
2604 * callers are strongly encouraged not to do this. However, it can be helpful
2605 * for targeted analysis so the functionality is provided.
2608 arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index)
2610 arc_buf_hdr_t *hdr = ab->b_hdr;
2611 l1arc_buf_hdr_t *l1hdr = NULL;
2612 l2arc_buf_hdr_t *l2hdr = NULL;
2613 arc_state_t *state = NULL;
2615 memset(abi, 0, sizeof (arc_buf_info_t));
2620 abi->abi_flags = hdr->b_flags;
2622 if (HDR_HAS_L1HDR(hdr)) {
2623 l1hdr = &hdr->b_l1hdr;
2624 state = l1hdr->b_state;
2626 if (HDR_HAS_L2HDR(hdr))
2627 l2hdr = &hdr->b_l2hdr;
2630 abi->abi_bufcnt = l1hdr->b_bufcnt;
2631 abi->abi_access = l1hdr->b_arc_access;
2632 abi->abi_mru_hits = l1hdr->b_mru_hits;
2633 abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits;
2634 abi->abi_mfu_hits = l1hdr->b_mfu_hits;
2635 abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits;
2636 abi->abi_holds = refcount_count(&l1hdr->b_refcnt);
2640 abi->abi_l2arc_dattr = l2hdr->b_daddr;
2641 abi->abi_l2arc_hits = l2hdr->b_hits;
2644 abi->abi_state_type = state ? state->arcs_state : ARC_STATE_ANON;
2645 abi->abi_state_contents = arc_buf_type(hdr);
2646 abi->abi_size = arc_hdr_size(hdr);
2650 * Move the supplied buffer to the indicated state. The hash lock
2651 * for the buffer must be held by the caller.
2654 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
2655 kmutex_t *hash_lock)
2657 arc_state_t *old_state;
2660 boolean_t update_old, update_new;
2661 arc_buf_contents_t buftype = arc_buf_type(hdr);
2664 * We almost always have an L1 hdr here, since we call arc_hdr_realloc()
2665 * in arc_read() when bringing a buffer out of the L2ARC. However, the
2666 * L1 hdr doesn't always exist when we change state to arc_anon before
2667 * destroying a header, in which case reallocating to add the L1 hdr is
2670 if (HDR_HAS_L1HDR(hdr)) {
2671 old_state = hdr->b_l1hdr.b_state;
2672 refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt);
2673 bufcnt = hdr->b_l1hdr.b_bufcnt;
2674 update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL);
2676 old_state = arc_l2c_only;
2679 update_old = B_FALSE;
2681 update_new = update_old;
2683 ASSERT(MUTEX_HELD(hash_lock));
2684 ASSERT3P(new_state, !=, old_state);
2685 ASSERT(!GHOST_STATE(new_state) || bufcnt == 0);
2686 ASSERT(old_state != arc_anon || bufcnt <= 1);
2689 * If this buffer is evictable, transfer it from the
2690 * old state list to the new state list.
2693 if (old_state != arc_anon && old_state != arc_l2c_only) {
2694 ASSERT(HDR_HAS_L1HDR(hdr));
2695 multilist_remove(old_state->arcs_list[buftype], hdr);
2697 if (GHOST_STATE(old_state)) {
2699 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
2700 update_old = B_TRUE;
2702 arc_evictable_space_decrement(hdr, old_state);
2704 if (new_state != arc_anon && new_state != arc_l2c_only) {
2707 * An L1 header always exists here, since if we're
2708 * moving to some L1-cached state (i.e. not l2c_only or
2709 * anonymous), we realloc the header to add an L1hdr
2712 ASSERT(HDR_HAS_L1HDR(hdr));
2713 multilist_insert(new_state->arcs_list[buftype], hdr);
2715 if (GHOST_STATE(new_state)) {
2717 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
2718 update_new = B_TRUE;
2720 arc_evictable_space_increment(hdr, new_state);
2724 ASSERT(!HDR_EMPTY(hdr));
2725 if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr))
2726 buf_hash_remove(hdr);
2728 /* adjust state sizes (ignore arc_l2c_only) */
2730 if (update_new && new_state != arc_l2c_only) {
2731 ASSERT(HDR_HAS_L1HDR(hdr));
2732 if (GHOST_STATE(new_state)) {
2736 * When moving a header to a ghost state, we first
2737 * remove all arc buffers. Thus, we'll have a
2738 * bufcnt of zero, and no arc buffer to use for
2739 * the reference. As a result, we use the arc
2740 * header pointer for the reference.
2742 (void) refcount_add_many(&new_state->arcs_size,
2743 HDR_GET_LSIZE(hdr), hdr);
2744 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
2746 uint32_t buffers = 0;
2749 * Each individual buffer holds a unique reference,
2750 * thus we must remove each of these references one
2753 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
2754 buf = buf->b_next) {
2755 ASSERT3U(bufcnt, !=, 0);
2759 * When the arc_buf_t is sharing the data
2760 * block with the hdr, the owner of the
2761 * reference belongs to the hdr. Only
2762 * add to the refcount if the arc_buf_t is
2765 if (arc_buf_is_shared(buf))
2768 (void) refcount_add_many(&new_state->arcs_size,
2769 arc_buf_size(buf), buf);
2771 ASSERT3U(bufcnt, ==, buffers);
2773 if (hdr->b_l1hdr.b_pabd != NULL) {
2774 (void) refcount_add_many(&new_state->arcs_size,
2775 arc_hdr_size(hdr), hdr);
2777 ASSERT(GHOST_STATE(old_state));
2782 if (update_old && old_state != arc_l2c_only) {
2783 ASSERT(HDR_HAS_L1HDR(hdr));
2784 if (GHOST_STATE(old_state)) {
2786 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
2789 * When moving a header off of a ghost state,
2790 * the header will not contain any arc buffers.
2791 * We use the arc header pointer for the reference
2792 * which is exactly what we did when we put the
2793 * header on the ghost state.
2796 (void) refcount_remove_many(&old_state->arcs_size,
2797 HDR_GET_LSIZE(hdr), hdr);
2799 uint32_t buffers = 0;
2802 * Each individual buffer holds a unique reference,
2803 * thus we must remove each of these references one
2806 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
2807 buf = buf->b_next) {
2808 ASSERT3U(bufcnt, !=, 0);
2812 * When the arc_buf_t is sharing the data
2813 * block with the hdr, the owner of the
2814 * reference belongs to the hdr. Only
2815 * add to the refcount if the arc_buf_t is
2818 if (arc_buf_is_shared(buf))
2821 (void) refcount_remove_many(
2822 &old_state->arcs_size, arc_buf_size(buf),
2825 ASSERT3U(bufcnt, ==, buffers);
2826 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
2827 (void) refcount_remove_many(
2828 &old_state->arcs_size, arc_hdr_size(hdr), hdr);
2832 if (HDR_HAS_L1HDR(hdr))
2833 hdr->b_l1hdr.b_state = new_state;
2836 * L2 headers should never be on the L2 state list since they don't
2837 * have L1 headers allocated.
2839 ASSERT(multilist_is_empty(arc_l2c_only->arcs_list[ARC_BUFC_DATA]) &&
2840 multilist_is_empty(arc_l2c_only->arcs_list[ARC_BUFC_METADATA]));
2844 arc_space_consume(uint64_t space, arc_space_type_t type)
2846 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
2849 case ARC_SPACE_DATA:
2850 aggsum_add(&astat_data_size, space);
2852 case ARC_SPACE_META:
2853 aggsum_add(&astat_metadata_size, space);
2855 case ARC_SPACE_BONUS:
2856 aggsum_add(&astat_bonus_size, space);
2858 case ARC_SPACE_DNODE:
2859 aggsum_add(&astat_dnode_size, space);
2861 case ARC_SPACE_DBUF:
2862 aggsum_add(&astat_dbuf_size, space);
2864 case ARC_SPACE_HDRS:
2865 aggsum_add(&astat_hdr_size, space);
2867 case ARC_SPACE_L2HDRS:
2868 aggsum_add(&astat_l2_hdr_size, space);
2872 if (type != ARC_SPACE_DATA)
2873 aggsum_add(&arc_meta_used, space);
2875 aggsum_add(&arc_size, space);
2879 arc_space_return(uint64_t space, arc_space_type_t type)
2881 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
2884 case ARC_SPACE_DATA:
2885 aggsum_add(&astat_data_size, -space);
2887 case ARC_SPACE_META:
2888 aggsum_add(&astat_metadata_size, -space);
2890 case ARC_SPACE_BONUS:
2891 aggsum_add(&astat_bonus_size, -space);
2893 case ARC_SPACE_DNODE:
2894 aggsum_add(&astat_dnode_size, -space);
2896 case ARC_SPACE_DBUF:
2897 aggsum_add(&astat_dbuf_size, -space);
2899 case ARC_SPACE_HDRS:
2900 aggsum_add(&astat_hdr_size, -space);
2902 case ARC_SPACE_L2HDRS:
2903 aggsum_add(&astat_l2_hdr_size, -space);
2907 if (type != ARC_SPACE_DATA) {
2908 ASSERT(aggsum_compare(&arc_meta_used, space) >= 0);
2910 * We use the upper bound here rather than the precise value
2911 * because the arc_meta_max value doesn't need to be
2912 * precise. It's only consumed by humans via arcstats.
2914 if (arc_meta_max < aggsum_upper_bound(&arc_meta_used))
2915 arc_meta_max = aggsum_upper_bound(&arc_meta_used);
2916 aggsum_add(&arc_meta_used, -space);
2919 ASSERT(aggsum_compare(&arc_size, space) >= 0);
2920 aggsum_add(&arc_size, -space);
2924 * Given a hdr and a buf, returns whether that buf can share its b_data buffer
2925 * with the hdr's b_pabd.
2928 arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
2931 * The criteria for sharing a hdr's data are:
2932 * 1. the hdr's compression matches the buf's compression
2933 * 2. the hdr doesn't need to be byteswapped
2934 * 3. the hdr isn't already being shared
2935 * 4. the buf is either compressed or it is the last buf in the hdr list
2937 * Criterion #4 maintains the invariant that shared uncompressed
2938 * bufs must be the final buf in the hdr's b_buf list. Reading this, you
2939 * might ask, "if a compressed buf is allocated first, won't that be the
2940 * last thing in the list?", but in that case it's impossible to create
2941 * a shared uncompressed buf anyway (because the hdr must be compressed
2942 * to have the compressed buf). You might also think that #3 is
2943 * sufficient to make this guarantee; however, it's possible
2944 * (specifically in the rare L2ARC write race mentioned in
2945 * arc_buf_alloc_impl()) there will be an existing uncompressed buf that
2946 * is sharable, but wasn't at the time of its allocation. Rather than
2947 * allow a new shared uncompressed buf to be created and then shuffle
2948 * the list around to make it the last element, this simply disallows
2949 * sharing if the new buf isn't the first to be added.
2951 ASSERT3P(buf->b_hdr, ==, hdr);
2952 boolean_t hdr_compressed = HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF;
2953 boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0;
2954 return (buf_compressed == hdr_compressed &&
2955 hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS &&
2956 !HDR_SHARED_DATA(hdr) &&
2957 (ARC_BUF_LAST(buf) || ARC_BUF_COMPRESSED(buf)));
2961 * Allocate a buf for this hdr. If you care about the data that's in the hdr,
2962 * or if you want a compressed buffer, pass those flags in. Returns 0 if the
2963 * copy was made successfully, or an error code otherwise.
2966 arc_buf_alloc_impl(arc_buf_hdr_t *hdr, void *tag, boolean_t compressed,
2967 boolean_t fill, arc_buf_t **ret)
2971 ASSERT(HDR_HAS_L1HDR(hdr));
2972 ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
2973 VERIFY(hdr->b_type == ARC_BUFC_DATA ||
2974 hdr->b_type == ARC_BUFC_METADATA);
2975 ASSERT3P(ret, !=, NULL);
2976 ASSERT3P(*ret, ==, NULL);
2978 buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
2981 buf->b_next = hdr->b_l1hdr.b_buf;
2984 add_reference(hdr, tag);
2987 * We're about to change the hdr's b_flags. We must either
2988 * hold the hash_lock or be undiscoverable.
2990 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
2993 * Only honor requests for compressed bufs if the hdr is actually
2996 if (compressed && HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF)
2997 buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
3000 * If the hdr's data can be shared then we share the data buffer and
3001 * set the appropriate bit in the hdr's b_flags to indicate the hdr is
3002 * sharing its b_pabd with the arc_buf_t. Otherwise, we allocate a new
3003 * buffer to store the buf's data.
3005 * There are two additional restrictions here because we're sharing
3006 * hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be
3007 * actively involved in an L2ARC write, because if this buf is used by
3008 * an arc_write() then the hdr's data buffer will be released when the
3009 * write completes, even though the L2ARC write might still be using it.
3010 * Second, the hdr's ABD must be linear so that the buf's user doesn't
3011 * need to be ABD-aware.
3013 boolean_t can_share = arc_can_share(hdr, buf) && !HDR_L2_WRITING(hdr) &&
3014 abd_is_linear(hdr->b_l1hdr.b_pabd);
3016 /* Set up b_data and sharing */
3018 buf->b_data = abd_to_buf(hdr->b_l1hdr.b_pabd);
3019 buf->b_flags |= ARC_BUF_FLAG_SHARED;
3020 arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
3023 arc_get_data_buf(hdr, arc_buf_size(buf), buf);
3024 ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
3026 VERIFY3P(buf->b_data, !=, NULL);
3028 hdr->b_l1hdr.b_buf = buf;
3029 hdr->b_l1hdr.b_bufcnt += 1;
3032 * If the user wants the data from the hdr, we need to either copy or
3033 * decompress the data.
3036 return (arc_buf_fill(buf, ARC_BUF_COMPRESSED(buf) != 0));
3042 static char *arc_onloan_tag = "onloan";
3045 arc_loaned_bytes_update(int64_t delta)
3047 atomic_add_64(&arc_loaned_bytes, delta);
3049 /* assert that it did not wrap around */
3050 ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
3054 * Loan out an anonymous arc buffer. Loaned buffers are not counted as
3055 * in-flight data by arc_tempreserve_space() until they are "returned". Loaned
3056 * buffers must be returned to the arc before they can be used by the DMU or
3060 arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size)
3062 arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag,
3063 is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size);
3065 arc_loaned_bytes_update(arc_buf_size(buf));
3071 arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize,
3072 enum zio_compress compression_type)
3074 arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag,
3075 psize, lsize, compression_type);
3077 arc_loaned_bytes_update(arc_buf_size(buf));
3084 * Return a loaned arc buffer to the arc.
3087 arc_return_buf(arc_buf_t *buf, void *tag)
3089 arc_buf_hdr_t *hdr = buf->b_hdr;
3091 ASSERT3P(buf->b_data, !=, NULL);
3092 ASSERT(HDR_HAS_L1HDR(hdr));
3093 (void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
3094 (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
3096 arc_loaned_bytes_update(-arc_buf_size(buf));
3099 /* Detach an arc_buf from a dbuf (tag) */
3101 arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
3103 arc_buf_hdr_t *hdr = buf->b_hdr;
3105 ASSERT3P(buf->b_data, !=, NULL);
3106 ASSERT(HDR_HAS_L1HDR(hdr));
3107 (void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
3108 (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
3110 arc_loaned_bytes_update(arc_buf_size(buf));
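/*
 * A typical loan cycle (a sketch; "db" stands for the eventual owner's
 * tag and is hypothetical):
 *
 *	arc_buf_t *abuf = arc_loan_buf(spa, B_FALSE, size);
 *	... fill abuf->b_data ...
 *	arc_return_buf(abuf, db);	returns the loan, moving the hold
 *					from arc_onloan_tag to "db"
 */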
3114 l2arc_free_abd_on_write(abd_t *abd, size_t size, arc_buf_contents_t type)
3116 l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP);
3119 df->l2df_size = size;
3120 df->l2df_type = type;
3121 mutex_enter(&l2arc_free_on_write_mtx);
3122 list_insert_head(l2arc_free_on_write, df);
3123 mutex_exit(&l2arc_free_on_write_mtx);
3127 arc_hdr_free_on_write(arc_buf_hdr_t *hdr)
3129 arc_state_t *state = hdr->b_l1hdr.b_state;
3130 arc_buf_contents_t type = arc_buf_type(hdr);
3131 uint64_t size = arc_hdr_size(hdr);
3133 /* protected by hash lock, if in the hash table */
3134 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
3135 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
3136 ASSERT(state != arc_anon && state != arc_l2c_only);
3138 (void) refcount_remove_many(&state->arcs_esize[type],
3141 (void) refcount_remove_many(&state->arcs_size, size, hdr);
3142 if (type == ARC_BUFC_METADATA) {
3143 arc_space_return(size, ARC_SPACE_META);
3145 ASSERT(type == ARC_BUFC_DATA);
3146 arc_space_return(size, ARC_SPACE_DATA);
3149 l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type);
3153 * Share the arc_buf_t's data with the hdr. Whenever we are sharing the
3154 * data buffer, we transfer the refcount ownership to the hdr and update
3155 * the appropriate kstats.
3158 arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
3160 arc_state_t *state = hdr->b_l1hdr.b_state;
3162 ASSERT(arc_can_share(hdr, buf));
3163 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
3164 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
3167 * Start sharing the data buffer. We transfer the
3168 * refcount ownership to the hdr since it always owns
3169 * the refcount whenever an arc_buf_t is shared.
3171 refcount_transfer_ownership(&state->arcs_size, buf, hdr);
3172 hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
3173 abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
3174 HDR_ISTYPE_METADATA(hdr));
3175 arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
3176 buf->b_flags |= ARC_BUF_FLAG_SHARED;
3179 * Since we've transferred ownership to the hdr we need
3180 * to increment its compressed and uncompressed kstats and
3181 * decrement the overhead size.
3183 ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
3184 ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
3185 ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf));
3189 arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
3191 arc_state_t *state = hdr->b_l1hdr.b_state;
3193 ASSERT(arc_buf_is_shared(buf));
3194 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
3195 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
3198 * We are no longer sharing this buffer so we need
3199 * to transfer its ownership to the rightful owner.
3201 refcount_transfer_ownership(&state->arcs_size, hdr, buf);
3202 arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
3203 abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
3204 abd_put(hdr->b_l1hdr.b_pabd);
3205 hdr->b_l1hdr.b_pabd = NULL;
3206 buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
3209 * Since the buffer is no longer shared between
3210 * the arc buf and the hdr, count it as overhead.
3212 ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
3213 ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
3214 ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
3218 * Remove an arc_buf_t from the hdr's buf list and return the last
3219 * arc_buf_t on the list. If no buffers remain on the list then return
3223 arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf)
3225 ASSERT(HDR_HAS_L1HDR(hdr));
3226 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
3228 arc_buf_t **bufp = &hdr->b_l1hdr.b_buf;
3229 arc_buf_t *lastbuf = NULL;
3232 * Remove the buf from the hdr list and locate the last
3233 * remaining buffer on the list.
3235 while (*bufp != NULL) {
3237 *bufp = buf->b_next;
3240 * If we've removed a buffer in the middle of
3241 * the list then update the lastbuf and update
3244 if (*bufp != NULL) {
3246 bufp = &(*bufp)->b_next;
3250 ASSERT3P(lastbuf, !=, buf);
3251 IMPLY(hdr->b_l1hdr.b_bufcnt > 0, lastbuf != NULL);
3252 IMPLY(hdr->b_l1hdr.b_bufcnt > 0, hdr->b_l1hdr.b_buf != NULL);
3253 IMPLY(lastbuf != NULL, ARC_BUF_LAST(lastbuf));
3259 * Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
3263 arc_buf_destroy_impl(arc_buf_t *buf)
3265 arc_buf_hdr_t *hdr = buf->b_hdr;
3268 * Free up the data associated with the buf but only if we're not
3269 * sharing this with the hdr. If we are sharing it with the hdr, the
3270 * hdr is responsible for doing the free.
3272 if (buf->b_data != NULL) {
3274 * We're about to change the hdr's b_flags. We must either
3275 * hold the hash_lock or be undiscoverable.
3277 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
3279 arc_cksum_verify(buf);
3281 arc_buf_unwatch(buf);
3284 if (arc_buf_is_shared(buf)) {
3285 arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
3287 uint64_t size = arc_buf_size(buf);
3288 arc_free_data_buf(hdr, buf->b_data, size, buf);
3289 ARCSTAT_INCR(arcstat_overhead_size, -size);
3293 ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
3294 hdr->b_l1hdr.b_bufcnt -= 1;
3297 arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
3299 if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) {
3301 * If the current arc_buf_t is sharing its data buffer with the
3302 * hdr, then reassign the hdr's b_pabd to share it with the new
3303 * buffer at the end of the list. The shared buffer is always
3304 * the last one on the hdr's buffer list.
3306 * There is an equivalent case for compressed bufs, but since
3307 * they aren't guaranteed to be the last buf in the list and
3308 * that is an exceedingly rare case, we just allow that space to be
3309 * wasted temporarily.
3311 if (lastbuf != NULL) {
3312 /* Only one buf can be shared at once */
3313 VERIFY(!arc_buf_is_shared(lastbuf));
3314 /* hdr is uncompressed so can't have compressed buf */
3315 VERIFY(!ARC_BUF_COMPRESSED(lastbuf));
3317 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
3318 arc_hdr_free_pabd(hdr);
3321 * We must set up a new shared block between the
3322 * last buffer and the hdr. The data would have
3323 * been allocated by the arc buf so we need to transfer
3324 * ownership to the hdr since it's now being shared.
3326 arc_share_buf(hdr, lastbuf);
3328 } else if (HDR_SHARED_DATA(hdr)) {
3330 * Uncompressed shared buffers are always at the end
3331 * of the list. Compressed buffers don't have the
3332 * same requirements. This makes it hard to
3333 * simply assert that the lastbuf is shared so
3334 * we rely on the hdr's compression flags to determine
3335 * if we have a compressed, shared buffer.
3337 ASSERT3P(lastbuf, !=, NULL);
3338 ASSERT(arc_buf_is_shared(lastbuf) ||
3339 HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF);
3343 * Free the checksum if we're removing the last uncompressed buf from
3346 if (!arc_hdr_has_uncompressed_buf(hdr)) {
3347 arc_cksum_free(hdr);
3350 /* clean up the buf */
3352 kmem_cache_free(buf_cache, buf);
3356 arc_hdr_alloc_pabd(arc_buf_hdr_t *hdr)
3358 ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
3359 ASSERT(HDR_HAS_L1HDR(hdr));
3360 ASSERT(!HDR_SHARED_DATA(hdr));
3362 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
3363 hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr);
3364 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
3365 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
3367 ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
3368 ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
3372 arc_hdr_free_pabd(arc_buf_hdr_t *hdr)
3374 ASSERT(HDR_HAS_L1HDR(hdr));
3375 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
3378 * If the hdr is currently being written to the l2arc then
3379 * we defer freeing the data by adding it to the l2arc_free_on_write
3380 * list. The l2arc will free the data once it's finished
3381 * writing it to the l2arc device.
3383 if (HDR_L2_WRITING(hdr)) {
3384 arc_hdr_free_on_write(hdr);
3385 ARCSTAT_BUMP(arcstat_l2_free_on_write);
3387 arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
3388 arc_hdr_size(hdr), hdr);
3390 hdr->b_l1hdr.b_pabd = NULL;
3391 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
3393 ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
3394 ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
3397 static arc_buf_hdr_t *
3398 arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
3399 enum zio_compress compression_type, arc_buf_contents_t type)
3403 VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA);
3405 hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
3406 ASSERT(HDR_EMPTY(hdr));
3407 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
3408 ASSERT3P(hdr->b_l1hdr.b_thawed, ==, NULL);
3409 HDR_SET_PSIZE(hdr, psize);
3410 HDR_SET_LSIZE(hdr, lsize);
3414 arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L1HDR);
3415 arc_hdr_set_compress(hdr, compression_type);
3417 hdr->b_l1hdr.b_state = arc_anon;
3418 hdr->b_l1hdr.b_arc_access = 0;
3419 hdr->b_l1hdr.b_bufcnt = 0;
3420 hdr->b_l1hdr.b_buf = NULL;
3423 * Allocate the hdr's buffer. This will contain either
3424 * the compressed or uncompressed data depending on the block
3425 * it references and compressed arc enablement.
3427 arc_hdr_alloc_pabd(hdr);
3428 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
3434 * Transition between the two allocation states for the arc_buf_hdr struct.
3435 * The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
3436 * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
3437 * version is used when a cache buffer is only in the L2ARC in order to reduce
3440 static arc_buf_hdr_t *
3441 arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
3443 ASSERT(HDR_HAS_L2HDR(hdr));
3445 arc_buf_hdr_t *nhdr;
3446 l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
3448 ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
3449 (old == hdr_l2only_cache && new == hdr_full_cache));
3451 nhdr = kmem_cache_alloc(new, KM_PUSHPAGE);
3453 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
3454 buf_hash_remove(hdr);
3456 bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);
3458 if (new == hdr_full_cache) {
3459 arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR);
3461 * arc_access and arc_change_state need to be aware that a
3462 * header has just come out of L2ARC, so we set its state to
3463 * l2c_only even though it's about to change.
3465 nhdr->b_l1hdr.b_state = arc_l2c_only;
3467 /* Verify that previous threads set b_pabd to NULL before freeing */
3468 ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL);
3470 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
3471 ASSERT0(hdr->b_l1hdr.b_bufcnt);
3472 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
3475 * If we've reached here, we must have been called from
3476 * arc_evict_hdr(), as such we should have already been
3477 * removed from any ghost list we were previously on
3478 * (which protects us from racing with arc_evict_state),
3479 * thus no locking is needed during this check.
3481 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
3484 * A buffer must not be moved into the arc_l2c_only
3485 * state if it's not finished being written out to the
3486 * l2arc device. Otherwise, the b_l1hdr.b_pabd field
3487 * might try to be accessed, even though it was removed.
3489 VERIFY(!HDR_L2_WRITING(hdr));
3490 VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL);
3493 if (hdr->b_l1hdr.b_thawed != NULL) {
3494 kmem_free(hdr->b_l1hdr.b_thawed, 1);
3495 hdr->b_l1hdr.b_thawed = NULL;
3499 arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR);
3502 * The header has been reallocated so we need to re-insert it into any
3505 (void) buf_hash_insert(nhdr, NULL);
3507 ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));
3509 mutex_enter(&dev->l2ad_mtx);
3512 * We must place the realloc'ed header back into the list at
3513 * the same spot. Otherwise, if it's placed earlier in the list,
3514 * l2arc_write_buffers() could find it during the function's
3515 * write phase, and try to write it out to the l2arc.
3517 list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
3518 list_remove(&dev->l2ad_buflist, hdr);
3520 mutex_exit(&dev->l2ad_mtx);
3523 * Since we're using the pointer address as the tag when
3524 * incrementing and decrementing the l2ad_alloc refcount, we
3525 * must remove the old pointer (that we're about to destroy) and
3526 * add the new pointer to the refcount. Otherwise we'd remove
3527 * the wrong pointer address when calling arc_hdr_destroy() later.
3530 (void) refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
3531 (void) refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr), nhdr);
3533 buf_discard_identity(hdr);
3534 kmem_cache_free(old, hdr);
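	/*
	 * Callers flip between the two caches, e.g. demoting to an
	 * L2-only header during eviction (a sketch):
	 *
	 *	nhdr = arc_hdr_realloc(hdr, hdr_full_cache,
	 *	    hdr_l2only_cache);
	 *
	 * and the reverse when an L2ARC hit needs a full header again.
	 */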
3540 * Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller.
3541 * The buf is returned thawed since we expect the consumer to modify it.
3544 arc_alloc_buf(spa_t *spa, void *tag, arc_buf_contents_t type, int32_t size)
3546 arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size,
3547 ZIO_COMPRESS_OFF, type);
3548 ASSERT(!MUTEX_HELD(HDR_LOCK(hdr)));
3550 arc_buf_t *buf = NULL;
3551 VERIFY0(arc_buf_alloc_impl(hdr, tag, B_FALSE, B_FALSE, &buf));
3558 * Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use this
3559 * for bufs containing metadata.
3562 arc_alloc_compressed_buf(spa_t *spa, void *tag, uint64_t psize, uint64_t lsize,
3563 enum zio_compress compression_type)
3565 ASSERT3U(lsize, >, 0);
3566 ASSERT3U(lsize, >=, psize);
3567 ASSERT(compression_type > ZIO_COMPRESS_OFF);
3568 ASSERT(compression_type < ZIO_COMPRESS_FUNCTIONS);
3570 arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
3571 compression_type, ARC_BUFC_DATA);
3572 ASSERT(!MUTEX_HELD(HDR_LOCK(hdr)));
3574 arc_buf_t *buf = NULL;
3575 VERIFY0(arc_buf_alloc_impl(hdr, tag, B_TRUE, B_FALSE, &buf));
3577 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
3579 if (!arc_buf_is_shared(buf)) {
3581 * To ensure that the hdr has the correct data in it if we call
3582 * arc_decompress() on this buf before it's been written to
3583 * disk, it's easiest if we just set up sharing between the
3586 ASSERT(!abd_is_linear(hdr->b_l1hdr.b_pabd));
3587 arc_hdr_free_pabd(hdr);
3588 arc_share_buf(hdr, buf);
3595 arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
3597 l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
3598 l2arc_dev_t *dev = l2hdr->b_dev;
3599 uint64_t psize = arc_hdr_size(hdr);
3601 ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
3602 ASSERT(HDR_HAS_L2HDR(hdr));
3604 list_remove(&dev->l2ad_buflist, hdr);
3606 ARCSTAT_INCR(arcstat_l2_psize, -psize);
3607 ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr));
3609 vdev_space_update(dev->l2ad_vdev, -psize, 0, 0);
3611 (void) refcount_remove_many(&dev->l2ad_alloc, psize, hdr);
3612 arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
3616 arc_hdr_destroy(arc_buf_hdr_t *hdr)
3618 if (HDR_HAS_L1HDR(hdr)) {
3619 ASSERT(hdr->b_l1hdr.b_buf == NULL ||
3620 hdr->b_l1hdr.b_bufcnt > 0);
3621 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
3622 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
3624 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3625 ASSERT(!HDR_IN_HASH_TABLE(hdr));
3627 if (!HDR_EMPTY(hdr))
3628 buf_discard_identity(hdr);
3630 if (HDR_HAS_L2HDR(hdr)) {
3631 l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
3632 boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx);
3635 mutex_enter(&dev->l2ad_mtx);
3638 * Even though we checked this conditional above, we
3639 * need to check this again now that we have the
3640 * l2ad_mtx. This is because we could be racing with
3641 * another thread calling l2arc_evict() which might have
3642 * destroyed this header's L2 portion as we were waiting
3643 * to acquire the l2ad_mtx. If that happens, we don't
3644 * want to re-destroy the header's L2 portion.
3646 if (HDR_HAS_L2HDR(hdr)) {
3648 arc_hdr_l2hdr_destroy(hdr);
3652 mutex_exit(&dev->l2ad_mtx);
3655 if (HDR_HAS_L1HDR(hdr)) {
3656 arc_cksum_free(hdr);
3658 while (hdr->b_l1hdr.b_buf != NULL)
3659 arc_buf_destroy_impl(hdr->b_l1hdr.b_buf);
3662 if (hdr->b_l1hdr.b_thawed != NULL) {
3663 kmem_free(hdr->b_l1hdr.b_thawed, 1);
3664 hdr->b_l1hdr.b_thawed = NULL;
3668 if (hdr->b_l1hdr.b_pabd != NULL) {
3669 arc_hdr_free_pabd(hdr);
3673 ASSERT3P(hdr->b_hash_next, ==, NULL);
3674 if (HDR_HAS_L1HDR(hdr)) {
3675 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
3676 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
3677 kmem_cache_free(hdr_full_cache, hdr);
3679 kmem_cache_free(hdr_l2only_cache, hdr);
3684 arc_buf_destroy(arc_buf_t *buf, void* tag)
3686 arc_buf_hdr_t *hdr = buf->b_hdr;
3687 kmutex_t *hash_lock = HDR_LOCK(hdr);
3689 if (hdr->b_l1hdr.b_state == arc_anon) {
3690 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
3691 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3692 VERIFY0(remove_reference(hdr, NULL, tag));
3693 arc_hdr_destroy(hdr);
3697 mutex_enter(hash_lock);
3698 ASSERT3P(hdr, ==, buf->b_hdr);
3699 ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
3700 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3701 ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon);
3702 ASSERT3P(buf->b_data, !=, NULL);
3704 (void) remove_reference(hdr, hash_lock, tag);
3705 arc_buf_destroy_impl(buf);
3706 mutex_exit(hash_lock);
3710 * Evict the arc_buf_hdr that is provided as a parameter. The resultant
3711 * state of the header is dependent on its state prior to entering this
3712 * function. The following transitions are possible:
3714 * - arc_mru -> arc_mru_ghost
3715 * - arc_mfu -> arc_mfu_ghost
3716 * - arc_mru_ghost -> arc_l2c_only
3717 * - arc_mru_ghost -> deleted
3718 * - arc_mfu_ghost -> arc_l2c_only
3719 * - arc_mfu_ghost -> deleted
3722 arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
3724 arc_state_t *evicted_state, *state;
3725 int64_t bytes_evicted = 0;
3726 int min_lifetime = HDR_PRESCIENT_PREFETCH(hdr) ?
3727 zfs_arc_min_prescient_prefetch_ms : zfs_arc_min_prefetch_ms;
3729 ASSERT(MUTEX_HELD(hash_lock));
3730 ASSERT(HDR_HAS_L1HDR(hdr));
3732 state = hdr->b_l1hdr.b_state;
3733 if (GHOST_STATE(state)) {
3734 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3735 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
3738 * l2arc_write_buffers() relies on a header's L1 portion
3739 * (i.e. its b_pabd field) during its write phase.
3740 * Thus, we cannot push a header onto the arc_l2c_only
3741 * state (removing its L1 piece) until the header is
3742 * done being written to the l2arc.
3744 if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) {
3745 ARCSTAT_BUMP(arcstat_evict_l2_skip);
3746 return (bytes_evicted);
3749 ARCSTAT_BUMP(arcstat_deleted);
3750 bytes_evicted += HDR_GET_LSIZE(hdr);
3752 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
3754 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
3755 if (HDR_HAS_L2HDR(hdr)) {
3757 * This buffer is cached on the 2nd Level ARC;
3758 * don't destroy the header.
3760 arc_change_state(arc_l2c_only, hdr, hash_lock);
3762 * dropping from L1+L2 cached to L2-only,
3763 * realloc to remove the L1 header.
3765 hdr = arc_hdr_realloc(hdr, hdr_full_cache,
3768 arc_change_state(arc_anon, hdr, hash_lock);
3769 arc_hdr_destroy(hdr);
3771 return (bytes_evicted);
3774 ASSERT(state == arc_mru || state == arc_mfu);
3775 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
3777 /* prefetch buffers have a minimum lifespan */
3778 if (HDR_IO_IN_PROGRESS(hdr) ||
3779 ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
3780 ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access < min_lifetime * hz)) {
3781 ARCSTAT_BUMP(arcstat_evict_skip);
3782 return (bytes_evicted);
3785 ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
3786 while (hdr->b_l1hdr.b_buf) {
3787 arc_buf_t *buf = hdr->b_l1hdr.b_buf;
3788 if (!mutex_tryenter(&buf->b_evict_lock)) {
3789 ARCSTAT_BUMP(arcstat_mutex_miss);
3792 if (buf->b_data != NULL)
3793 bytes_evicted += HDR_GET_LSIZE(hdr);
3794 mutex_exit(&buf->b_evict_lock);
3795 arc_buf_destroy_impl(buf);
3798 if (HDR_HAS_L2HDR(hdr)) {
3799 ARCSTAT_INCR(arcstat_evict_l2_cached, HDR_GET_LSIZE(hdr));
3801 if (l2arc_write_eligible(hdr->b_spa, hdr)) {
3802 ARCSTAT_INCR(arcstat_evict_l2_eligible,
3803 HDR_GET_LSIZE(hdr));
3805 ARCSTAT_INCR(arcstat_evict_l2_ineligible,
3806 HDR_GET_LSIZE(hdr));
3810 if (hdr->b_l1hdr.b_bufcnt == 0) {
3811 arc_cksum_free(hdr);
3813 bytes_evicted += arc_hdr_size(hdr);
3816 * If this hdr is being evicted and has a compressed
3817 * buffer then we discard it here before we change states.
3818 * This ensures that the accounting is updated correctly
3819 * in arc_free_data_impl().
3821 arc_hdr_free_pabd(hdr);
3823 arc_change_state(evicted_state, hdr, hash_lock);
3824 ASSERT(HDR_IN_HASH_TABLE(hdr));
3825 arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
3826 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
3829 return (bytes_evicted);
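
/*
 * A simplified model of the transitions documented above
 * arc_evict_hdr(); the enum and function names here are hypothetical
 * and exist only to make the decision tree explicit:
 */
#if 0
typedef enum { S_MRU, S_MFU, S_MRU_GHOST, S_MFU_GHOST } ev_state_t;
typedef enum {
	R_MRU_GHOST, R_MFU_GHOST, R_L2C_ONLY, R_DELETED, R_SKIPPED
} ev_result_t;

static ev_result_t
evict_outcome(ev_state_t state, int has_l2hdr, int l2_writing)
{
	if (state == S_MRU_GHOST || state == S_MFU_GHOST) {
		/* cannot drop the L1 portion mid l2arc write */
		if (has_l2hdr && l2_writing)
			return (R_SKIPPED);
		/* keep an L2-only header, or delete it outright */
		return (has_l2hdr ? R_L2C_ONLY : R_DELETED);
	}
	/* regular states fall back to their ghost list */
	return (state == S_MRU ? R_MRU_GHOST : R_MFU_GHOST);
}
#endif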
3833 arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
3834 uint64_t spa, int64_t bytes)
3836 multilist_sublist_t *mls;
3837 uint64_t bytes_evicted = 0;
3839 kmutex_t *hash_lock;
3840 int evict_count = 0;
3842 ASSERT3P(marker, !=, NULL);
3843 IMPLY(bytes < 0, bytes == ARC_EVICT_ALL);
3845 mls = multilist_sublist_lock(ml, idx);
3847 for (hdr = multilist_sublist_prev(mls, marker); hdr != NULL;
3848 hdr = multilist_sublist_prev(mls, marker)) {
3849 if ((bytes != ARC_EVICT_ALL && bytes_evicted >= bytes) ||
3850 (evict_count >= zfs_arc_evict_batch_limit))
3854 * To keep our iteration location, move the marker
3855 * forward. Since we're not holding hdr's hash lock, we
3856 * must be very careful and not remove 'hdr' from the
3857 * sublist. Otherwise, other consumers might mistake the
3858 * 'hdr' as not being on a sublist when they call the
3859 * multilist_link_active() function (they all rely on
3860 * the hash lock protecting concurrent insertions and
3861 * removals). multilist_sublist_move_forward() was
3862 * specifically implemented to ensure this is the case
3863 * (only 'marker' will be removed and re-inserted).
3865 multilist_sublist_move_forward(mls, marker);
3868 * The only case where the b_spa field should ever be
3869 * zero is for the marker headers inserted by
3870 * arc_evict_state(). It's possible for multiple threads
3871 * to be calling arc_evict_state() concurrently (e.g.
3872 * dsl_pool_close() and zio_inject_fault()), so we must
3873 * skip any markers we see from these other threads.
3875 if (hdr->b_spa == 0)
3878 /* we're only interested in evicting buffers of a certain spa */
3879 if (spa != 0 && hdr->b_spa != spa) {
3880 ARCSTAT_BUMP(arcstat_evict_skip);
3884 hash_lock = HDR_LOCK(hdr);
3887 * We aren't calling this function from any code path
3888 * that would already be holding a hash lock, so we're
3889 * asserting on this assumption to be defensive in case
3890 * this ever changes. Without this check, it would be
3891 * possible to incorrectly increment arcstat_mutex_miss
3892 * below (e.g. if the code changed such that we called
3893 * this function with a hash lock held).
3895 ASSERT(!MUTEX_HELD(hash_lock));
3897 if (mutex_tryenter(hash_lock)) {
3898 uint64_t evicted = arc_evict_hdr(hdr, hash_lock);
3899 mutex_exit(hash_lock);
3901 bytes_evicted += evicted;
3904 * If evicted is zero, arc_evict_hdr() must have
3905 * decided to skip this header; don't increment
3906 * evict_count in this case.
3912 * If arc_size isn't overflowing, signal any
3913 * threads that might happen to be waiting.
3915 * For each header evicted, we wake up a single
3916 * thread. If we used cv_broadcast, we could
3917 * wake up "too many" threads causing arc_size
3918 * to significantly overflow arc_c; since
3919 * arc_get_data_impl() doesn't check for overflow
3920 * when it's woken up (it doesn't because it's
3921 * possible for the ARC to be overflowing while
3922 * full of un-evictable buffers, and the
3923 * function should proceed in this case).
3925 * If threads are left sleeping, due to not
3926 * using cv_broadcast here, they will be woken
3927 * up via cv_broadcast in arc_adjust_cb() just
3928 * before arc_adjust_zthr sleeps.
3930 mutex_enter(&arc_adjust_lock);
3931 if (!arc_is_overflowing())
3932 cv_signal(&arc_adjust_waiters_cv);
3933 mutex_exit(&arc_adjust_lock);
3935 ARCSTAT_BUMP(arcstat_mutex_miss);
3939 multilist_sublist_unlock(mls);
3941 return (bytes_evicted);
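
/*
 * Sketch of the marker technique used above, assuming a doubly
 * linked list with NULL-terminated ends; this mirrors what
 * multilist_sublist_move_forward() does, but with hypothetical
 * names. Only the marker is ever unlinked and re-linked, so
 * concurrent readers never see a real node leave the list:
 */
#if 0
#include <stddef.h>

typedef struct mnode {
	struct mnode *prev, *next;
	int is_marker;		/* plays the role of b_spa == 0 above */
} mnode_t;

/* Swap 'marker' with the node just before it (one step toward head). */
static void
marker_move_forward(mnode_t *marker)
{
	mnode_t *n = marker->prev;

	if (n == NULL)
		return;		/* already at the head */

	/* unlink the marker ... */
	n->next = marker->next;
	if (marker->next != NULL)
		marker->next->prev = n;

	/* ... and re-insert it immediately before 'n' */
	marker->prev = n->prev;
	marker->next = n;
	if (n->prev != NULL)
		n->prev->next = marker;
	n->prev = marker;
}
#endif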
3945 * Evict buffers from the given arc state, until we've removed the
3946 * specified number of bytes. Move the removed buffers to the
3947 * appropriate evict state.
3949 * This function makes a "best effort". It skips over any buffers
3950 * it can't get a hash_lock on, and so may not catch all candidates.
3951 * It may also return without evicting as much space as requested.
3953 * If bytes is specified using the special value ARC_EVICT_ALL, this
3954 * will evict all available (i.e. unlocked and evictable) buffers from
3955 * the given arc state; which is used by arc_flush().
3958 arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes,
3959 arc_buf_contents_t type)
3961 uint64_t total_evicted = 0;
3962 multilist_t *ml = state->arcs_list[type];
3964 arc_buf_hdr_t **markers;
3966 IMPLY(bytes < 0, bytes == ARC_EVICT_ALL);
3968 num_sublists = multilist_get_num_sublists(ml);
3971 * If we've tried to evict from each sublist, made some
3972 * progress, but still have not hit the target number of bytes
3973 * to evict, we want to keep trying. The markers allow us to
3974 * pick up where we left off for each individual sublist, rather
3975 * than starting from the tail each time.
3977 markers = kmem_zalloc(sizeof (*markers) * num_sublists, KM_SLEEP);
3978 for (int i = 0; i < num_sublists; i++) {
3979 markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP);
3982 * A b_spa of 0 is used to indicate that this header is
3983 * a marker. This fact is used in arc_adjust_type() and
3984 * arc_evict_state_impl().
3986 markers[i]->b_spa = 0;
3988 multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
3989 multilist_sublist_insert_tail(mls, markers[i]);
3990 multilist_sublist_unlock(mls);
3994 * While we haven't hit our target number of bytes to evict, or
3995 * we're evicting all available buffers.
3997 while (total_evicted < bytes || bytes == ARC_EVICT_ALL) {
3998 int sublist_idx = multilist_get_random_index(ml);
3999 uint64_t scan_evicted = 0;
4002 * Try to reduce pinned dnodes with a floor of arc_dnode_limit.
4003 * Request that 10% of the LRUs be scanned by the superblock shrinker.
4006 if (type == ARC_BUFC_DATA && aggsum_compare(&astat_dnode_size,
4007 arc_dnode_limit) > 0) {
4008 arc_prune_async((aggsum_upper_bound(&astat_dnode_size) -
4009 arc_dnode_limit) / sizeof (dnode_t) /
4010 zfs_arc_dnode_reduce_percent);
4014 * Start eviction using a randomly selected sublist,
4015 * this is to try and evenly balance eviction across all
4016 * sublists. Always starting at the same sublist
4017 * (e.g. index 0) would cause evictions to favor certain
4018 * sublists over others.
4020 for (int i = 0; i < num_sublists; i++) {
4021 uint64_t bytes_remaining;
4022 uint64_t bytes_evicted;
4024 if (bytes == ARC_EVICT_ALL)
4025 bytes_remaining = ARC_EVICT_ALL;
4026 else if (total_evicted < bytes)
4027 bytes_remaining = bytes - total_evicted;
4031 bytes_evicted = arc_evict_state_impl(ml, sublist_idx,
4032 markers[sublist_idx], spa, bytes_remaining);
4034 scan_evicted += bytes_evicted;
4035 total_evicted += bytes_evicted;
4037 /* we've reached the end, wrap to the beginning */
4038 if (++sublist_idx >= num_sublists)
4043 * If we didn't evict anything during this scan, we have
4044 * no reason to believe we'll evict more during another
4045 * scan, so break the loop.
4047 if (scan_evicted == 0) {
4048 /* This isn't possible, let's make that obvious */
4049 ASSERT3S(bytes, !=, 0);
4052 * When bytes is ARC_EVICT_ALL, the only way to
4053 * break the loop is when scan_evicted is zero.
4054 * In that case, we actually have evicted enough,
4055 * so we don't want to increment the kstat.
4057 if (bytes != ARC_EVICT_ALL) {
4058 ASSERT3S(total_evicted, <, bytes);
4059 ARCSTAT_BUMP(arcstat_evict_not_enough);
4066 for (int i = 0; i < num_sublists; i++) {
4067 multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
4068 multilist_sublist_remove(mls, markers[i]);
4069 multilist_sublist_unlock(mls);
4071 kmem_cache_free(hdr_full_cache, markers[i]);
4073 kmem_free(markers, sizeof (*markers) * num_sublists);
4075 return (total_evicted);
4079 * Flush all "evictable" data of the given type from the arc state
4080 * specified. This will not evict any "active" buffers (i.e. referenced).
4082 * When 'retry' is set to B_FALSE, the function will make a single pass
4083 * over the state and evict any buffers that it can. Since it doesn't
4084 * continually retry the eviction, it might end up leaving some buffers
4085 * in the ARC due to lock misses.
4087 * When 'retry' is set to B_TRUE, the function will continually retry the
4088 * eviction until *all* evictable buffers have been removed from the
4089 * state. As a result, if concurrent insertions into the state are
4090 * allowed (e.g. if the ARC isn't shutting down), this function might
4091 * wind up in an infinite loop, continually trying to evict buffers.
4094 arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
4097 uint64_t evicted = 0;
4099 while (refcount_count(&state->arcs_esize[type]) != 0) {
4100 evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);
4110 * Helper function for arc_prune_async(). It is responsible for safely
4111 * handling the execution of a registered arc_prune_func_t.
4114 arc_prune_task(void *ptr)
4116 arc_prune_t *ap = (arc_prune_t *)ptr;
4117 arc_prune_func_t *func = ap->p_pfunc;
4120 func(ap->p_adjust, ap->p_private);
4122 refcount_remove(&ap->p_refcnt, func);
4126 * Notify registered consumers they must drop holds on a portion of the ARC
4127 * buffers they reference. This provides a mechanism to ensure the ARC can
4128 * honor the arc_meta_limit and reclaim otherwise pinned ARC buffers. This
4129 * is analogous to dnlc_reduce_cache() but more generic.
4131 * This operation is performed asynchronously so it may be safely called
4132 * in the context of the arc_reclaim_thread(). A reference is taken here
4133 * for each registered arc_prune_t and the arc_prune_task() is responsible
4134 * for releasing it once the registered arc_prune_func_t has completed.
4137 arc_prune_async(int64_t adjust)
4141 mutex_enter(&arc_prune_mtx);
4142 for (ap = list_head(&arc_prune_list); ap != NULL;
4143 ap = list_next(&arc_prune_list, ap)) {
4145 if (refcount_count(&ap->p_refcnt) >= 2)
4148 refcount_add(&ap->p_refcnt, ap->p_pfunc);
4149 ap->p_adjust = adjust;
4150 if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
4151 ap, TQ_SLEEP) == TASKQID_INVALID) {
4152 refcount_remove(&ap->p_refcnt, ap->p_pfunc);
4155 ARCSTAT_BUMP(arcstat_prune);
4157 mutex_exit(&arc_prune_mtx);
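
/*
 * Simplified shape of the dispatch protocol above, with hypothetical
 * names and a plain counter standing in for the refcount_t that the
 * real code manipulates under arc_prune_mtx. The reference is taken
 * before dispatch and dropped either by the task or on dispatch
 * failure, so the callback structure cannot be torn down while a
 * task still points at it:
 */
#if 0
#include <stdint.h>

typedef struct prune_cb {
	int	pc_refs;	/* base ref of 1 held by registration */
	void	(*pc_func)(int64_t, void *);
	void	*pc_arg;
	int64_t	pc_adjust;
} prune_cb_t;

static void
prune_task(void *ptr)
{
	prune_cb_t *pc = ptr;

	if (pc->pc_func != NULL)
		pc->pc_func(pc->pc_adjust, pc->pc_arg);
	pc->pc_refs--;			/* release the dispatch hold */
}

/* 'dispatch' returns 0 on success, nonzero on failure. */
static void
prune_dispatch(prune_cb_t *pc, int64_t adjust,
    int (*dispatch)(void (*)(void *), void *))
{
	if (pc->pc_refs >= 2)		/* a prune is already in flight */
		return;
	pc->pc_refs++;			/* hold for the pending task */
	pc->pc_adjust = adjust;
	if (dispatch(prune_task, pc) != 0)
		pc->pc_refs--;		/* dispatch failed; drop the hold */
}
#endif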
4161 * Evict the specified number of bytes from the state specified,
4162 * restricting eviction to the spa and type given. This function
4163 * prevents us from trying to evict more from a state's list than
4164 * is "evictable", and to skip evicting altogether when passed a
4165 * negative value for "bytes". In contrast, arc_evict_state() will
4166 * evict everything it can, when passed a negative value for "bytes".
4169 arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
4170 arc_buf_contents_t type)
4174 if (bytes > 0 && refcount_count(&state->arcs_esize[type]) > 0) {
4175 delta = MIN(refcount_count(&state->arcs_esize[type]), bytes);
4176 return (arc_evict_state(state, spa, delta, type));
4183 * The goal of this function is to evict enough meta data buffers from the
4184 * ARC in order to enforce the arc_meta_limit. Achieving this is slightly
4185 * more complicated than it appears because it is common for data buffers
4186 * to have holds on meta data buffers. In addition, dnode meta data buffers
4187 * will be held by the dnodes in the block preventing them from being freed.
4188 * This means we can't simply traverse the ARC and expect to always find
4189 * enough unheld meta data buffers to release.
4191 * Therefore, this function has been updated to make alternating passes
4192 * over the ARC releasing data buffers and then newly unheld meta data
4193 * buffers. This ensures forward progress is maintained and meta_used
4194 * will decrease. Normally this is sufficient, but if required the ARC
4195 * will call the registered prune callbacks, causing dentries and inodes to
4196 * be dropped from the VFS cache. This will make dnode meta data buffers
4197 * available for reclaim.
4200 arc_adjust_meta_balanced(uint64_t meta_used)
4202 int64_t delta, prune = 0, adjustmnt;
4203 uint64_t total_evicted = 0;
4204 arc_buf_contents_t type = ARC_BUFC_DATA;
4205 int restarts = MAX(zfs_arc_meta_adjust_restarts, 0);
4209 * This differs slightly from the way we evict from the mru in
4210 * arc_adjust because we don't have a "target" value (i.e. no
4211 * "meta" arc_p). As a result, I think we can completely
4212 * cannibalize the metadata in the MRU before we evict the
4213 * metadata from the MFU. I think we probably need to implement a
4214 * "metadata arc_p" value to do this properly.
4216 adjustmnt = meta_used - arc_meta_limit;
4218 if (adjustmnt > 0 && refcount_count(&arc_mru->arcs_esize[type]) > 0) {
4219 delta = MIN(refcount_count(&arc_mru->arcs_esize[type]),
4221 total_evicted += arc_adjust_impl(arc_mru, 0, delta, type);
4226 * We can't afford to recalculate adjustmnt here. If we do,
4227 * new metadata buffers can sneak into the MRU or ANON lists,
4228 * thus penalizing the MFU metadata. Although the fudge factor is
4229 * small, it has been empirically shown to be significant for
4230 * certain workloads (e.g. creating many empty directories). As
4231 * such, we use the original calculation for adjustmnt, and
4232 * simply decrement the amount of data evicted from the MRU.
4235 if (adjustmnt > 0 && refcount_count(&arc_mfu->arcs_esize[type]) > 0) {
4236 delta = MIN(refcount_count(&arc_mfu->arcs_esize[type]),
4238 total_evicted += arc_adjust_impl(arc_mfu, 0, delta, type);
4241 adjustmnt = meta_used - arc_meta_limit;
4243 if (adjustmnt > 0 &&
4244 refcount_count(&arc_mru_ghost->arcs_esize[type]) > 0) {
4245 delta = MIN(adjustmnt,
4246 refcount_count(&arc_mru_ghost->arcs_esize[type]));
4247 total_evicted += arc_adjust_impl(arc_mru_ghost, 0, delta, type);
4251 if (adjustmnt > 0 &&
4252 refcount_count(&arc_mfu_ghost->arcs_esize[type]) > 0) {
4253 delta = MIN(adjustmnt,
4254 refcount_count(&arc_mfu_ghost->arcs_esize[type]));
4255 total_evicted += arc_adjust_impl(arc_mfu_ghost, 0, delta, type);
4259 * If after attempting to make the requested adjustment to the ARC
4260 * the meta limit is still being exceeded then request that the
4261 * higher layers drop some cached objects which have holds on ARC
4262 * meta buffers. Requests to the upper layers will be made with
4263 * increasingly large scan sizes until the ARC is below the limit.
4265 if (meta_used > arc_meta_limit) {
4266 if (type == ARC_BUFC_DATA) {
4267 type = ARC_BUFC_METADATA;
4269 type = ARC_BUFC_DATA;
4271 if (zfs_arc_meta_prune) {
4272 prune += zfs_arc_meta_prune;
4273 arc_prune_async(prune);
4282 return (total_evicted);
4286 * Evict metadata buffers from the cache, such that arc_meta_used is
4287 * capped by the arc_meta_limit tunable.
4290 arc_adjust_meta_only(uint64_t meta_used)
4292 uint64_t total_evicted = 0;
4296 * If we're over the meta limit, we want to evict enough
4297 * metadata to get back under the meta limit. We don't want to
4298 * evict so much that we drop the MRU below arc_p, though. If
4299 * we're over the meta limit more than we're over arc_p, we
4300 * evict some from the MRU here, and some from the MFU below.
4302 target = MIN((int64_t)(meta_used - arc_meta_limit),
4303 (int64_t)(refcount_count(&arc_anon->arcs_size) +
4304 refcount_count(&arc_mru->arcs_size) - arc_p));
4306 total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
4309 * Similar to the above, we want to evict enough bytes to get us
4310 * below the meta limit, but not so much as to drop us below the
4311 * space allotted to the MFU (which is defined as arc_c - arc_p).
4313 target = MIN((int64_t)(meta_used - arc_meta_limit),
4314 (int64_t)(refcount_count(&arc_mfu->arcs_size) -
4317 total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
4319 return (total_evicted);
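
/*
 * A worked example of the MIN() targets above, with made-up numbers
 * (all in bytes) and a hypothetical helper. Suppose meta_used = 600,
 * arc_meta_limit = 500, anon + mru = 300 and arc_p = 250: the MRU
 * pass evicts MIN(100, 50) = 50, leaving the remaining 50 for the
 * MFU pass:
 */
#if 0
#include <stdint.h>

static int64_t
meta_mru_target(uint64_t meta_used, uint64_t meta_limit,
    uint64_t anon_plus_mru, uint64_t arc_p)
{
	int64_t over_limit = (int64_t)(meta_used - meta_limit);
	int64_t over_p = (int64_t)(anon_plus_mru - arc_p);

	/* a negative result means there is nothing to evict here */
	return (over_limit < over_p ? over_limit : over_p);
}
#endif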
4323 arc_adjust_meta(uint64_t meta_used)
4325 if (zfs_arc_meta_strategy == ARC_STRATEGY_META_ONLY)
4326 return (arc_adjust_meta_only(meta_used));
4328 return (arc_adjust_meta_balanced(meta_used));
4332 * Return the type of the oldest buffer in the given arc state
4334 * This function will select a random sublist of type ARC_BUFC_DATA and
4335 * a random sublist of type ARC_BUFC_METADATA. The tail of each sublist
4336 * is compared, and the type which contains the "older" buffer is returned.
4339 static arc_buf_contents_t
4340 arc_adjust_type(arc_state_t *state)
4342 multilist_t *data_ml = state->arcs_list[ARC_BUFC_DATA];
4343 multilist_t *meta_ml = state->arcs_list[ARC_BUFC_METADATA];
4344 int data_idx = multilist_get_random_index(data_ml);
4345 int meta_idx = multilist_get_random_index(meta_ml);
4346 multilist_sublist_t *data_mls;
4347 multilist_sublist_t *meta_mls;
4348 arc_buf_contents_t type;
4349 arc_buf_hdr_t *data_hdr;
4350 arc_buf_hdr_t *meta_hdr;
4353 * We keep the sublist lock until we're finished, to prevent
4354 * the headers from being destroyed via arc_evict_state().
4356 data_mls = multilist_sublist_lock(data_ml, data_idx);
4357 meta_mls = multilist_sublist_lock(meta_ml, meta_idx);
4360 * These two loops are to ensure we skip any markers that
4361 * might be at the tail of the lists due to arc_evict_state().
4364 for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL;
4365 data_hdr = multilist_sublist_prev(data_mls, data_hdr)) {
4366 if (data_hdr->b_spa != 0)
4370 for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL;
4371 meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) {
4372 if (meta_hdr->b_spa != 0)
4376 if (data_hdr == NULL && meta_hdr == NULL) {
4377 type = ARC_BUFC_DATA;
4378 } else if (data_hdr == NULL) {
4379 ASSERT3P(meta_hdr, !=, NULL);
4380 type = ARC_BUFC_METADATA;
4381 } else if (meta_hdr == NULL) {
4382 ASSERT3P(data_hdr, !=, NULL);
4383 type = ARC_BUFC_DATA;
4385 ASSERT3P(data_hdr, !=, NULL);
4386 ASSERT3P(meta_hdr, !=, NULL);
4388 /* The headers can't be on the sublist without an L1 header */
4389 ASSERT(HDR_HAS_L1HDR(data_hdr));
4390 ASSERT(HDR_HAS_L1HDR(meta_hdr));
4392 if (data_hdr->b_l1hdr.b_arc_access <
4393 meta_hdr->b_l1hdr.b_arc_access) {
4394 type = ARC_BUFC_DATA;
4396 type = ARC_BUFC_METADATA;
4400 multilist_sublist_unlock(meta_mls);
4401 multilist_sublist_unlock(data_mls);
4407 * Evict buffers from the cache, such that arc_size is capped by arc_c.
4412 uint64_t total_evicted = 0;
4415 uint64_t asize = aggsum_value(&arc_size);
4416 uint64_t ameta = aggsum_value(&arc_meta_used);
4419 * If we're over arc_meta_limit, we want to correct that before
4420 * potentially evicting data buffers below.
4422 total_evicted += arc_adjust_meta(ameta);
4427 * If we're over the target cache size, we want to evict enough
4428 * from the list to get back to our target size. We don't want
4429 * to evict too much from the MRU, such that it drops below
4430 * arc_p. So, if we're over our target cache size more than
4431 * the MRU is over arc_p, we'll evict enough to get back to
4432 * arc_p here, and then evict more from the MFU below.
4434 target = MIN((int64_t)(asize - arc_c),
4435 (int64_t)(refcount_count(&arc_anon->arcs_size) +
4436 refcount_count(&arc_mru->arcs_size) + ameta - arc_p));
4439 * If we're below arc_meta_min, always prefer to evict data.
4440 * Otherwise, try to satisfy the requested number of bytes to
4441 * evict from the type which contains older buffers; in an
4442 * effort to keep newer buffers in the cache regardless of their
4443 * type. If we cannot satisfy the number of bytes from this
4444 * type, spill over into the next type.
4446 if (arc_adjust_type(arc_mru) == ARC_BUFC_METADATA &&
4447 ameta > arc_meta_min) {
4448 bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
4449 total_evicted += bytes;
4452 * If we couldn't evict our target number of bytes from
4453 * metadata, we try to get the rest from data.
4458 arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
4460 bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
4461 total_evicted += bytes;
4464 * If we couldn't evict our target number of bytes from
4465 * data, we try to get the rest from metadata.
4470 arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
4474 * Re-sum ARC stats after the first round of evictions.
4476 asize = aggsum_value(&arc_size);
4477 ameta = aggsum_value(&arc_meta_used);
4482 * Now that we've tried to evict enough from the MRU to get its
4483 * size back to arc_p, if we're still above the target cache
4484 * size, we evict the rest from the MFU.
4486 target = asize - arc_c;
4488 if (arc_adjust_type(arc_mfu) == ARC_BUFC_METADATA &&
4489 ameta > arc_meta_min) {
4490 bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
4491 total_evicted += bytes;
4494 * If we couldn't evict our target number of bytes from
4495 * metadata, we try to get the rest from data.
4500 arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
4502 bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
4503 total_evicted += bytes;
4506 * If we couldn't evict our target number of bytes from
4507 * data, we try to get the rest from metadata.
4512 arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
4516 * Adjust ghost lists
4518 * In addition to the above, the ARC also defines target values
4519 * for the ghost lists. The sum of the mru list and mru ghost
4520 * list should never exceed the target size of the cache, and
4521 * the sum of the mru list, mfu list, mru ghost list, and mfu
4522 * ghost list should never exceed twice the target size of the
4523 * cache. The following logic enforces these limits on the ghost
4524 * caches, and evicts from them as needed.
4526 target = refcount_count(&arc_mru->arcs_size) +
4527 refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
4529 bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
4530 total_evicted += bytes;
4535 arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA);
4538 * We assume the sum of the mru list and mfu list is less than
4539 * or equal to arc_c (we enforced this above), which means we
4540 * can use the simpler of the two equations below:
4542 * mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
4543 * mru ghost + mfu ghost <= arc_c
4545 target = refcount_count(&arc_mru_ghost->arcs_size) +
4546 refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
4548 bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
4549 total_evicted += bytes;
4554 arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA);
4556 return (total_evicted);
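
/*
 * The ghost-list invariants enforced at the end of arc_adjust(), as
 * plain arithmetic with a hypothetical helper. With arc_c = 1000:
 * mru + mru_ghost <= 1000 and mru_ghost + mfu_ghost <= 1000, so e.g.
 * mru = 700 and mru_ghost = 400 yields a first-pass target of 100
 * bytes to evict from the mru ghost list:
 */
#if 0
#include <stdint.h>

static int64_t
ghost_target(uint64_t list_a, uint64_t list_b, uint64_t arc_c)
{
	/* positive: bytes over the limit; negative: headroom left */
	return ((int64_t)(list_a + list_b) - (int64_t)arc_c);
}
#endif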
4560 arc_flush(spa_t *spa, boolean_t retry)
4565 * If retry is B_TRUE, a spa must not be specified since we have
4566 * no good way to determine if all of a spa's buffers have been
4567 * evicted from an arc state.
4569 ASSERT(!retry || spa == 0);
4572 guid = spa_load_guid(spa);
4574 (void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry);
4575 (void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry);
4577 (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry);
4578 (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry);
4580 (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry);
4581 (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry);
4583 (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry);
4584 (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry);
4588 arc_reduce_target_size(int64_t to_free)
4590 uint64_t asize = aggsum_value(&arc_size);
4591 if (arc_c > arc_c_min) {
4592 DTRACE_PROBE4(arc__shrink, uint64_t, arc_c, uint64_t,
4593 arc_c_min, uint64_t, arc_p, uint64_t, to_free);
4594 if (arc_c > arc_c_min + to_free)
4595 atomic_add_64(&arc_c, -to_free);
4599 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
4601 arc_c = MAX(asize, arc_c_min);
4603 arc_p = (arc_c >> 1);
4605 DTRACE_PROBE2(arc__shrunk, uint64_t, arc_c, uint64_t,
4608 ASSERT(arc_c >= arc_c_min);
4609 ASSERT((int64_t)arc_p >= 0);
4612 if (asize > arc_c) {
4613 DTRACE_PROBE2(arc__shrink_adjust, uint64_t, asize,
4615 /* See comment in arc_adjust_cb_check() on why lock+flag */
4616 mutex_enter(&arc_adjust_lock);
4617 arc_adjust_needed = B_TRUE;
4618 mutex_exit(&arc_adjust_lock);
4619 zthr_wakeup(arc_adjust_zthr);
4623 typedef enum free_memory_reason_t {
4628 FMR_PAGES_PP_MAXIMUM,
4631 } free_memory_reason_t;
4633 int64_t last_free_memory;
4634 free_memory_reason_t last_free_reason;
4637 * Additional reserve of pages for pp_reserve.
4639 int64_t arc_pages_pp_reserve = 64;
4642 * Additional reserve of pages for swapfs.
4644 int64_t arc_swapfs_reserve = 64;
4647 * Return the amount of memory that can be consumed before reclaim will be
4648 * needed. A positive value means there is sufficient free memory; a
4649 * negative value indicates the amount of memory that needs to be freed.
4652 arc_available_memory(void)
4654 int64_t lowest = INT64_MAX;
4656 free_memory_reason_t r = FMR_UNKNOWN;
4661 * Cooperate with pagedaemon when it's time for it to scan
4662 * and reclaim some pages.
4664 n = PAGESIZE * ((int64_t)freemem - zfs_arc_free_target);
4672 n = PAGESIZE * (-needfree);
4680 * check that we're out of range of the pageout scanner. It starts to
4681 * schedule paging if freemem is less than lotsfree and needfree.
4682 * lotsfree is the high-water mark for pageout, and needfree is the
4683 * number of needed free pages. We add extra pages here to make sure
4684 * the scanner doesn't start up while we're freeing memory.
4686 n = PAGESIZE * (freemem - lotsfree - needfree - desfree);
4693 * check to make sure that swapfs has enough space so that anon
4694 * reservations can still succeed. anon_resvmem() checks that the
4695 * availrmem is greater than swapfs_minfree, and the number of reserved
4696 * swap pages. We also add a bit of extra here just to prevent
4697 * circumstances from getting really dire.
4699 n = PAGESIZE * (availrmem - swapfs_minfree - swapfs_reserve -
4700 desfree - arc_swapfs_reserve);
4703 r = FMR_SWAPFS_MINFREE;
4708 * Check that we have enough availrmem that memory locking (e.g., via
4709 * mlock(3C) or memcntl(2)) can still succeed. (pages_pp_maximum
4710 * stores the number of pages that cannot be locked; when availrmem
4711 * drops below pages_pp_maximum, page locking mechanisms such as
4712 * page_pp_lock() will fail.)
4714 n = PAGESIZE * (availrmem - pages_pp_maximum -
4715 arc_pages_pp_reserve);
4718 r = FMR_PAGES_PP_MAXIMUM;
4721 #endif /* __FreeBSD__ */
4722 #if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC)
4724 * If we're on an i386 platform, it's possible that we'll exhaust the
4725 * kernel heap space before we ever run out of available physical
4726 * memory. Most checks of the size of the heap_area compare against
4727 * tune.t_minarmem, which is the minimum available real memory that we
4728 * can have in the system. However, this is generally fixed at 25 pages
4729 * which is so low that it's useless. In this comparison, we seek to
4730 * calculate the total heap-size, and reclaim if more than 3/4ths of the
4731 * heap is allocated. (Or, in the calculation, if less than 1/4th is free.)
4734 n = uma_avail() - (long)(uma_limit() / 4);
4742 * If zio data pages are being allocated out of a separate heap segment,
4743 * then enforce that the size of available vmem for this arena remains
4744 * above about 1/4th (1/(2^arc_zio_arena_free_shift)) free.
4746 * Note that reducing the arc_zio_arena_free_shift keeps more virtual
4747 * memory (in the zio_arena) free, which can avoid memory
4748 * fragmentation issues.
4750 if (zio_arena != NULL) {
4751 n = (int64_t)vmem_size(zio_arena, VMEM_FREE) -
4752 (vmem_size(zio_arena, VMEM_ALLOC) >>
4753 arc_zio_arena_free_shift);
4761 /* Every 100 calls, free a small amount */
4762 if (spa_get_random(100) == 0)
4764 #endif /* _KERNEL */
4766 last_free_memory = lowest;
4767 last_free_reason = r;
4768 DTRACE_PROBE2(arc__available_memory, int64_t, lowest, int, r);
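
/*
 * The shape of arc_available_memory(), reduced to its essence: run a
 * series of independent limit checks and remember only the most
 * restrictive result along with why it won. This sketch uses
 * hypothetical types; the real function open-codes each check:
 */
#if 0
#include <stdint.h>

typedef struct mem_check {
	int64_t	(*mc_headroom)(void);	/* bytes until this limit bites */
	int	mc_reason;		/* cf. free_memory_reason_t */
} mem_check_t;

static int64_t
lowest_headroom(const mem_check_t *checks, int nchecks, int *reasonp)
{
	int64_t lowest = INT64_MAX;

	for (int i = 0; i < nchecks; i++) {
		int64_t n = checks[i].mc_headroom();
		if (n < lowest) {
			lowest = n;
			*reasonp = checks[i].mc_reason;
		}
	}
	return (lowest);	/* negative => reclaim is needed */
}
#endif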
4774 * Determine if the system is under memory pressure and is asking
4775 * to reclaim memory. A return value of B_TRUE indicates that the system
4776 * is under memory pressure and that the arc should adjust accordingly.
4779 arc_reclaim_needed(void)
4781 return (arc_available_memory() < 0);
4784 extern kmem_cache_t *zio_buf_cache[];
4785 extern kmem_cache_t *zio_data_buf_cache[];
4786 extern kmem_cache_t *range_seg_cache;
4787 extern kmem_cache_t *abd_chunk_cache;
4789 static __noinline void
4790 arc_kmem_reap_soon(void)
4793 kmem_cache_t *prev_cache = NULL;
4794 kmem_cache_t *prev_data_cache = NULL;
4796 DTRACE_PROBE(arc__kmem_reap_start);
4798 if (aggsum_compare(&arc_meta_used, arc_meta_limit) >= 0) {
4800 * We are exceeding our meta-data cache limit.
4801 * Purge some DNLC entries to release holds on meta-data.
4803 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
4807 * Reclaim unused memory from all kmem caches.
4813 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
4814 if (zio_buf_cache[i] != prev_cache) {
4815 prev_cache = zio_buf_cache[i];
4816 kmem_cache_reap_soon(zio_buf_cache[i]);
4818 if (zio_data_buf_cache[i] != prev_data_cache) {
4819 prev_data_cache = zio_data_buf_cache[i];
4820 kmem_cache_reap_soon(zio_data_buf_cache[i]);
4823 kmem_cache_reap_soon(abd_chunk_cache);
4824 kmem_cache_reap_soon(buf_cache);
4825 kmem_cache_reap_soon(hdr_full_cache);
4826 kmem_cache_reap_soon(hdr_l2only_cache);
4827 kmem_cache_reap_soon(range_seg_cache);
4830 if (zio_arena != NULL) {
4832 * Ask the vmem arena to reclaim unused memory from its quantum caches.
4835 vmem_qcache_reap(zio_arena);
4838 DTRACE_PROBE(arc__kmem_reap_end);
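
/*
 * Sketch of the prev_cache trick in the reap loop above: several
 * block sizes alias to the same underlying kmem cache, and aliased
 * entries are adjacent in the array, so remembering the last cache
 * reaped suffices to avoid reaping one cache repeatedly. Hypothetical
 * helper, not the kernel interface:
 */
#if 0
#include <stddef.h>

static void
reap_unique(void *caches[], int ncaches, void (*reap)(void *))
{
	void *prev = NULL;

	for (int i = 0; i < ncaches; i++) {
		if (caches[i] != prev) {
			prev = caches[i];	/* skip adjacent aliases */
			reap(caches[i]);
		}
	}
}
#endif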
4843 arc_adjust_cb_check(void *arg, zthr_t *zthr)
4846 * This is necessary in order for the mdb ::arc dcmd to
4847 * show up to date information. Since the ::arc command
4848 * does not call the kstat's update function, without
4849 * this call, the command may show stale stats for the
4850 * anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
4851 * with this change, the data might be up to 1 second
4852 * out of date (the arc_adjust_zthr has a maximum sleep
4853 * time of 1 second); but that should suffice. The
4854 * arc_state_t structures can be queried directly if more
4855 * accurate information is needed.
4857 if (arc_ksp != NULL)
4858 arc_ksp->ks_update(arc_ksp, KSTAT_READ);
4861 * We have to rely on arc_get_data_impl() to tell us when to adjust,
4862 * rather than checking if we are overflowing here, so that we are
4863 * sure to not leave arc_get_data_impl() waiting on
4864 * arc_adjust_waiters_cv. If we have become "not overflowing" since
4865 * arc_get_data_impl() checked, we need to wake it up. We could
4866 * broadcast the CV here, but arc_get_data_impl() may have not yet
4867 * gone to sleep. We would need to use a mutex to ensure that this
4868 * function doesn't broadcast until arc_get_data_impl() has gone to
4869 * sleep (e.g. the arc_adjust_lock). However, the lock ordering of
4870 * such a lock would necessarily be incorrect with respect to the
4871 * zthr_lock, which is held before this function is called, and is
4872 * held by arc_get_data_impl() when it calls zthr_wakeup().
4874 return (arc_adjust_needed);
4878 * Keep arc_size under arc_c by running arc_adjust, which evicts data from the ARC.
4882 arc_adjust_cb(void *arg, zthr_t *zthr)
4884 uint64_t evicted = 0;
4886 /* Evict from cache */
4887 evicted = arc_adjust();
4890 * If evicted is zero, we couldn't evict anything
4891 * via arc_adjust(). This could be due to hash lock
4892 * collisions, but more likely due to the majority of
4893 * arc buffers being unevictable. Therefore, even if
4894 * arc_size is above arc_c, another pass is unlikely to
4895 * be helpful and could potentially cause us to enter an
4896 * infinite loop. Additionally, zthr_iscancelled() is
4897 * checked here so that if the arc is shutting down, the
4898 * broadcast will wake any remaining arc adjust waiters.
4900 mutex_enter(&arc_adjust_lock);
4901 arc_adjust_needed = !zthr_iscancelled(arc_adjust_zthr) &&
4902 evicted > 0 && aggsum_compare(&arc_size, arc_c) > 0;
4903 if (!arc_adjust_needed) {
4905 * We're either no longer overflowing, or we
4906 * can't evict anything more, so we should wake
4909 cv_broadcast(&arc_adjust_waiters_cv);
4911 mutex_exit(&arc_adjust_lock);
4918 arc_reap_cb_check(void *arg, zthr_t *zthr)
4920 int64_t free_memory = arc_available_memory();
4923 * If a kmem reap is already active, don't schedule more. We must
4924 * check for this because kmem_cache_reap_soon() won't actually
4925 * block on the cache being reaped (this is to prevent callers from
4926 * becoming implicitly blocked by a system-wide kmem reap -- which,
4927 * on a system with many, many full magazines, can take minutes).
4929 if (!kmem_cache_reap_active() &&
4931 arc_no_grow = B_TRUE;
4934 * Wait at least zfs_grow_retry (default 60) seconds
4935 * before considering growing.
4937 arc_growtime = gethrtime() + SEC2NSEC(arc_grow_retry);
4939 } else if (free_memory < arc_c >> arc_no_grow_shift) {
4940 arc_no_grow = B_TRUE;
4941 } else if (gethrtime() >= arc_growtime) {
4942 arc_no_grow = B_FALSE;
4949 * Keep enough free memory in the system by reaping the ARC's kmem
4950 * caches. To cause more slabs to be reapable, we may reduce the
4951 * target size of the cache (arc_c), causing the arc_adjust_cb()
4952 * to free more buffers.
4956 arc_reap_cb(void *arg, zthr_t *zthr)
4958 int64_t free_memory;
4961 * Kick off asynchronous kmem_reap()'s of all our caches.
4963 arc_kmem_reap_soon();
4966 * Wait at least arc_kmem_cache_reap_retry_ms between
4967 * arc_kmem_reap_soon() calls. Without this check it is possible to
4968 * end up in a situation where we spend lots of time reaping
4969 * caches, while we're near arc_c_min. Waiting here also gives the
4970 * subsequent free memory check a chance of finding that the
4971 * asynchronous reap has already freed enough memory, and we don't
4972 * need to call arc_reduce_target_size().
4974 delay((hz * arc_kmem_cache_reap_retry_ms + 999) / 1000);
4977 * Reduce the target size as needed to maintain the amount of free
4978 * memory in the system at a fraction of the arc_size (1/128th by
4979 * default). If oversubscribed (free_memory < 0) then reduce the
4980 * target arc_size by the deficit amount plus the fractional
4981 * amount. If free memory is positive but less than the fractional
4982 * amount, reduce by what is needed to hit the fractional amount.
4984 free_memory = arc_available_memory();
4987 (arc_c >> arc_shrink_shift) - free_memory;
4991 to_free = MAX(to_free, ptob(needfree));
4994 arc_reduce_target_size(to_free);
5000 static u_int arc_dnlc_evicts_arg;
5001 extern struct vfsops zfs_vfsops;
5004 arc_dnlc_evicts_thread(void *dummy __unused)
5009 CALLB_CPR_INIT(&cpr, &arc_dnlc_evicts_lock, callb_generic_cpr, FTAG);
5011 mutex_enter(&arc_dnlc_evicts_lock);
5012 while (!arc_dnlc_evicts_thread_exit) {
5013 CALLB_CPR_SAFE_BEGIN(&cpr);
5014 (void) cv_wait(&arc_dnlc_evicts_cv, &arc_dnlc_evicts_lock);
5015 CALLB_CPR_SAFE_END(&cpr, &arc_dnlc_evicts_lock);
5016 if (arc_dnlc_evicts_arg != 0) {
5017 percent = arc_dnlc_evicts_arg;
5018 mutex_exit(&arc_dnlc_evicts_lock);
5020 vnlru_free(desiredvnodes * percent / 100, &zfs_vfsops);
5022 mutex_enter(&arc_dnlc_evicts_lock);
5024 * Clear our token only after vnlru_free()
5025 * pass is done, to avoid false queueing of
5028 arc_dnlc_evicts_arg = 0;
5031 arc_dnlc_evicts_thread_exit = FALSE;
5032 cv_broadcast(&arc_dnlc_evicts_cv);
5033 CALLB_CPR_EXIT(&cpr);
5038 dnlc_reduce_cache(void *arg)
5042 percent = (u_int)(uintptr_t)arg;
5043 mutex_enter(&arc_dnlc_evicts_lock);
5044 if (arc_dnlc_evicts_arg == 0) {
5045 arc_dnlc_evicts_arg = percent;
5046 cv_broadcast(&arc_dnlc_evicts_cv);
5048 mutex_exit(&arc_dnlc_evicts_lock);
5052 * Adapt arc info given the number of bytes we are trying to add and
5053 * the state that we are coming from. This function is only called
5054 * when we are adding new content to the cache.
5057 arc_adapt(int bytes, arc_state_t *state)
5060 uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
5061 int64_t mrug_size = refcount_count(&arc_mru_ghost->arcs_size);
5062 int64_t mfug_size = refcount_count(&arc_mfu_ghost->arcs_size);
5064 if (state == arc_l2c_only)
5069 * Adapt the target size of the MRU list:
5070 * - if we just hit in the MRU ghost list, then increase
5071 * the target size of the MRU list.
5072 * - if we just hit in the MFU ghost list, then increase
5073 * the target size of the MFU list by decreasing the
5074 * target size of the MRU list.
5076 if (state == arc_mru_ghost) {
5077 mult = (mrug_size >= mfug_size) ? 1 : (mfug_size / mrug_size);
5078 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
5080 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
5081 } else if (state == arc_mfu_ghost) {
5084 mult = (mfug_size >= mrug_size) ? 1 : (mrug_size / mfug_size);
5085 mult = MIN(mult, 10);
5087 delta = MIN(bytes * mult, arc_p);
5088 arc_p = MAX(arc_p_min, arc_p - delta);
5090 ASSERT((int64_t)arc_p >= 0);
5093 * Wake reap thread if we do not have any available memory
5095 if (arc_reclaim_needed()) {
5096 zthr_wakeup(arc_reap_zthr);
5103 if (arc_c >= arc_c_max)
5107 * If we're within (2 * maxblocksize) bytes of the target
5108 * cache size, increment the target cache size
5110 if (aggsum_compare(&arc_size, arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) >
5112 DTRACE_PROBE1(arc__inc_adapt, int, bytes);
5113 atomic_add_64(&arc_c, (int64_t)bytes);
5114 if (arc_c > arc_c_max)
5116 else if (state == arc_anon)
5117 atomic_add_64(&arc_p, (int64_t)bytes);
5121 ASSERT((int64_t)arc_p >= 0);
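
/*
 * The arc_p feedback rule above, isolated into a hypothetical helper.
 * A hit in the mru ghost list grows arc_p (favoring recency); a hit
 * in the mfu ghost list shrinks it (favoring frequency). The
 * multiplier is the ratio of the two ghost sizes, capped at 10. A
 * ghost hit implies the hit list is non-empty, so this_ghost > 0:
 */
#if 0
#include <stdint.h>

static uint64_t
adapt_arc_p(uint64_t arc_p, uint64_t arc_c, uint64_t arc_p_min,
    int64_t bytes, int64_t this_ghost, int64_t other_ghost, int mru_hit)
{
	int64_t mult = (this_ghost >= other_ghost) ? 1 :
	    (other_ghost / this_ghost);
	if (mult > 10)
		mult = 10;	/* avoid wild arc_p adjustment */

	if (mru_hit) {
		/* grow arc_p, but never beyond arc_c - arc_p_min */
		uint64_t np = arc_p + (uint64_t)(bytes * mult);
		uint64_t hi = arc_c - arc_p_min;
		return (np < hi ? np : hi);
	} else {
		/* shrink arc_p, but never below arc_p_min */
		int64_t delta = bytes * mult;
		if (delta > (int64_t)arc_p)
			delta = (int64_t)arc_p;
		uint64_t np = arc_p - (uint64_t)delta;
		return (np > arc_p_min ? np : arc_p_min);
	}
}
#endif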
5125 * Check if arc_size has grown past our upper threshold, determined by
5126 * zfs_arc_overflow_shift.
5129 arc_is_overflowing(void)
5131 /* Always allow at least one block of overflow */
5132 uint64_t overflow = MAX(SPA_MAXBLOCKSIZE,
5133 arc_c >> zfs_arc_overflow_shift);
5136 * We just compare the lower bound here for performance reasons. Our
5137 * primary goals are to make sure that the arc never grows without
5138 * bound, and that it can reach its maximum size. This check
5139 * accomplishes both goals. The maximum amount we could run over by is
5140 * 2 * aggsum_borrow_multiplier * NUM_CPUS * the average size of a block
5141 * in the ARC. In practice, that's in the tens of MB, which is low
5142 * enough to be safe.
5144 return (aggsum_lower_bound(&arc_size) >= arc_c + overflow);
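
/*
 * The overflow threshold above, with made-up numbers: assuming
 * SPA_MAXBLOCKSIZE = 16 MB and zfs_arc_overflow_shift = 8, a 4 GB
 * arc_c lets arc_size reach arc_c + MAX(16 MB, 4 GB / 256) =
 * arc_c + 16 MB before writers must wait. Hypothetical helper:
 */
#if 0
#include <stdint.h>

static int
is_overflowing(uint64_t arc_size, uint64_t arc_c, int overflow_shift,
    uint64_t max_block)
{
	uint64_t overflow = arc_c >> overflow_shift;

	if (overflow < max_block)
		overflow = max_block;	/* at least one block of slack */
	return (arc_size >= arc_c + overflow);
}
#endif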
5148 arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
5150 arc_buf_contents_t type = arc_buf_type(hdr);
5152 arc_get_data_impl(hdr, size, tag);
5153 if (type == ARC_BUFC_METADATA) {
5154 return (abd_alloc(size, B_TRUE));
5156 ASSERT(type == ARC_BUFC_DATA);
5157 return (abd_alloc(size, B_FALSE));
5162 arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
5164 arc_buf_contents_t type = arc_buf_type(hdr);
5166 arc_get_data_impl(hdr, size, tag);
5167 if (type == ARC_BUFC_METADATA) {
5168 return (zio_buf_alloc(size));
5170 ASSERT(type == ARC_BUFC_DATA);
5171 return (zio_data_buf_alloc(size));
5176 * Allocate a block and return it to the caller. If we are hitting the
5177 * hard limit for the cache size, we must sleep, waiting for the eviction
5178 * thread to catch up. If we're past the target size but below the hard
5179 * limit, we'll only signal the reclaim thread and continue on.
5182 arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
5184 arc_state_t *state = hdr->b_l1hdr.b_state;
5185 arc_buf_contents_t type = arc_buf_type(hdr);
5187 arc_adapt(size, state);
5190 * If arc_size is currently overflowing, and has grown past our
5191 * upper limit, we must be adding data faster than the evict
5192 * thread can evict. Thus, to ensure we don't compound the
5193 * problem by adding more data and forcing arc_size to grow even
5194 * further past its target size, we halt and wait for the
5195 * eviction thread to catch up.
5197 * It's also possible that the reclaim thread is unable to evict
5198 * enough buffers to get arc_size below the overflow limit (e.g.
5199 * due to buffers being un-evictable, or hash lock collisions).
5200 * In this case, we want to proceed regardless if we're
5201 * overflowing; thus we don't use a while loop here.
5203 if (arc_is_overflowing()) {
5204 mutex_enter(&arc_adjust_lock);
5207 * Now that we've acquired the lock, we may no longer be
5208 * over the overflow limit, lets check.
5210 * We're ignoring the case of spurious wake ups. If that
5211 * were to happen, it'd let this thread consume an ARC
5212 * buffer before it should have (i.e. before we're under
5213 * the overflow limit and were signalled by the reclaim
5214 * thread). As long as that is a rare occurrence, it
5215 * shouldn't cause any harm.
5217 if (arc_is_overflowing()) {
5218 arc_adjust_needed = B_TRUE;
5219 zthr_wakeup(arc_adjust_zthr);
5220 (void) cv_wait(&arc_adjust_waiters_cv,
5223 mutex_exit(&arc_adjust_lock);
5226 VERIFY3U(hdr->b_type, ==, type);
5227 if (type == ARC_BUFC_METADATA) {
5228 arc_space_consume(size, ARC_SPACE_META);
5230 arc_space_consume(size, ARC_SPACE_DATA);
5234 * Update the state size. Note that ghost states have a
5235 * "ghost size" and so don't need to be updated.
5237 if (!GHOST_STATE(state)) {
5239 (void) refcount_add_many(&state->arcs_size, size, tag);
5242 * If this is reached via arc_read, the link is
5243 * protected by the hash lock. If reached via
5244 * arc_buf_alloc, the header should not be accessed by
5245 * any other thread. And, if reached via arc_read_done,
5246 * the hash lock will protect it if it's found in the
5247 * hash table; otherwise no other thread should be
5248 * trying to [add|remove]_reference it.
5250 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
5251 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
5252 (void) refcount_add_many(&state->arcs_esize[type],
5257 * If we are growing the cache, and we are adding anonymous
5258 * data, and we have outgrown arc_p, update arc_p
5260 if (aggsum_compare(&arc_size, arc_c) < 0 &&
5261 hdr->b_l1hdr.b_state == arc_anon &&
5262 (refcount_count(&arc_anon->arcs_size) +
5263 refcount_count(&arc_mru->arcs_size) > arc_p))
5264 arc_p = MIN(arc_c, arc_p + size);
5266 ARCSTAT_BUMP(arcstat_allocated);
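
/*
 * User-level shape of the overflow wait above, assuming POSIX
 * threads: an unlocked pre-check, then a re-check under the lock
 * before sleeping. The re-check is what makes the lost-wakeup window
 * harmless, and (as explained above) the wait is deliberately not a
 * loop. The globals here are hypothetical stand-ins for
 * arc_adjust_lock and arc_adjust_waiters_cv:
 */
#if 0
#include <pthread.h>

static pthread_mutex_t adjust_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waiters_cv = PTHREAD_COND_INITIALIZER;
static int overflowing;		/* updated elsewhere under adjust_lock */

static void
wait_if_overflowing(void)
{
	if (!overflowing)	/* cheap unlocked pre-check */
		return;

	pthread_mutex_lock(&adjust_lock);
	if (overflowing) {
		/* a single wait, not a while loop: see above */
		pthread_cond_wait(&waiters_cv, &adjust_lock);
	}
	pthread_mutex_unlock(&adjust_lock);
}
#endif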
5270 arc_free_data_abd(arc_buf_hdr_t *hdr, abd_t *abd, uint64_t size, void *tag)
5272 arc_free_data_impl(hdr, size, tag);
5277 arc_free_data_buf(arc_buf_hdr_t *hdr, void *buf, uint64_t size, void *tag)
5279 arc_buf_contents_t type = arc_buf_type(hdr);
5281 arc_free_data_impl(hdr, size, tag);
5282 if (type == ARC_BUFC_METADATA) {
5283 zio_buf_free(buf, size);
5285 ASSERT(type == ARC_BUFC_DATA);
5286 zio_data_buf_free(buf, size);
5291 * Free the arc data buffer.
5294 arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
5296 arc_state_t *state = hdr->b_l1hdr.b_state;
5297 arc_buf_contents_t type = arc_buf_type(hdr);
5299 /* protected by hash lock, if in the hash table */
5300 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
5301 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
5302 ASSERT(state != arc_anon && state != arc_l2c_only);
5304 (void) refcount_remove_many(&state->arcs_esize[type],
5307 (void) refcount_remove_many(&state->arcs_size, size, tag);
5309 VERIFY3U(hdr->b_type, ==, type);
5310 if (type == ARC_BUFC_METADATA) {
5311 arc_space_return(size, ARC_SPACE_META);
5313 ASSERT(type == ARC_BUFC_DATA);
5314 arc_space_return(size, ARC_SPACE_DATA);
5319 * This routine is called whenever a buffer is accessed.
5320 * NOTE: the hash lock is dropped in this function.
5323 arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
5327 ASSERT(MUTEX_HELD(hash_lock));
5328 ASSERT(HDR_HAS_L1HDR(hdr));
5330 if (hdr->b_l1hdr.b_state == arc_anon) {
5332 * This buffer is not in the cache, and does not
5333 * appear in our "ghost" list. Add the new buffer to the MRU state.
5337 ASSERT0(hdr->b_l1hdr.b_arc_access);
5338 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
5339 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
5340 arc_change_state(arc_mru, hdr, hash_lock);
5342 } else if (hdr->b_l1hdr.b_state == arc_mru) {
5343 now = ddi_get_lbolt();
5346 * If this buffer is here because of a prefetch, then either:
5347 * - clear the flag if this is a "referencing" read
5348 * (any subsequent access will bump this into the MFU state).
5350 * - move the buffer to the head of the list if this is
5351 * another prefetch (to make it less likely to be evicted).
5353 if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
5354 if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
5355 /* link protected by hash lock */
5356 ASSERT(multilist_link_active(
5357 &hdr->b_l1hdr.b_arc_node));
5359 arc_hdr_clear_flags(hdr,
5361 ARC_FLAG_PRESCIENT_PREFETCH);
5362 ARCSTAT_BUMP(arcstat_mru_hits);
5364 hdr->b_l1hdr.b_arc_access = now;
5369 * This buffer has been "accessed" only once so far,
5370 * but it is still in the cache. Move it to the MFU state.
5373 if (now > hdr->b_l1hdr.b_arc_access + ARC_MINTIME) {
5375 * More than 125ms have passed since we
5376 * instantiated this buffer. Move it to the
5377 * most frequently used state.
5379 hdr->b_l1hdr.b_arc_access = now;
5380 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
5381 arc_change_state(arc_mfu, hdr, hash_lock);
5383 atomic_inc_32(&hdr->b_l1hdr.b_mru_hits);
5384 ARCSTAT_BUMP(arcstat_mru_hits);
5385 } else if (hdr->b_l1hdr.b_state == arc_mru_ghost) {
5386 arc_state_t *new_state;
5388 * This buffer has been "accessed" recently, but
5389 * was evicted from the cache. Move it to the MRU state.
5393 if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
5394 new_state = arc_mru;
5395 if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) {
5396 arc_hdr_clear_flags(hdr,
5398 ARC_FLAG_PRESCIENT_PREFETCH);
5400 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
5402 new_state = arc_mfu;
5403 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
5406 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
5407 arc_change_state(new_state, hdr, hash_lock);
5409 atomic_inc_32(&hdr->b_l1hdr.b_mru_ghost_hits);
5410 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
5411 } else if (hdr->b_l1hdr.b_state == arc_mfu) {
5413 * This buffer has been accessed more than once and is
5414 * still in the cache. Keep it in the MFU state.
5416 * NOTE: an add_reference() that occurred when we did
5417 * the arc_read() will have kicked this off the list.
5418 * If it was a prefetch, we will explicitly move it to
5419 * the head of the list now.
5422 atomic_inc_32(&hdr->b_l1hdr.b_mfu_hits);
5423 ARCSTAT_BUMP(arcstat_mfu_hits);
5424 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
5425 } else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) {
5426 arc_state_t *new_state = arc_mfu;
5428 * This buffer has been accessed more than once but has
5429 * been evicted from the cache. Move it back to the MFU state.
5433 if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
5435 * This is a prefetch access...
5436 * move this block back to the MRU state.
5438 new_state = arc_mru;
5441 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
5442 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
5443 arc_change_state(new_state, hdr, hash_lock);
5445 atomic_inc_32(&hdr->b_l1hdr.b_mfu_ghost_hits);
5446 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
5447 } else if (hdr->b_l1hdr.b_state == arc_l2c_only) {
5449 * This buffer is on the 2nd Level ARC.
5452 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
5453 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
5454 arc_change_state(arc_mfu, hdr, hash_lock);
5456 ASSERT(!"invalid arc state");
5461 * This routine is called by dbuf_hold() to update the arc_access() state
5462 * which otherwise would be skipped for entries in the dbuf cache.
5465 arc_buf_access(arc_buf_t *buf)
5467 mutex_enter(&buf->b_evict_lock);
5468 arc_buf_hdr_t *hdr = buf->b_hdr;
5471 * Avoid taking the hash_lock when possible as an optimization.
5472 * The header must be checked again under the hash_lock in order
5473 * to handle the case where it is concurrently being released.
5475 if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
5476 mutex_exit(&buf->b_evict_lock);
5477 ARCSTAT_BUMP(arcstat_access_skip);
5481 kmutex_t *hash_lock = HDR_LOCK(hdr);
5482 mutex_enter(hash_lock);
5484 if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
5485 mutex_exit(hash_lock);
5486 mutex_exit(&buf->b_evict_lock);
5487 ARCSTAT_BUMP(arcstat_access_skip);
5491 mutex_exit(&buf->b_evict_lock);
5493 ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
5494 hdr->b_l1hdr.b_state == arc_mfu);
5496 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
5497 arc_access(hdr, hash_lock);
5498 mutex_exit(hash_lock);
5500 ARCSTAT_BUMP(arcstat_hits);
5501 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
5502 demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
5505 /* a generic arc_read_done_func_t which you can use */
5508 arc_bcopy_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
5509 arc_buf_t *buf, void *arg)
5514 bcopy(buf->b_data, arg, arc_buf_size(buf));
5515 arc_buf_destroy(buf, arg);
5518 /* a generic arc_read_done_func_t */
5521 arc_getbuf_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
5522 arc_buf_t *buf, void *arg)
5524 arc_buf_t **bufp = arg;
5526 ASSERT(zio == NULL || zio->io_error != 0);
5529 ASSERT(zio == NULL || zio->io_error == 0);
5531 ASSERT(buf->b_data != NULL);
5536 arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp)
5538 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
5539 ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0);
5540 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
5542 if (HDR_COMPRESSION_ENABLED(hdr)) {
5543 ASSERT3U(HDR_GET_COMPRESS(hdr), ==,
5544 BP_GET_COMPRESS(bp));
5546 ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
5547 ASSERT3U(HDR_GET_PSIZE(hdr), ==, BP_GET_PSIZE(bp));
5552 arc_read_done(zio_t *zio)
5554 arc_buf_hdr_t *hdr = zio->io_private;
5555 kmutex_t *hash_lock = NULL;
5556 arc_callback_t *callback_list;
5557 arc_callback_t *acb;
5558 boolean_t freeable = B_FALSE;
5559 boolean_t no_zio_error = (zio->io_error == 0);
5562 * The hdr was inserted into the hash table and removed from lists
5563 * prior to starting I/O. We should find this header, since
5564 * it's in the hash table, and it should be legit since it's
5565 * not possible to evict it during the I/O. The only possible
5566 * reason for it not to be found is if we were freed during the read.
5569 if (HDR_IN_HASH_TABLE(hdr)) {
5570 ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
5571 ASSERT3U(hdr->b_dva.dva_word[0], ==,
5572 BP_IDENTITY(zio->io_bp)->dva_word[0]);
5573 ASSERT3U(hdr->b_dva.dva_word[1], ==,
5574 BP_IDENTITY(zio->io_bp)->dva_word[1]);
5576 arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp,
5579 ASSERT((found == hdr &&
5580 DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
5581 (found == hdr && HDR_L2_READING(hdr)));
5582 ASSERT3P(hash_lock, !=, NULL);
5586 /* byteswap if necessary */
5587 if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
5588 if (BP_GET_LEVEL(zio->io_bp) > 0) {
5589 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
5591 hdr->b_l1hdr.b_byteswap =
5592 DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
5595 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
5599 arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED);
5600 if (l2arc_noprefetch && HDR_PREFETCH(hdr))
5601 arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE);
5603 callback_list = hdr->b_l1hdr.b_acb;
5604 ASSERT3P(callback_list, !=, NULL);
5606 if (hash_lock && no_zio_error && hdr->b_l1hdr.b_state == arc_anon) {
5608 * Only call arc_access on anonymous buffers. This is because
5609 * if we've issued an I/O for an evicted buffer, we've already
5610 * called arc_access (to prevent any simultaneous readers from
5611 * getting confused).
5613 arc_access(hdr, hash_lock);
5617 * If a read request has a callback (i.e. acb_done is not NULL), then we
5618 * make a buf containing the data according to the parameters which were
5619 * passed in. The implementation of arc_buf_alloc_impl() ensures that we
5620 * aren't needlessly decompressing the data multiple times.
5622 int callback_cnt = 0;
5623 for (acb = callback_list; acb != NULL; acb = acb->acb_next) {
5630 int error = arc_buf_alloc_impl(hdr, acb->acb_private,
5631 acb->acb_compressed, zio->io_error == 0,
5635 * Decompression failed. Set io_error
5636 * so that when we call acb_done (below),
5637 * we will indicate that the read failed.
5638 * Note that in the unusual case where one
5639 * callback is compressed and another
5640 * uncompressed, we will mark all of them
5641 * as failed, even though the uncompressed
5642 * one can't actually fail. In this case,
5643 * the hdr will not be anonymous, because
5644 * if there are multiple callbacks, it's
5645 * because multiple threads found the same
5646 * arc buf in the hash table.
5648 zio->io_error = error;
5653 * If there are multiple callbacks, we must have the hash lock,
5654 * because the only way for multiple threads to find this hdr is
5655 * in the hash table. This ensures that if there are multiple
5656 * callbacks, the hdr is not anonymous. If it were anonymous,
5657 * we couldn't use arc_buf_destroy() in the error case below.
5659 ASSERT(callback_cnt < 2 || hash_lock != NULL);
5661 hdr->b_l1hdr.b_acb = NULL;
5662 arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
5663 if (callback_cnt == 0) {
5664 ASSERT(HDR_PREFETCH(hdr));
5665 ASSERT0(hdr->b_l1hdr.b_bufcnt);
5666 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
5669 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
5670 callback_list != NULL);
5673 arc_hdr_verify(hdr, zio->io_bp);
5675 arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
5676 if (hdr->b_l1hdr.b_state != arc_anon)
5677 arc_change_state(arc_anon, hdr, hash_lock);
5678 if (HDR_IN_HASH_TABLE(hdr))
5679 buf_hash_remove(hdr);
5680 freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
5684 * Broadcast before we drop the hash_lock to avoid the possibility
5685 * that the hdr (and hence the cv) might be freed before we get to
5686 * the cv_broadcast().
5688 cv_broadcast(&hdr->b_l1hdr.b_cv);
5690 if (hash_lock != NULL) {
5691 mutex_exit(hash_lock);
		/*
		 * This block was freed while we waited for the read to
		 * complete. It has been removed from the hash table and
		 * moved to the anonymous state (so that it won't show up
		 * in the cache).
		 */
5699 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
5700 freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
5703 /* execute each callback and free its structure */
5704 while ((acb = callback_list) != NULL) {
5705 if (acb->acb_done != NULL) {
5706 if (zio->io_error != 0 && acb->acb_buf != NULL) {
5708 * If arc_buf_alloc_impl() fails during
5709 * decompression, the buf will still be
5710 * allocated, and needs to be freed here.
5712 arc_buf_destroy(acb->acb_buf, acb->acb_private);
5713 acb->acb_buf = NULL;
5715 acb->acb_done(zio, &zio->io_bookmark, zio->io_bp,
5716 acb->acb_buf, acb->acb_private);
5719 if (acb->acb_zio_dummy != NULL) {
5720 acb->acb_zio_dummy->io_error = zio->io_error;
5721 zio_nowait(acb->acb_zio_dummy);
5724 callback_list = acb->acb_next;
5725 kmem_free(acb, sizeof (arc_callback_t));
5729 arc_hdr_destroy(hdr);
5733 * "Read" the block at the specified DVA (in bp) via the
5734 * cache. If the block is found in the cache, invoke the provided
5735 * callback immediately and return. Note that the `zio' parameter
5736 * in the callback will be NULL in this case, since no IO was
5737 * required. If the block is not in the cache pass the read request
5738 * on to the spa with a substitute callback function, so that the
5739 * requested block will be added to the cache.
5741 * If a read request arrives for a block that has a read in-progress,
5742 * either wait for the in-progress read to complete (and return the
5743 * results); or, if this is a read with a "done" func, add a record
5744 * to the read to invoke the "done" func when the read completes,
5745 * and return; or just return.
5747 * arc_read_done() will invoke all the requested "done" functions
5748 * for readers of this block.
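 */

/*
 * Illustrative sketch (not part of the original source): a typical
 * blocking lookup through the ARC using the generic arc_getbuf_func
 * callback above; error handling is elided.
 *
 *	arc_buf_t *abuf = NULL;
 *	arc_flags_t aflags = ARC_FLAG_WAIT;
 *
 *	(void) arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *	if (abuf != NULL) {
 *		... use abuf->b_data ...
 *		arc_buf_destroy(abuf, &abuf);
 *	}
 */
int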
5751 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_read_done_func_t *done,
5752 void *private, zio_priority_t priority, int zio_flags,
5753 arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
5755 arc_buf_hdr_t *hdr = NULL;
5756 kmutex_t *hash_lock = NULL;
5758 uint64_t guid = spa_load_guid(spa);
5759 boolean_t compressed_read = (zio_flags & ZIO_FLAG_RAW) != 0;
5762 ASSERT(!BP_IS_EMBEDDED(bp) ||
5763 BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
5766 if (!BP_IS_EMBEDDED(bp)) {
5768 * Embedded BP's have no DVA and require no I/O to "read".
5769 * Create an anonymous arc buf to back it.
5771 hdr = buf_hash_find(guid, bp, &hash_lock);
5774 if (hdr != NULL && HDR_HAS_L1HDR(hdr) && hdr->b_l1hdr.b_pabd != NULL) {
5775 arc_buf_t *buf = NULL;
5776 *arc_flags |= ARC_FLAG_CACHED;
5778 if (HDR_IO_IN_PROGRESS(hdr)) {
5779 zio_t *head_zio = hdr->b_l1hdr.b_acb->acb_zio_head;
5781 ASSERT3P(head_zio, !=, NULL);
5782 if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) &&
5783 priority == ZIO_PRIORITY_SYNC_READ) {
5785 * This is a sync read that needs to wait for
5786 * an in-flight async read. Request that the
5787 * zio have its priority upgraded.
5789 zio_change_priority(head_zio, priority);
5790 DTRACE_PROBE1(arc__async__upgrade__sync,
5791 arc_buf_hdr_t *, hdr);
5792 ARCSTAT_BUMP(arcstat_async_upgrade_sync);
5794 if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
5795 arc_hdr_clear_flags(hdr,
5796 ARC_FLAG_PREDICTIVE_PREFETCH);
5799 if (*arc_flags & ARC_FLAG_WAIT) {
5800 cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
5801 mutex_exit(hash_lock);
5804 ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
5807 arc_callback_t *acb = NULL;
5809 acb = kmem_zalloc(sizeof (arc_callback_t),
5811 acb->acb_done = done;
5812 acb->acb_private = private;
5813 acb->acb_compressed = compressed_read;
5815 acb->acb_zio_dummy = zio_null(pio,
5816 spa, NULL, NULL, NULL, zio_flags);
5818 ASSERT3P(acb->acb_done, !=, NULL);
5819 acb->acb_zio_head = head_zio;
5820 acb->acb_next = hdr->b_l1hdr.b_acb;
5821 hdr->b_l1hdr.b_acb = acb;
5822 mutex_exit(hash_lock);
5825 mutex_exit(hash_lock);
5829 ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
5830 hdr->b_l1hdr.b_state == arc_mfu);
5833 if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
5835 * This is a demand read which does not have to
5836 * wait for i/o because we did a predictive
5837 * prefetch i/o for it, which has completed.
5840 arc__demand__hit__predictive__prefetch,
5841 arc_buf_hdr_t *, hdr);
5843 arcstat_demand_hit_predictive_prefetch);
5844 arc_hdr_clear_flags(hdr,
5845 ARC_FLAG_PREDICTIVE_PREFETCH);
5848 if (hdr->b_flags & ARC_FLAG_PRESCIENT_PREFETCH) {
5850 arcstat_demand_hit_prescient_prefetch);
5851 arc_hdr_clear_flags(hdr,
5852 ARC_FLAG_PRESCIENT_PREFETCH);
5855 ASSERT(!BP_IS_EMBEDDED(bp) || !BP_IS_HOLE(bp));
5856 /* Get a buf with the desired data in it. */
5857 rc = arc_buf_alloc_impl(hdr, private,
5858 compressed_read, B_TRUE, &buf);
5860 arc_buf_destroy(buf, private);
5863 ASSERT((zio_flags & ZIO_FLAG_SPECULATIVE) ||
5864 rc == 0 || rc != ENOENT);
5865 } else if (*arc_flags & ARC_FLAG_PREFETCH &&
5866 refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
5867 arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
5869 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
5870 arc_access(hdr, hash_lock);
5871 if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH)
5872 arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH);
5873 if (*arc_flags & ARC_FLAG_L2CACHE)
5874 arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
5875 mutex_exit(hash_lock);
5876 ARCSTAT_BUMP(arcstat_hits);
5877 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
5878 demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
5879 data, metadata, hits);
5882 done(NULL, zb, bp, buf, private);
5884 uint64_t lsize = BP_GET_LSIZE(bp);
5885 uint64_t psize = BP_GET_PSIZE(bp);
5886 arc_callback_t *acb;
5889 boolean_t devw = B_FALSE;
5893 /* this block is not in the cache */
5894 arc_buf_hdr_t *exists = NULL;
5895 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
5896 hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
5897 BP_GET_COMPRESS(bp), type);
5899 if (!BP_IS_EMBEDDED(bp)) {
5900 hdr->b_dva = *BP_IDENTITY(bp);
5901 hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
5902 exists = buf_hash_insert(hdr, &hash_lock);
5904 if (exists != NULL) {
5905 /* somebody beat us to the hash insert */
5906 mutex_exit(hash_lock);
5907 buf_discard_identity(hdr);
5908 arc_hdr_destroy(hdr);
5909 goto top; /* restart the IO request */
5913 * This block is in the ghost cache. If it was L2-only
5914 * (and thus didn't have an L1 hdr), we realloc the
5915 * header to add an L1 hdr.
5917 if (!HDR_HAS_L1HDR(hdr)) {
5918 hdr = arc_hdr_realloc(hdr, hdr_l2only_cache,
5921 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
5922 ASSERT(GHOST_STATE(hdr->b_l1hdr.b_state));
5923 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
5924 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
5925 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
5926 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
5929 * This is a delicate dance that we play here.
5930 * This hdr is in the ghost list so we access it
5931 * to move it out of the ghost list before we
5932 * initiate the read. If it's a prefetch then
5933 * it won't have a callback so we'll remove the
5934 * reference that arc_buf_alloc_impl() created. We
5935 * do this after we've called arc_access() to
5936 * avoid hitting an assert in remove_reference().
5938 arc_access(hdr, hash_lock);
5939 arc_hdr_alloc_pabd(hdr);
5941 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
5942 size = arc_hdr_size(hdr);
		/*
		 * If compression is enabled on the hdr, then we will do
		 * RAW I/O and will store the compressed data in the hdr's
		 * data block. Otherwise, the hdr's data block will contain
		 * the uncompressed data.
		 */
5950 if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF) {
5951 zio_flags |= ZIO_FLAG_RAW;
5954 if (*arc_flags & ARC_FLAG_PREFETCH)
5955 arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
5956 if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH)
5957 arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH);
5959 if (*arc_flags & ARC_FLAG_L2CACHE)
5960 arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
5961 if (BP_GET_LEVEL(bp) > 0)
5962 arc_hdr_set_flags(hdr, ARC_FLAG_INDIRECT);
5963 if (*arc_flags & ARC_FLAG_PREDICTIVE_PREFETCH)
5964 arc_hdr_set_flags(hdr, ARC_FLAG_PREDICTIVE_PREFETCH);
5965 ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state));
5967 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
5968 acb->acb_done = done;
5969 acb->acb_private = private;
5970 acb->acb_compressed = compressed_read;
5972 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
5973 hdr->b_l1hdr.b_acb = acb;
5974 arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
5976 if (HDR_HAS_L2HDR(hdr) &&
5977 (vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) {
5978 devw = hdr->b_l2hdr.b_dev->l2ad_writing;
5979 addr = hdr->b_l2hdr.b_daddr;
5981 * Lock out L2ARC device removal.
5983 if (vdev_is_dead(vd) ||
5984 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
5989 * We count both async reads and scrub IOs as asynchronous so
5990 * that both can be upgraded in the event of a cache hit while
5991 * the read IO is still in-flight.
5993 if (priority == ZIO_PRIORITY_ASYNC_READ ||
5994 priority == ZIO_PRIORITY_SCRUB)
5995 arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
5997 arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
6000 * At this point, we have a level 1 cache miss. Try again in
6001 * L2ARC if possible.
6003 ASSERT3U(HDR_GET_LSIZE(hdr), ==, lsize);
6005 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
6006 uint64_t, lsize, zbookmark_phys_t *, zb);
6007 ARCSTAT_BUMP(arcstat_misses);
6008 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
6009 demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
6010 data, metadata, misses);
6015 racct_add_force(curproc, RACCT_READBPS, size);
6016 racct_add_force(curproc, RACCT_READIOPS, 1);
6017 PROC_UNLOCK(curproc);
6020 curthread->td_ru.ru_inblock++;
6023 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
6025 * Read from the L2ARC if the following are true:
6026 * 1. The L2ARC vdev was previously cached.
6027 * 2. This buffer still has L2ARC metadata.
6028 * 3. This buffer isn't currently writing to the L2ARC.
6029 * 4. The L2ARC entry wasn't evicted, which may
6030 * also have invalidated the vdev.
6031 * 5. This isn't prefetch and l2arc_noprefetch is set.
6033 if (HDR_HAS_L2HDR(hdr) &&
6034 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
6035 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
6036 l2arc_read_callback_t *cb;
6040 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
6041 ARCSTAT_BUMP(arcstat_l2_hits);
6042 atomic_inc_32(&hdr->b_l2hdr.b_hits);
6044 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
6046 cb->l2rcb_hdr = hdr;
6049 cb->l2rcb_flags = zio_flags;
6051 asize = vdev_psize_to_asize(vd, size);
6052 if (asize != size) {
6053 abd = abd_alloc_for_io(asize,
6054 HDR_ISTYPE_METADATA(hdr));
6055 cb->l2rcb_abd = abd;
6057 abd = hdr->b_l1hdr.b_pabd;
6060 ASSERT(addr >= VDEV_LABEL_START_SIZE &&
6061 addr + asize <= vd->vdev_psize -
6062 VDEV_LABEL_END_SIZE);
6065 * l2arc read. The SCL_L2ARC lock will be
6066 * released by l2arc_read_done().
6067 * Issue a null zio if the underlying buffer
6068 * was squashed to zero size by compression.
6070 ASSERT3U(HDR_GET_COMPRESS(hdr), !=,
6071 ZIO_COMPRESS_EMPTY);
6072 rzio = zio_read_phys(pio, vd, addr,
6075 l2arc_read_done, cb, priority,
6076 zio_flags | ZIO_FLAG_DONT_CACHE |
6078 ZIO_FLAG_DONT_PROPAGATE |
6079 ZIO_FLAG_DONT_RETRY, B_FALSE);
6080 acb->acb_zio_head = rzio;
6082 if (hash_lock != NULL)
6083 mutex_exit(hash_lock);
6085 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
6087 ARCSTAT_INCR(arcstat_l2_read_bytes, size);
6089 if (*arc_flags & ARC_FLAG_NOWAIT) {
6094 ASSERT(*arc_flags & ARC_FLAG_WAIT);
6095 if (zio_wait(rzio) == 0)
6098 /* l2arc read error; goto zio_read() */
6099 if (hash_lock != NULL)
6100 mutex_enter(hash_lock);
6102 DTRACE_PROBE1(l2arc__miss,
6103 arc_buf_hdr_t *, hdr);
6104 ARCSTAT_BUMP(arcstat_l2_misses);
6105 if (HDR_L2_WRITING(hdr))
6106 ARCSTAT_BUMP(arcstat_l2_rw_clash);
6107 spa_config_exit(spa, SCL_L2ARC, vd);
6111 spa_config_exit(spa, SCL_L2ARC, vd);
6112 if (l2arc_ndev != 0) {
6113 DTRACE_PROBE1(l2arc__miss,
6114 arc_buf_hdr_t *, hdr);
6115 ARCSTAT_BUMP(arcstat_l2_misses);
6119 rzio = zio_read(pio, spa, bp, hdr->b_l1hdr.b_pabd, size,
6120 arc_read_done, hdr, priority, zio_flags, zb);
6121 acb->acb_zio_head = rzio;
6123 if (hash_lock != NULL)
6124 mutex_exit(hash_lock);
6126 if (*arc_flags & ARC_FLAG_WAIT)
6127 return (zio_wait(rzio));
6129 ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
6136 arc_add_prune_callback(arc_prune_func_t *func, void *private)
6140 p = kmem_alloc(sizeof (*p), KM_SLEEP);
6142 p->p_private = private;
6143 list_link_init(&p->p_node);
6144 refcount_create(&p->p_refcnt);
6146 mutex_enter(&arc_prune_mtx);
6147 refcount_add(&p->p_refcnt, &arc_prune_list);
6148 list_insert_head(&arc_prune_list, p);
6149 mutex_exit(&arc_prune_mtx);
6155 arc_remove_prune_callback(arc_prune_t *p)
6157 boolean_t wait = B_FALSE;
6158 mutex_enter(&arc_prune_mtx);
6159 list_remove(&arc_prune_list, p);
6160 if (refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
6162 mutex_exit(&arc_prune_mtx);
6164 /* wait for arc_prune_task to finish */
6166 taskq_wait(arc_prune_taskq);
6167 ASSERT0(refcount_count(&p->p_refcnt));
6168 refcount_destroy(&p->p_refcnt);
6169 kmem_free(p, sizeof (*p));
6173 * Notify the arc that a block was freed, and thus will never be used again.
6176 arc_freed(spa_t *spa, const blkptr_t *bp)
6179 kmutex_t *hash_lock;
6180 uint64_t guid = spa_load_guid(spa);
6182 ASSERT(!BP_IS_EMBEDDED(bp));
6184 hdr = buf_hash_find(guid, bp, &hash_lock);
6189 * We might be trying to free a block that is still doing I/O
6190 * (i.e. prefetch) or has a reference (i.e. a dedup-ed,
6191 * dmu_sync-ed block). If this block is being prefetched, then it
6192 * would still have the ARC_FLAG_IO_IN_PROGRESS flag set on the hdr
6193 * until the I/O completes. A block may also have a reference if it is
	 * part of a dedup-ed, dmu_sync-ed write. The dmu_sync() function would
6195 * have written the new block to its final resting place on disk but
6196 * without the dedup flag set. This would have left the hdr in the MRU
6197 * state and discoverable. When the txg finally syncs it detects that
6198 * the block was overridden in open context and issues an override I/O.
6199 * Since this is a dedup block, the override I/O will determine if the
6200 * block is already in the DDT. If so, then it will replace the io_bp
6201 * with the bp from the DDT and allow the I/O to finish. When the I/O
6202 * reaches the done callback, dbuf_write_override_done, it will
6203 * check to see if the io_bp and io_bp_override are identical.
6204 * If they are not, then it indicates that the bp was replaced with
6205 * the bp in the DDT and the override bp is freed. This allows
6206 * us to arrive here with a reference on a block that is being
6207 * freed. So if we have an I/O in progress, or a reference to
6208 * this hdr, then we don't destroy the hdr.
6210 if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) &&
6211 refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
6212 arc_change_state(arc_anon, hdr, hash_lock);
6213 arc_hdr_destroy(hdr);
6214 mutex_exit(hash_lock);
6216 mutex_exit(hash_lock);
6222 * Release this buffer from the cache, making it an anonymous buffer. This
6223 * must be done after a read and prior to modifying the buffer contents.
6224 * If the buffer has more than one reference, we must make
6225 * a new hdr for the buffer.
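 */

/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * source): a read-modify-write path releases the buffer before
 * dirtying it, so the cached copy is never mutated in place.
 *
 *	arc_buf_t *buf;			obtained earlier via arc_read()
 *
 *	arc_release(buf, tag);		buf is now anonymous
 *	ASSERT(arc_released(buf));
 *	... modify buf->b_data ...
 */
void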
6228 arc_release(arc_buf_t *buf, void *tag)
6230 arc_buf_hdr_t *hdr = buf->b_hdr;
6233 * It would be nice to assert that if it's DMU metadata (level >
6234 * 0 || it's the dnode file), then it must be syncing context.
6235 * But we don't know that information at this level.
6238 mutex_enter(&buf->b_evict_lock);
6240 ASSERT(HDR_HAS_L1HDR(hdr));
6243 * We don't grab the hash lock prior to this check, because if
6244 * the buffer's header is in the arc_anon state, it won't be
6245 * linked into the hash table.
6247 if (hdr->b_l1hdr.b_state == arc_anon) {
6248 mutex_exit(&buf->b_evict_lock);
6249 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
6250 ASSERT(!HDR_IN_HASH_TABLE(hdr));
6251 ASSERT(!HDR_HAS_L2HDR(hdr));
6252 ASSERT(HDR_EMPTY(hdr));
6253 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
6254 ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
6255 ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
6257 hdr->b_l1hdr.b_arc_access = 0;
6260 * If the buf is being overridden then it may already
6261 * have a hdr that is not empty.
6263 buf_discard_identity(hdr);
6269 kmutex_t *hash_lock = HDR_LOCK(hdr);
6270 mutex_enter(hash_lock);
6273 * This assignment is only valid as long as the hash_lock is
6274 * held, we must be careful not to reference state or the
6275 * b_state field after dropping the lock.
6277 arc_state_t *state = hdr->b_l1hdr.b_state;
6278 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
6279 ASSERT3P(state, !=, arc_anon);
6281 /* this buffer is not on any list */
6282 ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
6284 if (HDR_HAS_L2HDR(hdr)) {
6285 mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
6288 * We have to recheck this conditional again now that
6289 * we're holding the l2ad_mtx to prevent a race with
6290 * another thread which might be concurrently calling
6291 * l2arc_evict(). In that case, l2arc_evict() might have
6292 * destroyed the header's L2 portion as we were waiting
6293 * to acquire the l2ad_mtx.
6295 if (HDR_HAS_L2HDR(hdr)) {
6297 arc_hdr_l2hdr_destroy(hdr);
6300 mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx);
6304 * Do we have more than one buf?
6306 if (hdr->b_l1hdr.b_bufcnt > 1) {
6307 arc_buf_hdr_t *nhdr;
6308 uint64_t spa = hdr->b_spa;
6309 uint64_t psize = HDR_GET_PSIZE(hdr);
6310 uint64_t lsize = HDR_GET_LSIZE(hdr);
6311 enum zio_compress compress = HDR_GET_COMPRESS(hdr);
6312 arc_buf_contents_t type = arc_buf_type(hdr);
6313 VERIFY3U(hdr->b_type, ==, type);
6315 ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL);
6316 (void) remove_reference(hdr, hash_lock, tag);
6318 if (arc_buf_is_shared(buf) && !ARC_BUF_COMPRESSED(buf)) {
6319 ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
6320 ASSERT(ARC_BUF_LAST(buf));
6324 * Pull the data off of this hdr and attach it to
6325 * a new anonymous hdr. Also find the last buffer
6326 * in the hdr's buffer list.
6328 arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
6329 ASSERT3P(lastbuf, !=, NULL);
6332 * If the current arc_buf_t and the hdr are sharing their data
6333 * buffer, then we must stop sharing that block.
6335 if (arc_buf_is_shared(buf)) {
6336 VERIFY(!arc_buf_is_shared(lastbuf));
6339 * First, sever the block sharing relationship between
6340 * buf and the arc_buf_hdr_t.
6342 arc_unshare_buf(hdr, buf);
6345 * Now we need to recreate the hdr's b_pabd. Since we
6346 * have lastbuf handy, we try to share with it, but if
6347 * we can't then we allocate a new b_pabd and copy the
6348 * data from buf into it.
6350 if (arc_can_share(hdr, lastbuf)) {
6351 arc_share_buf(hdr, lastbuf);
6353 arc_hdr_alloc_pabd(hdr);
6354 abd_copy_from_buf(hdr->b_l1hdr.b_pabd,
6355 buf->b_data, psize);
6357 VERIFY3P(lastbuf->b_data, !=, NULL);
6358 } else if (HDR_SHARED_DATA(hdr)) {
6360 * Uncompressed shared buffers are always at the end
6361 * of the list. Compressed buffers don't have the
6362 * same requirements. This makes it hard to
6363 * simply assert that the lastbuf is shared so
6364 * we rely on the hdr's compression flags to determine
6365 * if we have a compressed, shared buffer.
6367 ASSERT(arc_buf_is_shared(lastbuf) ||
6368 HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF);
6369 ASSERT(!ARC_BUF_SHARED(buf));
6371 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
6372 ASSERT3P(state, !=, arc_l2c_only);
6374 (void) refcount_remove_many(&state->arcs_size,
6375 arc_buf_size(buf), buf);
6377 if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
6378 ASSERT3P(state, !=, arc_l2c_only);
6379 (void) refcount_remove_many(&state->arcs_esize[type],
6380 arc_buf_size(buf), buf);
6383 hdr->b_l1hdr.b_bufcnt -= 1;
6384 arc_cksum_verify(buf);
6386 arc_buf_unwatch(buf);
6389 mutex_exit(hash_lock);
6392 * Allocate a new hdr. The new hdr will contain a b_pabd
6393 * buffer which will be freed in arc_write().
6395 nhdr = arc_hdr_alloc(spa, psize, lsize, compress, type);
6396 ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
6397 ASSERT0(nhdr->b_l1hdr.b_bufcnt);
6398 ASSERT0(refcount_count(&nhdr->b_l1hdr.b_refcnt));
6399 VERIFY3U(nhdr->b_type, ==, type);
6400 ASSERT(!HDR_SHARED_DATA(nhdr));
6402 nhdr->b_l1hdr.b_buf = buf;
6403 nhdr->b_l1hdr.b_bufcnt = 1;
6404 (void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
6407 mutex_exit(&buf->b_evict_lock);
6408 (void) refcount_add_many(&arc_anon->arcs_size,
6409 arc_buf_size(buf), buf);
6411 mutex_exit(&buf->b_evict_lock);
6412 ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
6413 /* protected by hash lock, or hdr is on arc_anon */
6414 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
6415 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
6416 arc_change_state(arc_anon, hdr, hash_lock);
6417 hdr->b_l1hdr.b_arc_access = 0;
6418 mutex_exit(hash_lock);
6420 buf_discard_identity(hdr);
6426 arc_released(arc_buf_t *buf)
6430 mutex_enter(&buf->b_evict_lock);
6431 released = (buf->b_data != NULL &&
6432 buf->b_hdr->b_l1hdr.b_state == arc_anon);
6433 mutex_exit(&buf->b_evict_lock);
6439 arc_referenced(arc_buf_t *buf)
6443 mutex_enter(&buf->b_evict_lock);
6444 referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
6445 mutex_exit(&buf->b_evict_lock);
6446 return (referenced);
6451 arc_write_ready(zio_t *zio)
6453 arc_write_callback_t *callback = zio->io_private;
6454 arc_buf_t *buf = callback->awcb_buf;
6455 arc_buf_hdr_t *hdr = buf->b_hdr;
6456 uint64_t psize = BP_IS_HOLE(zio->io_bp) ? 0 : BP_GET_PSIZE(zio->io_bp);
6458 ASSERT(HDR_HAS_L1HDR(hdr));
6459 ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
6460 ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
6463 * If we're reexecuting this zio because the pool suspended, then
6464 * cleanup any state that was previously set the first time the
6465 * callback was invoked.
6467 if (zio->io_flags & ZIO_FLAG_REEXECUTED) {
6468 arc_cksum_free(hdr);
6470 arc_buf_unwatch(buf);
6472 if (hdr->b_l1hdr.b_pabd != NULL) {
6473 if (arc_buf_is_shared(buf)) {
6474 arc_unshare_buf(hdr, buf);
6476 arc_hdr_free_pabd(hdr);
6480 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
6481 ASSERT(!HDR_SHARED_DATA(hdr));
6482 ASSERT(!arc_buf_is_shared(buf));
6484 callback->awcb_ready(zio, buf, callback->awcb_private);
6486 if (HDR_IO_IN_PROGRESS(hdr))
6487 ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED);
6489 arc_cksum_compute(buf);
6490 arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
6492 enum zio_compress compress;
6493 if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
6494 compress = ZIO_COMPRESS_OFF;
6496 ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(zio->io_bp));
6497 compress = BP_GET_COMPRESS(zio->io_bp);
6499 HDR_SET_PSIZE(hdr, psize);
6500 arc_hdr_set_compress(hdr, compress);
6504 * Fill the hdr with data. If the hdr is compressed, the data we want
6505 * is available from the zio, otherwise we can take it from the buf.
6507 * We might be able to share the buf's data with the hdr here. However,
6508 * doing so would cause the ARC to be full of linear ABDs if we write a
6509 * lot of shareable data. As a compromise, we check whether scattered
6510 * ABDs are allowed, and assume that if they are then the user wants
6511 * the ARC to be primarily filled with them regardless of the data being
6512 * written. Therefore, if they're allowed then we allocate one and copy
6513 * the data into it; otherwise, we share the data directly if we can.
6515 if (zfs_abd_scatter_enabled || !arc_can_share(hdr, buf)) {
6516 arc_hdr_alloc_pabd(hdr);
6519 * Ideally, we would always copy the io_abd into b_pabd, but the
6520 * user may have disabled compressed ARC, thus we must check the
6521 * hdr's compression setting rather than the io_bp's.
6523 if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF) {
6524 ASSERT3U(BP_GET_COMPRESS(zio->io_bp), !=,
6526 ASSERT3U(psize, >, 0);
6528 abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize);
6530 ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr));
6532 abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data,
6536 ASSERT3P(buf->b_data, ==, abd_to_buf(zio->io_orig_abd));
6537 ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf));
6538 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
6540 arc_share_buf(hdr, buf);
6543 arc_hdr_verify(hdr, zio->io_bp);
6547 arc_write_children_ready(zio_t *zio)
6549 arc_write_callback_t *callback = zio->io_private;
6550 arc_buf_t *buf = callback->awcb_buf;
6552 callback->awcb_children_ready(zio, buf, callback->awcb_private);
6556 * The SPA calls this callback for each physical write that happens on behalf
6557 * of a logical write. See the comment in dbuf_write_physdone() for details.
6560 arc_write_physdone(zio_t *zio)
6562 arc_write_callback_t *cb = zio->io_private;
6563 if (cb->awcb_physdone != NULL)
6564 cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
6568 arc_write_done(zio_t *zio)
6570 arc_write_callback_t *callback = zio->io_private;
6571 arc_buf_t *buf = callback->awcb_buf;
6572 arc_buf_hdr_t *hdr = buf->b_hdr;
6574 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
6576 if (zio->io_error == 0) {
6577 arc_hdr_verify(hdr, zio->io_bp);
6579 if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
6580 buf_discard_identity(hdr);
6582 hdr->b_dva = *BP_IDENTITY(zio->io_bp);
6583 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
6586 ASSERT(HDR_EMPTY(hdr));
	/*
	 * If the block to be written was all-zero or compressed enough to be
	 * embedded in the BP, no write was performed so there will be no
	 * dva/birth/checksum. The buffer must therefore remain anonymous
	 * (and uncached).
	 */
6595 if (!HDR_EMPTY(hdr)) {
6596 arc_buf_hdr_t *exists;
6597 kmutex_t *hash_lock;
6599 ASSERT3U(zio->io_error, ==, 0);
6601 arc_cksum_verify(buf);
6603 exists = buf_hash_insert(hdr, &hash_lock);
6604 if (exists != NULL) {
6606 * This can only happen if we overwrite for
6607 * sync-to-convergence, because we remove
6608 * buffers from the hash table when we arc_free().
6610 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
6611 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
6612 panic("bad overwrite, hdr=%p exists=%p",
6613 (void *)hdr, (void *)exists);
6614 ASSERT(refcount_is_zero(
6615 &exists->b_l1hdr.b_refcnt));
6616 arc_change_state(arc_anon, exists, hash_lock);
6617 mutex_exit(hash_lock);
6618 arc_hdr_destroy(exists);
6619 exists = buf_hash_insert(hdr, &hash_lock);
6620 ASSERT3P(exists, ==, NULL);
6621 } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
6623 ASSERT(zio->io_prop.zp_nopwrite);
6624 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
6625 panic("bad nopwrite, hdr=%p exists=%p",
6626 (void *)hdr, (void *)exists);
6629 ASSERT(hdr->b_l1hdr.b_bufcnt == 1);
6630 ASSERT(hdr->b_l1hdr.b_state == arc_anon);
6631 ASSERT(BP_GET_DEDUP(zio->io_bp));
6632 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
6635 arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
6636 /* if it's not anon, we are doing a scrub */
6637 if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon)
6638 arc_access(hdr, hash_lock);
6639 mutex_exit(hash_lock);
6641 arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
6644 ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
6645 callback->awcb_done(zio, buf, callback->awcb_private);
6647 abd_put(zio->io_abd);
6648 kmem_free(callback, sizeof (arc_write_callback_t));
6652 arc_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
6653 boolean_t l2arc, const zio_prop_t *zp, arc_write_done_func_t *ready,
6654 arc_write_done_func_t *children_ready, arc_write_done_func_t *physdone,
6655 arc_write_done_func_t *done, void *private, zio_priority_t priority,
6656 int zio_flags, const zbookmark_phys_t *zb)
6658 arc_buf_hdr_t *hdr = buf->b_hdr;
6659 arc_write_callback_t *callback;
6661 zio_prop_t localprop = *zp;
6663 ASSERT3P(ready, !=, NULL);
6664 ASSERT3P(done, !=, NULL);
6665 ASSERT(!HDR_IO_ERROR(hdr));
6666 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
6667 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
6668 ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
6670 arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
6671 if (ARC_BUF_COMPRESSED(buf)) {
6673 * We're writing a pre-compressed buffer. Make the
6674 * compression algorithm requested by the zio_prop_t match
6675 * the pre-compressed buffer's compression algorithm.
6677 localprop.zp_compress = HDR_GET_COMPRESS(hdr);
6679 ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf));
6680 zio_flags |= ZIO_FLAG_RAW;
6682 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
6683 callback->awcb_ready = ready;
6684 callback->awcb_children_ready = children_ready;
6685 callback->awcb_physdone = physdone;
6686 callback->awcb_done = done;
6687 callback->awcb_private = private;
6688 callback->awcb_buf = buf;
6691 * The hdr's b_pabd is now stale, free it now. A new data block
6692 * will be allocated when the zio pipeline calls arc_write_ready().
6694 if (hdr->b_l1hdr.b_pabd != NULL) {
6696 * If the buf is currently sharing the data block with
6697 * the hdr then we need to break that relationship here.
6698 * The hdr will remain with a NULL data pointer and the
6699 * buf will take sole ownership of the block.
6701 if (arc_buf_is_shared(buf)) {
6702 arc_unshare_buf(hdr, buf);
6704 arc_hdr_free_pabd(hdr);
6706 VERIFY3P(buf->b_data, !=, NULL);
6707 arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);
6709 ASSERT(!arc_buf_is_shared(buf));
6710 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
6712 zio = zio_write(pio, spa, txg, bp,
6713 abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)),
6714 HDR_GET_LSIZE(hdr), arc_buf_size(buf), &localprop, arc_write_ready,
6715 (children_ready != NULL) ? arc_write_children_ready : NULL,
6716 arc_write_physdone, arc_write_done, callback,
6717 priority, zio_flags, zb);
6723 arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
6726 uint64_t available_memory = ptob(freemem);
6728 #if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC)
6729 available_memory = MIN(available_memory, uma_avail());
6732 if (freemem > (uint64_t)physmem * arc_lotsfree_percent / 100)
6735 if (txg > spa->spa_lowmem_last_txg) {
6736 spa->spa_lowmem_last_txg = txg;
6737 spa->spa_lowmem_page_load = 0;
6740 * If we are in pageout, we know that memory is already tight,
6741 * the arc is already going to be evicting, so we just want to
6742 * continue to let page writes occur as quickly as possible.
6744 if (curproc == pageproc) {
6745 if (spa->spa_lowmem_page_load >
6746 MAX(ptob(minfree), available_memory) / 4)
6747 return (SET_ERROR(ERESTART));
6748 /* Note: reserve is inflated, so we deflate */
6749 atomic_add_64(&spa->spa_lowmem_page_load, reserve / 8);
6751 } else if (spa->spa_lowmem_page_load > 0 && arc_reclaim_needed()) {
6752 /* memory is low, delay before restarting */
6753 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
6754 return (SET_ERROR(EAGAIN));
6756 spa->spa_lowmem_page_load = 0;
6757 #endif /* _KERNEL */
6762 arc_tempreserve_clear(uint64_t reserve)
6764 atomic_add_64(&arc_tempreserve, -reserve);
6765 ASSERT((int64_t)arc_tempreserve >= 0);
6769 arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg)
6774 if (reserve > arc_c/4 && !arc_no_grow) {
6775 arc_c = MIN(arc_c_max, reserve * 4);
6776 DTRACE_PROBE1(arc__set_reserve, uint64_t, arc_c);
6778 if (reserve > arc_c)
6779 return (SET_ERROR(ENOMEM));
6782 * Don't count loaned bufs as in flight dirty data to prevent long
6783 * network delays from blocking transactions that are ready to be
6784 * assigned to a txg.
6787 /* assert that it has not wrapped around */
6788 ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
6790 anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) -
6791 arc_loaned_bytes), 0);
6794 * Writes will, almost always, require additional memory allocations
6795 * in order to compress/encrypt/etc the data. We therefore need to
6796 * make sure that there is sufficient available memory for this.
6798 error = arc_memory_throttle(spa, reserve, txg);
6803 * Throttle writes when the amount of dirty data in the cache
6804 * gets too large. We try to keep the cache less than half full
6805 * of dirty blocks so that our sync times don't grow too large.
6807 * In the case of one pool being built on another pool, we want
6808 * to make sure we don't end up throttling the lower (backing)
6809 * pool when the upper pool is the majority contributor to dirty
	 * data. To ensure we make forward progress during throttling, we
6811 * also check the current pool's net dirty data and only throttle
6812 * if it exceeds zfs_arc_pool_dirty_percent of the anonymous dirty
6813 * data in the cache.
6815 * Note: if two requests come in concurrently, we might let them
6816 * both succeed, when one of them should fail. Not a huge deal.
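 */

/*
 * Worked example (assuming the default tunables,
 * zfs_arc_dirty_limit_percent = 50, zfs_arc_anon_limit_percent = 25
 * and zfs_arc_pool_dirty_percent = 20): with arc_c at 4 GB we only
 * throttle when total_dirty exceeds 2 GB, anonymous data exceeds 1 GB,
 * and this pool's own dirty data exceeds 20% of that anonymous total;
 * a quiet backing pool under a busy upper pool is therefore left alone.
 */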
6818 uint64_t total_dirty = reserve + arc_tempreserve + anon_size;
6819 uint64_t spa_dirty_anon = spa_dirty_data(spa);
6821 if (total_dirty > arc_c * zfs_arc_dirty_limit_percent / 100 &&
6822 anon_size > arc_c * zfs_arc_anon_limit_percent / 100 &&
6823 spa_dirty_anon > anon_size * zfs_arc_pool_dirty_percent / 100) {
6824 uint64_t meta_esize =
6825 refcount_count(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
6826 uint64_t data_esize =
6827 refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
6828 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
6829 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
6830 arc_tempreserve >> 10, meta_esize >> 10,
6831 data_esize >> 10, reserve >> 10, arc_c >> 10);
6832 return (SET_ERROR(ERESTART));
6834 atomic_add_64(&arc_tempreserve, reserve);
6839 arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
6840 kstat_named_t *evict_data, kstat_named_t *evict_metadata)
6842 size->value.ui64 = refcount_count(&state->arcs_size);
6843 evict_data->value.ui64 =
6844 refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
6845 evict_metadata->value.ui64 =
6846 refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
6850 arc_kstat_update(kstat_t *ksp, int rw)
6852 arc_stats_t *as = ksp->ks_data;
6854 if (rw == KSTAT_WRITE) {
6857 arc_kstat_update_state(arc_anon,
6858 &as->arcstat_anon_size,
6859 &as->arcstat_anon_evictable_data,
6860 &as->arcstat_anon_evictable_metadata);
6861 arc_kstat_update_state(arc_mru,
6862 &as->arcstat_mru_size,
6863 &as->arcstat_mru_evictable_data,
6864 &as->arcstat_mru_evictable_metadata);
6865 arc_kstat_update_state(arc_mru_ghost,
6866 &as->arcstat_mru_ghost_size,
6867 &as->arcstat_mru_ghost_evictable_data,
6868 &as->arcstat_mru_ghost_evictable_metadata);
6869 arc_kstat_update_state(arc_mfu,
6870 &as->arcstat_mfu_size,
6871 &as->arcstat_mfu_evictable_data,
6872 &as->arcstat_mfu_evictable_metadata);
6873 arc_kstat_update_state(arc_mfu_ghost,
6874 &as->arcstat_mfu_ghost_size,
6875 &as->arcstat_mfu_ghost_evictable_data,
6876 &as->arcstat_mfu_ghost_evictable_metadata);
6878 ARCSTAT(arcstat_size) = aggsum_value(&arc_size);
6879 ARCSTAT(arcstat_meta_used) = aggsum_value(&arc_meta_used);
6880 ARCSTAT(arcstat_data_size) = aggsum_value(&astat_data_size);
6881 ARCSTAT(arcstat_metadata_size) =
6882 aggsum_value(&astat_metadata_size);
6883 ARCSTAT(arcstat_hdr_size) = aggsum_value(&astat_hdr_size);
6884 ARCSTAT(arcstat_bonus_size) = aggsum_value(&astat_bonus_size);
6885 ARCSTAT(arcstat_dnode_size) = aggsum_value(&astat_dnode_size);
6886 ARCSTAT(arcstat_dbuf_size) = aggsum_value(&astat_dbuf_size);
6887 #if defined(__FreeBSD__) && defined(COMPAT_FREEBSD11)
6888 ARCSTAT(arcstat_other_size) = aggsum_value(&astat_bonus_size) +
6889 aggsum_value(&astat_dnode_size) +
6890 aggsum_value(&astat_dbuf_size);
6892 ARCSTAT(arcstat_l2_hdr_size) = aggsum_value(&astat_l2_hdr_size);
6899 * This function *must* return indices evenly distributed between all
6900 * sublists of the multilist. This is needed due to how the ARC eviction
6901 * code is laid out; arc_evict_state() assumes ARC buffers are evenly
6902 * distributed between all sublists and uses this assumption when
6903 * deciding which sublist to evict from and how much to evict from it.
6906 arc_state_multilist_index_func(multilist_t *ml, void *obj)
6908 arc_buf_hdr_t *hdr = obj;
6911 * We rely on b_dva to generate evenly distributed index
6912 * numbers using buf_hash below. So, as an added precaution,
6913 * let's make sure we never add empty buffers to the arc lists.
6915 ASSERT(!HDR_EMPTY(hdr));
	/*
	 * The assumption here is that the hash value for a given
	 * arc_buf_hdr_t will remain constant throughout its lifetime
	 * (i.e. its b_spa, b_dva, and b_birth fields don't change).
	 * Thus, we don't need to store the header's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed.
	 */
6929 return (buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
6930 multilist_get_num_sublists(ml));
6934 static eventhandler_tag arc_event_lowmem = NULL;
6937 arc_lowmem(void *arg __unused, int howto __unused)
6939 int64_t free_memory, to_free;
6941 arc_no_grow = B_TRUE;
6943 arc_growtime = gethrtime() + SEC2NSEC(arc_grow_retry);
6944 free_memory = arc_available_memory();
6945 to_free = (arc_c >> arc_shrink_shift) - MIN(free_memory, 0);
6946 DTRACE_PROBE2(arc__needfree, int64_t, free_memory, int64_t, to_free);
6947 arc_reduce_target_size(to_free);
6949 mutex_enter(&arc_adjust_lock);
6950 arc_adjust_needed = B_TRUE;
6951 zthr_wakeup(arc_adjust_zthr);
	/*
	 * It is unsafe to block here in arbitrary threads, because we can come
	 * here from ARC itself and may hold ARC locks and thus risk a deadlock
	 * with the ARC reclaim thread.
	 */
6958 if (curproc == pageproc)
6959 (void) cv_wait(&arc_adjust_waiters_cv, &arc_adjust_lock);
6960 mutex_exit(&arc_adjust_lock);
6965 arc_state_init(void)
6967 arc_anon = &ARC_anon;
6969 arc_mru_ghost = &ARC_mru_ghost;
6971 arc_mfu_ghost = &ARC_mfu_ghost;
6972 arc_l2c_only = &ARC_l2c_only;
6974 arc_mru->arcs_list[ARC_BUFC_METADATA] =
6975 multilist_create(sizeof (arc_buf_hdr_t),
6976 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
6977 arc_state_multilist_index_func);
6978 arc_mru->arcs_list[ARC_BUFC_DATA] =
6979 multilist_create(sizeof (arc_buf_hdr_t),
6980 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
6981 arc_state_multilist_index_func);
6982 arc_mru_ghost->arcs_list[ARC_BUFC_METADATA] =
6983 multilist_create(sizeof (arc_buf_hdr_t),
6984 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
6985 arc_state_multilist_index_func);
6986 arc_mru_ghost->arcs_list[ARC_BUFC_DATA] =
6987 multilist_create(sizeof (arc_buf_hdr_t),
6988 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
6989 arc_state_multilist_index_func);
6990 arc_mfu->arcs_list[ARC_BUFC_METADATA] =
6991 multilist_create(sizeof (arc_buf_hdr_t),
6992 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
6993 arc_state_multilist_index_func);
6994 arc_mfu->arcs_list[ARC_BUFC_DATA] =
6995 multilist_create(sizeof (arc_buf_hdr_t),
6996 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
6997 arc_state_multilist_index_func);
6998 arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA] =
6999 multilist_create(sizeof (arc_buf_hdr_t),
7000 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
7001 arc_state_multilist_index_func);
7002 arc_mfu_ghost->arcs_list[ARC_BUFC_DATA] =
7003 multilist_create(sizeof (arc_buf_hdr_t),
7004 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
7005 arc_state_multilist_index_func);
7006 arc_l2c_only->arcs_list[ARC_BUFC_METADATA] =
7007 multilist_create(sizeof (arc_buf_hdr_t),
7008 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
7009 arc_state_multilist_index_func);
7010 arc_l2c_only->arcs_list[ARC_BUFC_DATA] =
7011 multilist_create(sizeof (arc_buf_hdr_t),
7012 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
7013 arc_state_multilist_index_func);
7015 refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
7016 refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
7017 refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
7018 refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
7019 refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
7020 refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
7021 refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
7022 refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
7023 refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
7024 refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
7025 refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
7026 refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
7028 refcount_create(&arc_anon->arcs_size);
7029 refcount_create(&arc_mru->arcs_size);
7030 refcount_create(&arc_mru_ghost->arcs_size);
7031 refcount_create(&arc_mfu->arcs_size);
7032 refcount_create(&arc_mfu_ghost->arcs_size);
7033 refcount_create(&arc_l2c_only->arcs_size);
7035 aggsum_init(&arc_meta_used, 0);
7036 aggsum_init(&arc_size, 0);
7037 aggsum_init(&astat_data_size, 0);
7038 aggsum_init(&astat_metadata_size, 0);
7039 aggsum_init(&astat_hdr_size, 0);
7040 aggsum_init(&astat_bonus_size, 0);
7041 aggsum_init(&astat_dnode_size, 0);
7042 aggsum_init(&astat_dbuf_size, 0);
7043 aggsum_init(&astat_l2_hdr_size, 0);
7047 arc_state_fini(void)
7049 refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
7050 refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
7051 refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
7052 refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
7053 refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
7054 refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
7055 refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
7056 refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
7057 refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
7058 refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
7059 refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
7060 refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
7062 refcount_destroy(&arc_anon->arcs_size);
7063 refcount_destroy(&arc_mru->arcs_size);
7064 refcount_destroy(&arc_mru_ghost->arcs_size);
7065 refcount_destroy(&arc_mfu->arcs_size);
7066 refcount_destroy(&arc_mfu_ghost->arcs_size);
7067 refcount_destroy(&arc_l2c_only->arcs_size);
7069 multilist_destroy(arc_mru->arcs_list[ARC_BUFC_METADATA]);
7070 multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
7071 multilist_destroy(arc_mfu->arcs_list[ARC_BUFC_METADATA]);
7072 multilist_destroy(arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
7073 multilist_destroy(arc_mru->arcs_list[ARC_BUFC_DATA]);
7074 multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
7075 multilist_destroy(arc_mfu->arcs_list[ARC_BUFC_DATA]);
7076 multilist_destroy(arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
7078 aggsum_fini(&arc_meta_used);
7079 aggsum_fini(&arc_size);
7080 aggsum_fini(&astat_data_size);
7081 aggsum_fini(&astat_metadata_size);
7082 aggsum_fini(&astat_hdr_size);
7083 aggsum_fini(&astat_bonus_size);
7084 aggsum_fini(&astat_dnode_size);
7085 aggsum_fini(&astat_dbuf_size);
7086 aggsum_fini(&astat_l2_hdr_size);
7098 int i, prefetch_tunable_set = 0;
7101 * allmem is "all memory that we could possibly use".
7105 uint64_t allmem = ptob(physmem - swapfs_minfree);
7107 uint64_t allmem = (physmem * PAGESIZE) / 2;
7110 uint64_t allmem = kmem_size();
7112 mutex_init(&arc_adjust_lock, NULL, MUTEX_DEFAULT, NULL);
7113 cv_init(&arc_adjust_waiters_cv, NULL, CV_DEFAULT, NULL);
7115 mutex_init(&arc_dnlc_evicts_lock, NULL, MUTEX_DEFAULT, NULL);
7116 cv_init(&arc_dnlc_evicts_cv, NULL, CV_DEFAULT, NULL);
7118 /* set min cache to 1/32 of all memory, or arc_abs_min, whichever is more */
7119 arc_c_min = MAX(allmem / 32, arc_abs_min);
7120 /* set max to 5/8 of all memory, or all but 1GB, whichever is more */
7121 if (allmem >= 1 << 30)
7122 arc_c_max = allmem - (1 << 30);
7124 arc_c_max = arc_c_min;
7125 arc_c_max = MAX(allmem * 5 / 8, arc_c_max);
7128 * In userland, there's only the memory pressure that we artificially
7129 * create (see arc_available_memory()). Don't let arc_c get too
7130 * small, because it can cause transactions to be larger than
7131 * arc_c, causing arc_tempreserve_space() to fail.
7134 arc_c_min = arc_c_max / 2;
	/*
	 * Allow the tunables to override our calculations if they are
	 * reasonable.
	 */
7142 if (zfs_arc_max > arc_abs_min && zfs_arc_max < allmem) {
7143 arc_c_max = zfs_arc_max;
7144 arc_c_min = MIN(arc_c_min, arc_c_max);
7146 if (zfs_arc_min > arc_abs_min && zfs_arc_min <= arc_c_max)
7147 arc_c_min = zfs_arc_min;
7151 arc_p = (arc_c >> 1);
7153 /* limit meta-data to 1/4 of the arc capacity */
7154 arc_meta_limit = arc_c_max / 4;
7158 * Metadata is stored in the kernel's heap. Don't let us
7159 * use more than half the heap for the ARC.
7162 arc_meta_limit = MIN(arc_meta_limit, uma_limit() / 2);
7163 arc_dnode_limit = arc_meta_limit / 10;
7165 arc_meta_limit = MIN(arc_meta_limit,
7166 vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 2);
7170 /* Allow the tunable to override if it is reasonable */
7171 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
7172 arc_meta_limit = zfs_arc_meta_limit;
7174 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
7175 arc_c_min = arc_meta_limit / 2;
7177 if (zfs_arc_meta_min > 0) {
7178 arc_meta_min = zfs_arc_meta_min;
7180 arc_meta_min = arc_c_min / 2;
7183 /* Valid range: <arc_meta_min> - <arc_c_max> */
7184 if ((zfs_arc_dnode_limit) && (zfs_arc_dnode_limit != arc_dnode_limit) &&
7185 (zfs_arc_dnode_limit >= zfs_arc_meta_min) &&
7186 (zfs_arc_dnode_limit <= arc_c_max))
7187 arc_dnode_limit = zfs_arc_dnode_limit;
7189 if (zfs_arc_grow_retry > 0)
7190 arc_grow_retry = zfs_arc_grow_retry;
7192 if (zfs_arc_shrink_shift > 0)
7193 arc_shrink_shift = zfs_arc_shrink_shift;
7195 if (zfs_arc_no_grow_shift > 0)
7196 arc_no_grow_shift = zfs_arc_no_grow_shift;
7198 * Ensure that arc_no_grow_shift is less than arc_shrink_shift.
7200 if (arc_no_grow_shift >= arc_shrink_shift)
7201 arc_no_grow_shift = arc_shrink_shift - 1;
7203 if (zfs_arc_p_min_shift > 0)
7204 arc_p_min_shift = zfs_arc_p_min_shift;
	/* if kmem_flags are set, let's try to use less memory */
7207 if (kmem_debugging())
7209 if (arc_c < arc_c_min)
7212 zfs_arc_min = arc_c_min;
7213 zfs_arc_max = arc_c_max;
	/*
	 * The arc must be "uninitialized", so that hdr_recl() (which is
	 * registered by buf_init()) will not access arc_reap_zthr before
	 * it is created.
	 */
	ASSERT(!arc_initialized);
7225 list_create(&arc_prune_list, sizeof (arc_prune_t),
7226 offsetof(arc_prune_t, p_node));
7227 mutex_init(&arc_prune_mtx, NULL, MUTEX_DEFAULT, NULL);
7229 arc_prune_taskq = taskq_create("arc_prune", max_ncpus, minclsyspri,
7230 max_ncpus, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
7232 arc_dnlc_evicts_thread_exit = FALSE;
7234 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
7235 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
7237 if (arc_ksp != NULL) {
7238 arc_ksp->ks_data = &arc_stats;
7239 arc_ksp->ks_update = arc_kstat_update;
7240 kstat_install(arc_ksp);
7243 arc_adjust_zthr = zthr_create_timer(arc_adjust_cb_check,
7244 arc_adjust_cb, NULL, SEC2NSEC(1));
7245 arc_reap_zthr = zthr_create_timer(arc_reap_cb_check,
7246 arc_reap_cb, NULL, SEC2NSEC(1));
7249 arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL,
7250 EVENTHANDLER_PRI_FIRST);
7253 (void) thread_create(NULL, 0, arc_dnlc_evicts_thread, NULL, 0, &p0,
7254 TS_RUN, minclsyspri);
7256 arc_initialized = B_TRUE;
7260 * Calculate maximum amount of dirty data per pool.
7262 * If it has been set by /etc/system, take that.
7263 * Otherwise, use a percentage of physical memory defined by
7264 * zfs_dirty_data_max_percent (default 10%) with a cap at
7265 * zfs_dirty_data_max_max (default 4GB).
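 */

/*
 * Worked example (hypothetical machine): with 16 GB of physical memory
 * and the default zfs_dirty_data_max_percent of 10, this yields
 * zfs_dirty_data_max = 1.6 GB, well under the 4 GB cap; only machines
 * with more than 40 GB of RAM are limited by zfs_dirty_data_max_max.
 */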
7267 if (zfs_dirty_data_max == 0) {
7268 zfs_dirty_data_max = ptob(physmem) *
7269 zfs_dirty_data_max_percent / 100;
7270 zfs_dirty_data_max = MIN(zfs_dirty_data_max,
7271 zfs_dirty_data_max_max);
7275 if (TUNABLE_INT_FETCH("vfs.zfs.prefetch_disable", &zfs_prefetch_disable))
7276 prefetch_tunable_set = 1;
	if (prefetch_tunable_set == 0) {
		printf("ZFS NOTICE: Prefetch is disabled by default on i386 "
		    "-- to enable,\n");
		printf("            add \"vfs.zfs.prefetch_disable=0\" "
		    "to /boot/loader.conf.\n");
		zfs_prefetch_disable = 1;
	}
7287 if ((((uint64_t)physmem * PAGESIZE) < (1ULL << 32)) &&
7288 prefetch_tunable_set == 0) {
7289 printf("ZFS NOTICE: Prefetch is disabled by default if less "
7290 "than 4GB of RAM is present;\n"
7291 " to enable, add \"vfs.zfs.prefetch_disable=0\" "
7292 "to /boot/loader.conf.\n");
7293 zfs_prefetch_disable = 1;
7296 /* Warn about ZFS memory and address space requirements. */
7297 if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) {
7298 printf("ZFS WARNING: Recommended minimum RAM size is 512MB; "
7299 "expect unstable behavior.\n");
7301 if (allmem < 512 * (1 << 20)) {
7302 printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; "
7303 "expect unstable behavior.\n");
7304 printf(" Consider tuning vm.kmem_size and "
7305 "vm.kmem_size_max\n");
7306 printf(" in /boot/loader.conf.\n");
7317 if (arc_event_lowmem != NULL)
7318 EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem);
7321 /* Use B_TRUE to ensure *all* buffers are evicted */
7322 arc_flush(NULL, B_TRUE);
7324 mutex_enter(&arc_dnlc_evicts_lock);
7325 arc_dnlc_evicts_thread_exit = TRUE;
	/*
	 * The dnlc evicts thread will set arc_dnlc_evicts_thread_exit
	 * to FALSE when it is finished exiting; we're waiting for that.
	 */
7330 while (arc_dnlc_evicts_thread_exit) {
7331 cv_signal(&arc_dnlc_evicts_cv);
7332 cv_wait(&arc_dnlc_evicts_cv, &arc_dnlc_evicts_lock);
7334 mutex_exit(&arc_dnlc_evicts_lock);
7336 arc_initialized = B_FALSE;
7338 if (arc_ksp != NULL) {
7339 kstat_delete(arc_ksp);
7343 taskq_wait(arc_prune_taskq);
7344 taskq_destroy(arc_prune_taskq);
7346 mutex_enter(&arc_prune_mtx);
7347 while ((p = list_head(&arc_prune_list)) != NULL) {
7348 list_remove(&arc_prune_list, p);
7349 refcount_remove(&p->p_refcnt, &arc_prune_list);
7350 refcount_destroy(&p->p_refcnt);
7351 kmem_free(p, sizeof (*p));
7353 mutex_exit(&arc_prune_mtx);
7355 list_destroy(&arc_prune_list);
7356 mutex_destroy(&arc_prune_mtx);
7358 (void) zthr_cancel(arc_adjust_zthr);
7359 zthr_destroy(arc_adjust_zthr);
7361 mutex_destroy(&arc_dnlc_evicts_lock);
7362 cv_destroy(&arc_dnlc_evicts_cv);
7364 (void) zthr_cancel(arc_reap_zthr);
7365 zthr_destroy(arc_reap_zthr);
7367 mutex_destroy(&arc_adjust_lock);
7368 cv_destroy(&arc_adjust_waiters_cv);
	/*
	 * buf_fini() must precede arc_state_fini() because buf_fini() may
	 * trigger the release of kmem magazines, which can call back to
	 * arc_space_return(), which accesses aggsums freed in arc_state_fini().
	 */
	buf_fini();
	arc_state_fini();
7378 ASSERT0(arc_loaned_bytes);
7384 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
7385 * It uses dedicated storage devices to hold cached data, which are populated
7386 * using large infrequent writes. The main role of this cache is to boost
7387 * the performance of random read workloads. The intended L2ARC devices
7388 * include short-stroked disks, solid state disks, and other media with
7389 * substantially faster read latency than disk.
 *                 +-----------------------+
 *                 |         ARC           |
 *                 +-----------------------+
 *                    |         ^     ^
 *                    v         |     |
 *           l2arc_feed_thread()    arc_read()
 *                    |         |     |
 *                    |  l2arc read   |
 *                    v         |     |
 *               +---------------+    |
 *               |     L2ARC     |    |
 *               +---------------+    |
 *                   |    ^           |
 *          l2arc_write() |           |
 *                   |    |           |
 *                   v    |           |
 *                 +-------+      +-------+
 *                 | vdev  |      | vdev  |
 *                 | cache |      | cache |
 *                 +-------+      +-------+
 *                 +=========+     .-----.
 *                 :  L2ARC  :    |-_____-|
 *                 : devices :    | Disks |
 *                 +=========+    `-_____-'
 * Read requests are satisfied from the following sources, in order:
 *
 *	1) ARC
 *	2) vdev cache of L2ARC devices
 *	3) L2ARC devices
 *	4) vdev cache of disks
 *	5) disks
 *
 * Some L2ARC device types exhibit extremely slow write performance.
 * To accommodate this there are some significant differences between
 * the L2ARC and traditional cache design:
7428 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
7429 * the ARC behave as usual, freeing buffers and placing headers on ghost
7430 * lists. The ARC does not send buffers to the L2ARC during eviction as
7431 * this would add inflated write latencies for all ARC memory pressure.
7433 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
7434 * It does this by periodically scanning buffers from the eviction-end of
7435 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
7436 * not already there. It scans until a headroom of buffers is satisfied,
7437 * which itself is a buffer for ARC eviction. If a compressible buffer is
7438 * found during scanning and selected for writing to an L2ARC device, we
7439 * temporarily boost scanning headroom during the next scan cycle to make
7440 * sure we adapt to compression effects (which might significantly reduce
7441 * the data volume we write to L2ARC). The thread that does this is
7442 * l2arc_feed_thread(), illustrated below; example sizes are included to
7443 * provide a better sense of ratio than this diagram:
 *	head -->                        tail
 *	        +---------------------+----------+
 *	ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
 *	        +---------------------+----------+   |   o L2ARC eligible
 *	ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
 *	        +---------------------+----------+   |
 *	        15.9 Gbytes           ^ 32 Mbytes    |
 *	                           headroom          |
 *	                                      l2arc_feed_thread()
 *	                                             |
 *	                 l2arc write hand <--[oooo]--'
 *	                         |           8 Mbyte
 *	                         |          write max
 *	                         v
 *		  +==============================+
 *	L2ARC dev |####|#|###|###|    |####| ... |
 *	          +==============================+
 *	                     32 Gbytes
7464 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
7465 * evicted, then the L2ARC has cached a buffer much sooner than it probably
7466 * needed to, potentially wasting L2ARC device bandwidth and storage. It is
7467 * safe to say that this is an uncommon case, since buffers at the end of
7468 * the ARC lists have moved there due to inactivity.
7470 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
7471 * then the L2ARC simply misses copying some buffers. This serves as a
7472 * pressure valve to prevent heavy read workloads from both stalling the ARC
7473 * with waits and clogging the L2ARC with writes. This also helps prevent
7474 * the potential for the L2ARC to churn if it attempts to cache content too
7475 * quickly, such as during backups of the entire pool.
 *
 * 5. After system boot and before the ARC has filled main memory, there are
 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
 * lists can remain mostly static. Instead of searching from the tails of
 * these lists as pictured, the l2arc_feed_thread() will search from the
 * list heads for eligible buffers, greatly increasing its chance of finding
 * them.
 *
 * The L2ARC device write speed is also boosted during this time so that
 * the L2ARC warms up faster. Since there have been no ARC evictions yet,
 * there are no L2ARC reads, and no fear of degrading read performance
 * through increased writes.
 *
 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
 * the vdev queue can aggregate them into larger and fewer writes. Each
 * device is written to in a rotor fashion, sweeping writes through
 * available space then repeating.
 *
 * 7. The L2ARC does not store dirty content. It never needs to flush
 * write buffers back to disk-based storage.
 *
 * 8. If an ARC buffer is written (and dirtied) which also exists in the
 * L2ARC, the now stale L2ARC buffer is immediately dropped.
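 *
 * A minimal sketch of the headroom scan from point 2 above (hedged: names
 * follow l2arc_write_buffers() below; locking, stats and the actual device
 * write are omitted):
 *
 *	uint64_t passed_sz = 0;
 *	uint64_t headroom = target_sz * l2arc_headroom;
 *	for (hdr = multilist_sublist_tail(mls); hdr != NULL;
 *	    hdr = multilist_sublist_prev(mls, hdr)) {
 *		passed_sz += HDR_GET_LSIZE(hdr);
 *		if (passed_sz > headroom)
 *			break;			(scanned far enough)
 *		if (l2arc_write_eligible(guid, hdr))
 *			... copy this buffer to the L2ARC device ...
 *	}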
 *
 * The performance of the L2ARC can be tweaked by a number of tunables, which
 * may be necessary for different workloads:
 *
 *	l2arc_write_max		max write bytes per interval
 *	l2arc_write_boost	extra write bytes during device warmup
 *	l2arc_noprefetch	skip caching prefetched buffers
 *	l2arc_headroom		number of max device writes to precache
 *	l2arc_headroom_boost	when we find compressed buffers during ARC
 *				scanning, we multiply headroom by this
 *				percentage factor for the next scan cycle,
 *				since more compressed buffers are likely to
 *				be present in the ARC
 *	l2arc_feed_secs		seconds between L2ARC writing
 *
 * Tunables may be removed or added as future performance improvements are
 * integrated, and also may become zpool properties.
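 *
 * For example (hedged: on FreeBSD builds of this code these tunables are
 * typically surfaced as vfs.zfs.* sysctls; names and defaults can differ by
 * platform and release):
 *
 *	# double the per-interval write size to 16 MB
 *	sysctl vfs.zfs.l2arc_write_max=16777216
 *	# allow prefetched buffers to be cached as well
 *	sysctl vfs.zfs.l2arc_noprefetch=0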
 *
 * There are three key functions that control how the L2ARC warms up:
 *
 *	l2arc_write_eligible()	check if a buffer is eligible to cache
 *	l2arc_write_size()	calculate how much to write
 *	l2arc_write_interval()	calculate sleep delay between writes
 *
 * These three functions determine what to write, how much, and how quickly
 * to send writes.
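 *
 * A hedged sketch of how the three compose in one feed cycle (this mirrors
 * the body of l2arc_feed_thread() further below; locking and error handling
 * are omitted):
 *
 *	size = l2arc_write_size();			     how much
 *	l2arc_evict(dev, size, B_FALSE);		     clear space ahead
 *	wrote = l2arc_write_buffers(spa, dev, size);	     what to write
 *	next = l2arc_write_interval(begin, size, wrote);    how quickly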
 */

static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
{
	/*
	 * A buffer is *not* eligible for the L2ARC if it:
	 * 1. belongs to a different spa.
	 * 2. is already cached on the L2ARC.
	 * 3. has an I/O in progress (it may be an incomplete read).
	 * 4. is flagged not eligible (zfs property).
	 */
	if (hdr->b_spa != spa_guid) {
		ARCSTAT_BUMP(arcstat_l2_write_spa_mismatch);
		return (B_FALSE);
	}
	if (HDR_HAS_L2HDR(hdr)) {
		ARCSTAT_BUMP(arcstat_l2_write_in_l2);
		return (B_FALSE);
	}
	if (HDR_IO_IN_PROGRESS(hdr)) {
		ARCSTAT_BUMP(arcstat_l2_write_hdr_io_in_progress);
		return (B_FALSE);
	}
	if (!HDR_L2CACHE(hdr)) {
		ARCSTAT_BUMP(arcstat_l2_write_not_cacheable);
		return (B_FALSE);
	}

	return (B_TRUE);
}
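
/*
 * Note: l2arc_write_eligible() is called from l2arc_write_buffers() with the
 * candidate buffer's hash lock held (taken via mutex_tryenter()), and each
 * early return above bumps a kstat recording why the candidate was rejected.
 */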

static uint64_t
l2arc_write_size(void)
{
	uint64_t size;

	/*
	 * Make sure our globals have meaningful values in case the user
	 * altered them.
	 */
	size = l2arc_write_max;
	if (size == 0) {
		cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
		    "be greater than zero, resetting it to the default (%d)",
		    L2ARC_WRITE_SIZE);
		size = l2arc_write_max = L2ARC_WRITE_SIZE;
	}

	if (arc_warm == B_FALSE)
		size += l2arc_write_boost;

	return (size);
}
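
/*
 * Worked example (hedged: assumes the stock defaults, where both
 * l2arc_write_max and l2arc_write_boost equal L2ARC_WRITE_SIZE, 8 MB):
 * while arc_warm is B_FALSE the function returns 8 MB + 8 MB = 16 MB per
 * feed interval; once the ARC has warmed up it drops back to 8 MB.
 */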

static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
	clock_t interval, next, now;

	/*
	 * If the ARC lists are busy, increase our write rate; if the
	 * lists are stale, idle back. This is achieved by checking
	 * how much we previously wrote - if it was more than half of
	 * what we wanted, schedule the next write much sooner.
	 */
	if (l2arc_feed_again && wrote > (wanted / 2))
		interval = (hz * l2arc_feed_min_ms) / 1000;
	else
		interval = hz * l2arc_feed_secs;

	now = ddi_get_lbolt();
	next = MAX(now, MIN(now + interval, began + interval));

	return (next);
}
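
/*
 * Worked example (hedged: assumes hz = 1000 and the stock defaults
 * l2arc_feed_secs = 1, l2arc_feed_min_ms = 200, l2arc_feed_again = B_TRUE):
 * if the previous pass wrote more than half of what was wanted, the next
 * write is scheduled 200 ticks (200 ms) after 'began'; otherwise it waits
 * the full 1000 ticks (1 s).
 */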

/*
 * Cycle through L2ARC devices. This is how L2ARC load balances.
 * If a device is returned, this also returns holding the spa config lock.
 */
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
	l2arc_dev_t *first, *next = NULL;

	/*
	 * Lock out the removal of spas (spa_namespace_lock), then removal
	 * of cache devices (l2arc_dev_mtx). Once a device has been selected,
	 * both locks will be dropped and a spa config lock held instead.
	 */
	mutex_enter(&spa_namespace_lock);
	mutex_enter(&l2arc_dev_mtx);

	/* if there are no vdevs, there is nothing to do */
	if (l2arc_ndev == 0)
		goto out;

	first = NULL;
	next = l2arc_dev_last;
	do {
		/* loop around the list looking for a non-faulted vdev */
		if (next == NULL) {
			next = list_head(l2arc_dev_list);
		} else {
			next = list_next(l2arc_dev_list, next);
			if (next == NULL)
				next = list_head(l2arc_dev_list);
		}

		/* if we have come back to the start, bail out */
		if (first == NULL)
			first = next;
		else if (next == first)
			goto out;
	} while (vdev_is_dead(next->l2ad_vdev));

	/* if we were unable to find any usable vdevs, return NULL */
	if (vdev_is_dead(next->l2ad_vdev))
		next = NULL;

	l2arc_dev_last = next;

out:
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Grab the config lock to prevent the 'next' device from being
	 * removed while we are writing to it.
	 */
	if (next != NULL)
		spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
	mutex_exit(&spa_namespace_lock);

	return (next);
}

/*
 * Free buffers that were tagged for destruction, once the device writes
 * that referenced them have completed (see l2arc_write_done()).
 */
static void
l2arc_do_free_on_write()
{
	list_t *buflist;
	l2arc_data_free_t *df, *df_prev;

	mutex_enter(&l2arc_free_on_write_mtx);
	buflist = l2arc_free_on_write;

	for (df = list_tail(buflist); df; df = df_prev) {
		df_prev = list_prev(buflist, df);
		ASSERT3P(df->l2df_abd, !=, NULL);
		abd_free(df->l2df_abd);
		list_remove(buflist, df);
		kmem_free(df, sizeof (l2arc_data_free_t));
	}

	mutex_exit(&l2arc_free_on_write_mtx);
}

/*
 * A write to a cache device has completed. Update all headers to allow
 * reads from these buffers to begin.
 */
static void
l2arc_write_done(zio_t *zio)
{
	l2arc_write_callback_t *cb;
	l2arc_dev_t *dev;
	list_t *buflist;
	arc_buf_hdr_t *head, *hdr, *hdr_prev;
	kmutex_t *hash_lock;
	int64_t bytes_dropped = 0;

	cb = zio->io_private;
	ASSERT3P(cb, !=, NULL);
	dev = cb->l2wcb_dev;
	ASSERT3P(dev, !=, NULL);
	head = cb->l2wcb_head;
	ASSERT3P(head, !=, NULL);
	buflist = &dev->l2ad_buflist;
	ASSERT3P(buflist, !=, NULL);
	DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
	    l2arc_write_callback_t *, cb);

	if (zio->io_error != 0)
		ARCSTAT_BUMP(arcstat_l2_writes_error);

	/*
	 * All writes completed, or an error was hit.
	 */
top:
	mutex_enter(&dev->l2ad_mtx);
	for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
		hdr_prev = list_prev(buflist, hdr);

		hash_lock = HDR_LOCK(hdr);

		/*
		 * We cannot use mutex_enter or else we can deadlock
		 * with l2arc_write_buffers (due to swapping the order
		 * the hash lock and l2ad_mtx are taken).
		 */
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * Missed the hash lock. We must retry so we
			 * don't leave the ARC_FLAG_L2_WRITING bit set.
			 */
			ARCSTAT_BUMP(arcstat_l2_writes_lock_retry);

			/*
			 * We don't want to rescan the headers we've
			 * already marked as having been written out, so
			 * we reinsert the head node so we can pick up
			 * where we left off.
			 */
			list_remove(buflist, head);
			list_insert_after(buflist, hdr, head);

			mutex_exit(&dev->l2ad_mtx);

			/*
			 * We wait for the hash lock to become available
			 * to try and prevent busy waiting, and increase
			 * the chance we'll be able to acquire the lock
			 * the next time around.
			 */
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			goto top;
		}

		/*
		 * We could not have been moved into the arc_l2c_only
		 * state while in-flight due to our ARC_FLAG_L2_WRITING
		 * bit being set. Let's just ensure that's being enforced.
		 */
		ASSERT(HDR_HAS_L1HDR(hdr));

		if (zio->io_error != 0) {
			/*
			 * Error - drop L2ARC entry.
			 */
			list_remove(buflist, hdr);
			arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);

			ARCSTAT_INCR(arcstat_l2_psize, -arc_hdr_size(hdr));
			ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr));

			bytes_dropped += arc_hdr_size(hdr);
			(void) refcount_remove_many(&dev->l2ad_alloc,
			    arc_hdr_size(hdr), hdr);
		}

		/*
		 * Allow ARC to begin reads and ghost list evictions to
		 * this L2ARC entry.
		 */
		arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING);

		mutex_exit(hash_lock);
	}

	atomic_inc_64(&l2arc_writes_done);
	list_remove(buflist, head);
	ASSERT(!HDR_HAS_L1HDR(head));
	kmem_cache_free(hdr_l2only_cache, head);
	mutex_exit(&dev->l2ad_mtx);
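
	/*
	 * Return the space consumed by any dropped entries to the vdev's
	 * allocated-space accounting.
	 */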
	vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);

	l2arc_do_free_on_write();

	kmem_free(cb, sizeof (l2arc_write_callback_t));
}

/*
 * A read to a cache device completed. Validate buffer contents before
 * handing over to the regular ARC routines.
 */
static void
l2arc_read_done(zio_t *zio)
{
	l2arc_read_callback_t *cb;
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	boolean_t valid_cksum;

	ASSERT3P(zio->io_vd, !=, NULL);
	ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);

	spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);

	cb = zio->io_private;
	ASSERT3P(cb, !=, NULL);
	hdr = cb->l2rcb_hdr;
	ASSERT3P(hdr, !=, NULL);

	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));

	/*
	 * If the data was read into a temporary buffer,
	 * move it and free the buffer.
	 */
	if (cb->l2rcb_abd != NULL) {
		ASSERT3U(arc_hdr_size(hdr), <, zio->io_size);
		if (zio->io_error == 0) {
			abd_copy(hdr->b_l1hdr.b_pabd, cb->l2rcb_abd,
			    arc_hdr_size(hdr));
		}

		/*
		 * The following must be done regardless of whether
		 * there was an error:
		 * - free the temporary buffer
		 * - point zio to the real ARC buffer
		 * - set zio size accordingly
		 * These are required because zio is either re-used for
		 * an I/O of the block in the case of the error
		 * or the zio is passed to arc_read_done() and it
		 * needs real data.
		 */
		abd_free(cb->l2rcb_abd);
		zio->io_size = zio->io_orig_size = arc_hdr_size(hdr);
		zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd;
	}

	ASSERT3P(zio->io_abd, !=, NULL);

	/*
	 * Check this survived the L2ARC journey.
	 */
	ASSERT3P(zio->io_abd, ==, hdr->b_l1hdr.b_pabd);
	zio->io_bp_copy = cb->l2rcb_bp;	/* XXX fix in L2ARC 2.0 */
	zio->io_bp = &zio->io_bp_copy;	/* XXX fix in L2ARC 2.0 */

	valid_cksum = arc_cksum_is_equal(hdr, zio);
	if (valid_cksum && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
		mutex_exit(hash_lock);
		zio->io_private = hdr;
		arc_read_done(zio);
	} else {
		mutex_exit(hash_lock);

		/*
		 * Buffer didn't survive caching. Increment stats and
		 * reissue to the original storage device.
		 */
		if (zio->io_error != 0) {
			ARCSTAT_BUMP(arcstat_l2_io_error);
		} else {
			zio->io_error = SET_ERROR(EIO);
		}
		if (!valid_cksum)
			ARCSTAT_BUMP(arcstat_l2_cksum_bad);

		/*
		 * If there's no waiter, issue an async i/o to the primary
		 * storage now. If there *is* a waiter, the caller must
		 * issue the i/o in a context where it's OK to block.
		 */
		if (zio->io_waiter == NULL) {
			zio_t *pio = zio_unique_parent(zio);

			ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);

			zio_nowait(zio_read(pio, zio->io_spa, zio->io_bp,
			    hdr->b_l1hdr.b_pabd, zio->io_size, arc_read_done,
			    hdr, zio->io_priority, cb->l2rcb_flags,
			    &cb->l2rcb_zb));
		}
	}

	kmem_free(cb, sizeof (l2arc_read_callback_t));
}

/*
 * This is the list priority from which the L2ARC will search for pages to
 * cache. This is used within loops (0..3) to cycle through lists in the
 * desired order. This order can have a significant effect on cache
 * performance.
 *
 * Currently the metadata lists are hit first, MFU then MRU, followed by
 * the data lists. This function returns a locked sublist of the chosen
 * list.
 */
static multilist_sublist_t *
l2arc_sublist_lock(int list_num)
{
	multilist_t *ml = NULL;
	int idx;

	ASSERT(list_num >= 0 && list_num <= 3);

	switch (list_num) {
	case 0: ml = arc_mfu->arcs_list[ARC_BUFC_METADATA]; break;
	case 1: ml = arc_mru->arcs_list[ARC_BUFC_METADATA]; break;
	case 2: ml = arc_mfu->arcs_list[ARC_BUFC_DATA]; break;
	case 3: ml = arc_mru->arcs_list[ARC_BUFC_DATA]; break;
	}

	/*
	 * Return a randomly-selected sublist. This is acceptable
	 * because the caller feeds only a little bit of data for each
	 * call (8MB). Subsequent calls will result in different
	 * sublists being selected.
	 */
	idx = multilist_get_random_index(ml);
	return (multilist_sublist_lock(ml, idx));
}

/*
 * Evict buffers from the device write hand to the distance specified in
 * bytes. This distance may span populated buffers, it may span nothing.
 * This is clearing a region on the L2ARC device ready for writing.
 * If the 'all' boolean is set, every buffer is evicted.
 */
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
	list_t *buflist;
	arc_buf_hdr_t *hdr, *hdr_prev;
	kmutex_t *hash_lock;
	uint64_t taddr;

	buflist = &dev->l2ad_buflist;

	if (!all && dev->l2ad_first) {
		/*
		 * This is the first sweep through the device. There is
		 * nothing that needs to be evicted from this device.
		 */
		return;
	}

	if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
		/*
		 * When nearing the end of the device, evict to the end
		 * before the device write hand jumps to the start.
		 */
		taddr = dev->l2ad_end;
	} else {
		taddr = dev->l2ad_hand + distance;
	}

	DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
	    uint64_t, taddr, boolean_t, all);

top:
	mutex_enter(&dev->l2ad_mtx);
	for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
		hdr_prev = list_prev(buflist, hdr);

		hash_lock = HDR_LOCK(hdr);

		/*
		 * We cannot use mutex_enter or else we can deadlock
		 * with l2arc_write_buffers (due to swapping the order
		 * the hash lock and l2ad_mtx are taken).
		 */
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * Missed the hash lock. Retry.
			 */
			ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
			mutex_exit(&dev->l2ad_mtx);
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			goto top;
		}

		/*
		 * A header can't be on this list if it doesn't have an
		 * L2 header.
		 */
		ASSERT(HDR_HAS_L2HDR(hdr));

		/* Ensure this header has finished being written. */
		ASSERT(!HDR_L2_WRITING(hdr));
		ASSERT(!HDR_L2_WRITE_HEAD(hdr));

		if (!all && (hdr->b_l2hdr.b_daddr >= taddr ||
		    hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
			/*
			 * We've evicted to the target address,
			 * or the end of the device.
			 */
			mutex_exit(hash_lock);
			break;
		}

		if (!HDR_HAS_L1HDR(hdr)) {
			ASSERT(!HDR_L2_READING(hdr));
			/*
			 * This doesn't exist in the ARC. Destroy.
			 * arc_hdr_destroy() will call list_remove()
			 * and decrement arcstat_l2_lsize.
			 */
			arc_change_state(arc_anon, hdr, hash_lock);
			arc_hdr_destroy(hdr);
		} else {
			ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
			ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
			/*
			 * Invalidate issued or about to be issued
			 * reads, since we may be about to write
			 * over this location.
			 */
			if (HDR_L2_READING(hdr)) {
				ARCSTAT_BUMP(arcstat_l2_evict_reading);
				arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED);
			}

			arc_hdr_l2hdr_destroy(hdr);
		}
		mutex_exit(hash_lock);
	}
	mutex_exit(&dev->l2ad_mtx);
}

/*
 * Find and write ARC buffers to the L2ARC device.
 *
 * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
 * for reading until they have completed writing.
 * The scan depth ("headroom") is derived from the l2arc_headroom tunable and,
 * when compressed ARC is enabled, scaled up by l2arc_headroom_boost.
 *
 * Returns the number of bytes actually written (which may be smaller than
 * the delta by which the device hand has changed due to alignment).
 */
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
	arc_buf_hdr_t *hdr, *hdr_prev, *head;
	uint64_t write_asize, write_psize, write_lsize, headroom;
	boolean_t full;
	l2arc_write_callback_t *cb;
	zio_t *pio, *wzio;
	uint64_t guid = spa_load_guid(spa);
	int try;

	ASSERT3P(dev->l2ad_vdev, !=, NULL);

	pio = NULL;
	cb = NULL;
	full = B_FALSE;
	write_lsize = write_asize = write_psize = 0;
	head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
	arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR);

	ARCSTAT_BUMP(arcstat_l2_write_buffer_iter);

	/*
	 * Copy buffers for L2ARC writing.
	 */
	for (try = 0; try <= 3; try++) {
		multilist_sublist_t *mls = l2arc_sublist_lock(try);
		uint64_t passed_sz = 0;

		ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter);

		/*
		 * L2ARC fast warmup.
		 *
		 * Until the ARC is warm and starts to evict, read from the
		 * head of the ARC lists rather than the tail.
		 */
		if (arc_warm == B_FALSE)
			hdr = multilist_sublist_head(mls);
		else
			hdr = multilist_sublist_tail(mls);
		if (hdr == NULL)
			ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter);
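
		/*
		 * Scan depth: a multiple of the target write size, scaled
		 * up when compressed ARC may shrink what is finally written
		 * (see l2arc_headroom_boost above).
		 */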
		headroom = target_sz * l2arc_headroom;
		if (zfs_compressed_arc_enabled)
			headroom = (headroom * l2arc_headroom_boost) / 100;

		for (; hdr; hdr = hdr_prev) {
			kmutex_t *hash_lock;

			if (arc_warm == B_FALSE)
				hdr_prev = multilist_sublist_next(mls, hdr);
			else
				hdr_prev = multilist_sublist_prev(mls, hdr);
			ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned,
			    HDR_GET_LSIZE(hdr));

			hash_lock = HDR_LOCK(hdr);
			if (!mutex_tryenter(hash_lock)) {
				ARCSTAT_BUMP(arcstat_l2_write_trylock_fail);
				/*
				 * Skip this buffer rather than waiting.
				 */
				continue;
			}

			passed_sz += HDR_GET_LSIZE(hdr);
			if (passed_sz > headroom) {
				/*
				 * Searched too far.
				 */
				mutex_exit(hash_lock);
				ARCSTAT_BUMP(arcstat_l2_write_passed_headroom);
				break;
			}

			if (!l2arc_write_eligible(guid, hdr)) {
				mutex_exit(hash_lock);
				continue;
			}

			/*
			 * We rely on the L1 portion of the header below, so
			 * it's invalid for this header to have been evicted
			 * out of the ghost cache, prior to being written
			 * out. The ARC_FLAG_L2_WRITING bit ensures this
			 * won't happen.
			 */
			ASSERT(HDR_HAS_L1HDR(hdr));

			ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
			ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
			ASSERT3U(arc_hdr_size(hdr), >, 0);
			uint64_t psize = arc_hdr_size(hdr);
			uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev,
			    psize);

			if ((write_asize + asize) > target_sz) {
				full = B_TRUE;
				mutex_exit(hash_lock);
				ARCSTAT_BUMP(arcstat_l2_write_full);
				break;
			}

			if (pio == NULL) {
				/*
				 * Insert a dummy header on the buflist so
				 * l2arc_write_done() can find where the
				 * write buffers begin without searching.
				 */
				mutex_enter(&dev->l2ad_mtx);
				list_insert_head(&dev->l2ad_buflist, head);
				mutex_exit(&dev->l2ad_mtx);

				cb = kmem_alloc(
				    sizeof (l2arc_write_callback_t), KM_SLEEP);
				cb->l2wcb_dev = dev;
				cb->l2wcb_head = head;
				pio = zio_root(spa, l2arc_write_done, cb,
				    ZIO_FLAG_CANFAIL);
				ARCSTAT_BUMP(arcstat_l2_write_pios);
			}

			hdr->b_l2hdr.b_dev = dev;
			hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
			arc_hdr_set_flags(hdr,
			    ARC_FLAG_L2_WRITING | ARC_FLAG_HAS_L2HDR);

			mutex_enter(&dev->l2ad_mtx);
			list_insert_head(&dev->l2ad_buflist, hdr);
			mutex_exit(&dev->l2ad_mtx);
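
			/*
			 * Account these bytes against the device so that
			 * eviction can later hand exactly this much space
			 * back.
			 */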
			(void) refcount_add_many(&dev->l2ad_alloc, psize, hdr);

			/*
			 * Normally the L2ARC can use the hdr's data, but if
			 * we're sharing data between the hdr and one of its
			 * bufs, L2ARC needs its own copy of the data so that
			 * the ZIO below can't race with the buf consumer.
			 * Another case where we need to create a copy of the
			 * data is when the buffer size is not device-aligned
			 * and we need to pad the block to make it such.
			 * That also keeps the clock hand suitably aligned.
			 *
			 * To ensure that the copy will be available for the
			 * lifetime of the ZIO and be cleaned up afterwards, we
			 * add it to the l2arc_free_on_write queue.
			 */
			abd_t *to_write;
			if (!HDR_SHARED_DATA(hdr) && psize == asize) {
				to_write = hdr->b_l1hdr.b_pabd;
			} else {
				to_write = abd_alloc_for_io(asize,
				    HDR_ISTYPE_METADATA(hdr));
				abd_copy(to_write, hdr->b_l1hdr.b_pabd, psize);
				if (asize != psize) {
					abd_zero_off(to_write, psize,
					    asize - psize);
				}
				l2arc_free_abd_on_write(to_write, asize,
				    arc_buf_type(hdr));
			}
			wzio = zio_write_phys(pio, dev->l2ad_vdev,
			    hdr->b_l2hdr.b_daddr, asize, to_write,
			    ZIO_CHECKSUM_OFF, NULL, hdr,
			    ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_CANFAIL, B_FALSE);

			write_lsize += HDR_GET_LSIZE(hdr);
			DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
			    zio_t *, wzio);

			write_psize += psize;
			write_asize += asize;
			dev->l2ad_hand += asize;

			mutex_exit(hash_lock);

			(void) zio_nowait(wzio);
		}

		multilist_sublist_unlock(mls);

		if (full == B_TRUE)
			break;
	}

	/* No buffers selected for writing? */
	if (pio == NULL) {
		ASSERT0(write_lsize);
		ASSERT(!HDR_HAS_L1HDR(head));
		kmem_cache_free(hdr_l2only_cache, head);
		return (0);
	}

	ASSERT3U(write_psize, <=, target_sz);
	ARCSTAT_BUMP(arcstat_l2_writes_sent);
	ARCSTAT_INCR(arcstat_l2_write_bytes, write_psize);
	ARCSTAT_INCR(arcstat_l2_lsize, write_lsize);
	ARCSTAT_INCR(arcstat_l2_psize, write_psize);
	vdev_space_update(dev->l2ad_vdev, write_psize, 0, 0);

	/*
	 * Bump device hand to the device start if it is approaching the end.
	 * l2arc_evict() will already have evicted ahead for this case.
	 */
	if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
		dev->l2ad_hand = dev->l2ad_start;
		dev->l2ad_first = B_FALSE;
	}

	dev->l2ad_writing = B_TRUE;
	(void) zio_wait(pio);
	dev->l2ad_writing = B_FALSE;

	return (write_asize);
}

/*
 * This thread feeds the L2ARC at regular intervals. This is the beating
 * heart of the L2ARC.
 */
static void
l2arc_feed_thread(void *unused __unused)
{
	callb_cpr_t cpr;
	l2arc_dev_t *dev;
	spa_t *spa;
	uint64_t size, wrote;
	clock_t begin, next = ddi_get_lbolt();

	CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&l2arc_feed_thr_lock);

	while (l2arc_thread_exit == 0) {
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
		    next - ddi_get_lbolt());
		CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
		next = ddi_get_lbolt() + hz;

		/*
		 * Quick check for L2ARC devices.
		 */
		mutex_enter(&l2arc_dev_mtx);
		if (l2arc_ndev == 0) {
			mutex_exit(&l2arc_dev_mtx);
			continue;
		}
		mutex_exit(&l2arc_dev_mtx);
		begin = ddi_get_lbolt();

		/*
		 * This selects the next l2arc device to write to, and in
		 * doing so the next spa to feed from: dev->l2ad_spa. This
		 * will return NULL if there are now no l2arc devices or if
		 * they are all faulted.
		 *
		 * If a device is returned, its spa's config lock is also
		 * held to prevent device removal. l2arc_dev_get_next()
		 * will grab and release l2arc_dev_mtx.
		 */
		if ((dev = l2arc_dev_get_next()) == NULL)
			continue;

		spa = dev->l2ad_spa;
		ASSERT3P(spa, !=, NULL);

		/*
		 * If the pool is read-only then force the feed thread to
		 * sleep a little longer.
		 */
		if (!spa_writeable(spa)) {
			next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		/*
		 * Avoid contributing to memory pressure.
		 */
		if (arc_reclaim_needed()) {
			ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		ARCSTAT_BUMP(arcstat_l2_feeds);

		size = l2arc_write_size();

		/*
		 * Evict L2ARC buffers that will be overwritten.
		 */
		l2arc_evict(dev, size, B_FALSE);

		/*
		 * Write ARC buffers.
		 */
		wrote = l2arc_write_buffers(spa, dev, size);

		/*
		 * Calculate interval between writes.
		 */
		next = l2arc_write_interval(begin, size, wrote);
		spa_config_exit(spa, SCL_L2ARC, dev);
	}

	l2arc_thread_exit = 0;
	cv_broadcast(&l2arc_feed_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
	thread_exit();
}

boolean_t
l2arc_vdev_present(vdev_t *vd)
{
	l2arc_dev_t *dev;

	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev != NULL;
	    dev = list_next(l2arc_dev_list, dev)) {
		if (dev->l2ad_vdev == vd)
			break;
	}
	mutex_exit(&l2arc_dev_mtx);

	return (dev != NULL);
}

/*
 * Add a vdev for use by the L2ARC. By this point the spa has already
 * validated the vdev and opened it.
 */
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
	l2arc_dev_t *adddev;

	ASSERT(!l2arc_vdev_present(vd));

	vdev_ashift_optimize(vd);

	/*
	 * Create a new l2arc device entry.
	 */
	adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
	adddev->l2ad_spa = spa;
	adddev->l2ad_vdev = vd;
	adddev->l2ad_start = VDEV_LABEL_START_SIZE;
	adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
	adddev->l2ad_hand = adddev->l2ad_start;
	adddev->l2ad_first = B_TRUE;
	adddev->l2ad_writing = B_FALSE;

	mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
	/*
	 * This is a list of all ARC buffers that are still valid on the
	 * device.
	 */
	list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));
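
	/*
	 * Register the device's usable capacity (everything past the front
	 * labels) with the vdev space accounting.
	 */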
	vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
	refcount_create(&adddev->l2ad_alloc);

	/*
	 * Add device to global list
	 */
	mutex_enter(&l2arc_dev_mtx);
	list_insert_head(l2arc_dev_list, adddev);
	atomic_inc_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);
}

/*
 * Remove a vdev from the L2ARC.
 */
void
l2arc_remove_vdev(vdev_t *vd)
{
	l2arc_dev_t *dev, *nextdev, *remdev = NULL;

	/*
	 * Find the device by vdev
	 */
	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
		nextdev = list_next(l2arc_dev_list, dev);
		if (vd == dev->l2ad_vdev) {
			remdev = dev;
			break;
		}
	}
	ASSERT3P(remdev, !=, NULL);

	/*
	 * Remove device from global list
	 */
	list_remove(l2arc_dev_list, remdev);
	l2arc_dev_last = NULL;		/* may have been invalidated */
	atomic_dec_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Clear all buflists and ARC references. L2ARC device flush.
	 */
	l2arc_evict(remdev, 0, B_TRUE);
	list_destroy(&remdev->l2ad_buflist);
	mutex_destroy(&remdev->l2ad_mtx);
	refcount_destroy(&remdev->l2ad_alloc);
	kmem_free(remdev, sizeof (l2arc_dev_t));
}

void
l2arc_init(void)
{
	l2arc_thread_exit = 0;
	l2arc_ndev = 0;
	l2arc_writes_sent = 0;
	l2arc_writes_done = 0;

	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);

	l2arc_dev_list = &L2ARC_dev_list;
	l2arc_free_on_write = &L2ARC_free_on_write;
	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
	    offsetof(l2arc_dev_t, l2ad_node));
	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
	    offsetof(l2arc_data_free_t, l2df_list_node));
}

void
l2arc_fini(void)
{
	/*
	 * This is called from dmu_fini(), which is called from spa_fini();
	 * Because of this, we can assume that all l2arc devices have
	 * already been removed when the pools themselves were removed.
	 */

	l2arc_do_free_on_write();

	mutex_destroy(&l2arc_feed_thr_lock);
	cv_destroy(&l2arc_feed_thr_cv);
	mutex_destroy(&l2arc_dev_mtx);
	mutex_destroy(&l2arc_free_on_write_mtx);

	list_destroy(l2arc_dev_list);
	list_destroy(l2arc_free_on_write);
}

void
l2arc_start(void)
{
	if (!(spa_mode_global & FWRITE))
		return;

	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}

void
l2arc_stop(void)
{
	if (!(spa_mode_global & FWRITE))
		return;

	mutex_enter(&l2arc_feed_thr_lock);
	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
	l2arc_thread_exit = 1;
	while (l2arc_thread_exit != 0)
		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
	mutex_exit(&l2arc_feed_thr_lock);
}