4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 #pragma ident "%Z%%M% %I% %E% SMI"
29 * DVA-based Adjustable Replacement Cache
31 * While much of the theory of operation used here is
32 * based on the self-tuning, low overhead replacement cache
33 * presented by Megiddo and Modha at FAST 2003, there are some
34 * significant differences:
36 * 1. The Megiddo and Modha model assumes any page is evictable.
37 * Pages in its cache cannot be "locked" into memory. This makes
38 * the eviction algorithm simple: evict the last page in the list.
39 * This also makes the performance characteristics easy to reason
40 * about. Our cache is not so simple. At any given moment, some
41 * subset of the blocks in the cache are un-evictable because we
42 * have handed out a reference to them. Blocks are only evictable
43 * when there are no external references active. This makes
44 * eviction far more problematic: we choose to evict the evictable
45 * blocks that are the "lowest" in the list.
47 * There are times when it is not possible to evict the requested
48 * space. In these circumstances we are unable to adjust the cache
49 * size. To prevent the cache from growing unbounded at these
50 * times, we implement a "cache throttle" that slows the flow of
51 * new data into the cache until we can make space available.
53 * 2. The Megiddo and Modha model assumes a fixed cache size.
54 * Pages are evicted when the cache is full and there is a cache
55 * miss. Our model has a variable sized cache. It grows with
56 * high use, but also tries to react to memory pressure from the
57 * operating system: decreasing its size when system memory is low.
60 * 3. The Megiddo and Modha model assumes a fixed page size. All
61 * elements of the cache are therefore exactly the same size. So
62 * when adjusting the cache size following a cache miss, it's simply
63 * a matter of choosing a single page to evict. In our model, we
64 * have variable sized cache blocks (ranging from 512 bytes to
65 * 128K bytes). We therefore choose a set of blocks to evict to make
66 * space for a cache miss that approximates as closely as possible
67 * the space used by the new block.
69 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
70 * by N. Megiddo & D. Modha, FAST 2003
76 * A new reference to a cache buffer can be obtained in two
77 * ways: 1) via a hash table lookup using the DVA as a key,
78 * or 2) via one of the ARC lists. The arc_read() interface
79 * uses method 1, while the internal arc algorithms for
80 * adjusting the cache use method 2. We therefore provide two
81 * types of locks: 1) the hash table lock array, and 2) the
82 * arc list locks.
84 * Buffers do not have their own mutexes, rather they rely on the
85 * hash table mutexes for the bulk of their protection (i.e. most
86 * fields in the arc_buf_hdr_t are protected by these mutexes).
88 * buf_hash_find() returns the appropriate mutex (held) when it
89 * locates the requested buffer in the hash table. It returns
90 * NULL for the mutex if the buffer was not in the table.
92 * buf_hash_remove() expects the appropriate hash mutex to be
93 * already held before it is invoked.
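 *
 * An illustrative lookup, sketched with hypothetical caller-side
 * names (spa, dva, birth):
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr;
 *
 *	hdr = buf_hash_find(spa, dva, birth, &hash_lock);
 *	if (hdr != NULL) {
 *		... examine or update hdr while hash_lock is held ...
 *		mutex_exit(hash_lock);
 *	}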
95 * Each arc state also has a mutex which is used to protect the
96 * buffer list associated with the state. When attempting to
97 * obtain a hash table lock while holding an arc list lock you
98 * must use mutex_tryenter() to avoid deadlock. Also note that
99 * the active state mutex must be held before the ghost state mutex.
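 *
 * A minimal sketch of that rule (hypothetical caller; this is the
 * pattern arc_evict() below follows):
 *
 *	mutex_enter(&state->arcs_mtx);
 *	if (mutex_tryenter(hash_lock)) {
 *		... both locks held; safe to proceed ...
 *		mutex_exit(hash_lock);
 *	} else {
 *		... skip this buffer (counted as arcstat_mutex_miss) ...
 *	}
 *	mutex_exit(&state->arcs_mtx);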
101 * Arc buffers may have an associated eviction callback function.
102 * This function will be invoked prior to removing the buffer (e.g.
103 * in arc_do_user_evicts()). Note however that the data associated
104 * with the buffer may be evicted prior to the callback. The callback
105 * must be made with *no locks held* (to prevent deadlock). Additionally,
106 * the users of callbacks must ensure that their private data is
107 * protected from simultaneous callbacks from arc_buf_evict()
108 * and arc_do_user_evicts().
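 *
 * For illustration, a user would register such a callback through
 * arc_set_callback() (defined below); my_evict_func and
 * my_private_data are hypothetical:
 *
 *	arc_set_callback(buf, my_evict_func, my_private_data);
 *
 * where my_evict_func is an arc_evict_func_t that returns 0 on
 * success (see the VERIFY in arc_do_user_evicts()).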
110 * Note that the majority of the performance stats are manipulated
111 * with atomic operations.
116 #include <sys/zio_checksum.h>
117 #include <sys/zfs_context.h>
119 #include <sys/refcount.h>
121 #include <sys/dnlc.h>
123 #include <sys/callb.h>
124 #include <sys/kstat.h>
127 static kmutex_t arc_reclaim_thr_lock;
128 static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */
129 static uint8_t arc_thread_exit;
131 #define ARC_REDUCE_DNLC_PERCENT 3
132 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
134 typedef enum arc_reclaim_strategy {
135 ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */
136 ARC_RECLAIM_CONS /* Conservative reclaim strategy */
137 } arc_reclaim_strategy_t;
139 /* number of seconds before growing cache again */
140 static int arc_grow_retry = 60;
143 * minimum lifespan of a prefetch block in clock ticks
144 * (initialized in arc_init())
146 static int arc_min_prefetch_lifespan;
151 * These tunables are for performance analysis.
155 TUNABLE_ULONG("vfs.zfs.arc_max", &zfs_arc_max);
156 TUNABLE_ULONG("vfs.zfs.arc_min", &zfs_arc_min);
157 SYSCTL_DECL(_vfs_zfs);
158 SYSCTL_ULONG(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,
160 SYSCTL_ULONG(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RDTUN, &zfs_arc_min, 0,
164 * Note that buffers can be in one of 5 states:
165 * ARC_anon - anonymous (discussed below)
166 * ARC_mru - recently used, currently cached
167 * ARC_mru_ghost - recently used, no longer in cache
168 * ARC_mfu - frequently used, currently cached
169 * ARC_mfu_ghost - frequently used, no longer in cache
170 * When there are no active references to a buffer, it is
171 * linked onto one of the lists in arc. These are the
172 * only buffers that can be evicted or deleted.
174 * Anonymous buffers are buffers that are not associated with
175 * a DVA. These are buffers that hold dirty block copies
176 * before they are written to stable storage. By definition,
177 * they are "ref'd" and are considered part of arc_mru
178 * that cannot be freed. Generally, they will acquire a DVA
179 * as they are written and migrate onto the arc_mru list.
182 typedef struct arc_state {
183 list_t arcs_list; /* linked list of evictable buffers in state */
184 uint64_t arcs_lsize; /* total size of buffers in the linked list */
185 uint64_t arcs_size; /* total size of all buffers in this state */
190 static arc_state_t ARC_anon;
191 static arc_state_t ARC_mru;
192 static arc_state_t ARC_mru_ghost;
193 static arc_state_t ARC_mfu;
194 static arc_state_t ARC_mfu_ghost;
196 typedef struct arc_stats {
197 kstat_named_t arcstat_hits;
198 kstat_named_t arcstat_misses;
199 kstat_named_t arcstat_demand_data_hits;
200 kstat_named_t arcstat_demand_data_misses;
201 kstat_named_t arcstat_demand_metadata_hits;
202 kstat_named_t arcstat_demand_metadata_misses;
203 kstat_named_t arcstat_prefetch_data_hits;
204 kstat_named_t arcstat_prefetch_data_misses;
205 kstat_named_t arcstat_prefetch_metadata_hits;
206 kstat_named_t arcstat_prefetch_metadata_misses;
207 kstat_named_t arcstat_mru_hits;
208 kstat_named_t arcstat_mru_ghost_hits;
209 kstat_named_t arcstat_mfu_hits;
210 kstat_named_t arcstat_mfu_ghost_hits;
211 kstat_named_t arcstat_deleted;
212 kstat_named_t arcstat_recycle_miss;
213 kstat_named_t arcstat_mutex_miss;
214 kstat_named_t arcstat_evict_skip;
215 kstat_named_t arcstat_hash_elements;
216 kstat_named_t arcstat_hash_elements_max;
217 kstat_named_t arcstat_hash_collisions;
218 kstat_named_t arcstat_hash_chains;
219 kstat_named_t arcstat_hash_chain_max;
220 kstat_named_t arcstat_p;
221 kstat_named_t arcstat_c;
222 kstat_named_t arcstat_c_min;
223 kstat_named_t arcstat_c_max;
224 kstat_named_t arcstat_size;
227 static arc_stats_t arc_stats = {
228 { "hits", KSTAT_DATA_UINT64 },
229 { "misses", KSTAT_DATA_UINT64 },
230 { "demand_data_hits", KSTAT_DATA_UINT64 },
231 { "demand_data_misses", KSTAT_DATA_UINT64 },
232 { "demand_metadata_hits", KSTAT_DATA_UINT64 },
233 { "demand_metadata_misses", KSTAT_DATA_UINT64 },
234 { "prefetch_data_hits", KSTAT_DATA_UINT64 },
235 { "prefetch_data_misses", KSTAT_DATA_UINT64 },
236 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
237 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
238 { "mru_hits", KSTAT_DATA_UINT64 },
239 { "mru_ghost_hits", KSTAT_DATA_UINT64 },
240 { "mfu_hits", KSTAT_DATA_UINT64 },
241 { "mfu_ghost_hits", KSTAT_DATA_UINT64 },
242 { "deleted", KSTAT_DATA_UINT64 },
243 { "recycle_miss", KSTAT_DATA_UINT64 },
244 { "mutex_miss", KSTAT_DATA_UINT64 },
245 { "evict_skip", KSTAT_DATA_UINT64 },
246 { "hash_elements", KSTAT_DATA_UINT64 },
247 { "hash_elements_max", KSTAT_DATA_UINT64 },
248 { "hash_collisions", KSTAT_DATA_UINT64 },
249 { "hash_chains", KSTAT_DATA_UINT64 },
250 { "hash_chain_max", KSTAT_DATA_UINT64 },
251 { "p", KSTAT_DATA_UINT64 },
252 { "c", KSTAT_DATA_UINT64 },
253 { "c_min", KSTAT_DATA_UINT64 },
254 { "c_max", KSTAT_DATA_UINT64 },
255 { "size", KSTAT_DATA_UINT64 }
258 #define ARCSTAT(stat) (arc_stats.stat.value.ui64)
260 #define ARCSTAT_INCR(stat, val) \
261 atomic_add_64(&arc_stats.stat.value.ui64, (val));
263 #define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1)
264 #define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1)
266 #define ARCSTAT_MAX(stat, val) { \
268 while ((val) > (m = arc_stats.stat.value.ui64) && \
269 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
273 #define ARCSTAT_MAXSTAT(stat) \
274 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
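/*
 * For example, ARCSTAT_MAXSTAT(arcstat_hash_elements) (used in
 * buf_hash_insert() below) folds the current hash_elements value
 * into hash_elements_max.
 */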
277 * We define a macro to allow ARC hits/misses to be easily broken down by
278 * two separate conditions, giving a total of four different subtypes for
279 * each of hits and misses (so eight statistics total).
281 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
284 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
286 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
290 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
292 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
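/*
 * For example, the hit accounting in arc_buf_add_ref() below:
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 *
 * bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
 * arcstat_prefetch_metadata_hits.
 */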
297 static arc_state_t *arc_anon;
298 static arc_state_t *arc_mru;
299 static arc_state_t *arc_mru_ghost;
300 static arc_state_t *arc_mfu;
301 static arc_state_t *arc_mfu_ghost;
304 * There are several ARC variables that are critical to export as kstats --
305 * but we don't want to have to grovel around in the kstat whenever we wish to
306 * manipulate them. For these variables, we therefore define them to be in
307 * terms of the statistic variable. This assures that we are not introducing
308 * the possibility of inconsistency by having shadow copies of the variables,
309 * while still allowing the code to be readable.
311 #define arc_size ARCSTAT(arcstat_size) /* actual total arc size */
312 #define arc_p ARCSTAT(arcstat_p) /* target size of MRU */
313 #define arc_c ARCSTAT(arcstat_c) /* target size of cache */
314 #define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */
315 #define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */
317 static int arc_no_grow; /* Don't try to grow cache size */
318 static uint64_t arc_tempreserve;
320 typedef struct arc_callback arc_callback_t;
322 struct arc_callback {
324 arc_done_func_t *acb_done;
325 arc_byteswap_func_t *acb_byteswap;
327 zio_t *acb_zio_dummy;
328 arc_callback_t *acb_next;
331 typedef struct arc_write_callback arc_write_callback_t;
333 struct arc_write_callback {
335 arc_done_func_t *awcb_ready;
336 arc_done_func_t *awcb_done;
341 /* protected by hash lock */
346 kmutex_t b_freeze_lock;
347 zio_cksum_t *b_freeze_cksum;
349 arc_buf_hdr_t *b_hash_next;
354 arc_callback_t *b_acb;
358 arc_buf_contents_t b_type;
362 /* protected by arc state mutex */
363 arc_state_t *b_state;
364 list_node_t b_arc_node;
366 /* updated atomically */
367 clock_t b_arc_access;
369 /* self protecting */
373 static arc_buf_t *arc_eviction_list;
374 static kmutex_t arc_eviction_mtx;
375 static arc_buf_hdr_t arc_eviction_hdr;
376 static void arc_get_data_buf(arc_buf_t *buf);
377 static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
379 #define GHOST_STATE(state) \
380 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost)
383 * Private ARC flags. These flags are private, ARC-only flags that will show up
384 * in b_flags in the arc_buf_hdr_t. Some flags are publicly declared, and can
385 * be passed in as arc_flags in things like arc_read. However, these flags
386 * should never be passed and should only be set by ARC code. When adding new
387 * public flags, make sure not to smash the private ones.
390 #define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */
391 #define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */
392 #define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */
393 #define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */
394 #define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */
395 #define ARC_INDIRECT (1 << 14) /* this is an indirect block */
397 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE)
398 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS)
399 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR)
400 #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ)
401 #define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE)
404 * Hash table routines
407 #define HT_LOCK_PAD 128
412 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
416 #define BUF_LOCKS 256
417 typedef struct buf_hash_table {
419 arc_buf_hdr_t **ht_table;
420 struct ht_lock ht_locks[BUF_LOCKS];
423 static buf_hash_table_t buf_hash_table;
425 #define BUF_HASH_INDEX(spa, dva, birth) \
426 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
427 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
428 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
429 #define HDR_LOCK(buf) \
430 (BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))
432 uint64_t zfs_crc64_table[256];
435 buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
437 uintptr_t spav = (uintptr_t)spa;
438 uint8_t *vdva = (uint8_t *)dva;
439 uint64_t crc = -1ULL;
442 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
444 for (i = 0; i < sizeof (dva_t); i++)
445 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
447 crc ^= (spav>>8) ^ birth;
452 #define BUF_EMPTY(buf) \
453 ((buf)->b_dva.dva_word[0] == 0 && \
454 (buf)->b_dva.dva_word[1] == 0 && \
457 #define BUF_EQUAL(spa, dva, birth, buf) \
458 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
459 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
460 ((buf)->b_birth == birth) && ((buf)->b_spa == spa)
462 static arc_buf_hdr_t *
463 buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
465 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
466 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
469 mutex_enter(hash_lock);
470 for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
471 buf = buf->b_hash_next) {
472 if (BUF_EQUAL(spa, dva, birth, buf)) {
477 mutex_exit(hash_lock);
483 * Insert an entry into the hash table. If there is already an element
484 * equal to elem in the hash table, then the already existing element
485 * will be returned and the new element will not be inserted.
486 * Otherwise returns NULL.
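 *
 * A typical insert-or-find pattern (cf. arc_read() below; `exists'
 * is the caller's local):
 *
 *	exists = buf_hash_insert(hdr, &hash_lock);
 *	if (exists != NULL) {
 *		... somebody beat us to the insert; use `exists' ...
 *	}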
488 static arc_buf_hdr_t *
489 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
491 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
492 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
496 ASSERT(!HDR_IN_HASH_TABLE(buf));
498 mutex_enter(hash_lock);
499 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
500 fbuf = fbuf->b_hash_next, i++) {
501 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
505 buf->b_hash_next = buf_hash_table.ht_table[idx];
506 buf_hash_table.ht_table[idx] = buf;
507 buf->b_flags |= ARC_IN_HASH_TABLE;
509 /* collect some hash table performance data */
511 ARCSTAT_BUMP(arcstat_hash_collisions);
513 ARCSTAT_BUMP(arcstat_hash_chains);
515 ARCSTAT_MAX(arcstat_hash_chain_max, i);
518 ARCSTAT_BUMP(arcstat_hash_elements);
519 ARCSTAT_MAXSTAT(arcstat_hash_elements);
525 buf_hash_remove(arc_buf_hdr_t *buf)
527 arc_buf_hdr_t *fbuf, **bufp;
528 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
530 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
531 ASSERT(HDR_IN_HASH_TABLE(buf));
533 bufp = &buf_hash_table.ht_table[idx];
534 while ((fbuf = *bufp) != buf) {
535 ASSERT(fbuf != NULL);
536 bufp = &fbuf->b_hash_next;
538 *bufp = buf->b_hash_next;
539 buf->b_hash_next = NULL;
540 buf->b_flags &= ~ARC_IN_HASH_TABLE;
542 /* collect some hash table performance data */
543 ARCSTAT_BUMPDOWN(arcstat_hash_elements);
545 if (buf_hash_table.ht_table[idx] &&
546 buf_hash_table.ht_table[idx]->b_hash_next == NULL)
547 ARCSTAT_BUMPDOWN(arcstat_hash_chains);
551 * Global data structures and functions for the buf kmem cache.
553 static kmem_cache_t *hdr_cache;
554 static kmem_cache_t *buf_cache;
561 kmem_free(buf_hash_table.ht_table,
562 (buf_hash_table.ht_mask + 1) * sizeof (void *));
563 for (i = 0; i < BUF_LOCKS; i++)
564 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
565 kmem_cache_destroy(hdr_cache);
566 kmem_cache_destroy(buf_cache);
570 * Constructor callback - called when the cache is empty
571 * and a new buf is requested.
575 hdr_cons(void *vbuf, void *unused, int kmflag)
577 arc_buf_hdr_t *buf = vbuf;
579 bzero(buf, sizeof (arc_buf_hdr_t));
580 refcount_create(&buf->b_refcnt);
581 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
586 * Destructor callback - called when a cached buf is
587 * no longer required.
591 hdr_dest(void *vbuf, void *unused)
593 arc_buf_hdr_t *buf = vbuf;
595 refcount_destroy(&buf->b_refcnt);
596 cv_destroy(&buf->b_cv);
600 * Reclaim callback -- invoked when memory is low.
604 hdr_recl(void *unused)
606 dprintf("hdr_recl called\n");
608 * umem calls the reclaim func when we destroy the buf cache,
609 * which is after we do arc_fini().
612 cv_signal(&arc_reclaim_thr_cv);
619 uint64_t hsize = 1ULL << 12;
623 * The hash table is big enough to fill all of physical memory
624 * with an average 64K block size. The table will take up
625 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
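 * For illustration: with 4GB of physical memory, the loop below
 * settles on hsize = 4GB / 64K = 65536 buckets, i.e. 512KB of
 * table at 8 bytes per pointer.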
627 while (hsize * 65536 < (uint64_t)physmem * PAGESIZE)
630 buf_hash_table.ht_mask = hsize - 1;
631 buf_hash_table.ht_table =
632 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
633 if (buf_hash_table.ht_table == NULL) {
634 ASSERT(hsize > (1ULL << 8));
639 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
640 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
641 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
642 0, NULL, NULL, NULL, NULL, NULL, 0);
644 for (i = 0; i < 256; i++)
645 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
646 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
648 for (i = 0; i < BUF_LOCKS; i++) {
649 mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
650 NULL, MUTEX_DEFAULT, NULL);
654 #define ARC_MINTIME (hz>>4) /* 62 ms */
657 arc_cksum_verify(arc_buf_t *buf)
661 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
664 mutex_enter(&buf->b_hdr->b_freeze_lock);
665 if (buf->b_hdr->b_freeze_cksum == NULL ||
666 (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
667 mutex_exit(&buf->b_hdr->b_freeze_lock);
670 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
671 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
672 panic("buffer modified while frozen!");
673 mutex_exit(&buf->b_hdr->b_freeze_lock);
677 arc_cksum_compute(arc_buf_t *buf)
679 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
682 mutex_enter(&buf->b_hdr->b_freeze_lock);
683 if (buf->b_hdr->b_freeze_cksum != NULL) {
684 mutex_exit(&buf->b_hdr->b_freeze_lock);
687 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
688 fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
689 buf->b_hdr->b_freeze_cksum);
690 mutex_exit(&buf->b_hdr->b_freeze_lock);
694 arc_buf_thaw(arc_buf_t *buf)
696 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
699 if (buf->b_hdr->b_state != arc_anon)
700 panic("modifying non-anon buffer!");
701 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
702 panic("modifying buffer while i/o in progress!");
703 arc_cksum_verify(buf);
704 mutex_enter(&buf->b_hdr->b_freeze_lock);
705 if (buf->b_hdr->b_freeze_cksum != NULL) {
706 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
707 buf->b_hdr->b_freeze_cksum = NULL;
709 mutex_exit(&buf->b_hdr->b_freeze_lock);
713 arc_buf_freeze(arc_buf_t *buf)
715 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
718 ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
719 buf->b_hdr->b_state == arc_anon);
720 arc_cksum_compute(buf);
724 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
726 ASSERT(MUTEX_HELD(hash_lock));
728 if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
729 (ab->b_state != arc_anon)) {
730 uint64_t delta = ab->b_size * ab->b_datacnt;
732 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
733 mutex_enter(&ab->b_state->arcs_mtx);
734 ASSERT(list_link_active(&ab->b_arc_node));
735 list_remove(&ab->b_state->arcs_list, ab);
736 if (GHOST_STATE(ab->b_state)) {
737 ASSERT3U(ab->b_datacnt, ==, 0);
738 ASSERT3P(ab->b_buf, ==, NULL);
742 ASSERT3U(ab->b_state->arcs_lsize, >=, delta);
743 atomic_add_64(&ab->b_state->arcs_lsize, -delta);
744 mutex_exit(&ab->b_state->arcs_mtx);
745 /* remove the prefetch flag if we get a reference */
746 if (ab->b_flags & ARC_PREFETCH)
747 ab->b_flags &= ~ARC_PREFETCH;
752 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
755 arc_state_t *state = ab->b_state;
757 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
758 ASSERT(!GHOST_STATE(state));
760 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
761 (state != arc_anon)) {
762 ASSERT(!MUTEX_HELD(&state->arcs_mtx));
763 mutex_enter(&state->arcs_mtx);
764 ASSERT(!list_link_active(&ab->b_arc_node));
765 list_insert_head(&state->arcs_list, ab);
766 ASSERT(ab->b_datacnt > 0);
767 atomic_add_64(&state->arcs_lsize, ab->b_size * ab->b_datacnt);
768 ASSERT3U(state->arcs_size, >=, state->arcs_lsize);
769 mutex_exit(&state->arcs_mtx);
775 * Move the supplied buffer to the indicated state. The mutex
776 * for the buffer must be held by the caller.
779 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
781 arc_state_t *old_state = ab->b_state;
782 int64_t refcnt = refcount_count(&ab->b_refcnt);
783 uint64_t from_delta, to_delta;
785 ASSERT(MUTEX_HELD(hash_lock));
786 ASSERT(new_state != old_state);
787 ASSERT(refcnt == 0 || ab->b_datacnt > 0);
788 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
790 from_delta = to_delta = ab->b_datacnt * ab->b_size;
793 * If this buffer is evictable, transfer it from the
794 * old state list to the new state list.
797 if (old_state != arc_anon) {
798 int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
801 mutex_enter(&old_state->arcs_mtx);
803 ASSERT(list_link_active(&ab->b_arc_node));
804 list_remove(&old_state->arcs_list, ab);
807 * If prefetching out of the ghost cache,
808 * we will have a non-null datacnt.
810 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
811 /* ghost elements have a ghost size */
812 ASSERT(ab->b_buf == NULL);
813 from_delta = ab->b_size;
815 ASSERT3U(old_state->arcs_lsize, >=, from_delta);
816 atomic_add_64(&old_state->arcs_lsize, -from_delta);
819 mutex_exit(&old_state->arcs_mtx);
821 if (new_state != arc_anon) {
822 int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
825 mutex_enter(&new_state->arcs_mtx);
827 list_insert_head(&new_state->arcs_list, ab);
829 /* ghost elements have a ghost size */
830 if (GHOST_STATE(new_state)) {
831 ASSERT(ab->b_datacnt == 0);
832 ASSERT(ab->b_buf == NULL);
833 to_delta = ab->b_size;
835 atomic_add_64(&new_state->arcs_lsize, to_delta);
836 ASSERT3U(new_state->arcs_size + to_delta, >=,
837 new_state->arcs_lsize);
840 mutex_exit(&new_state->arcs_mtx);
844 ASSERT(!BUF_EMPTY(ab));
845 if (new_state == arc_anon && old_state != arc_anon) {
849 /* adjust state sizes */
851 atomic_add_64(&new_state->arcs_size, to_delta);
853 ASSERT3U(old_state->arcs_size, >=, from_delta);
854 atomic_add_64(&old_state->arcs_size, -from_delta);
856 ab->b_state = new_state;
860 arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
865 ASSERT3U(size, >, 0);
866 hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
867 ASSERT(BUF_EMPTY(hdr));
871 hdr->b_state = arc_anon;
872 hdr->b_arc_access = 0;
873 mutex_init(&hdr->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
874 buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
878 buf->b_private = NULL;
881 arc_get_data_buf(buf);
884 ASSERT(refcount_is_zero(&hdr->b_refcnt));
885 (void) refcount_add(&hdr->b_refcnt, tag);
891 arc_buf_clone(arc_buf_t *from)
894 arc_buf_hdr_t *hdr = from->b_hdr;
895 uint64_t size = hdr->b_size;
897 buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
901 buf->b_private = NULL;
902 buf->b_next = hdr->b_buf;
904 arc_get_data_buf(buf);
905 bcopy(from->b_data, buf->b_data, size);
911 arc_buf_add_ref(arc_buf_t *buf, void* tag)
917 * Check to see if this buffer is currently being evicted via
918 * arc_do_user_evicts().
920 mutex_enter(&arc_eviction_mtx);
923 mutex_exit(&arc_eviction_mtx);
926 hash_lock = HDR_LOCK(hdr);
927 mutex_exit(&arc_eviction_mtx);
929 mutex_enter(hash_lock);
930 if (buf->b_data == NULL) {
932 * This buffer is evicted.
934 mutex_exit(hash_lock);
938 ASSERT(buf->b_hdr == hdr);
939 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
940 add_reference(hdr, hash_lock, tag);
941 arc_access(hdr, hash_lock);
942 mutex_exit(hash_lock);
943 ARCSTAT_BUMP(arcstat_hits);
944 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
945 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
946 data, metadata, hits);
950 arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
954 /* free up data associated with the buf */
956 arc_state_t *state = buf->b_hdr->b_state;
957 uint64_t size = buf->b_hdr->b_size;
958 arc_buf_contents_t type = buf->b_hdr->b_type;
960 arc_cksum_verify(buf);
962 if (type == ARC_BUFC_METADATA) {
963 zio_buf_free(buf->b_data, size);
965 ASSERT(type == ARC_BUFC_DATA);
966 zio_data_buf_free(buf->b_data, size);
968 atomic_add_64(&arc_size, -size);
970 if (list_link_active(&buf->b_hdr->b_arc_node)) {
971 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
972 ASSERT(state != arc_anon);
973 ASSERT3U(state->arcs_lsize, >=, size);
974 atomic_add_64(&state->arcs_lsize, -size);
976 ASSERT3U(state->arcs_size, >=, size);
977 atomic_add_64(&state->arcs_size, -size);
979 ASSERT(buf->b_hdr->b_datacnt > 0);
980 buf->b_hdr->b_datacnt -= 1;
983 /* only remove the buf if requested */
987 /* remove the buf from the hdr list */
988 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
992 ASSERT(buf->b_efunc == NULL);
994 /* clean up the buf */
996 kmem_cache_free(buf_cache, buf);
1000 arc_hdr_destroy(arc_buf_hdr_t *hdr)
1002 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1003 ASSERT3P(hdr->b_state, ==, arc_anon);
1004 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1006 if (!BUF_EMPTY(hdr)) {
1007 ASSERT(!HDR_IN_HASH_TABLE(hdr));
1008 bzero(&hdr->b_dva, sizeof (dva_t));
1012 while (hdr->b_buf) {
1013 arc_buf_t *buf = hdr->b_buf;
1016 mutex_enter(&arc_eviction_mtx);
1017 ASSERT(buf->b_hdr != NULL);
1018 arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
1019 hdr->b_buf = buf->b_next;
1020 buf->b_hdr = &arc_eviction_hdr;
1021 buf->b_next = arc_eviction_list;
1022 arc_eviction_list = buf;
1023 mutex_exit(&arc_eviction_mtx);
1025 arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
1028 if (hdr->b_freeze_cksum != NULL) {
1029 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1030 hdr->b_freeze_cksum = NULL;
1032 mutex_destroy(&hdr->b_freeze_lock);
1034 ASSERT(!list_link_active(&hdr->b_arc_node));
1035 ASSERT3P(hdr->b_hash_next, ==, NULL);
1036 ASSERT3P(hdr->b_acb, ==, NULL);
1037 kmem_cache_free(hdr_cache, hdr);
1041 arc_buf_free(arc_buf_t *buf, void *tag)
1043 arc_buf_hdr_t *hdr = buf->b_hdr;
1044 int hashed = hdr->b_state != arc_anon;
1046 ASSERT(buf->b_efunc == NULL);
1047 ASSERT(buf->b_data != NULL);
1050 kmutex_t *hash_lock = HDR_LOCK(hdr);
1052 mutex_enter(hash_lock);
1053 (void) remove_reference(hdr, hash_lock, tag);
1054 if (hdr->b_datacnt > 1)
1055 arc_buf_destroy(buf, FALSE, TRUE);
1057 hdr->b_flags |= ARC_BUF_AVAILABLE;
1058 mutex_exit(hash_lock);
1059 } else if (HDR_IO_IN_PROGRESS(hdr)) {
1062 * We are in the middle of an async write. Don't destroy
1063 * this buffer unless the write completes before we finish
1064 * decrementing the reference count.
1066 mutex_enter(&arc_eviction_mtx);
1067 (void) remove_reference(hdr, NULL, tag);
1068 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1069 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
1070 mutex_exit(&arc_eviction_mtx);
1072 arc_hdr_destroy(hdr);
1074 if (remove_reference(hdr, NULL, tag) > 0) {
1075 ASSERT(HDR_IO_ERROR(hdr));
1076 arc_buf_destroy(buf, FALSE, TRUE);
1078 arc_hdr_destroy(hdr);
1084 arc_buf_remove_ref(arc_buf_t *buf, void* tag)
1086 arc_buf_hdr_t *hdr = buf->b_hdr;
1087 kmutex_t *hash_lock = HDR_LOCK(hdr);
1088 int no_callback = (buf->b_efunc == NULL);
1090 if (hdr->b_state == arc_anon) {
1091 arc_buf_free(buf, tag);
1092 return (no_callback);
1095 mutex_enter(hash_lock);
1096 ASSERT(hdr->b_state != arc_anon);
1097 ASSERT(buf->b_data != NULL);
1099 (void) remove_reference(hdr, hash_lock, tag);
1100 if (hdr->b_datacnt > 1) {
1102 arc_buf_destroy(buf, FALSE, TRUE);
1103 } else if (no_callback) {
1104 ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
1105 hdr->b_flags |= ARC_BUF_AVAILABLE;
1107 ASSERT(no_callback || hdr->b_datacnt > 1 ||
1108 refcount_is_zero(&hdr->b_refcnt));
1109 mutex_exit(hash_lock);
1110 return (no_callback);
1114 arc_buf_size(arc_buf_t *buf)
1116 return (buf->b_hdr->b_size);
1120 * Evict buffers from list until we've removed the specified number of
1121 * bytes. Move the removed buffers to the appropriate evict state.
1122 * If the recycle flag is set, then attempt to "recycle" a buffer:
1123 * - look for a buffer to evict that is `bytes' long.
1124 * - return the data block from this buffer rather than freeing it.
1125 * This flag is used by callers that are trying to make space for a
1126 * new buffer in a full arc cache.
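 *
 * For example, arc_get_data_buf() below recycles with:
 *
 *	buf->b_data = arc_evict(state, size, TRUE, type);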
1129 arc_evict(arc_state_t *state, int64_t bytes, boolean_t recycle,
1130 arc_buf_contents_t type)
1132 arc_state_t *evicted_state;
1133 uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
1134 arc_buf_hdr_t *ab, *ab_prev = NULL;
1135 kmutex_t *hash_lock;
1136 boolean_t have_lock;
1137 void *stolen = NULL;
1139 ASSERT(state == arc_mru || state == arc_mfu);
1141 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
1143 mutex_enter(&state->arcs_mtx);
1144 mutex_enter(&evicted_state->arcs_mtx);
1146 for (ab = list_tail(&state->arcs_list); ab; ab = ab_prev) {
1147 ab_prev = list_prev(&state->arcs_list, ab);
1148 /* prefetch buffers have a minimum lifespan */
1149 if (HDR_IO_IN_PROGRESS(ab) ||
1150 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
1151 LBOLT - ab->b_arc_access < arc_min_prefetch_lifespan)) {
1155 /* "lookahead" for better eviction candidate */
1156 if (recycle && ab->b_size != bytes &&
1157 ab_prev && ab_prev->b_size == bytes)
1159 hash_lock = HDR_LOCK(ab);
1160 have_lock = MUTEX_HELD(hash_lock);
1161 if (have_lock || mutex_tryenter(hash_lock)) {
1162 ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
1163 ASSERT(ab->b_datacnt > 0);
1165 arc_buf_t *buf = ab->b_buf;
1167 bytes_evicted += ab->b_size;
1168 if (recycle && ab->b_type == type &&
1169 ab->b_size == bytes) {
1170 stolen = buf->b_data;
1175 mutex_enter(&arc_eviction_mtx);
1176 arc_buf_destroy(buf,
1177 buf->b_data == stolen, FALSE);
1178 ab->b_buf = buf->b_next;
1179 buf->b_hdr = &arc_eviction_hdr;
1180 buf->b_next = arc_eviction_list;
1181 arc_eviction_list = buf;
1182 mutex_exit(&arc_eviction_mtx);
1184 arc_buf_destroy(buf,
1185 buf->b_data == stolen, TRUE);
1188 ASSERT(ab->b_datacnt == 0);
1189 arc_change_state(evicted_state, ab, hash_lock);
1190 ASSERT(HDR_IN_HASH_TABLE(ab));
1191 ab->b_flags = ARC_IN_HASH_TABLE;
1192 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
1194 mutex_exit(hash_lock);
1195 if (bytes >= 0 && bytes_evicted >= bytes)
1202 mutex_exit(&evicted_state->arcs_mtx);
1203 mutex_exit(&state->arcs_mtx);
1205 if (bytes_evicted < bytes)
1206 dprintf("only evicted %lld bytes from %p",
1207 (longlong_t)bytes_evicted, state);
1210 ARCSTAT_INCR(arcstat_evict_skip, skipped);
1213 ARCSTAT_INCR(arcstat_mutex_miss, missed);
1219 * Remove buffers from list until we've removed the specified number of
1220 * bytes. Destroy the buffers that are removed.
1223 arc_evict_ghost(arc_state_t *state, int64_t bytes)
1225 arc_buf_hdr_t *ab, *ab_prev;
1226 kmutex_t *hash_lock;
1227 uint64_t bytes_deleted = 0;
1228 uint64_t bufs_skipped = 0;
1230 ASSERT(GHOST_STATE(state));
1232 mutex_enter(&state->arcs_mtx);
1233 for (ab = list_tail(&state->arcs_list); ab; ab = ab_prev) {
1234 ab_prev = list_prev(&state->arcs_list, ab);
1235 hash_lock = HDR_LOCK(ab);
1236 if (mutex_tryenter(hash_lock)) {
1237 ASSERT(!HDR_IO_IN_PROGRESS(ab));
1238 ASSERT(ab->b_buf == NULL);
1239 arc_change_state(arc_anon, ab, hash_lock);
1240 mutex_exit(hash_lock);
1241 ARCSTAT_BUMP(arcstat_deleted);
1242 bytes_deleted += ab->b_size;
1243 arc_hdr_destroy(ab);
1244 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
1245 if (bytes >= 0 && bytes_deleted >= bytes)
1249 mutex_exit(&state->arcs_mtx);
1250 mutex_enter(hash_lock);
1251 mutex_exit(hash_lock);
1257 mutex_exit(&state->arcs_mtx);
1260 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
1264 if (bytes_deleted < bytes)
1265 dprintf("only deleted %lld bytes from %p",
1266 (longlong_t)bytes_deleted, state);
1272 int64_t top_sz, mru_over, arc_over, todelete;
1274 top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
1276 if (top_sz > arc_p && arc_mru->arcs_lsize > 0) {
1277 int64_t toevict = MIN(arc_mru->arcs_lsize, top_sz - arc_p);
1278 (void) arc_evict(arc_mru, toevict, FALSE, ARC_BUFC_UNDEF);
1279 top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
1282 mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c;
1285 if (arc_mru_ghost->arcs_lsize > 0) {
1286 todelete = MIN(arc_mru_ghost->arcs_lsize, mru_over);
1287 arc_evict_ghost(arc_mru_ghost, todelete);
1291 if ((arc_over = arc_size - arc_c) > 0) {
1294 if (arc_mfu->arcs_lsize > 0) {
1295 int64_t toevict = MIN(arc_mfu->arcs_lsize, arc_over);
1296 (void) arc_evict(arc_mfu, toevict, FALSE,
1300 tbl_over = arc_size + arc_mru_ghost->arcs_lsize +
1301 arc_mfu_ghost->arcs_lsize - arc_c*2;
1303 if (tbl_over > 0 && arc_mfu_ghost->arcs_lsize > 0) {
1304 todelete = MIN(arc_mfu_ghost->arcs_lsize, tbl_over);
1305 arc_evict_ghost(arc_mfu_ghost, todelete);
1311 arc_do_user_evicts(void)
1313 mutex_enter(&arc_eviction_mtx);
1314 while (arc_eviction_list != NULL) {
1315 arc_buf_t *buf = arc_eviction_list;
1316 arc_eviction_list = buf->b_next;
1318 mutex_exit(&arc_eviction_mtx);
1320 if (buf->b_efunc != NULL)
1321 VERIFY(buf->b_efunc(buf) == 0);
1323 buf->b_efunc = NULL;
1324 buf->b_private = NULL;
1325 kmem_cache_free(buf_cache, buf);
1326 mutex_enter(&arc_eviction_mtx);
1328 mutex_exit(&arc_eviction_mtx);
1332 * Flush all *evictable* data from the cache.
1333 * NOTE: this will not touch "active" (i.e. referenced) data.
1338 while (list_head(&arc_mru->arcs_list))
1339 (void) arc_evict(arc_mru, -1, FALSE, ARC_BUFC_UNDEF);
1340 while (list_head(&arc_mfu->arcs_list))
1341 (void) arc_evict(arc_mfu, -1, FALSE, ARC_BUFC_UNDEF);
1343 arc_evict_ghost(arc_mru_ghost, -1);
1344 arc_evict_ghost(arc_mfu_ghost, -1);
1346 mutex_enter(&arc_reclaim_thr_lock);
1347 arc_do_user_evicts();
1348 mutex_exit(&arc_reclaim_thr_lock);
1349 ASSERT(arc_eviction_list == NULL);
1352 int arc_shrink_shift = 5; /* log2(fraction of arc to reclaim) */
1357 if (arc_c > arc_c_min) {
1361 to_free = arc_c >> arc_shrink_shift;
1363 to_free = arc_c >> arc_shrink_shift;
1365 if (arc_c > arc_c_min + to_free)
1366 atomic_add_64(&arc_c, -to_free);
1370 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
1371 if (arc_c > arc_size)
1372 arc_c = MAX(arc_size, arc_c_min);
1374 arc_p = (arc_c >> 1);
1375 ASSERT(arc_c >= arc_c_min);
1376 ASSERT((int64_t)arc_p >= 0);
1379 if (arc_size > arc_c)
1383 static int zfs_needfree = 0;
1386 arc_reclaim_needed(void)
1399 * check to make sure that swapfs has enough space so that anon
1400 * reservations can still succeed. anon_resvmem() checks that the
1401 * availrmem is greater than swapfs_minfree, and the number of reserved
1402 * swap pages. We also add a bit of extra here just to prevent
1403 * circumstances from getting really dire.
1405 if (availrmem < swapfs_minfree + swapfs_reserve + extra)
1409 * If zio data pages are being allocated out of a separate heap segment,
1410 * then check that the size of available vmem for this area remains
1411 * above 1/4th free. This needs to be done when the size of the
1412 * non-default segment is smaller than physical memory, so we could
1413 * conceivably run out of VA in that segment before running out of
1416 if (zio_arena != NULL) {
1417 size_t arc_ziosize =
1418 btop(vmem_size(zio_arena, VMEM_FREE | VMEM_ALLOC));
1420 if ((physmem > arc_ziosize) &&
1421 (btop(vmem_size(zio_arena, VMEM_FREE)) < arc_ziosize >> 2))
1427 * If we're on an i386 platform, it's possible that we'll exhaust the
1428 * kernel heap space before we ever run out of available physical
1429 * memory. Most checks of the size of the heap_area compare against
1430 * tune.t_minarmem, which is the minimum available real memory that we
1431 * can have in the system. However, this is generally fixed at 25 pages
1432 * which is so low that it's useless. In this comparison, we seek to
1433 * calculate the total heap-size, and reclaim if more than 3/4ths of the
1434 * heap is allocated. (Or, in the calculation, if less than 1/4th is
1435 * free.)
1437 if (btop(vmem_size(heap_arena, VMEM_FREE)) <
1438 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
1442 if (kmem_used() > (kmem_size() * 4) / 5)
1447 if (spa_get_random(100) == 0)
1454 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
1458 kmem_cache_t *prev_cache = NULL;
1459 kmem_cache_t *prev_data_cache = NULL;
1460 extern kmem_cache_t *zio_buf_cache[];
1461 extern kmem_cache_t *zio_data_buf_cache[];
1466 * First purge some DNLC entries, in case the DNLC is using
1467 * up too much memory.
1469 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
1473 * Reclaim unused memory from all kmem caches.
1480 * An aggressive reclamation will shrink the cache size as well as
1481 * reap free buffers from the arc kmem caches.
1483 if (strat == ARC_RECLAIM_AGGR)
1487 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
1488 if (zio_buf_cache[i] != prev_cache) {
1489 prev_cache = zio_buf_cache[i];
1490 kmem_cache_reap_now(zio_buf_cache[i]);
1492 if (zio_data_buf_cache[i] != prev_data_cache) {
1493 prev_data_cache = zio_data_buf_cache[i];
1494 kmem_cache_reap_now(zio_data_buf_cache[i]);
1498 kmem_cache_reap_now(buf_cache);
1499 kmem_cache_reap_now(hdr_cache);
1503 arc_reclaim_thread(void *dummy __unused)
1505 clock_t growtime = 0;
1506 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
1509 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
1511 mutex_enter(&arc_reclaim_thr_lock);
1512 while (arc_thread_exit == 0) {
1513 if (arc_reclaim_needed()) {
1516 if (last_reclaim == ARC_RECLAIM_CONS) {
1517 last_reclaim = ARC_RECLAIM_AGGR;
1519 last_reclaim = ARC_RECLAIM_CONS;
1523 last_reclaim = ARC_RECLAIM_AGGR;
1527 /* reset the growth delay for every reclaim */
1528 growtime = LBOLT + (arc_grow_retry * hz);
1529 ASSERT(growtime > 0);
1531 if (zfs_needfree && last_reclaim == ARC_RECLAIM_CONS) {
1533 * If zfs_needfree is TRUE, our vm_lowmem hook
1534 * was called and in that case we must free some
1535 * memory, so switch to aggressive mode.
1538 last_reclaim = ARC_RECLAIM_AGGR;
1540 arc_kmem_reap_now(last_reclaim);
1541 } else if ((growtime > 0) && ((growtime - LBOLT) <= 0)) {
1542 arc_no_grow = FALSE;
1546 (2 * arc_c < arc_size +
1547 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size))
1550 if (arc_eviction_list != NULL)
1551 arc_do_user_evicts();
1553 if (arc_reclaim_needed()) {
1556 wakeup(&zfs_needfree);
1560 /* block until needed, or one second, whichever is shorter */
1561 CALLB_CPR_SAFE_BEGIN(&cpr);
1562 (void) cv_timedwait(&arc_reclaim_thr_cv,
1563 &arc_reclaim_thr_lock, hz);
1564 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
1567 arc_thread_exit = 0;
1568 cv_broadcast(&arc_reclaim_thr_cv);
1569 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */
1574 * Adapt arc info given the number of bytes we are trying to add and
1575 * the state that we are coming from. This function is only called
1576 * when we are adding new content to the cache.
1579 arc_adapt(int bytes, arc_state_t *state)
1585 * Adapt the target size of the MRU list:
1586 * - if we just hit in the MRU ghost list, then increase
1587 * the target size of the MRU list.
1588 * - if we just hit in the MFU ghost list, then increase
1589 * the target size of the MFU list by decreasing the
1590 * target size of the MRU list.
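 *
 * For illustration: if the MRU ghost list holds 1GB and the MFU
 * ghost list holds 4GB, an MRU-ghost hit below computes mult = 4
 * and grows arc_p by 4 * bytes (capped at arc_c).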
1592 if (state == arc_mru_ghost) {
1593 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
1594 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
1596 arc_p = MIN(arc_c, arc_p + bytes * mult);
1597 } else if (state == arc_mfu_ghost) {
1598 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
1599 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
1601 arc_p = MAX(0, (int64_t)arc_p - bytes * mult);
1603 ASSERT((int64_t)arc_p >= 0);
1605 if (arc_reclaim_needed()) {
1606 cv_signal(&arc_reclaim_thr_cv);
1613 if (arc_c >= arc_c_max)
1617 * If we're within (2 * maxblocksize) bytes of the target
1618 * cache size, increment the target cache size
1620 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
1621 atomic_add_64(&arc_c, (int64_t)bytes);
1622 if (arc_c > arc_c_max)
1624 else if (state == arc_anon)
1625 atomic_add_64(&arc_p, (int64_t)bytes);
1629 ASSERT((int64_t)arc_p >= 0);
1633 * Check if the cache has reached its limits and eviction is required
1639 if (arc_reclaim_needed())
1642 return (arc_size > arc_c);
1646 * The buffer, supplied as the first argument, needs a data block.
1647 * So, if we are at cache max, determine which cache should be victimized.
1648 * We have the following cases:
1650 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
1651 * In this situation if we're out of space, but the resident size of the MFU is
1652 * under the limit, victimize the MFU cache to satisfy this insertion request.
1654 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
1655 * Here, we've used up all of the available space for the MRU, so we need to
1656 * evict from our own cache instead. Evict from the set of resident MRU
1659 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
1660 * c minus p represents the MFU space in the cache, since p is the size of the
1661 * cache that is dedicated to the MRU. In this situation there's still space on
1662 * the MFU side, so the MRU side needs to be victimized.
1664 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
1665 * MFU's resident set is consuming more space than it has been allotted. In
1666 * this situation, we must victimize our own cache, the MFU, for this insertion.
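 *
 * These cases are evaluated in the code below; e.g. case 2
 * corresponds to:
 *
 *	state = (arc_p > mru_used) ? arc_mfu : arc_mru;
 *
 * which victimizes the MRU itself once arc_p no longer exceeds
 * the combined anon + MRU in-use size.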
1669 arc_get_data_buf(arc_buf_t *buf)
1671 arc_state_t *state = buf->b_hdr->b_state;
1672 uint64_t size = buf->b_hdr->b_size;
1673 arc_buf_contents_t type = buf->b_hdr->b_type;
1675 arc_adapt(size, state);
1678 * We have not yet reached cache maximum size,
1679 * just allocate a new buffer.
1681 if (!arc_evict_needed()) {
1682 if (type == ARC_BUFC_METADATA) {
1683 buf->b_data = zio_buf_alloc(size);
1685 ASSERT(type == ARC_BUFC_DATA);
1686 buf->b_data = zio_data_buf_alloc(size);
1688 atomic_add_64(&arc_size, size);
1693 * If we are prefetching from the mfu ghost list, this buffer
1694 * will end up on the mru list; so steal space from there.
1696 if (state == arc_mfu_ghost)
1697 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
1698 else if (state == arc_mru_ghost)
1701 if (state == arc_mru || state == arc_anon) {
1702 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
1703 state = (arc_p > mru_used) ? arc_mfu : arc_mru;
1706 uint64_t mfu_space = arc_c - arc_p;
1707 state = (mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
1709 if ((buf->b_data = arc_evict(state, size, TRUE, type)) == NULL) {
1710 if (type == ARC_BUFC_METADATA) {
1711 buf->b_data = zio_buf_alloc(size);
1713 ASSERT(type == ARC_BUFC_DATA);
1714 buf->b_data = zio_data_buf_alloc(size);
1716 atomic_add_64(&arc_size, size);
1717 ARCSTAT_BUMP(arcstat_recycle_miss);
1719 ASSERT(buf->b_data != NULL);
1722 * Update the state size. Note that ghost states have a
1723 * "ghost size" and so don't need to be updated.
1725 if (!GHOST_STATE(buf->b_hdr->b_state)) {
1726 arc_buf_hdr_t *hdr = buf->b_hdr;
1728 atomic_add_64(&hdr->b_state->arcs_size, size);
1729 if (list_link_active(&hdr->b_arc_node)) {
1730 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1731 atomic_add_64(&hdr->b_state->arcs_lsize, size);
1734 * If we are growing the cache, and we are adding anonymous
1735 * data, and we have outgrown arc_p, update arc_p
1737 if (arc_size < arc_c && hdr->b_state == arc_anon &&
1738 arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
1739 arc_p = MIN(arc_c, arc_p + size);
1744 * This routine is called whenever a buffer is accessed.
1745 * NOTE: the hash lock is dropped in this function.
1748 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
1750 ASSERT(MUTEX_HELD(hash_lock));
1752 if (buf->b_state == arc_anon) {
1754 * This buffer is not in the cache, and does not
1755 * appear in our "ghost" list. Add the new buffer
1756 * to the MRU state.
1759 ASSERT(buf->b_arc_access == 0);
1760 buf->b_arc_access = LBOLT;
1761 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
1762 arc_change_state(arc_mru, buf, hash_lock);
1764 } else if (buf->b_state == arc_mru) {
1766 * If this buffer is here because of a prefetch, then either:
1767 * - clear the flag if this is a "referencing" read
1768 * (any subsequent access will bump this into the MFU state).
1770 * - move the buffer to the head of the list if this is
1771 * another prefetch (to make it less likely to be evicted).
1773 if ((buf->b_flags & ARC_PREFETCH) != 0) {
1774 if (refcount_count(&buf->b_refcnt) == 0) {
1775 ASSERT(list_link_active(&buf->b_arc_node));
1776 mutex_enter(&arc_mru->arcs_mtx);
1777 list_remove(&arc_mru->arcs_list, buf);
1778 list_insert_head(&arc_mru->arcs_list, buf);
1779 mutex_exit(&arc_mru->arcs_mtx);
1781 buf->b_flags &= ~ARC_PREFETCH;
1782 ARCSTAT_BUMP(arcstat_mru_hits);
1784 buf->b_arc_access = LBOLT;
1789 * This buffer has been "accessed" only once so far,
1790 * but it is still in the cache. Move it to the MFU
1791 * state.
1793 if (LBOLT > buf->b_arc_access + ARC_MINTIME) {
1795 * More than ARC_MINTIME (~62 ms) has passed since we
1796 * instantiated this buffer. Move it to the
1797 * most frequently used state.
1799 buf->b_arc_access = LBOLT;
1800 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
1801 arc_change_state(arc_mfu, buf, hash_lock);
1803 ARCSTAT_BUMP(arcstat_mru_hits);
1804 } else if (buf->b_state == arc_mru_ghost) {
1805 arc_state_t *new_state;
1807 * This buffer has been "accessed" recently, but
1808 * was evicted from the cache. Move it to the
1809 * MFU state.
1812 if (buf->b_flags & ARC_PREFETCH) {
1813 new_state = arc_mru;
1814 if (refcount_count(&buf->b_refcnt) > 0)
1815 buf->b_flags &= ~ARC_PREFETCH;
1816 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
1818 new_state = arc_mfu;
1819 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
1822 buf->b_arc_access = LBOLT;
1823 arc_change_state(new_state, buf, hash_lock);
1825 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
1826 } else if (buf->b_state == arc_mfu) {
1828 * This buffer has been accessed more than once and is
1829 * still in the cache. Keep it in the MFU state.
1831 * NOTE: an add_reference() that occurred when we did
1832 * the arc_read() will have kicked this off the list.
1833 * If it was a prefetch, we will explicitly move it to
1834 * the head of the list now.
1836 if ((buf->b_flags & ARC_PREFETCH) != 0) {
1837 ASSERT(refcount_count(&buf->b_refcnt) == 0);
1838 ASSERT(list_link_active(&buf->b_arc_node));
1839 mutex_enter(&arc_mfu->arcs_mtx);
1840 list_remove(&arc_mfu->arcs_list, buf);
1841 list_insert_head(&arc_mfu->arcs_list, buf);
1842 mutex_exit(&arc_mfu->arcs_mtx);
1844 ARCSTAT_BUMP(arcstat_mfu_hits);
1845 buf->b_arc_access = LBOLT;
1846 } else if (buf->b_state == arc_mfu_ghost) {
1847 arc_state_t *new_state = arc_mfu;
1849 * This buffer has been accessed more than once but has
1850 * been evicted from the cache. Move it back to the
1851 * MFU state.
1854 if (buf->b_flags & ARC_PREFETCH) {
1856 * This is a prefetch access...
1857 * move this block back to the MRU state.
1859 ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
1860 new_state = arc_mru;
1863 buf->b_arc_access = LBOLT;
1864 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
1865 arc_change_state(new_state, buf, hash_lock);
1867 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
1869 ASSERT(!"invalid arc state");
1873 /* a generic arc_done_func_t which you can use */
1876 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
1878 bcopy(buf->b_data, arg, buf->b_hdr->b_size);
1879 VERIFY(arc_buf_remove_ref(buf, arg) == 1);
1882 /* a generic arc_done_func_t which you can use */
1884 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
1886 arc_buf_t **bufp = arg;
1887 if (zio && zio->io_error) {
1888 VERIFY(arc_buf_remove_ref(buf, arg) == 1);
1896 arc_read_done(zio_t *zio)
1898 arc_buf_hdr_t *hdr, *found;
1900 arc_buf_t *abuf; /* buffer we're assigning to callback */
1901 kmutex_t *hash_lock;
1902 arc_callback_t *callback_list, *acb;
1903 int freeable = FALSE;
1905 buf = zio->io_private;
1909 * The hdr was inserted into hash-table and removed from lists
1910 * prior to starting I/O. We should find this header, since
1911 * it's in the hash table, and it should be legit since it's
1912 * not possible to evict it during the I/O. The only possible
1913 * reason for it not to be found is if we were freed during the
1914 * read.
1916 found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
1919 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
1920 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))));
1922 /* byteswap if necessary */
1923 callback_list = hdr->b_acb;
1924 ASSERT(callback_list != NULL);
1925 if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap)
1926 callback_list->acb_byteswap(buf->b_data, hdr->b_size);
1928 arc_cksum_compute(buf);
1930 /* create copies of the data buffer for the callers */
1932 for (acb = callback_list; acb; acb = acb->acb_next) {
1933 if (acb->acb_done) {
1935 abuf = arc_buf_clone(buf);
1936 acb->acb_buf = abuf;
1941 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
1942 ASSERT(!HDR_BUF_AVAILABLE(hdr));
1944 hdr->b_flags |= ARC_BUF_AVAILABLE;
1946 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
1948 if (zio->io_error != 0) {
1949 hdr->b_flags |= ARC_IO_ERROR;
1950 if (hdr->b_state != arc_anon)
1951 arc_change_state(arc_anon, hdr, hash_lock);
1952 if (HDR_IN_HASH_TABLE(hdr))
1953 buf_hash_remove(hdr);
1954 freeable = refcount_is_zero(&hdr->b_refcnt);
1955 /* convert checksum errors into IO errors */
1956 if (zio->io_error == ECKSUM)
1957 zio->io_error = EIO;
1961 * Broadcast before we drop the hash_lock to avoid the possibility
1962 * that the hdr (and hence the cv) might be freed before we get to
1963 * the cv_broadcast().
1965 cv_broadcast(&hdr->b_cv);
1969 * Only call arc_access on anonymous buffers. This is because
1970 * if we've issued an I/O for an evicted buffer, we've already
1971 * called arc_access (to prevent any simultaneous readers from
1972 * getting confused).
1974 if (zio->io_error == 0 && hdr->b_state == arc_anon)
1975 arc_access(hdr, hash_lock);
1976 mutex_exit(hash_lock);
1979 * This block was freed while we waited for the read to
1980 * complete. It has been removed from the hash table and
1981 * moved to the anonymous state (so that it won't show up
1982 * in the hash table).
1984 ASSERT3P(hdr->b_state, ==, arc_anon);
1985 freeable = refcount_is_zero(&hdr->b_refcnt);
1988 /* execute each callback and free its structure */
1989 while ((acb = callback_list) != NULL) {
1991 acb->acb_done(zio, acb->acb_buf, acb->acb_private);
1993 if (acb->acb_zio_dummy != NULL) {
1994 acb->acb_zio_dummy->io_error = zio->io_error;
1995 zio_nowait(acb->acb_zio_dummy);
1998 callback_list = acb->acb_next;
1999 kmem_free(acb, sizeof (arc_callback_t));
2003 arc_hdr_destroy(hdr);
2007 * "Read" the block block at the specified DVA (in bp) via the
2008 * cache. If the block is found in the cache, invoke the provided
2009 * callback immediately and return. Note that the `zio' parameter
2010 * in the callback will be NULL in this case, since no IO was
2011 * required. If the block is not in the cache pass the read request
2012 * on to the spa with a substitute callback function, so that the
2013 * requested block will be added to the cache.
2015 * If a read request arrives for a block that has a read in-progress,
2016 * either wait for the in-progress read to complete (and return the
2017 * results); or, if this is a read with a "done" func, add a record
2018 * to the read to invoke the "done" func when the read completes,
2019 * and return; or just return.
2021 * arc_read_done() will invoke all the requested "done" functions
2022 * for readers of this block.
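 *
 * An illustrative synchronous caller (hypothetical; the byteswap
 * func, bookmark and error handling are assumed):
 *
 *	arc_buf_t *abuf = NULL;
 *	uint32_t aflags = ARC_WAIT;
 *
 *	error = arc_read(NULL, spa, bp, swapfunc, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);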
2025 arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap,
2026 arc_done_func_t *done, void *private, int priority, int flags,
2027 uint32_t *arc_flags, zbookmark_t *zb)
2031 kmutex_t *hash_lock;
2035 hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
2036 if (hdr && hdr->b_datacnt > 0) {
2038 *arc_flags |= ARC_CACHED;
2040 if (HDR_IO_IN_PROGRESS(hdr)) {
2042 if (*arc_flags & ARC_WAIT) {
2043 cv_wait(&hdr->b_cv, hash_lock);
2044 mutex_exit(hash_lock);
2047 ASSERT(*arc_flags & ARC_NOWAIT);
2050 arc_callback_t *acb = NULL;
2052 acb = kmem_zalloc(sizeof (arc_callback_t),
2054 acb->acb_done = done;
2055 acb->acb_private = private;
2056 acb->acb_byteswap = swap;
2058 acb->acb_zio_dummy = zio_null(pio,
2059 spa, NULL, NULL, flags);
2061 ASSERT(acb->acb_done != NULL);
2062 acb->acb_next = hdr->b_acb;
2064 add_reference(hdr, hash_lock, private);
2065 mutex_exit(hash_lock);
2068 mutex_exit(hash_lock);
2072 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2075 add_reference(hdr, hash_lock, private);
2077 * If this block is already in use, create a new
2078 * copy of the data so that we will be guaranteed
2079 * that arc_release() will always succeed.
2083 ASSERT(buf->b_data);
2084 if (HDR_BUF_AVAILABLE(hdr)) {
2085 ASSERT(buf->b_efunc == NULL);
2086 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
2088 buf = arc_buf_clone(buf);
2090 } else if (*arc_flags & ARC_PREFETCH &&
2091 refcount_count(&hdr->b_refcnt) == 0) {
2092 hdr->b_flags |= ARC_PREFETCH;
2094 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
2095 arc_access(hdr, hash_lock);
2096 mutex_exit(hash_lock);
2097 ARCSTAT_BUMP(arcstat_hits);
2098 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2099 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2100 data, metadata, hits);
2103 done(NULL, buf, private);
2105 uint64_t size = BP_GET_LSIZE(bp);
2106 arc_callback_t *acb;
2109 /* this block is not in the cache */
2110 arc_buf_hdr_t *exists;
2111 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
2112 buf = arc_buf_alloc(spa, size, private, type);
2114 hdr->b_dva = *BP_IDENTITY(bp);
2115 hdr->b_birth = bp->blk_birth;
2116 hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
2117 exists = buf_hash_insert(hdr, &hash_lock);
2119 /* somebody beat us to the hash insert */
2120 mutex_exit(hash_lock);
2121 bzero(&hdr->b_dva, sizeof (dva_t));
2124 (void) arc_buf_remove_ref(buf, private);
2125 goto top; /* restart the IO request */
2127 /* if this is a prefetch, we don't have a reference */
2128 if (*arc_flags & ARC_PREFETCH) {
2129 (void) remove_reference(hdr, hash_lock,
2131 hdr->b_flags |= ARC_PREFETCH;
2133 if (BP_GET_LEVEL(bp) > 0)
2134 hdr->b_flags |= ARC_INDIRECT;
2136 /* this block is in the ghost cache */
2137 ASSERT(GHOST_STATE(hdr->b_state));
2138 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2139 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
2140 ASSERT(hdr->b_buf == NULL);
2142 /* if this is a prefetch, we don't have a reference */
2143 if (*arc_flags & ARC_PREFETCH)
2144 hdr->b_flags |= ARC_PREFETCH;
2146 add_reference(hdr, hash_lock, private);
2147 buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
2150 buf->b_efunc = NULL;
2151 buf->b_private = NULL;
2154 arc_get_data_buf(buf);
2155 ASSERT(hdr->b_datacnt == 0);
2160 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2161 acb->acb_done = done;
2162 acb->acb_private = private;
2163 acb->acb_byteswap = swap;
2165 ASSERT(hdr->b_acb == NULL);
2167 hdr->b_flags |= ARC_IO_IN_PROGRESS;
2170 * If the buffer has been evicted, migrate it to a present state
2171 * before issuing the I/O. Once we drop the hash-table lock,
2172 * the header will be marked as I/O in progress and have an
2173 * attached buffer. At this point, anybody who finds this
2174 * buffer ought to notice that it's legit but has a pending I/O.
2177 if (GHOST_STATE(hdr->b_state))
2178 arc_access(hdr, hash_lock);
2179 mutex_exit(hash_lock);
2181 ASSERT3U(hdr->b_size, ==, size);
2182 DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size,
2184 ARCSTAT_BUMP(arcstat_misses);
2185 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2186 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2187 data, metadata, misses);
2189 rzio = zio_read(pio, spa, bp, buf->b_data, size,
2190 arc_read_done, buf, priority, flags, zb);
2192 if (*arc_flags & ARC_WAIT)
2193 return (zio_wait(rzio));
2195 ASSERT(*arc_flags & ARC_NOWAIT);
2196 zio_nowait(rzio);
2198 return (0);
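/*
 * Illustrative sketch (not part of the original file): a blocking
 * read through arc_read() using ARC_WAIT.  On a cache hit the
 * callback runs with a NULL zio, as described above; read_done_cb,
 * bp and zb are hypothetical placeholders.
 *
 *	static void
 *	read_done_cb(zio_t *zio, arc_buf_t *buf, void *arg)
 *	{
 *		*(arc_buf_t **)arg = buf;
 *	}
 *
 *	uint32_t aflags = ARC_WAIT;
 *	arc_buf_t *abuf = NULL;
 *	int err = arc_read(NULL, spa, bp, byteswap_uint64_array,
 *	    read_done_cb, &abuf, ZIO_PRIORITY_SYNC_READ,
 *	    ZIO_FLAG_CANFAIL, &aflags, &zb);
 */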
2202 * arc_read() variant to support pool traversal. If the block is already
2203 * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
2204 * The idea is that we don't want pool traversal filling up memory, but
2205 * if the ARC already has the data anyway, we shouldn't pay for the I/O.
2208 arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
2214 hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);
2216 if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
2217 arc_buf_t *buf = hdr->b_buf;
2220 while (buf->b_data == NULL) {
2221 buf = buf->b_next;
2222 ASSERT(buf);
2224 bcopy(buf->b_data, data, hdr->b_size);
2230 mutex_exit(hash_mtx);
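/*
 * Illustrative sketch (not part of the original file): a pool-traversal
 * caller trying the ARC first and doing its own I/O on a miss, assuming
 * a non-zero return means the block was not copied;
 * traverse_read_from_disk is a hypothetical fallback.
 *
 *	void *data = kmem_alloc(BP_GET_LSIZE(bp), KM_SLEEP);
 *	if (arc_tryread(spa, bp, data) != 0)
 *		traverse_read_from_disk(bp, data);
 */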
2236 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
2238 ASSERT(buf->b_hdr != NULL);
2239 ASSERT(buf->b_hdr->b_state != arc_anon);
2240 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
2241 buf->b_efunc = func;
2242 buf->b_private = private;
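/*
 * Illustrative sketch (not part of the original file): registering an
 * eviction callback.  Judging from the VERIFY(buf->b_efunc(buf) == 0)
 * calls below, the callback receives the arc_buf_t and returns 0 on
 * success; my_evict_cb and my_state_t are hypothetical.
 *
 *	static int
 *	my_evict_cb(void *arg)
 *	{
 *		arc_buf_t *abuf = arg;
 *		my_state_t *st = abuf->b_private;
 *
 *		st->st_abuf = NULL;
 *		return (0);
 *	}
 *
 *	arc_set_callback(abuf, my_evict_cb, st);
 */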
2246 * This is used by the DMU to let the ARC know that a buffer is
2247 * being evicted, so the ARC should clean up. If this arc buf
2248 * is not yet in the evicted state, it will be put there.
2251 arc_buf_evict(arc_buf_t *buf)
2254 kmutex_t *hash_lock;
2257 mutex_enter(&arc_eviction_mtx);
2261 * We are in arc_do_user_evicts().
2263 ASSERT(buf->b_data == NULL);
2264 mutex_exit(&arc_eviction_mtx);
2267 hash_lock = HDR_LOCK(hdr);
2268 mutex_exit(&arc_eviction_mtx);
2270 mutex_enter(hash_lock);
2272 if (buf->b_data == NULL) {
2274 * We are on the eviction list.
2276 mutex_exit(hash_lock);
2277 mutex_enter(&arc_eviction_mtx);
2278 if (buf->b_hdr == NULL) {
2280 * We are already in arc_do_user_evicts().
2282 mutex_exit(&arc_eviction_mtx);
2285 arc_buf_t copy = *buf; /* structure assignment */
2287 * Process this buffer now
2288 * but let arc_do_user_evicts() do the reaping.
2290 buf->b_efunc = NULL;
2291 mutex_exit(&arc_eviction_mtx);
2292 VERIFY(copy.b_efunc(&copy) == 0);
2297 ASSERT(buf->b_hdr == hdr);
2298 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
2299 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2302 * Pull this buffer off of the hdr
2304 bufp = &hdr->b_buf;
2305 while (*bufp != buf)
2306 bufp = &(*bufp)->b_next;
2307 *bufp = buf->b_next;
2309 ASSERT(buf->b_data != NULL);
2310 arc_buf_destroy(buf, FALSE, FALSE);
2312 if (hdr->b_datacnt == 0) {
2313 arc_state_t *old_state = hdr->b_state;
2314 arc_state_t *evicted_state;
2316 ASSERT(refcount_is_zero(&hdr->b_refcnt));
2318 evicted_state =
2319 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
2321 mutex_enter(&old_state->arcs_mtx);
2322 mutex_enter(&evicted_state->arcs_mtx);
2324 arc_change_state(evicted_state, hdr, hash_lock);
2325 ASSERT(HDR_IN_HASH_TABLE(hdr));
2326 hdr->b_flags = ARC_IN_HASH_TABLE;
2328 mutex_exit(&evicted_state->arcs_mtx);
2329 mutex_exit(&old_state->arcs_mtx);
2331 mutex_exit(hash_lock);
2333 VERIFY(buf->b_efunc(buf) == 0);
2334 buf->b_efunc = NULL;
2335 buf->b_private = NULL;
2337 kmem_cache_free(buf_cache, buf);
2342 * Release this buffer from the cache. This must be done
2343 * after a read and prior to modifying the buffer contents.
2344 * If the buffer has more than one reference, we must
2345 * make a new hdr for the buffer.
2348 arc_release(arc_buf_t *buf, void *tag)
2350 arc_buf_hdr_t *hdr = buf->b_hdr;
2351 kmutex_t *hash_lock = HDR_LOCK(hdr);
2353 /* this buffer is not on any list */
2354 ASSERT(refcount_count(&hdr->b_refcnt) > 0);
2356 if (hdr->b_state == arc_anon) {
2357 /* this buffer is already released */
2358 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
2359 ASSERT(BUF_EMPTY(hdr));
2360 ASSERT(buf->b_efunc == NULL);
2365 mutex_enter(hash_lock);
2368 * Do we have more than one buf?
2370 if (hdr->b_buf != buf || buf->b_next != NULL) {
2371 arc_buf_hdr_t *nhdr;
2373 uint64_t blksz = hdr->b_size;
2374 spa_t *spa = hdr->b_spa;
2375 arc_buf_contents_t type = hdr->b_type;
2377 ASSERT(hdr->b_datacnt > 1);
2379 * Pull the data off of this buf and attach it to
2380 * a new anonymous buf.
2382 (void) remove_reference(hdr, hash_lock, tag);
2383 bufp = &hdr->b_buf;
2384 while (*bufp != buf)
2385 bufp = &(*bufp)->b_next;
2386 *bufp = (*bufp)->b_next;
2389 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
2390 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
2391 if (refcount_is_zero(&hdr->b_refcnt)) {
2392 ASSERT3U(hdr->b_state->arcs_lsize, >=, hdr->b_size);
2393 atomic_add_64(&hdr->b_state->arcs_lsize, -hdr->b_size);
2395 hdr->b_datacnt -= 1;
2396 arc_cksum_verify(buf);
2398 mutex_exit(hash_lock);
2400 nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
2401 nhdr->b_size = blksz;
2403 nhdr->b_type = type;
2405 nhdr->b_state = arc_anon;
2406 nhdr->b_arc_access = 0;
2408 nhdr->b_datacnt = 1;
2409 nhdr->b_freeze_cksum = NULL;
2410 mutex_init(&nhdr->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
2411 (void) refcount_add(&nhdr->b_refcnt, tag);
2413 atomic_add_64(&arc_anon->arcs_size, blksz);
2417 ASSERT(refcount_count(&hdr->b_refcnt) == 1);
2418 ASSERT(!list_link_active(&hdr->b_arc_node));
2419 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2420 arc_change_state(arc_anon, hdr, hash_lock);
2421 hdr->b_arc_access = 0;
2422 mutex_exit(hash_lock);
2423 bzero(&hdr->b_dva, sizeof (dva_t));
2428 buf->b_efunc = NULL;
2429 buf->b_private = NULL;
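/*
 * Illustrative sketch (not part of the original file): the intended
 * calling pattern, releasing after a read and before modifying the
 * buffer contents; abuf, tag and new_data are hypothetical.
 *
 *	arc_release(abuf, tag);
 *	ASSERT(arc_released(abuf));
 *	bcopy(new_data, abuf->b_data, abuf->b_hdr->b_size);
 */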
2433 arc_released(arc_buf_t *buf)
2435 return (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
2439 arc_has_callback(arc_buf_t *buf)
2441 return (buf->b_efunc != NULL);
2446 arc_referenced(arc_buf_t *buf)
2448 return (refcount_count(&buf->b_hdr->b_refcnt));
2453 arc_write_ready(zio_t *zio)
2455 arc_write_callback_t *callback = zio->io_private;
2456 arc_buf_t *buf = callback->awcb_buf;
2458 if (callback->awcb_ready) {
2459 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
2460 callback->awcb_ready(zio, buf, callback->awcb_private);
2462 arc_cksum_compute(buf);
2466 arc_write_done(zio_t *zio)
2468 arc_write_callback_t *callback = zio->io_private;
2469 arc_buf_t *buf = callback->awcb_buf;
2470 arc_buf_hdr_t *hdr = buf->b_hdr;
2474 /* this buffer is on no lists and is not in the hash table */
2475 ASSERT3P(hdr->b_state, ==, arc_anon);
2477 hdr->b_dva = *BP_IDENTITY(zio->io_bp);
2478 hdr->b_birth = zio->io_bp->blk_birth;
2479 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
2481 * If the block to be written was all-zero, we may have
2482 * compressed it away. In this case no write was performed,
2483 * so there will be no dva/birth-date/checksum. The buffer
2484 * must therefore remain anonymous (and uncached).
2486 if (!BUF_EMPTY(hdr)) {
2487 arc_buf_hdr_t *exists;
2488 kmutex_t *hash_lock;
2490 arc_cksum_verify(buf);
2492 exists = buf_hash_insert(hdr, &hash_lock);
2495 * This can only happen if we overwrite for
2496 * sync-to-convergence, because we remove
2497 * buffers from the hash table when we arc_free().
2499 ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
2500 BP_IDENTITY(zio->io_bp)));
2501 ASSERT3U(zio->io_bp_orig.blk_birth, ==,
2502 zio->io_bp->blk_birth);
2504 ASSERT(refcount_is_zero(&exists->b_refcnt));
2505 arc_change_state(arc_anon, exists, hash_lock);
2506 mutex_exit(hash_lock);
2507 arc_hdr_destroy(exists);
2508 exists = buf_hash_insert(hdr, &hash_lock);
2509 ASSERT3P(exists, ==, NULL);
2511 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2512 arc_access(hdr, hash_lock);
2513 mutex_exit(hash_lock);
2514 } else if (callback->awcb_done == NULL) {
2517 * This is an anonymous buffer with no user callback,
2518 * destroy it if there are no active references.
2520 mutex_enter(&arc_eviction_mtx);
2521 destroy_hdr = refcount_is_zero(&hdr->b_refcnt);
2522 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2523 mutex_exit(&arc_eviction_mtx);
2525 arc_hdr_destroy(hdr);
2527 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2530 if (callback->awcb_done) {
2531 ASSERT(!refcount_is_zero(&hdr->b_refcnt));
2532 callback->awcb_done(zio, buf, callback->awcb_private);
2535 kmem_free(callback, sizeof (arc_write_callback_t));
2539 arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
2540 uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
2541 arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority,
2542 int flags, zbookmark_t *zb)
2544 arc_buf_hdr_t *hdr = buf->b_hdr;
2545 arc_write_callback_t *callback;
2548 /* this is a private buffer - no locking required */
2549 ASSERT3P(hdr->b_state, ==, arc_anon);
2550 ASSERT(BUF_EMPTY(hdr));
2551 ASSERT(!HDR_IO_ERROR(hdr));
2552 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
2553 ASSERT(hdr->b_acb == 0);
2554 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
2555 callback->awcb_ready = ready;
2556 callback->awcb_done = done;
2557 callback->awcb_private = private;
2558 callback->awcb_buf = buf;
2559 hdr->b_flags |= ARC_IO_IN_PROGRESS;
2560 zio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp,
2561 buf->b_data, hdr->b_size, arc_write_ready, arc_write_done, callback,
2562 priority, flags, zb);
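/*
 * Illustrative sketch (not part of the original file): writing an
 * anonymous buffer and waiting for the I/O.  The ready callback may
 * be NULL; the checksum/compress choices, write_done_cb and cb_arg
 * are hypothetical.
 *
 *	zio_t *wzio = arc_write(pio, spa, ZIO_CHECKSUM_FLETCHER_2,
 *	    ZIO_COMPRESS_LZJB, 1, txg, bp, abuf, NULL, write_done_cb,
 *	    cb_arg, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, &zb);
 *	error = zio_wait(wzio);
 */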
2568 arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
2569 zio_done_func_t *done, void *private, uint32_t arc_flags)
2572 kmutex_t *hash_lock;
2576 * If this buffer is in the cache, release it, so it
2577 * can be re-used.
2579 ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
2582 * The checksum of blocks to free is not always
2583 * preserved (e.g., on the deadlist). However, if it is
2584 * nonzero, it should match what we have in the cache.
2586 ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
2587 ab->b_cksum0 == bp->blk_cksum.zc_word[0]);
2588 if (ab->b_state != arc_anon)
2589 arc_change_state(arc_anon, ab, hash_lock);
2590 if (HDR_IO_IN_PROGRESS(ab)) {
2592 * This should only happen when we prefetch.
2594 ASSERT(ab->b_flags & ARC_PREFETCH);
2595 ASSERT3U(ab->b_datacnt, ==, 1);
2596 ab->b_flags |= ARC_FREED_IN_READ;
2597 if (HDR_IN_HASH_TABLE(ab))
2598 buf_hash_remove(ab);
2599 ab->b_arc_access = 0;
2600 bzero(&ab->b_dva, sizeof (dva_t));
2603 ab->b_buf->b_efunc = NULL;
2604 ab->b_buf->b_private = NULL;
2605 mutex_exit(hash_lock);
2606 } else if (refcount_is_zero(&ab->b_refcnt)) {
2607 mutex_exit(hash_lock);
2608 arc_hdr_destroy(ab);
2609 ARCSTAT_BUMP(arcstat_deleted);
2612 * We still have an active reference on this
2613 * buffer. This can happen, e.g., from
2614 * dbuf_unoverride().
2616 ASSERT(!HDR_IN_HASH_TABLE(ab));
2617 ab->b_arc_access = 0;
2618 bzero(&ab->b_dva, sizeof (dva_t));
2621 ab->b_buf->b_efunc = NULL;
2622 ab->b_buf->b_private = NULL;
2623 mutex_exit(hash_lock);
2627 zio = zio_free(pio, spa, txg, bp, done, private);
2629 if (arc_flags & ARC_WAIT)
2630 return (zio_wait(zio));
2632 ASSERT(arc_flags & ARC_NOWAIT);
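/*
 * Illustrative sketch (not part of the original file): freeing a
 * block synchronously so that any cached copy is dropped before the
 * space is reclaimed.
 *
 *	error = arc_free(NULL, spa, txg, bp, NULL, NULL, ARC_WAIT);
 */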
2639 arc_tempreserve_clear(uint64_t tempreserve)
2641 atomic_add_64(&arc_tempreserve, -tempreserve);
2642 ASSERT((int64_t)arc_tempreserve >= 0);
2646 arc_tempreserve_space(uint64_t tempreserve)
2650 * Once in a while, fail for no reason. Everything should cope.
2652 if (spa_get_random(10000) == 0) {
2653 dprintf("forcing random failure\n");
2654 return (ERESTART);
2657 if (tempreserve > arc_c/4 && !arc_no_grow)
2658 arc_c = MIN(arc_c_max, tempreserve * 4);
2659 if (tempreserve > arc_c)
2660 return (ENOMEM);
2663 * Throttle writes when the amount of dirty data in the cache
2664 * gets too large. We try to keep the cache less than half full
2665 * of dirty blocks so that our sync times don't grow too large.
2666 * Note: if two requests come in concurrently, we might let them
2667 * both succeed, when one of them should fail. Not a huge deal.
2669 * XXX The limit should be adjusted dynamically to keep the time
2670 * to sync a dataset fixed (around 1-5 seconds?).
2673 if (tempreserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 &&
2674 arc_tempreserve + arc_anon->arcs_size > arc_c / 4) {
2675 dprintf("failing, arc_tempreserve=%lluK anon=%lluK "
2676 "tempreserve=%lluK arc_c=%lluK\n",
2677 arc_tempreserve>>10, arc_anon->arcs_lsize>>10,
2678 tempreserve>>10, arc_c>>10);
2679 return (ERESTART);
2681 atomic_add_64(&arc_tempreserve, tempreserve);
2682 return (0);
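/*
 * Illustrative sketch (not part of the original file): the expected
 * reserve/clear pairing around dirtying data, assuming a 0 return
 * means the reservation was granted; dirty_anonymous_buffers is a
 * hypothetical stand-in for the caller's work.
 *
 *	error = arc_tempreserve_space(space);
 *	if (error == 0) {
 *		dirty_anonymous_buffers(space);
 *		arc_tempreserve_clear(space);
 *	}
 */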
2685 static kmutex_t arc_lowmem_lock;
2687 static eventhandler_tag arc_event_lowmem = NULL;
2690 arc_lowmem(void *arg __unused, int howto __unused)
2693 /* Serialize access via arc_lowmem_lock. */
2694 mutex_enter(&arc_lowmem_lock);
2696 cv_signal(&arc_reclaim_thr_cv);
2697 while (zfs_needfree)
2698 tsleep(&zfs_needfree, 0, "zfs:lowmem", hz / 5);
2699 mutex_exit(&arc_lowmem_lock);
2706 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
2707 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
2708 mutex_init(&arc_lowmem_lock, NULL, MUTEX_DEFAULT, NULL);
2710 /* Convert seconds to clock ticks */
2711 arc_min_prefetch_lifespan = 1 * hz;
2713 /* Start out with 1/8 of all memory */
2714 arc_c = kmem_size() / 8;
2718 * On architectures where the physical memory can be larger
2719 * than the addressable space (Intel in 32-bit mode), we may
2720 * need to limit the cache to 1/8 of VM size.
2722 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
2725 /* set min cache to 1/32 of all memory, or 16MB, whichever is more */
2726 arc_c_min = MAX(arc_c / 4, 64<<18);
2727 /* set max to 1/2 of all memory, or all but 1GB, whichever is more */
2728 if (arc_c * 8 >= 1<<30)
2729 arc_c_max = (arc_c * 8) - (1<<30);
2730 else
2731 arc_c_max = arc_c_min;
2732 arc_c_max = MAX(arc_c * 6, arc_c_max);
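/*
 * Worked example (added for illustration): with kmem_size() == 4GB,
 * arc_c starts at 512MB; arc_c_min = MAX(128MB, 16MB) = 128MB; and
 * since arc_c * 8 = 4GB >= 1GB, arc_c_max = 4GB - 1GB = 3GB, which
 * the final MAX(arc_c * 6, arc_c_max) leaves at 3GB.
 */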
2735 * Allow the tunables to override our calculations if they are
2736 * reasonable (i.e., over 16MB).
2738 if (zfs_arc_max >= 64<<18 && zfs_arc_max < kmem_size())
2739 arc_c_max = zfs_arc_max;
2740 if (zfs_arc_min >= 64<<18 && zfs_arc_min <= arc_c_max)
2741 arc_c_min = zfs_arc_min;
2744 arc_p = (arc_c >> 1);
2746 /* if kmem_flags are set, let's try to use less memory */
2747 if (kmem_debugging())
2749 if (arc_c < arc_c_min)
2752 zfs_arc_min = arc_c_min;
2753 zfs_arc_max = arc_c_max;
2755 arc_anon = &ARC_anon;
2757 arc_mru_ghost = &ARC_mru_ghost;
2759 arc_mfu_ghost = &ARC_mfu_ghost;
2762 mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
2763 mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
2764 mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
2765 mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
2766 mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
2768 list_create(&arc_mru->arcs_list, sizeof (arc_buf_hdr_t),
2769 offsetof(arc_buf_hdr_t, b_arc_node));
2770 list_create(&arc_mru_ghost->arcs_list, sizeof (arc_buf_hdr_t),
2771 offsetof(arc_buf_hdr_t, b_arc_node));
2772 list_create(&arc_mfu->arcs_list, sizeof (arc_buf_hdr_t),
2773 offsetof(arc_buf_hdr_t, b_arc_node));
2774 list_create(&arc_mfu_ghost->arcs_list, sizeof (arc_buf_hdr_t),
2775 offsetof(arc_buf_hdr_t, b_arc_node));
2779 arc_thread_exit = 0;
2780 arc_eviction_list = NULL;
2781 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
2782 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
2784 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
2785 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
2787 if (arc_ksp != NULL) {
2788 arc_ksp->ks_data = &arc_stats;
2789 kstat_install(arc_ksp);
2792 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
2793 TS_RUN, minclsyspri);
2796 arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL,
2797 EVENTHANDLER_PRI_FIRST);
2803 /* Warn about ZFS memory requirements. */
2804 if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) {
2805 printf("ZFS WARNING: Recommended minimum RAM size is 512MB; "
2806 "expect unstable behavior.\n");
2807 } else if (kmem_size() < 256 * (1 << 20)) {
2808 printf("ZFS WARNING: Recommended minimum kmem_size is 256MB; "
2809 "expect unstable behavior.\n");
2810 printf(" Consider tuning vm.kmem_size or "
2811 "vm.kmem_size_min\n");
2812 printf(" in /boot/loader.conf.\n");
2820 mutex_enter(&arc_reclaim_thr_lock);
2821 arc_thread_exit = 1;
2822 cv_signal(&arc_reclaim_thr_cv);
2823 while (arc_thread_exit != 0)
2824 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
2825 mutex_exit(&arc_reclaim_thr_lock);
2831 if (arc_ksp != NULL) {
2832 kstat_delete(arc_ksp);
2836 mutex_destroy(&arc_eviction_mtx);
2837 mutex_destroy(&arc_reclaim_thr_lock);
2838 cv_destroy(&arc_reclaim_thr_cv);
2840 list_destroy(&arc_mru->arcs_list);
2841 list_destroy(&arc_mru_ghost->arcs_list);
2842 list_destroy(&arc_mfu->arcs_list);
2843 list_destroy(&arc_mfu_ghost->arcs_list);
2845 mutex_destroy(&arc_anon->arcs_mtx);
2846 mutex_destroy(&arc_mru->arcs_mtx);
2847 mutex_destroy(&arc_mru_ghost->arcs_mtx);
2848 mutex_destroy(&arc_mfu->arcs_mtx);
2849 mutex_destroy(&arc_mfu_ghost->arcs_mtx);
2853 mutex_destroy(&arc_lowmem_lock);
2855 if (arc_event_lowmem != NULL)
2856 EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem);