4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
24 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
25 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
29 * DVA-based Adjustable Replacement Cache
31 * While much of the theory of operation used here is
32 * based on the self-tuning, low overhead replacement cache
33 * presented by Megiddo and Modha at FAST 2003, there are some
34 * significant differences:
36 * 1. The Megiddo and Modha model assumes any page is evictable.
37 * Pages in its cache cannot be "locked" into memory. This makes
38 * the eviction algorithm simple: evict the last page in the list.
39 * This also makes the performance characteristics easy to reason
40 * about. Our cache is not so simple. At any given moment, some
41 * subset of the blocks in the cache are un-evictable because we
42 * have handed out a reference to them. Blocks are only evictable
43 * when there are no external references active. This makes
44 * eviction far more problematic: we choose to evict the evictable
45 * blocks that are the "lowest" in the list.
47 * There are times when it is not possible to evict the requested
48 * space. In these circumstances we are unable to adjust the cache
49 * size. To prevent the cache from growing unbounded at these times, we
50 * implement a "cache throttle" that slows the flow of new data
51 * into the cache until we can make space available.
53 * 2. The Megiddo and Modha model assumes a fixed cache size.
54 * Pages are evicted when the cache is full and there is a cache
55 * miss. Our model has a variable sized cache. It grows with
56 * high use, but also tries to react to memory pressure from the
57 * operating system: decreasing its size when system memory is tight.
60 * 3. The Megiddo and Modha model assumes a fixed page size. All
61 * elements of the cache are therefore exactly the same size. So
62 * when adjusting the cache size following a cache miss, it's simply
63 * a matter of choosing a single page to evict. In our model, we
64 * have variable sized cache blocks (ranging from 512 bytes to
65 * 128K bytes). We therefore choose a set of blocks to evict to make
66 * space for a cache miss that approximates as closely as possible
67 * the space used by the new block.
69 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
70 * by N. Megiddo & D. Modha, FAST 2003
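 *
 * As an informal sketch of difference (3) above (illustration only, not
 * the actual code; the loop variables here are made up), eviction with
 * variable sized blocks becomes a loop rather than a single removal:
 *
 *	wanted = size of the block that missed;
 *	freed = 0;
 *	for (ab = tail of list; ab != NULL && freed < wanted; ab = previous) {
 *		if (ab is referenced)
 *			continue;		(un-evictable, see (1) above)
 *		freed += ab->b_size;		(evict ab)
 *	}
 *
 * The real logic lives in arc_evict() below, which additionally skips
 * buffers whose hash lock it cannot acquire and keeps data and metadata
 * on separate lists.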
76 * A new reference to a cache buffer can be obtained in two
77 * ways: 1) via a hash table lookup using the DVA as a key,
78 * or 2) via one of the ARC lists. The arc_read() interface
79 * uses method 1, while the internal arc algorithms for
80 * adjusting the cache use method 2. We therefore provide two
81 * types of locks: 1) the hash table lock array, and 2) the arc list locks.
84 * Buffers do not have their own mutexes; rather, they rely on the
85 * hash table mutexes for the bulk of their protection (i.e. most
86 * fields in the arc_buf_hdr_t are protected by these mutexes).
88 * buf_hash_find() returns the appropriate mutex (held) when it
89 * locates the requested buffer in the hash table. It returns
90 * NULL for the mutex if the buffer was not in the table.
92 * buf_hash_remove() expects the appropriate hash mutex to be
93 * already held before it is invoked.
95 * Each arc state also has a mutex which is used to protect the
96 * buffer list associated with the state. When attempting to
97 * obtain a hash table lock while holding an arc list lock, you
98 * must use mutex_tryenter() to avoid deadlock. Also note that
99 * the active state mutex must be held before the ghost state mutex.
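 *
 * A minimal sketch of the lookup pattern described above (illustrative
 * only; "hdr" and "hash_lock" are simply local variable names):
 *
 *	hdr = buf_hash_find(spa, bp, &hash_lock);
 *	if (hdr != NULL) {
 *		... hash_lock is held, hdr may be inspected ...
 *		mutex_exit(hash_lock);
 *	}
 *
 * If an arc list lock is already held, the hash lock must instead be
 * taken with mutex_tryenter(), as noted above.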
101 * Arc buffers may have an associated eviction callback function.
102 * This function will be invoked prior to removing the buffer (e.g.
103 * in arc_do_user_evicts()). Note however that the data associated
104 * with the buffer may be evicted prior to the callback. The callback
105 * must be made with *no locks held* (to prevent deadlock). Additionally,
106 * the users of callbacks must ensure that their private data is
107 * protected from simultaneous callbacks from arc_clear_callback()
108 * and arc_do_user_evicts().
110 * Note that the majority of the performance stats are manipulated
111 * with atomic operations.
113 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
115 * - L2ARC buflist creation
116 * - L2ARC buflist eviction
117 * - L2ARC write completion, which walks L2ARC buflists
118 * - ARC header destruction, as it removes from L2ARC buflists
119 * - ARC header release, as it removes from L2ARC buflists
124 #include <sys/zio_compress.h>
125 #include <sys/zfs_context.h>
127 #include <sys/refcount.h>
128 #include <sys/vdev.h>
129 #include <sys/vdev_impl.h>
130 #include <sys/dsl_pool.h>
132 #include <sys/dnlc.h>
134 #include <sys/callb.h>
135 #include <sys/kstat.h>
136 #include <sys/trim_map.h>
137 #include <zfs_fletcher.h>
140 #include <vm/vm_pageout.h>
141 #include <machine/vmparam.h>
145 /* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
146 boolean_t arc_watch = B_FALSE;
151 static kmutex_t arc_reclaim_thr_lock;
152 static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */
153 static uint8_t arc_thread_exit;
155 #define ARC_REDUCE_DNLC_PERCENT 3
156 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
158 typedef enum arc_reclaim_strategy {
159 ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */
160 ARC_RECLAIM_CONS /* Conservative reclaim strategy */
161 } arc_reclaim_strategy_t;
164 * The number of iterations through arc_evict_*() before we
165 * drop & reacquire the lock.
167 int arc_evict_iterations = 100;
169 /* number of seconds before growing cache again */
170 static int arc_grow_retry = 60;
172 /* shift of arc_c for calculating both min and max arc_p */
173 static int arc_p_min_shift = 4;
175 /* log2(fraction of arc to reclaim) */
176 static int arc_shrink_shift = 5;
179 * minimum lifespan of a prefetch block in clock ticks
180 * (initialized in arc_init())
182 static int arc_min_prefetch_lifespan;
185 * If this percent of memory is free, don't throttle.
187 int arc_lotsfree_percent = 10;
190 extern int zfs_prefetch_disable;
193 * The arc has filled available memory and has now warmed up.
195 static boolean_t arc_warm;
197 uint64_t zfs_arc_max;
198 uint64_t zfs_arc_min;
199 uint64_t zfs_arc_meta_limit = 0;
200 int zfs_arc_grow_retry = 0;
201 int zfs_arc_shrink_shift = 0;
202 int zfs_arc_p_min_shift = 0;
203 int zfs_disable_dup_eviction = 0;
204 uint64_t zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
205 u_int zfs_arc_free_target = 0;
207 static int sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS);
211 arc_free_target_init(void *unused __unused)
214 zfs_arc_free_target = vm_pageout_wakeup_thresh;
216 SYSINIT(arc_free_target_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY,
217 arc_free_target_init, NULL);
219 TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
220 TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
221 TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
222 TUNABLE_QUAD("vfs.zfs.arc_average_blocksize", &zfs_arc_average_blocksize);
223 TUNABLE_INT("vfs.zfs.arc_shrink_shift", &zfs_arc_shrink_shift);
224 SYSCTL_DECL(_vfs_zfs);
225 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,
227 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RDTUN, &zfs_arc_min, 0,
229 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_average_blocksize, CTLFLAG_RDTUN,
230 &zfs_arc_average_blocksize, 0,
231 "ARC average blocksize");
232 SYSCTL_INT(_vfs_zfs, OID_AUTO, arc_shrink_shift, CTLFLAG_RW,
233 &arc_shrink_shift, 0,
234 "log2(fraction of arc to reclaim)");
237 * We don't have a tunable for arc_free_target due to the dependency on
238 * pagedaemon initialisation.
240 SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
241 CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(u_int),
242 sysctl_vfs_zfs_arc_free_target, "IU",
243 "Desired number of free pages below which ARC triggers reclaim");
246 sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS)
251 val = zfs_arc_free_target;
252 err = sysctl_handle_int(oidp, &val, 0, req);
253 if (err != 0 || req->newptr == NULL)
258 if (val > cnt.v_page_count)
261 zfs_arc_free_target = val;
268 * Note that buffers can be in one of 6 states:
269 * ARC_anon - anonymous (discussed below)
270 * ARC_mru - recently used, currently cached
271 * ARC_mru_ghost - recently used, no longer in cache
272 * ARC_mfu - frequently used, currently cached
273 * ARC_mfu_ghost - frequently used, no longer in cache
274 * ARC_l2c_only - exists in L2ARC but not other states
275 * When there are no active references to the buffer, they are
276 * linked onto a list in one of these arc states. These are
277 * the only buffers that can be evicted or deleted. Within each
278 * state there are multiple lists, one for meta-data and one for
279 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
280 * etc.) is tracked separately so that it can be managed more
281 * explicitly: favored over data, limited explicitly.
283 * Anonymous buffers are buffers that are not associated with
284 * a DVA. These are buffers that hold dirty block copies
285 * before they are written to stable storage. By definition,
286 * they are "ref'd" and are considered part of arc_mru
287 * that cannot be freed. Generally, they will acquire a DVA
288 * as they are written and migrate onto the arc_mru list.
290 * The ARC_l2c_only state is for buffers that are in the second
291 * level ARC but no longer in any of the ARC_m* lists. The second
292 * level ARC itself may also contain buffers that are in any of
293 * the ARC_m* states - meaning that a buffer can exist in two
294 * places. The reason for the ARC_l2c_only state is to keep the
295 * buffer header in the hash table, so that reads that hit the
296 * second level ARC benefit from these fast lookups.
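 *
 * Informally, a common buffer lifecycle under this scheme (a sketch,
 * not an exhaustive state diagram) is:
 *
 *	ARC_anon --(written, DVA assigned)--> ARC_mru
 *	ARC_mru --(accessed again)--> ARC_mfu
 *	ARC_mru / ARC_mfu --(evicted)--> ARC_mru_ghost / ARC_mfu_ghost
 *	any of the above --(only the L2ARC copy remains)--> ARC_l2c_only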
299 #define ARCS_LOCK_PAD CACHE_LINE_SIZE
303 unsigned char pad[(ARCS_LOCK_PAD - sizeof (kmutex_t))];
308 * must be a power of two for mask use to work
311 #define ARC_BUFC_NUMDATALISTS 16
312 #define ARC_BUFC_NUMMETADATALISTS 16
313 #define ARC_BUFC_NUMLISTS (ARC_BUFC_NUMMETADATALISTS + ARC_BUFC_NUMDATALISTS)
315 typedef struct arc_state {
316 uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */
317 uint64_t arcs_size; /* total amount of data in this state */
318 list_t arcs_lists[ARC_BUFC_NUMLISTS]; /* list of evictable buffers */
319 struct arcs_lock arcs_locks[ARC_BUFC_NUMLISTS] __aligned(CACHE_LINE_SIZE);
322 #define ARCS_LOCK(s, i) (&((s)->arcs_locks[(i)].arcs_lock))
325 static arc_state_t ARC_anon;
326 static arc_state_t ARC_mru;
327 static arc_state_t ARC_mru_ghost;
328 static arc_state_t ARC_mfu;
329 static arc_state_t ARC_mfu_ghost;
330 static arc_state_t ARC_l2c_only;
332 typedef struct arc_stats {
333 kstat_named_t arcstat_hits;
334 kstat_named_t arcstat_misses;
335 kstat_named_t arcstat_demand_data_hits;
336 kstat_named_t arcstat_demand_data_misses;
337 kstat_named_t arcstat_demand_metadata_hits;
338 kstat_named_t arcstat_demand_metadata_misses;
339 kstat_named_t arcstat_prefetch_data_hits;
340 kstat_named_t arcstat_prefetch_data_misses;
341 kstat_named_t arcstat_prefetch_metadata_hits;
342 kstat_named_t arcstat_prefetch_metadata_misses;
343 kstat_named_t arcstat_mru_hits;
344 kstat_named_t arcstat_mru_ghost_hits;
345 kstat_named_t arcstat_mfu_hits;
346 kstat_named_t arcstat_mfu_ghost_hits;
347 kstat_named_t arcstat_allocated;
348 kstat_named_t arcstat_deleted;
349 kstat_named_t arcstat_stolen;
350 kstat_named_t arcstat_recycle_miss;
352 * Number of buffers that could not be evicted because the hash lock
353 * was held by another thread. The lock may not necessarily be held
354 * by something using the same buffer, since hash locks are shared
355 * by multiple buffers.
357 kstat_named_t arcstat_mutex_miss;
359 * Number of buffers skipped because they have I/O in progress, are
360 * indirect prefetch buffers that have not lived long enough, or are
361 * not from the spa we're trying to evict from.
363 kstat_named_t arcstat_evict_skip;
364 kstat_named_t arcstat_evict_l2_cached;
365 kstat_named_t arcstat_evict_l2_eligible;
366 kstat_named_t arcstat_evict_l2_ineligible;
367 kstat_named_t arcstat_hash_elements;
368 kstat_named_t arcstat_hash_elements_max;
369 kstat_named_t arcstat_hash_collisions;
370 kstat_named_t arcstat_hash_chains;
371 kstat_named_t arcstat_hash_chain_max;
372 kstat_named_t arcstat_p;
373 kstat_named_t arcstat_c;
374 kstat_named_t arcstat_c_min;
375 kstat_named_t arcstat_c_max;
376 kstat_named_t arcstat_size;
377 kstat_named_t arcstat_hdr_size;
378 kstat_named_t arcstat_data_size;
379 kstat_named_t arcstat_other_size;
380 kstat_named_t arcstat_l2_hits;
381 kstat_named_t arcstat_l2_misses;
382 kstat_named_t arcstat_l2_feeds;
383 kstat_named_t arcstat_l2_rw_clash;
384 kstat_named_t arcstat_l2_read_bytes;
385 kstat_named_t arcstat_l2_write_bytes;
386 kstat_named_t arcstat_l2_writes_sent;
387 kstat_named_t arcstat_l2_writes_done;
388 kstat_named_t arcstat_l2_writes_error;
389 kstat_named_t arcstat_l2_writes_hdr_miss;
390 kstat_named_t arcstat_l2_evict_lock_retry;
391 kstat_named_t arcstat_l2_evict_reading;
392 kstat_named_t arcstat_l2_free_on_write;
393 kstat_named_t arcstat_l2_cdata_free_on_write;
394 kstat_named_t arcstat_l2_abort_lowmem;
395 kstat_named_t arcstat_l2_cksum_bad;
396 kstat_named_t arcstat_l2_io_error;
397 kstat_named_t arcstat_l2_size;
398 kstat_named_t arcstat_l2_asize;
399 kstat_named_t arcstat_l2_hdr_size;
400 kstat_named_t arcstat_l2_compress_successes;
401 kstat_named_t arcstat_l2_compress_zeros;
402 kstat_named_t arcstat_l2_compress_failures;
403 kstat_named_t arcstat_l2_write_trylock_fail;
404 kstat_named_t arcstat_l2_write_passed_headroom;
405 kstat_named_t arcstat_l2_write_spa_mismatch;
406 kstat_named_t arcstat_l2_write_in_l2;
407 kstat_named_t arcstat_l2_write_hdr_io_in_progress;
408 kstat_named_t arcstat_l2_write_not_cacheable;
409 kstat_named_t arcstat_l2_write_full;
410 kstat_named_t arcstat_l2_write_buffer_iter;
411 kstat_named_t arcstat_l2_write_pios;
412 kstat_named_t arcstat_l2_write_buffer_bytes_scanned;
413 kstat_named_t arcstat_l2_write_buffer_list_iter;
414 kstat_named_t arcstat_l2_write_buffer_list_null_iter;
415 kstat_named_t arcstat_memory_throttle_count;
416 kstat_named_t arcstat_duplicate_buffers;
417 kstat_named_t arcstat_duplicate_buffers_size;
418 kstat_named_t arcstat_duplicate_reads;
421 static arc_stats_t arc_stats = {
422 { "hits", KSTAT_DATA_UINT64 },
423 { "misses", KSTAT_DATA_UINT64 },
424 { "demand_data_hits", KSTAT_DATA_UINT64 },
425 { "demand_data_misses", KSTAT_DATA_UINT64 },
426 { "demand_metadata_hits", KSTAT_DATA_UINT64 },
427 { "demand_metadata_misses", KSTAT_DATA_UINT64 },
428 { "prefetch_data_hits", KSTAT_DATA_UINT64 },
429 { "prefetch_data_misses", KSTAT_DATA_UINT64 },
430 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
431 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
432 { "mru_hits", KSTAT_DATA_UINT64 },
433 { "mru_ghost_hits", KSTAT_DATA_UINT64 },
434 { "mfu_hits", KSTAT_DATA_UINT64 },
435 { "mfu_ghost_hits", KSTAT_DATA_UINT64 },
436 { "allocated", KSTAT_DATA_UINT64 },
437 { "deleted", KSTAT_DATA_UINT64 },
438 { "stolen", KSTAT_DATA_UINT64 },
439 { "recycle_miss", KSTAT_DATA_UINT64 },
440 { "mutex_miss", KSTAT_DATA_UINT64 },
441 { "evict_skip", KSTAT_DATA_UINT64 },
442 { "evict_l2_cached", KSTAT_DATA_UINT64 },
443 { "evict_l2_eligible", KSTAT_DATA_UINT64 },
444 { "evict_l2_ineligible", KSTAT_DATA_UINT64 },
445 { "hash_elements", KSTAT_DATA_UINT64 },
446 { "hash_elements_max", KSTAT_DATA_UINT64 },
447 { "hash_collisions", KSTAT_DATA_UINT64 },
448 { "hash_chains", KSTAT_DATA_UINT64 },
449 { "hash_chain_max", KSTAT_DATA_UINT64 },
450 { "p", KSTAT_DATA_UINT64 },
451 { "c", KSTAT_DATA_UINT64 },
452 { "c_min", KSTAT_DATA_UINT64 },
453 { "c_max", KSTAT_DATA_UINT64 },
454 { "size", KSTAT_DATA_UINT64 },
455 { "hdr_size", KSTAT_DATA_UINT64 },
456 { "data_size", KSTAT_DATA_UINT64 },
457 { "other_size", KSTAT_DATA_UINT64 },
458 { "l2_hits", KSTAT_DATA_UINT64 },
459 { "l2_misses", KSTAT_DATA_UINT64 },
460 { "l2_feeds", KSTAT_DATA_UINT64 },
461 { "l2_rw_clash", KSTAT_DATA_UINT64 },
462 { "l2_read_bytes", KSTAT_DATA_UINT64 },
463 { "l2_write_bytes", KSTAT_DATA_UINT64 },
464 { "l2_writes_sent", KSTAT_DATA_UINT64 },
465 { "l2_writes_done", KSTAT_DATA_UINT64 },
466 { "l2_writes_error", KSTAT_DATA_UINT64 },
467 { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 },
468 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
469 { "l2_evict_reading", KSTAT_DATA_UINT64 },
470 { "l2_free_on_write", KSTAT_DATA_UINT64 },
471 { "l2_cdata_free_on_write", KSTAT_DATA_UINT64 },
472 { "l2_abort_lowmem", KSTAT_DATA_UINT64 },
473 { "l2_cksum_bad", KSTAT_DATA_UINT64 },
474 { "l2_io_error", KSTAT_DATA_UINT64 },
475 { "l2_size", KSTAT_DATA_UINT64 },
476 { "l2_asize", KSTAT_DATA_UINT64 },
477 { "l2_hdr_size", KSTAT_DATA_UINT64 },
478 { "l2_compress_successes", KSTAT_DATA_UINT64 },
479 { "l2_compress_zeros", KSTAT_DATA_UINT64 },
480 { "l2_compress_failures", KSTAT_DATA_UINT64 },
481 { "l2_write_trylock_fail", KSTAT_DATA_UINT64 },
482 { "l2_write_passed_headroom", KSTAT_DATA_UINT64 },
483 { "l2_write_spa_mismatch", KSTAT_DATA_UINT64 },
484 { "l2_write_in_l2", KSTAT_DATA_UINT64 },
485 { "l2_write_io_in_progress", KSTAT_DATA_UINT64 },
486 { "l2_write_not_cacheable", KSTAT_DATA_UINT64 },
487 { "l2_write_full", KSTAT_DATA_UINT64 },
488 { "l2_write_buffer_iter", KSTAT_DATA_UINT64 },
489 { "l2_write_pios", KSTAT_DATA_UINT64 },
490 { "l2_write_buffer_bytes_scanned", KSTAT_DATA_UINT64 },
491 { "l2_write_buffer_list_iter", KSTAT_DATA_UINT64 },
492 { "l2_write_buffer_list_null_iter", KSTAT_DATA_UINT64 },
493 { "memory_throttle_count", KSTAT_DATA_UINT64 },
494 { "duplicate_buffers", KSTAT_DATA_UINT64 },
495 { "duplicate_buffers_size", KSTAT_DATA_UINT64 },
496 { "duplicate_reads", KSTAT_DATA_UINT64 }
499 #define ARCSTAT(stat) (arc_stats.stat.value.ui64)
501 #define ARCSTAT_INCR(stat, val) \
502 atomic_add_64(&arc_stats.stat.value.ui64, (val))
504 #define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1)
505 #define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1)
507 #define ARCSTAT_MAX(stat, val) { \
509 while ((val) > (m = arc_stats.stat.value.ui64) && \
510 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
514 #define ARCSTAT_MAXSTAT(stat) \
515 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
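/*
 * Example usage (a sketch mirroring the call sites later in this file):
 * bump a counter and track its high-water mark in the matching "_max"
 * kstat:
 *
 *	ARCSTAT_BUMP(arcstat_hash_elements);
 *	ARCSTAT_MAXSTAT(arcstat_hash_elements);
 *
 * ARCSTAT_MAXSTAT(stat) assumes a kstat named stat##_max exists, e.g.
 * arcstat_hash_elements_max above.
 */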
518 * We define a macro to allow ARC hits/misses to be easily broken down by
519 * two separate conditions, giving a total of four different subtypes for
520 * each of hits and misses (so eight statistics total).
522 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
525 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
527 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
531 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
533 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
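/*
 * Example (taken from the hit accounting further down in this file):
 * classify a hit as demand vs. prefetch and data vs. metadata, bumping
 * exactly one of arcstat_{demand,prefetch}_{data,metadata}_hits:
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 */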
538 static arc_state_t *arc_anon;
539 static arc_state_t *arc_mru;
540 static arc_state_t *arc_mru_ghost;
541 static arc_state_t *arc_mfu;
542 static arc_state_t *arc_mfu_ghost;
543 static arc_state_t *arc_l2c_only;
546 * There are several ARC variables that are critical to export as kstats --
547 * but we don't want to have to grovel around in the kstat whenever we wish to
548 * manipulate them. For these variables, we therefore define them to be in
549 * terms of the statistic variable. This assures that we are not introducing
550 * the possibility of inconsistency by having shadow copies of the variables,
551 * while still allowing the code to be readable.
553 #define arc_size ARCSTAT(arcstat_size) /* actual total arc size */
554 #define arc_p ARCSTAT(arcstat_p) /* target size of MRU */
555 #define arc_c ARCSTAT(arcstat_c) /* target size of cache */
556 #define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */
557 #define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */
559 #define L2ARC_IS_VALID_COMPRESS(_c_) \
560 ((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)
562 static int arc_no_grow; /* Don't try to grow cache size */
563 static uint64_t arc_tempreserve;
564 static uint64_t arc_loaned_bytes;
565 static uint64_t arc_meta_used;
566 static uint64_t arc_meta_limit;
567 static uint64_t arc_meta_max = 0;
568 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_used, CTLFLAG_RD, &arc_meta_used, 0,
569 "ARC metadata used");
570 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_limit, CTLFLAG_RW, &arc_meta_limit, 0,
571 "ARC metadata limit");
573 typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;
575 typedef struct arc_callback arc_callback_t;
577 struct arc_callback {
579 arc_done_func_t *acb_done;
581 zio_t *acb_zio_dummy;
582 arc_callback_t *acb_next;
585 typedef struct arc_write_callback arc_write_callback_t;
587 struct arc_write_callback {
589 arc_done_func_t *awcb_ready;
590 arc_done_func_t *awcb_physdone;
591 arc_done_func_t *awcb_done;
596 /* protected by hash lock */
601 kmutex_t b_freeze_lock;
602 zio_cksum_t *b_freeze_cksum;
605 arc_buf_hdr_t *b_hash_next;
610 arc_callback_t *b_acb;
614 arc_buf_contents_t b_type;
618 /* protected by arc state mutex */
619 arc_state_t *b_state;
620 list_node_t b_arc_node;
622 /* updated atomically */
623 clock_t b_arc_access;
625 /* self protecting */
628 l2arc_buf_hdr_t *b_l2hdr;
629 list_node_t b_l2node;
632 static arc_buf_t *arc_eviction_list;
633 static kmutex_t arc_eviction_mtx;
634 static arc_buf_hdr_t arc_eviction_hdr;
635 static void arc_get_data_buf(arc_buf_t *buf);
636 static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
637 static int arc_evict_needed(arc_buf_contents_t type);
638 static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
640 static void arc_buf_watch(arc_buf_t *buf);
643 static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);
645 #define GHOST_STATE(state) \
646 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
647 (state) == arc_l2c_only)
650 * Private ARC flags. These are ARC-only flags that will show up
651 * in b_flags in the arc_buf_hdr_t. Some flags are publicly declared, and can
652 * be passed in as arc_flags in things like arc_read. However, these flags
653 * should never be passed and should only be set by ARC code. When adding new
654 * public flags, make sure not to smash the private ones.
657 #define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */
658 #define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */
659 #define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */
660 #define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */
661 #define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */
662 #define ARC_INDIRECT (1 << 14) /* this is an indirect block */
663 #define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */
664 #define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */
665 #define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */
666 #define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */
668 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE)
669 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS)
670 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR)
671 #define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_PREFETCH)
672 #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ)
673 #define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE)
674 #define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
675 #define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_L2CACHE)
676 #define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \
677 (hdr)->b_l2hdr != NULL)
678 #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING)
679 #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED)
680 #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD)
686 #define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
687 #define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))
690 * Hash table routines
693 #define HT_LOCK_PAD CACHE_LINE_SIZE
698 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
702 #define BUF_LOCKS 256
703 typedef struct buf_hash_table {
705 arc_buf_hdr_t **ht_table;
706 struct ht_lock ht_locks[BUF_LOCKS] __aligned(CACHE_LINE_SIZE);
709 static buf_hash_table_t buf_hash_table;
711 #define BUF_HASH_INDEX(spa, dva, birth) \
712 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
713 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
714 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
715 #define HDR_LOCK(hdr) \
716 (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
718 uint64_t zfs_crc64_table[256];
724 #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
725 #define L2ARC_HEADROOM 2 /* num of writes */
727 * If we discover during ARC scan any buffers to be compressed, we boost
728 * our headroom for the next scanning cycle by this percentage multiple.
730 #define L2ARC_HEADROOM_BOOST 200
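/*
 * Worked example (illustrative arithmetic only; see the l2arc feed
 * thread for the exact calculation): with the defaults above, a feed
 * cycle scans up to L2ARC_HEADROOM (2) times the 8MB target write size,
 * i.e. 16MB of ARC lists. If compressible buffers were seen, the next
 * cycle's headroom is scaled by L2ARC_HEADROOM_BOOST / 100, doubling it
 * to roughly 32MB.
 */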
731 #define L2ARC_FEED_SECS 1 /* caching interval secs */
732 #define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
734 #define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent)
735 #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done)
737 /* L2ARC Performance Tunables */
738 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */
739 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */
740 uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */
741 uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
742 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
743 uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
744 boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
745 boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */
746 boolean_t l2arc_norw = B_TRUE; /* no reads during writes */
748 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RW,
749 &l2arc_write_max, 0, "max write size");
750 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost, CTLFLAG_RW,
751 &l2arc_write_boost, 0, "extra write during warmup");
752 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom, CTLFLAG_RW,
753 &l2arc_headroom, 0, "number of dev writes");
754 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs, CTLFLAG_RW,
755 &l2arc_feed_secs, 0, "interval seconds");
756 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms, CTLFLAG_RW,
757 &l2arc_feed_min_ms, 0, "min interval milliseconds");
759 SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch, CTLFLAG_RW,
760 &l2arc_noprefetch, 0, "don't cache prefetch bufs");
761 SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again, CTLFLAG_RW,
762 &l2arc_feed_again, 0, "turbo warmup");
763 SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, CTLFLAG_RW,
764 &l2arc_norw, 0, "no reads during writes");
766 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD,
767 &ARC_anon.arcs_size, 0, "size of anonymous state");
768 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_lsize, CTLFLAG_RD,
769 &ARC_anon.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in anonymous state");
770 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_lsize, CTLFLAG_RD,
771 &ARC_anon.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in anonymous state");
773 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD,
774 &ARC_mru.arcs_size, 0, "size of mru state");
775 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_lsize, CTLFLAG_RD,
776 &ARC_mru.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mru state");
777 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_lsize, CTLFLAG_RD,
778 &ARC_mru.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mru state");
780 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD,
781 &ARC_mru_ghost.arcs_size, 0, "size of mru ghost state");
782 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_lsize, CTLFLAG_RD,
783 &ARC_mru_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
784 "size of metadata in mru ghost state");
785 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_lsize, CTLFLAG_RD,
786 &ARC_mru_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
787 "size of data in mru ghost state");
789 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD,
790 &ARC_mfu.arcs_size, 0, "size of mfu state");
791 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_lsize, CTLFLAG_RD,
792 &ARC_mfu.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mfu state");
793 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_lsize, CTLFLAG_RD,
794 &ARC_mfu.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mfu state");
796 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD,
797 &ARC_mfu_ghost.arcs_size, 0, "size of mfu ghost state");
798 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_lsize, CTLFLAG_RD,
799 &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
800 "size of metadata in mfu ghost state");
801 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_lsize, CTLFLAG_RD,
802 &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
803 "size of data in mfu ghost state");
805 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD,
806 &ARC_l2c_only.arcs_size, 0, "size of l2c_only state");
811 typedef struct l2arc_dev {
812 vdev_t *l2ad_vdev; /* vdev */
813 spa_t *l2ad_spa; /* spa */
814 uint64_t l2ad_hand; /* next write location */
815 uint64_t l2ad_start; /* first addr on device */
816 uint64_t l2ad_end; /* last addr on device */
817 uint64_t l2ad_evict; /* last addr eviction reached */
818 boolean_t l2ad_first; /* first sweep through */
819 boolean_t l2ad_writing; /* currently writing */
820 list_t *l2ad_buflist; /* buffer list */
821 list_node_t l2ad_node; /* device list node */
824 static list_t L2ARC_dev_list; /* device list */
825 static list_t *l2arc_dev_list; /* device list pointer */
826 static kmutex_t l2arc_dev_mtx; /* device list mutex */
827 static l2arc_dev_t *l2arc_dev_last; /* last device used */
828 static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */
829 static list_t L2ARC_free_on_write; /* free after write buf list */
830 static list_t *l2arc_free_on_write; /* free after write list ptr */
831 static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
832 static uint64_t l2arc_ndev; /* number of devices */
834 typedef struct l2arc_read_callback {
835 arc_buf_t *l2rcb_buf; /* read buffer */
836 spa_t *l2rcb_spa; /* spa */
837 blkptr_t l2rcb_bp; /* original blkptr */
838 zbookmark_phys_t l2rcb_zb; /* original bookmark */
839 int l2rcb_flags; /* original flags */
840 enum zio_compress l2rcb_compress; /* applied compress */
841 } l2arc_read_callback_t;
843 typedef struct l2arc_write_callback {
844 l2arc_dev_t *l2wcb_dev; /* device info */
845 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */
846 } l2arc_write_callback_t;
848 struct l2arc_buf_hdr {
849 /* protected by arc_buf_hdr mutex */
850 l2arc_dev_t *b_dev; /* L2ARC device */
851 uint64_t b_daddr; /* disk address, offset byte */
852 /* compression applied to buffer data */
853 enum zio_compress b_compress;
854 /* real alloc'd buffer size depending on b_compress applied */
856 /* temporary buffer holder for in-flight compressed data */
860 typedef struct l2arc_data_free {
861 /* protected by l2arc_free_on_write_mtx */
864 void (*l2df_func)(void *, size_t);
865 list_node_t l2df_list_node;
868 static kmutex_t l2arc_feed_thr_lock;
869 static kcondvar_t l2arc_feed_thr_cv;
870 static uint8_t l2arc_thread_exit;
872 static void l2arc_read_done(zio_t *zio);
873 static void l2arc_hdr_stat_add(void);
874 static void l2arc_hdr_stat_remove(void);
876 static boolean_t l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr);
877 static void l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr,
878 enum zio_compress c);
879 static void l2arc_release_cdata_buf(arc_buf_hdr_t *ab);
882 buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
884 uint8_t *vdva = (uint8_t *)dva;
885 uint64_t crc = -1ULL;
888 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
890 for (i = 0; i < sizeof (dva_t); i++)
891 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
893 crc ^= (spa>>8) ^ birth;
898 #define BUF_EMPTY(buf) \
899 ((buf)->b_dva.dva_word[0] == 0 && \
900 (buf)->b_dva.dva_word[1] == 0 && \
901 (buf)->b_cksum0 == 0)
903 #define BUF_EQUAL(spa, dva, birth, buf) \
904 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
905 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
906 ((buf)->b_birth == birth) && ((buf)->b_spa == spa)
909 buf_discard_identity(arc_buf_hdr_t *hdr)
911 hdr->b_dva.dva_word[0] = 0;
912 hdr->b_dva.dva_word[1] = 0;
917 static arc_buf_hdr_t *
918 buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
920 const dva_t *dva = BP_IDENTITY(bp);
921 uint64_t birth = BP_PHYSICAL_BIRTH(bp);
922 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
923 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
926 mutex_enter(hash_lock);
927 for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
928 buf = buf->b_hash_next) {
929 if (BUF_EQUAL(spa, dva, birth, buf)) {
934 mutex_exit(hash_lock);
940 * Insert an entry into the hash table. If there is already an element
941 * equal to elem in the hash table, then the already existing element
942 * will be returned and the new element will not be inserted.
943 * Otherwise returns NULL.
945 static arc_buf_hdr_t *
946 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
948 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
949 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
953 ASSERT(!DVA_IS_EMPTY(&buf->b_dva));
954 ASSERT(buf->b_birth != 0);
955 ASSERT(!HDR_IN_HASH_TABLE(buf));
957 mutex_enter(hash_lock);
958 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
959 fbuf = fbuf->b_hash_next, i++) {
960 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
964 buf->b_hash_next = buf_hash_table.ht_table[idx];
965 buf_hash_table.ht_table[idx] = buf;
966 buf->b_flags |= ARC_IN_HASH_TABLE;
968 /* collect some hash table performance data */
970 ARCSTAT_BUMP(arcstat_hash_collisions);
972 ARCSTAT_BUMP(arcstat_hash_chains);
974 ARCSTAT_MAX(arcstat_hash_chain_max, i);
977 ARCSTAT_BUMP(arcstat_hash_elements);
978 ARCSTAT_MAXSTAT(arcstat_hash_elements);
984 buf_hash_remove(arc_buf_hdr_t *buf)
986 arc_buf_hdr_t *fbuf, **bufp;
987 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
989 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
990 ASSERT(HDR_IN_HASH_TABLE(buf));
992 bufp = &buf_hash_table.ht_table[idx];
993 while ((fbuf = *bufp) != buf) {
994 ASSERT(fbuf != NULL);
995 bufp = &fbuf->b_hash_next;
997 *bufp = buf->b_hash_next;
998 buf->b_hash_next = NULL;
999 buf->b_flags &= ~ARC_IN_HASH_TABLE;
1001 /* collect some hash table performance data */
1002 ARCSTAT_BUMPDOWN(arcstat_hash_elements);
1004 if (buf_hash_table.ht_table[idx] &&
1005 buf_hash_table.ht_table[idx]->b_hash_next == NULL)
1006 ARCSTAT_BUMPDOWN(arcstat_hash_chains);
1010 * Global data structures and functions for the buf kmem cache.
1012 static kmem_cache_t *hdr_cache;
1013 static kmem_cache_t *buf_cache;
1020 kmem_free(buf_hash_table.ht_table,
1021 (buf_hash_table.ht_mask + 1) * sizeof (void *));
1022 for (i = 0; i < BUF_LOCKS; i++)
1023 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
1024 kmem_cache_destroy(hdr_cache);
1025 kmem_cache_destroy(buf_cache);
1029 * Constructor callback - called when the cache is empty
1030 * and a new buf is requested.
1034 hdr_cons(void *vbuf, void *unused, int kmflag)
1036 arc_buf_hdr_t *buf = vbuf;
1038 bzero(buf, sizeof (arc_buf_hdr_t));
1039 refcount_create(&buf->b_refcnt);
1040 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
1041 mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
1042 arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
1049 buf_cons(void *vbuf, void *unused, int kmflag)
1051 arc_buf_t *buf = vbuf;
1053 bzero(buf, sizeof (arc_buf_t));
1054 mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
1055 arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1061 * Destructor callback - called when a cached buf is
1062 * no longer required.
1066 hdr_dest(void *vbuf, void *unused)
1068 arc_buf_hdr_t *buf = vbuf;
1070 ASSERT(BUF_EMPTY(buf));
1071 refcount_destroy(&buf->b_refcnt);
1072 cv_destroy(&buf->b_cv);
1073 mutex_destroy(&buf->b_freeze_lock);
1074 arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
1079 buf_dest(void *vbuf, void *unused)
1081 arc_buf_t *buf = vbuf;
1083 mutex_destroy(&buf->b_evict_lock);
1084 arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1088 * Reclaim callback -- invoked when memory is low.
1092 hdr_recl(void *unused)
1094 dprintf("hdr_recl called\n");
1096 * umem calls the reclaim func when we destroy the buf cache,
1097 * which is after we do arc_fini().
1100 cv_signal(&arc_reclaim_thr_cv);
1107 uint64_t hsize = 1ULL << 12;
1111 * The hash table is big enough to fill all of physical memory
1112 * with an average block size of zfs_arc_average_blocksize (default 8K).
1113 * By default, the table will take up
1114 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
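 * For example, with 4GB of physical memory and the default 8K average
 * block size, the loop below settles on 2^19 (512K) buckets, i.e. a 4MB
 * table of 8-byte pointers.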
1116 while (hsize * zfs_arc_average_blocksize < (uint64_t)physmem * PAGESIZE)
1119 buf_hash_table.ht_mask = hsize - 1;
1120 buf_hash_table.ht_table =
1121 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
1122 if (buf_hash_table.ht_table == NULL) {
1123 ASSERT(hsize > (1ULL << 8));
1128 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
1129 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
1130 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
1131 0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
1133 for (i = 0; i < 256; i++)
1134 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
1135 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
1137 for (i = 0; i < BUF_LOCKS; i++) {
1138 mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
1139 NULL, MUTEX_DEFAULT, NULL);
1143 #define ARC_MINTIME (hz>>4) /* 62 ms */
1146 arc_cksum_verify(arc_buf_t *buf)
1150 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1153 mutex_enter(&buf->b_hdr->b_freeze_lock);
1154 if (buf->b_hdr->b_freeze_cksum == NULL ||
1155 (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
1156 mutex_exit(&buf->b_hdr->b_freeze_lock);
1159 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
1160 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
1161 panic("buffer modified while frozen!");
1162 mutex_exit(&buf->b_hdr->b_freeze_lock);
1166 arc_cksum_equal(arc_buf_t *buf)
1171 mutex_enter(&buf->b_hdr->b_freeze_lock);
1172 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
1173 equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
1174 mutex_exit(&buf->b_hdr->b_freeze_lock);
1180 arc_cksum_compute(arc_buf_t *buf, boolean_t force)
1182 if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
1185 mutex_enter(&buf->b_hdr->b_freeze_lock);
1186 if (buf->b_hdr->b_freeze_cksum != NULL) {
1187 mutex_exit(&buf->b_hdr->b_freeze_lock);
1190 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
1191 fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
1192 buf->b_hdr->b_freeze_cksum);
1193 mutex_exit(&buf->b_hdr->b_freeze_lock);
1196 #endif /* illumos */
1201 typedef struct procctl {
1209 arc_buf_unwatch(arc_buf_t *buf)
1216 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1217 ctl.prwatch.pr_size = 0;
1218 ctl.prwatch.pr_wflags = 0;
1219 result = write(arc_procfd, &ctl, sizeof (ctl));
1220 ASSERT3U(result, ==, sizeof (ctl));
1227 arc_buf_watch(arc_buf_t *buf)
1234 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1235 ctl.prwatch.pr_size = buf->b_hdr->b_size;
1236 ctl.prwatch.pr_wflags = WA_WRITE;
1237 result = write(arc_procfd, &ctl, sizeof (ctl));
1238 ASSERT3U(result, ==, sizeof (ctl));
1242 #endif /* illumos */
1245 arc_buf_thaw(arc_buf_t *buf)
1247 if (zfs_flags & ZFS_DEBUG_MODIFY) {
1248 if (buf->b_hdr->b_state != arc_anon)
1249 panic("modifying non-anon buffer!");
1250 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
1251 panic("modifying buffer while i/o in progress!");
1252 arc_cksum_verify(buf);
1255 mutex_enter(&buf->b_hdr->b_freeze_lock);
1256 if (buf->b_hdr->b_freeze_cksum != NULL) {
1257 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1258 buf->b_hdr->b_freeze_cksum = NULL;
1261 if (zfs_flags & ZFS_DEBUG_MODIFY) {
1262 if (buf->b_hdr->b_thawed)
1263 kmem_free(buf->b_hdr->b_thawed, 1);
1264 buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
1267 mutex_exit(&buf->b_hdr->b_freeze_lock);
1270 arc_buf_unwatch(buf);
1271 #endif /* illumos */
1275 arc_buf_freeze(arc_buf_t *buf)
1277 kmutex_t *hash_lock;
1279 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1282 hash_lock = HDR_LOCK(buf->b_hdr);
1283 mutex_enter(hash_lock);
1285 ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
1286 buf->b_hdr->b_state == arc_anon);
1287 arc_cksum_compute(buf, B_FALSE);
1288 mutex_exit(hash_lock);
1293 get_buf_info(arc_buf_hdr_t *ab, arc_state_t *state, list_t **list, kmutex_t **lock)
1295 uint64_t buf_hashid = buf_hash(ab->b_spa, &ab->b_dva, ab->b_birth);
1297 if (ab->b_type == ARC_BUFC_METADATA)
1298 buf_hashid &= (ARC_BUFC_NUMMETADATALISTS - 1);
1300 buf_hashid &= (ARC_BUFC_NUMDATALISTS - 1);
1301 buf_hashid += ARC_BUFC_NUMMETADATALISTS;
1304 *list = &state->arcs_lists[buf_hashid];
1305 *lock = ARCS_LOCK(state, buf_hashid);
1310 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1312 ASSERT(MUTEX_HELD(hash_lock));
1314 if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
1315 (ab->b_state != arc_anon)) {
1316 uint64_t delta = ab->b_size * ab->b_datacnt;
1317 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
1321 get_buf_info(ab, ab->b_state, &list, &lock);
1322 ASSERT(!MUTEX_HELD(lock));
1324 ASSERT(list_link_active(&ab->b_arc_node));
1325 list_remove(list, ab);
1326 if (GHOST_STATE(ab->b_state)) {
1327 ASSERT0(ab->b_datacnt);
1328 ASSERT3P(ab->b_buf, ==, NULL);
1332 ASSERT3U(*size, >=, delta);
1333 atomic_add_64(size, -delta);
1335 /* remove the prefetch flag if we get a reference */
1336 if (ab->b_flags & ARC_PREFETCH)
1337 ab->b_flags &= ~ARC_PREFETCH;
1342 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1345 arc_state_t *state = ab->b_state;
1347 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
1348 ASSERT(!GHOST_STATE(state));
1350 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
1351 (state != arc_anon)) {
1352 uint64_t *size = &state->arcs_lsize[ab->b_type];
1356 get_buf_info(ab, state, &list, &lock);
1357 ASSERT(!MUTEX_HELD(lock));
1359 ASSERT(!list_link_active(&ab->b_arc_node));
1360 list_insert_head(list, ab);
1361 ASSERT(ab->b_datacnt > 0);
1362 atomic_add_64(size, ab->b_size * ab->b_datacnt);
1369 * Move the supplied buffer to the indicated state. The mutex
1370 * for the buffer must be held by the caller.
1373 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
1375 arc_state_t *old_state = ab->b_state;
1376 int64_t refcnt = refcount_count(&ab->b_refcnt);
1377 uint64_t from_delta, to_delta;
1381 ASSERT(MUTEX_HELD(hash_lock));
1382 ASSERT3P(new_state, !=, old_state);
1383 ASSERT(refcnt == 0 || ab->b_datacnt > 0);
1384 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
1385 ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);
1387 from_delta = to_delta = ab->b_datacnt * ab->b_size;
1390 * If this buffer is evictable, transfer it from the
1391 * old state list to the new state list.
1394 if (old_state != arc_anon) {
1396 uint64_t *size = &old_state->arcs_lsize[ab->b_type];
1398 get_buf_info(ab, old_state, &list, &lock);
1399 use_mutex = !MUTEX_HELD(lock);
1403 ASSERT(list_link_active(&ab->b_arc_node));
1404 list_remove(list, ab);
1407 * If prefetching out of the ghost cache,
1408 * we will have a non-zero datacnt.
1410 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
1411 /* ghost elements have a ghost size */
1412 ASSERT(ab->b_buf == NULL);
1413 from_delta = ab->b_size;
1415 ASSERT3U(*size, >=, from_delta);
1416 atomic_add_64(size, -from_delta);
1421 if (new_state != arc_anon) {
1423 uint64_t *size = &new_state->arcs_lsize[ab->b_type];
1425 get_buf_info(ab, new_state, &list, &lock);
1426 use_mutex = !MUTEX_HELD(lock);
1430 list_insert_head(list, ab);
1432 /* ghost elements have a ghost size */
1433 if (GHOST_STATE(new_state)) {
1434 ASSERT(ab->b_datacnt == 0);
1435 ASSERT(ab->b_buf == NULL);
1436 to_delta = ab->b_size;
1438 atomic_add_64(size, to_delta);
1445 ASSERT(!BUF_EMPTY(ab));
1446 if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
1447 buf_hash_remove(ab);
1449 /* adjust state sizes */
1451 atomic_add_64(&new_state->arcs_size, to_delta);
1453 ASSERT3U(old_state->arcs_size, >=, from_delta);
1454 atomic_add_64(&old_state->arcs_size, -from_delta);
1456 ab->b_state = new_state;
1458 /* adjust l2arc hdr stats */
1459 if (new_state == arc_l2c_only)
1460 l2arc_hdr_stat_add();
1461 else if (old_state == arc_l2c_only)
1462 l2arc_hdr_stat_remove();
1466 arc_space_consume(uint64_t space, arc_space_type_t type)
1468 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1471 case ARC_SPACE_DATA:
1472 ARCSTAT_INCR(arcstat_data_size, space);
1474 case ARC_SPACE_OTHER:
1475 ARCSTAT_INCR(arcstat_other_size, space);
1477 case ARC_SPACE_HDRS:
1478 ARCSTAT_INCR(arcstat_hdr_size, space);
1480 case ARC_SPACE_L2HDRS:
1481 ARCSTAT_INCR(arcstat_l2_hdr_size, space);
1485 atomic_add_64(&arc_meta_used, space);
1486 atomic_add_64(&arc_size, space);
1490 arc_space_return(uint64_t space, arc_space_type_t type)
1492 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1495 case ARC_SPACE_DATA:
1496 ARCSTAT_INCR(arcstat_data_size, -space);
1498 case ARC_SPACE_OTHER:
1499 ARCSTAT_INCR(arcstat_other_size, -space);
1501 case ARC_SPACE_HDRS:
1502 ARCSTAT_INCR(arcstat_hdr_size, -space);
1504 case ARC_SPACE_L2HDRS:
1505 ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
1509 ASSERT(arc_meta_used >= space);
1510 if (arc_meta_max < arc_meta_used)
1511 arc_meta_max = arc_meta_used;
1512 atomic_add_64(&arc_meta_used, -space);
1513 ASSERT(arc_size >= space);
1514 atomic_add_64(&arc_size, -space);
1518 arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
1523 ASSERT3U(size, >, 0);
1524 hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
1525 ASSERT(BUF_EMPTY(hdr));
1528 hdr->b_spa = spa_load_guid(spa);
1529 hdr->b_state = arc_anon;
1530 hdr->b_arc_access = 0;
1531 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1534 buf->b_efunc = NULL;
1535 buf->b_private = NULL;
1538 arc_get_data_buf(buf);
1541 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1542 (void) refcount_add(&hdr->b_refcnt, tag);
1547 static char *arc_onloan_tag = "onloan";
1550 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
1551 * flight data by arc_tempreserve_space() until they are "returned". Loaned
1552 * buffers must be returned to the arc before they can be used by the DMU or freed.
1556 arc_loan_buf(spa_t *spa, int size)
1560 buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);
1562 atomic_add_64(&arc_loaned_bytes, size);
1567 * Return a loaned arc buffer to the arc.
1570 arc_return_buf(arc_buf_t *buf, void *tag)
1572 arc_buf_hdr_t *hdr = buf->b_hdr;
1574 ASSERT(buf->b_data != NULL);
1575 (void) refcount_add(&hdr->b_refcnt, tag);
1576 (void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);
1578 atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
1581 /* Detach an arc_buf from a dbuf (tag) */
1583 arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
1587 ASSERT(buf->b_data != NULL);
1589 (void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
1590 (void) refcount_remove(&hdr->b_refcnt, tag);
1591 buf->b_efunc = NULL;
1592 buf->b_private = NULL;
1594 atomic_add_64(&arc_loaned_bytes, hdr->b_size);
1598 arc_buf_clone(arc_buf_t *from)
1601 arc_buf_hdr_t *hdr = from->b_hdr;
1602 uint64_t size = hdr->b_size;
1604 ASSERT(hdr->b_state != arc_anon);
1606 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1609 buf->b_efunc = NULL;
1610 buf->b_private = NULL;
1611 buf->b_next = hdr->b_buf;
1613 arc_get_data_buf(buf);
1614 bcopy(from->b_data, buf->b_data, size);
1617 * This buffer already exists in the arc so create a duplicate
1618 * copy for the caller. If the buffer is associated with user data
1619 * then track the size and number of duplicates. These stats will be
1620 * updated as duplicate buffers are created and destroyed.
1622 if (hdr->b_type == ARC_BUFC_DATA) {
1623 ARCSTAT_BUMP(arcstat_duplicate_buffers);
1624 ARCSTAT_INCR(arcstat_duplicate_buffers_size, size);
1626 hdr->b_datacnt += 1;
1631 arc_buf_add_ref(arc_buf_t *buf, void* tag)
1634 kmutex_t *hash_lock;
1637 * Check to see if this buffer is evicted. Callers
1638 * must verify b_data != NULL to know if the add_ref was successful.
1641 mutex_enter(&buf->b_evict_lock);
1642 if (buf->b_data == NULL) {
1643 mutex_exit(&buf->b_evict_lock);
1646 hash_lock = HDR_LOCK(buf->b_hdr);
1647 mutex_enter(hash_lock);
1649 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1650 mutex_exit(&buf->b_evict_lock);
1652 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
1653 add_reference(hdr, hash_lock, tag);
1654 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
1655 arc_access(hdr, hash_lock);
1656 mutex_exit(hash_lock);
1657 ARCSTAT_BUMP(arcstat_hits);
1658 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
1659 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
1660 data, metadata, hits);
1664 arc_buf_free_on_write(void *data, size_t size,
1665 void (*free_func)(void *, size_t))
1667 l2arc_data_free_t *df;
1669 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
1670 df->l2df_data = data;
1671 df->l2df_size = size;
1672 df->l2df_func = free_func;
1673 mutex_enter(&l2arc_free_on_write_mtx);
1674 list_insert_head(l2arc_free_on_write, df);
1675 mutex_exit(&l2arc_free_on_write_mtx);
1679 * Free the arc data buffer. If it is an l2arc write in progress,
1680 * the buffer is placed on l2arc_free_on_write to be freed later.
1683 arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
1685 arc_buf_hdr_t *hdr = buf->b_hdr;
1687 if (HDR_L2_WRITING(hdr)) {
1688 arc_buf_free_on_write(buf->b_data, hdr->b_size, free_func);
1689 ARCSTAT_BUMP(arcstat_l2_free_on_write);
1691 free_func(buf->b_data, hdr->b_size);
1696 * Free up buf->b_data and, if 'remove' is set, pull the
1697 * arc_buf_t off of the arc_buf_hdr_t's list and free it.
1700 arc_buf_l2_cdata_free(arc_buf_hdr_t *hdr)
1702 l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
1704 ASSERT(MUTEX_HELD(&l2arc_buflist_mtx));
1706 if (l2hdr->b_tmp_cdata == NULL)
1709 ASSERT(HDR_L2_WRITING(hdr));
1710 arc_buf_free_on_write(l2hdr->b_tmp_cdata, hdr->b_size,
1712 ARCSTAT_BUMP(arcstat_l2_cdata_free_on_write);
1713 l2hdr->b_tmp_cdata = NULL;
1717 arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t remove)
1721 /* free up data associated with the buf */
1723 arc_state_t *state = buf->b_hdr->b_state;
1724 uint64_t size = buf->b_hdr->b_size;
1725 arc_buf_contents_t type = buf->b_hdr->b_type;
1727 arc_cksum_verify(buf);
1729 arc_buf_unwatch(buf);
1730 #endif /* illumos */
1733 if (type == ARC_BUFC_METADATA) {
1734 arc_buf_data_free(buf, zio_buf_free);
1735 arc_space_return(size, ARC_SPACE_DATA);
1737 ASSERT(type == ARC_BUFC_DATA);
1738 arc_buf_data_free(buf, zio_data_buf_free);
1739 ARCSTAT_INCR(arcstat_data_size, -size);
1740 atomic_add_64(&arc_size, -size);
1743 if (list_link_active(&buf->b_hdr->b_arc_node)) {
1744 uint64_t *cnt = &state->arcs_lsize[type];
1746 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
1747 ASSERT(state != arc_anon);
1749 ASSERT3U(*cnt, >=, size);
1750 atomic_add_64(cnt, -size);
1752 ASSERT3U(state->arcs_size, >=, size);
1753 atomic_add_64(&state->arcs_size, -size);
1757 * If we're destroying a duplicate buffer, make sure
1758 * that the appropriate statistics are updated.
1760 if (buf->b_hdr->b_datacnt > 1 &&
1761 buf->b_hdr->b_type == ARC_BUFC_DATA) {
1762 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
1763 ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size);
1765 ASSERT(buf->b_hdr->b_datacnt > 0);
1766 buf->b_hdr->b_datacnt -= 1;
1769 /* only remove the buf if requested */
1773 /* remove the buf from the hdr list */
1774 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
1776 *bufp = buf->b_next;
1779 ASSERT(buf->b_efunc == NULL);
1781 /* clean up the buf */
1783 kmem_cache_free(buf_cache, buf);
1787 arc_hdr_destroy(arc_buf_hdr_t *hdr)
1789 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1790 ASSERT3P(hdr->b_state, ==, arc_anon);
1791 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1792 l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
1794 if (l2hdr != NULL) {
1795 boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
1797 * To prevent arc_free() and l2arc_evict() from
1798 * attempting to free the same buffer at the same time,
1799 * a FREE_IN_PROGRESS flag is given to arc_free() to
1800 * give it priority. l2arc_evict() can't destroy this
1801 * header while we are waiting on l2arc_buflist_mtx.
1803 * The hdr may be removed from l2ad_buflist before we
1804 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
1806 if (!buflist_held) {
1807 mutex_enter(&l2arc_buflist_mtx);
1808 l2hdr = hdr->b_l2hdr;
1811 if (l2hdr != NULL) {
1812 trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr,
1814 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
1815 arc_buf_l2_cdata_free(hdr);
1816 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
1817 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
1818 vdev_space_update(l2hdr->b_dev->l2ad_vdev,
1819 -l2hdr->b_asize, 0, 0);
1820 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
1821 if (hdr->b_state == arc_l2c_only)
1822 l2arc_hdr_stat_remove();
1823 hdr->b_l2hdr = NULL;
1827 mutex_exit(&l2arc_buflist_mtx);
1830 if (!BUF_EMPTY(hdr)) {
1831 ASSERT(!HDR_IN_HASH_TABLE(hdr));
1832 buf_discard_identity(hdr);
1834 while (hdr->b_buf) {
1835 arc_buf_t *buf = hdr->b_buf;
1838 mutex_enter(&arc_eviction_mtx);
1839 mutex_enter(&buf->b_evict_lock);
1840 ASSERT(buf->b_hdr != NULL);
1841 arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
1842 hdr->b_buf = buf->b_next;
1843 buf->b_hdr = &arc_eviction_hdr;
1844 buf->b_next = arc_eviction_list;
1845 arc_eviction_list = buf;
1846 mutex_exit(&buf->b_evict_lock);
1847 mutex_exit(&arc_eviction_mtx);
1849 arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
1852 if (hdr->b_freeze_cksum != NULL) {
1853 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1854 hdr->b_freeze_cksum = NULL;
1856 if (hdr->b_thawed) {
1857 kmem_free(hdr->b_thawed, 1);
1858 hdr->b_thawed = NULL;
1861 ASSERT(!list_link_active(&hdr->b_arc_node));
1862 ASSERT3P(hdr->b_hash_next, ==, NULL);
1863 ASSERT3P(hdr->b_acb, ==, NULL);
1864 kmem_cache_free(hdr_cache, hdr);
1868 arc_buf_free(arc_buf_t *buf, void *tag)
1870 arc_buf_hdr_t *hdr = buf->b_hdr;
1871 int hashed = hdr->b_state != arc_anon;
1873 ASSERT(buf->b_efunc == NULL);
1874 ASSERT(buf->b_data != NULL);
1877 kmutex_t *hash_lock = HDR_LOCK(hdr);
1879 mutex_enter(hash_lock);
1881 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1883 (void) remove_reference(hdr, hash_lock, tag);
1884 if (hdr->b_datacnt > 1) {
1885 arc_buf_destroy(buf, FALSE, TRUE);
1887 ASSERT(buf == hdr->b_buf);
1888 ASSERT(buf->b_efunc == NULL);
1889 hdr->b_flags |= ARC_BUF_AVAILABLE;
1891 mutex_exit(hash_lock);
1892 } else if (HDR_IO_IN_PROGRESS(hdr)) {
1895 * We are in the middle of an async write. Don't destroy
1896 * this buffer unless the write completes before we finish
1897 * decrementing the reference count.
1899 mutex_enter(&arc_eviction_mtx);
1900 (void) remove_reference(hdr, NULL, tag);
1901 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1902 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
1903 mutex_exit(&arc_eviction_mtx);
1905 arc_hdr_destroy(hdr);
1907 if (remove_reference(hdr, NULL, tag) > 0)
1908 arc_buf_destroy(buf, FALSE, TRUE);
1910 arc_hdr_destroy(hdr);
1915 arc_buf_remove_ref(arc_buf_t *buf, void* tag)
1917 arc_buf_hdr_t *hdr = buf->b_hdr;
1918 kmutex_t *hash_lock = HDR_LOCK(hdr);
1919 boolean_t no_callback = (buf->b_efunc == NULL);
1921 if (hdr->b_state == arc_anon) {
1922 ASSERT(hdr->b_datacnt == 1);
1923 arc_buf_free(buf, tag);
1924 return (no_callback);
1927 mutex_enter(hash_lock);
1929 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1930 ASSERT(hdr->b_state != arc_anon);
1931 ASSERT(buf->b_data != NULL);
1933 (void) remove_reference(hdr, hash_lock, tag);
1934 if (hdr->b_datacnt > 1) {
1936 arc_buf_destroy(buf, FALSE, TRUE);
1937 } else if (no_callback) {
1938 ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
1939 ASSERT(buf->b_efunc == NULL);
1940 hdr->b_flags |= ARC_BUF_AVAILABLE;
1942 ASSERT(no_callback || hdr->b_datacnt > 1 ||
1943 refcount_is_zero(&hdr->b_refcnt));
1944 mutex_exit(hash_lock);
1945 return (no_callback);
1949 arc_buf_size(arc_buf_t *buf)
1951 return (buf->b_hdr->b_size);
1955 * Called from the DMU to determine if the current buffer should be
1956 * evicted. In order to ensure proper locking, the eviction must be initiated
1957 * from the DMU. Return true if the buffer is associated with user data and
1958 * duplicate buffers still exist.
1961 arc_buf_eviction_needed(arc_buf_t *buf)
1964 boolean_t evict_needed = B_FALSE;
1966 if (zfs_disable_dup_eviction)
1969 mutex_enter(&buf->b_evict_lock);
1973 * We are in arc_do_user_evicts(); let that function
1974 * perform the eviction.
1976 ASSERT(buf->b_data == NULL);
1977 mutex_exit(&buf->b_evict_lock);
1979 } else if (buf->b_data == NULL) {
1981 * We have already been added to the arc eviction list;
1982 * recommend eviction.
1984 ASSERT3P(hdr, ==, &arc_eviction_hdr);
1985 mutex_exit(&buf->b_evict_lock);
1989 if (hdr->b_datacnt > 1 && hdr->b_type == ARC_BUFC_DATA)
1990 evict_needed = B_TRUE;
1992 mutex_exit(&buf->b_evict_lock);
1993 return (evict_needed);
1997 * Evict buffers from list until we've removed the specified number of
1998 * bytes. Move the removed buffers to the appropriate evict state.
1999 * If the recycle flag is set, then attempt to "recycle" a buffer:
2000 * - look for a buffer to evict that is `bytes' long.
2001 * - return the data block from this buffer rather than freeing it.
2002 * This flag is used by callers that are trying to make space for a
2003 * new buffer in a full arc cache.
2005 * This function makes a "best effort". It skips over any buffers
2006 * it can't get a hash_lock on, and so may not catch all candidates.
2007 * It may also return without evicting as much space as requested.
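 *
 * Illustrative sketch (not part of the original code): the two call
 * patterns used elsewhere in this file. arc_adjust() evicts a byte
 * count without recycling, while arc_get_data_buf() sets `recycle'
 * to steal a same-sized data block for a new buffer:
 *
 *	(void) arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_DATA);
 *	buf->b_data = arc_evict(state, 0, size, TRUE, type);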
2010 arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
2011 arc_buf_contents_t type)
2013 arc_state_t *evicted_state;
2014 uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
2015 int64_t bytes_remaining;
2016 arc_buf_hdr_t *ab, *ab_prev = NULL;
2017 list_t *evicted_list, *list, *evicted_list_start, *list_start;
2018 kmutex_t *lock, *evicted_lock;
2019 kmutex_t *hash_lock;
2020 boolean_t have_lock;
2021 void *stolen = NULL;
2022 arc_buf_hdr_t marker = { 0 };
2024 static int evict_metadata_offset, evict_data_offset;
2025 int i, idx, offset, list_count, lists;
2027 ASSERT(state == arc_mru || state == arc_mfu);
2029 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
2031 if (type == ARC_BUFC_METADATA) {
2033 list_count = ARC_BUFC_NUMMETADATALISTS;
2034 list_start = &state->arcs_lists[0];
2035 evicted_list_start = &evicted_state->arcs_lists[0];
2036 idx = evict_metadata_offset;
2038 offset = ARC_BUFC_NUMMETADATALISTS;
2039 list_start = &state->arcs_lists[offset];
2040 evicted_list_start = &evicted_state->arcs_lists[offset];
2041 list_count = ARC_BUFC_NUMDATALISTS;
2042 idx = evict_data_offset;
2044 bytes_remaining = evicted_state->arcs_lsize[type];
2048 list = &list_start[idx];
2049 evicted_list = &evicted_list_start[idx];
2050 lock = ARCS_LOCK(state, (offset + idx));
2051 evicted_lock = ARCS_LOCK(evicted_state, (offset + idx));
2054 mutex_enter(evicted_lock);
2056 for (ab = list_tail(list); ab; ab = ab_prev) {
2057 ab_prev = list_prev(list, ab);
2058 bytes_remaining -= (ab->b_size * ab->b_datacnt);
2059 /* prefetch buffers have a minimum lifespan */
2060 if (HDR_IO_IN_PROGRESS(ab) ||
2061 (spa && ab->b_spa != spa) ||
2062 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
2063 ddi_get_lbolt() - ab->b_arc_access <
2064 arc_min_prefetch_lifespan)) {
2068 /* "lookahead" for better eviction candidate */
2069 if (recycle && ab->b_size != bytes &&
2070 ab_prev && ab_prev->b_size == bytes)
2073 /* ignore markers */
2078 * It may take a long time to evict all the bufs requested.
2079 * To avoid blocking all arc activity, periodically drop
2080 * the arcs_mtx and give other threads a chance to run
2081 * before reacquiring the lock.
2083 * If we are looking for a buffer to recycle, we are in
2084 * the hot code path, so don't sleep.
2086 if (!recycle && count++ > arc_evict_iterations) {
2087 list_insert_after(list, ab, &marker);
2088 mutex_exit(evicted_lock);
2090 kpreempt(KPREEMPT_SYNC);
2092 mutex_enter(evicted_lock);
2093 ab_prev = list_prev(list, &marker);
2094 list_remove(list, &marker);
2099 hash_lock = HDR_LOCK(ab);
2100 have_lock = MUTEX_HELD(hash_lock);
2101 if (have_lock || mutex_tryenter(hash_lock)) {
2102 ASSERT0(refcount_count(&ab->b_refcnt));
2103 ASSERT(ab->b_datacnt > 0);
2105 arc_buf_t *buf = ab->b_buf;
2106 if (!mutex_tryenter(&buf->b_evict_lock)) {
2111 bytes_evicted += ab->b_size;
2112 if (recycle && ab->b_type == type &&
2113 ab->b_size == bytes &&
2114 !HDR_L2_WRITING(ab)) {
2115 stolen = buf->b_data;
2120 mutex_enter(&arc_eviction_mtx);
2121 arc_buf_destroy(buf,
2122 buf->b_data == stolen, FALSE);
2123 ab->b_buf = buf->b_next;
2124 buf->b_hdr = &arc_eviction_hdr;
2125 buf->b_next = arc_eviction_list;
2126 arc_eviction_list = buf;
2127 mutex_exit(&arc_eviction_mtx);
2128 mutex_exit(&buf->b_evict_lock);
2130 mutex_exit(&buf->b_evict_lock);
2131 arc_buf_destroy(buf,
2132 buf->b_data == stolen, TRUE);
2137 ARCSTAT_INCR(arcstat_evict_l2_cached,
2140 if (l2arc_write_eligible(ab->b_spa, ab)) {
2141 ARCSTAT_INCR(arcstat_evict_l2_eligible,
2145 arcstat_evict_l2_ineligible,
2150 if (ab->b_datacnt == 0) {
2151 arc_change_state(evicted_state, ab, hash_lock);
2152 ASSERT(HDR_IN_HASH_TABLE(ab));
2153 ab->b_flags |= ARC_IN_HASH_TABLE;
2154 ab->b_flags &= ~ARC_BUF_AVAILABLE;
2155 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
2158 mutex_exit(hash_lock);
2159 if (bytes >= 0 && bytes_evicted >= bytes)
2161 if (bytes_remaining > 0) {
2162 mutex_exit(evicted_lock);
2164 idx = ((idx + 1) & (list_count - 1));
2173 mutex_exit(evicted_lock);
2176 idx = ((idx + 1) & (list_count - 1));
2179 if (bytes_evicted < bytes) {
2180 if (lists < list_count)
2183 dprintf("only evicted %lld bytes from %p",
2184 (longlong_t)bytes_evicted, state);
2186 if (type == ARC_BUFC_METADATA)
2187 evict_metadata_offset = idx;
2189 evict_data_offset = idx;
2192 ARCSTAT_INCR(arcstat_evict_skip, skipped);
2195 ARCSTAT_INCR(arcstat_mutex_miss, missed);
2198 * Note: we have just evicted some data into the ghost state,
2199 * potentially putting the ghost size over the desired size. Rather
2200 * than evicting from the ghost list in this hot code path, leave
2201 * this chore to the arc_reclaim_thread().
2205 ARCSTAT_BUMP(arcstat_stolen);
2210 * Remove buffers from list until we've removed the specified number of
2211 * bytes. Destroy the buffers that are removed.
2214 arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
2216 arc_buf_hdr_t *ab, *ab_prev;
2217 arc_buf_hdr_t marker = { 0 };
2218 list_t *list, *list_start;
2219 kmutex_t *hash_lock, *lock;
2220 uint64_t bytes_deleted = 0;
2221 uint64_t bufs_skipped = 0;
2223 static int evict_offset;
2224 int list_count, idx = evict_offset;
2225 int offset, lists = 0;
2227 ASSERT(GHOST_STATE(state));
2230 * data lists come after metadata lists
2232 list_start = &state->arcs_lists[ARC_BUFC_NUMMETADATALISTS];
2233 list_count = ARC_BUFC_NUMDATALISTS;
2234 offset = ARC_BUFC_NUMMETADATALISTS;
2237 list = &list_start[idx];
2238 lock = ARCS_LOCK(state, idx + offset);
2241 for (ab = list_tail(list); ab; ab = ab_prev) {
2242 ab_prev = list_prev(list, ab);
2243 if (ab->b_type > ARC_BUFC_NUMTYPES)
2244 panic("invalid ab=%p", (void *)ab);
2245 if (spa && ab->b_spa != spa)
2248 /* ignore markers */
2252 hash_lock = HDR_LOCK(ab);
2253 /* caller may be trying to modify this buffer, skip it */
2254 if (MUTEX_HELD(hash_lock))
2258 * It may take a long time to evict all the bufs requested.
2259 * To avoid blocking all arc activity, periodically drop
2260 * the arcs_mtx and give other threads a chance to run
2261 * before reacquiring the lock.
2263 if (count++ > arc_evict_iterations) {
2264 list_insert_after(list, ab, &marker);
2266 kpreempt(KPREEMPT_SYNC);
2268 ab_prev = list_prev(list, &marker);
2269 list_remove(list, &marker);
2273 if (mutex_tryenter(hash_lock)) {
2274 ASSERT(!HDR_IO_IN_PROGRESS(ab));
2275 ASSERT(ab->b_buf == NULL);
2276 ARCSTAT_BUMP(arcstat_deleted);
2277 bytes_deleted += ab->b_size;
2279 if (ab->b_l2hdr != NULL) {
2281 * This buffer is cached on the 2nd Level ARC;
2282 * don't destroy the header.
2284 arc_change_state(arc_l2c_only, ab, hash_lock);
2285 mutex_exit(hash_lock);
2287 arc_change_state(arc_anon, ab, hash_lock);
2288 mutex_exit(hash_lock);
2289 arc_hdr_destroy(ab);
2292 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
2293 if (bytes >= 0 && bytes_deleted >= bytes)
2295 } else if (bytes < 0) {
2297 * Insert a list marker and then wait for the
2298 * hash lock to become available. Once it's
2299 * available, restart from where we left off.
2301 list_insert_after(list, ab, &marker);
2303 mutex_enter(hash_lock);
2304 mutex_exit(hash_lock);
2306 ab_prev = list_prev(list, &marker);
2307 list_remove(list, &marker);
2314 idx = ((idx + 1) & (ARC_BUFC_NUMDATALISTS - 1));
2317 if (lists < list_count)
2321 if ((uintptr_t)list > (uintptr_t)&state->arcs_lists[ARC_BUFC_NUMMETADATALISTS] &&
2322 (bytes < 0 || bytes_deleted < bytes)) {
2323 list_start = &state->arcs_lists[0];
2324 list_count = ARC_BUFC_NUMMETADATALISTS;
2330 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
2334 if (bytes_deleted < bytes)
2335 dprintf("only deleted %lld bytes from %p",
2336 (longlong_t)bytes_deleted, state);
2342 int64_t adjustment, delta;
2348 adjustment = MIN((int64_t)(arc_size - arc_c),
2349 (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
2352 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
2353 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
2354 (void) arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_DATA);
2355 adjustment -= delta;
2358 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2359 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
2360 (void) arc_evict(arc_mru, 0, delta, FALSE,
2368 adjustment = arc_size - arc_c;
2370 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
2371 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
2372 (void) arc_evict(arc_mfu, 0, delta, FALSE, ARC_BUFC_DATA);
2373 adjustment -= delta;
2376 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2377 int64_t delta = MIN(adjustment,
2378 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
2379 (void) arc_evict(arc_mfu, 0, delta, FALSE,
2384 * Adjust ghost lists
2387 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
2389 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
2390 delta = MIN(arc_mru_ghost->arcs_size, adjustment);
2391 arc_evict_ghost(arc_mru_ghost, 0, delta);
2395 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
2397 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
2398 delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
2399 arc_evict_ghost(arc_mfu_ghost, 0, delta);
2404 arc_do_user_evicts(void)
2406 static arc_buf_t *tmp_arc_eviction_list;
2409 * Move the list aside to avoid a LOR (lock order reversal)
2412 mutex_enter(&arc_eviction_mtx);
2413 tmp_arc_eviction_list = arc_eviction_list;
2414 arc_eviction_list = NULL;
2415 mutex_exit(&arc_eviction_mtx);
2417 while (tmp_arc_eviction_list != NULL) {
2418 arc_buf_t *buf = tmp_arc_eviction_list;
2419 tmp_arc_eviction_list = buf->b_next;
2420 mutex_enter(&buf->b_evict_lock);
2422 mutex_exit(&buf->b_evict_lock);
2424 if (buf->b_efunc != NULL)
2425 VERIFY0(buf->b_efunc(buf->b_private));
2427 buf->b_efunc = NULL;
2428 buf->b_private = NULL;
2429 kmem_cache_free(buf_cache, buf);
2432 if (arc_eviction_list != NULL)
2437 * Flush all *evictable* data from the cache for the given spa.
2438 * NOTE: this will not touch "active" (i.e. referenced) data.
2441 arc_flush(spa_t *spa)
2446 guid = spa_load_guid(spa);
2448 while (arc_mru->arcs_lsize[ARC_BUFC_DATA]) {
2449 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
2453 while (arc_mru->arcs_lsize[ARC_BUFC_METADATA]) {
2454 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
2458 while (arc_mfu->arcs_lsize[ARC_BUFC_DATA]) {
2459 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
2463 while (arc_mfu->arcs_lsize[ARC_BUFC_METADATA]) {
2464 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
2469 arc_evict_ghost(arc_mru_ghost, guid, -1);
2470 arc_evict_ghost(arc_mfu_ghost, guid, -1);
2472 mutex_enter(&arc_reclaim_thr_lock);
2473 arc_do_user_evicts();
2474 mutex_exit(&arc_reclaim_thr_lock);
2475 ASSERT(spa || arc_eviction_list == NULL);
2482 if (arc_c > arc_c_min) {
2485 DTRACE_PROBE4(arc__shrink, uint64_t, arc_c, uint64_t,
2486 arc_c_min, uint64_t, arc_p, uint64_t, to_free);
2488 to_free = arc_c >> arc_shrink_shift;
2490 to_free = arc_c >> arc_shrink_shift;
2492 if (arc_c > arc_c_min + to_free)
2493 atomic_add_64(&arc_c, -to_free);
2497 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
2498 if (arc_c > arc_size)
2499 arc_c = MAX(arc_size, arc_c_min);
2501 arc_p = (arc_c >> 1);
2503 DTRACE_PROBE2(arc__shrunk, uint64_t, arc_c, uint64_t,
2506 ASSERT(arc_c >= arc_c_min);
2507 ASSERT((int64_t)arc_p >= 0);
2510 if (arc_size > arc_c) {
2511 DTRACE_PROBE2(arc__shrink_adjust, uint64_t, arc_size,
2517 static int needfree = 0;
2520 arc_reclaim_needed(void)
2526 DTRACE_PROBE(arc__reclaim_needfree);
2531 * Cooperate with pagedaemon when it's time for it to scan
2532 * and reclaim some pages.
2534 if (freemem < zfs_arc_free_target) {
2535 DTRACE_PROBE2(arc__reclaim_freemem, uint64_t,
2536 freemem, uint64_t, zfs_arc_free_target);
2542 * take 'desfree' extra pages, so we reclaim sooner, rather than later
2547 * check that we're out of range of the pageout scanner. It starts to
2548 * schedule paging if freemem is less than lotsfree and needfree.
2549 * lotsfree is the high-water mark for pageout, and needfree is the
2550 * number of needed free pages. We add extra pages here to make sure
2551 * the scanner doesn't start up while we're freeing memory.
2553 if (freemem < lotsfree + needfree + extra)
2557 * check to make sure that swapfs has enough space so that anon
2558 * reservations can still succeed. anon_resvmem() checks that the
2559 * availrmem is greater than swapfs_minfree, and the number of reserved
2560 * swap pages. We also add a bit of extra here just to prevent
2561 * circumstances from getting really dire.
2563 if (availrmem < swapfs_minfree + swapfs_reserve + extra)
2567 * Check that we have enough availrmem that memory locking (e.g., via
2568 * mlock(3C) or memcntl(2)) can still succeed. (pages_pp_maximum
2569 * stores the number of pages that cannot be locked; when availrmem
2570 * drops below pages_pp_maximum, page locking mechanisms such as
2571 * page_pp_lock() will fail.)
2573 if (availrmem <= pages_pp_maximum)
2577 #if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC)
2579 * If we're on an i386 platform, it's possible that we'll exhaust the
2580 * kernel heap space before we ever run out of available physical
2581 * memory. Most checks of the size of the heap_area compare against
2582 * tune.t_minarmem, which is the minimum available real memory that we
2583 * can have in the system. However, this is generally fixed at 25 pages
2584 * which is so low that it's useless. In this comparison, we seek to
2585 * calculate the total heap-size, and reclaim if more than 3/4ths of the
2586 * heap is allocated. (Or, in the calculation, if less than 1/4th is free.)
2589 if (vmem_size(heap_arena, VMEM_FREE) <
2590 (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2)) {
2591 DTRACE_PROBE2(arc__reclaim_used, uint64_t,
2592 vmem_size(heap_arena, VMEM_FREE), uint64_t,
2593 (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2);
2599 * If zio data pages are being allocated out of a separate heap segment,
2600 * then enforce that the size of available vmem for this arena remains
2601 * above about 1/16th free.
2603 * Note: The 1/16th arena free requirement was put in place
2604 * to aggressively evict memory from the arc in order to avoid
2605 * memory fragmentation issues.
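 *
 * Worked example (illustrative, not from the original source): with
 * 4 GB currently allocated from zio_arena, the check below requests
 * reclaim once less than 256 MB (1/16th of the allocated size)
 * remains free in the arena.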
2607 if (zio_arena != NULL &&
2608 vmem_size(zio_arena, VMEM_FREE) <
2609 (vmem_size(zio_arena, VMEM_ALLOC) >> 4))
2613 if (spa_get_random(100) == 0)
2615 #endif /* _KERNEL */
2616 DTRACE_PROBE(arc__reclaim_no);
2621 extern kmem_cache_t *zio_buf_cache[];
2622 extern kmem_cache_t *zio_data_buf_cache[];
2623 extern kmem_cache_t *range_seg_cache;
2625 static void __noinline
2626 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
2629 kmem_cache_t *prev_cache = NULL;
2630 kmem_cache_t *prev_data_cache = NULL;
2632 DTRACE_PROBE(arc__kmem_reap_start);
2634 if (arc_meta_used >= arc_meta_limit) {
2636 * We are exceeding our meta-data cache limit.
2637 * Purge some DNLC entries to release holds on meta-data.
2639 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
2643 * Reclaim unused memory from all kmem caches.
2650 * An aggressive reclamation will shrink the cache size as well as
2651 * reap free buffers from the arc kmem caches.
2653 if (strat == ARC_RECLAIM_AGGR)
2656 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
2657 if (zio_buf_cache[i] != prev_cache) {
2658 prev_cache = zio_buf_cache[i];
2659 kmem_cache_reap_now(zio_buf_cache[i]);
2661 if (zio_data_buf_cache[i] != prev_data_cache) {
2662 prev_data_cache = zio_data_buf_cache[i];
2663 kmem_cache_reap_now(zio_data_buf_cache[i]);
2666 kmem_cache_reap_now(buf_cache);
2667 kmem_cache_reap_now(hdr_cache);
2668 kmem_cache_reap_now(range_seg_cache);
2672 * Ask the vmem arena to reclaim unused memory from its
2675 if (zio_arena != NULL && strat == ARC_RECLAIM_AGGR)
2676 vmem_qcache_reap(zio_arena);
2678 DTRACE_PROBE(arc__kmem_reap_end);
2682 arc_reclaim_thread(void *dummy __unused)
2684 clock_t growtime = 0;
2685 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
2688 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
2690 mutex_enter(&arc_reclaim_thr_lock);
2691 while (arc_thread_exit == 0) {
2692 if (arc_reclaim_needed()) {
2695 if (last_reclaim == ARC_RECLAIM_CONS) {
2696 DTRACE_PROBE(arc__reclaim_aggr_no_grow);
2697 last_reclaim = ARC_RECLAIM_AGGR;
2699 last_reclaim = ARC_RECLAIM_CONS;
2703 last_reclaim = ARC_RECLAIM_AGGR;
2704 DTRACE_PROBE(arc__reclaim_aggr);
2708 /* reset the growth delay for every reclaim */
2709 growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
2711 if (needfree && last_reclaim == ARC_RECLAIM_CONS) {
2713 * If needfree is TRUE, our vm_lowmem hook
2714 * was called; in that case we must free some
2715 * memory, so switch to aggressive mode.
2718 last_reclaim = ARC_RECLAIM_AGGR;
2720 arc_kmem_reap_now(last_reclaim);
2723 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) {
2724 arc_no_grow = FALSE;
2729 if (arc_eviction_list != NULL)
2730 arc_do_user_evicts();
2739 /* block until needed, or one second, whichever is shorter */
2740 CALLB_CPR_SAFE_BEGIN(&cpr);
2741 (void) cv_timedwait(&arc_reclaim_thr_cv,
2742 &arc_reclaim_thr_lock, hz);
2743 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
2746 arc_thread_exit = 0;
2747 cv_broadcast(&arc_reclaim_thr_cv);
2748 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */
2753 * Adapt arc info given the number of bytes we are trying to add and
2754 * the state that we are coming from. This function is only called
2755 * when we are adding new content to the cache.
2758 arc_adapt(int bytes, arc_state_t *state)
2761 uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
2763 if (state == arc_l2c_only)
2768 * Adapt the target size of the MRU list:
2769 * - if we just hit in the MRU ghost list, then increase
2770 * the target size of the MRU list.
2771 * - if we just hit in the MFU ghost list, then increase
2772 * the target size of the MFU list by decreasing the
2773 * target size of the MRU list.
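 *
 * Worked example (illustrative, not from the original source): on an
 * MRU ghost hit where arc_mfu_ghost is four times the size of
 * arc_mru_ghost, mult below is MIN(4, 10) = 4, so arc_p grows by
 * 4 * bytes, capped at arc_c - arc_p_min. An MFU ghost hit shrinks
 * arc_p symmetrically, never below arc_p_min.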
2775 if (state == arc_mru_ghost) {
2776 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
2777 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
2778 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
2780 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
2781 } else if (state == arc_mfu_ghost) {
2784 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
2785 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
2786 mult = MIN(mult, 10);
2788 delta = MIN(bytes * mult, arc_p);
2789 arc_p = MAX(arc_p_min, arc_p - delta);
2791 ASSERT((int64_t)arc_p >= 0);
2793 if (arc_reclaim_needed()) {
2794 cv_signal(&arc_reclaim_thr_cv);
2801 if (arc_c >= arc_c_max)
2805 * If we're within (2 * maxblocksize) bytes of the target
2806 * cache size, increment the target cache size
2808 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
2809 DTRACE_PROBE1(arc__inc_adapt, int, bytes);
2810 atomic_add_64(&arc_c, (int64_t)bytes);
2811 if (arc_c > arc_c_max)
2813 else if (state == arc_anon)
2814 atomic_add_64(&arc_p, (int64_t)bytes);
2818 ASSERT((int64_t)arc_p >= 0);
2822 * Check if the cache has reached its limits and eviction is required
2826 arc_evict_needed(arc_buf_contents_t type)
2828 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
2831 if (arc_reclaim_needed())
2834 return (arc_size > arc_c);
2838 * The buffer, supplied as the first argument, needs a data block.
2839 * So, if we are at cache max, determine which cache should be victimized.
2840 * We have the following cases:
2842 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
2843 * In this situation, if we're out of space but the resident size of the MFU is
2844 * under the limit, victimize the MFU cache to satisfy this insertion request.
2846 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
2847 * Here, we've used up all of the available space for the MRU, so we need to
2848 * evict from our own cache instead. Evict from the set of resident MRU
2851 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
2852 * c minus p represents the MFU space in the cache, since p is the size of the
2853 * cache that is dedicated to the MRU. In this situation there's still space on
2854 * the MFU side, so the MRU side needs to be victimized.
2856 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
2857 * MFU's resident set is consuming more space than it has been allotted. In
2858 * this situation, we must victimize our own cache, the MFU, for this insertion.
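 *
 * Restated for illustration (this mirrors the code below; it is not
 * additional logic): the victim state is chosen as
 *
 *	if (inserting for MRU or anon)
 *		victim = (evictable MFU data of this type >= size &&
 *		    arc_p > anon + mru) ? arc_mfu : arc_mru;
 *	else	(inserting for MFU)
 *		victim = (evictable MRU data of this type >= size &&
 *		    (arc_c - arc_p) > mfu size) ? arc_mru : arc_mfu;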
2861 arc_get_data_buf(arc_buf_t *buf)
2863 arc_state_t *state = buf->b_hdr->b_state;
2864 uint64_t size = buf->b_hdr->b_size;
2865 arc_buf_contents_t type = buf->b_hdr->b_type;
2867 arc_adapt(size, state);
2870 * We have not yet reached cache maximum size,
2871 * just allocate a new buffer.
2873 if (!arc_evict_needed(type)) {
2874 if (type == ARC_BUFC_METADATA) {
2875 buf->b_data = zio_buf_alloc(size);
2876 arc_space_consume(size, ARC_SPACE_DATA);
2878 ASSERT(type == ARC_BUFC_DATA);
2879 buf->b_data = zio_data_buf_alloc(size);
2880 ARCSTAT_INCR(arcstat_data_size, size);
2881 atomic_add_64(&arc_size, size);
2887 * If we are prefetching from the mfu ghost list, this buffer
2888 * will end up on the mru list; so steal space from there.
2890 if (state == arc_mfu_ghost)
2891 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
2892 else if (state == arc_mru_ghost)
2895 if (state == arc_mru || state == arc_anon) {
2896 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
2897 state = (arc_mfu->arcs_lsize[type] >= size &&
2898 arc_p > mru_used) ? arc_mfu : arc_mru;
2901 uint64_t mfu_space = arc_c - arc_p;
2902 state = (arc_mru->arcs_lsize[type] >= size &&
2903 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
2905 if ((buf->b_data = arc_evict(state, 0, size, TRUE, type)) == NULL) {
2906 if (type == ARC_BUFC_METADATA) {
2907 buf->b_data = zio_buf_alloc(size);
2908 arc_space_consume(size, ARC_SPACE_DATA);
2910 ASSERT(type == ARC_BUFC_DATA);
2911 buf->b_data = zio_data_buf_alloc(size);
2912 ARCSTAT_INCR(arcstat_data_size, size);
2913 atomic_add_64(&arc_size, size);
2915 ARCSTAT_BUMP(arcstat_recycle_miss);
2917 ASSERT(buf->b_data != NULL);
2920 * Update the state size. Note that ghost states have a
2921 * "ghost size" and so don't need to be updated.
2923 if (!GHOST_STATE(buf->b_hdr->b_state)) {
2924 arc_buf_hdr_t *hdr = buf->b_hdr;
2926 atomic_add_64(&hdr->b_state->arcs_size, size);
2927 if (list_link_active(&hdr->b_arc_node)) {
2928 ASSERT(refcount_is_zero(&hdr->b_refcnt));
2929 atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2932 * If we are growing the cache, and we are adding anonymous
2933 * data, and we have outgrown arc_p, update arc_p
2935 if (arc_size < arc_c && hdr->b_state == arc_anon &&
2936 arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
2937 arc_p = MIN(arc_c, arc_p + size);
2939 ARCSTAT_BUMP(arcstat_allocated);
2943 * This routine is called whenever a buffer is accessed.
2944 * NOTE: the hash lock is dropped in this function.
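 *
 * Summary of the state transitions handled below (illustrative, not
 * from the original source):
 *
 *	arc_anon      -> arc_mru	(first insertion)
 *	arc_mru       -> arc_mfu	(re-accessed after ARC_MINTIME)
 *	arc_mru_ghost -> arc_mfu	(or arc_mru if it was a prefetch)
 *	arc_mfu       -> arc_mfu	(stays; access time refreshed)
 *	arc_mfu_ghost -> arc_mfu	(or arc_mru if it was a prefetch)
 *	arc_l2c_only  -> arc_mfu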
2947 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2951 ASSERT(MUTEX_HELD(hash_lock));
2953 if (buf->b_state == arc_anon) {
2955 * This buffer is not in the cache, and does not
2956 * appear in our "ghost" list. Add the new buffer to the MRU state.
2960 ASSERT(buf->b_arc_access == 0);
2961 buf->b_arc_access = ddi_get_lbolt();
2962 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2963 arc_change_state(arc_mru, buf, hash_lock);
2965 } else if (buf->b_state == arc_mru) {
2966 now = ddi_get_lbolt();
2969 * If this buffer is here because of a prefetch, then either:
2970 * - clear the flag if this is a "referencing" read
2971 * (any subsequent access will bump this into the MFU state).
2973 * - move the buffer to the head of the list if this is
2974 * another prefetch (to make it less likely to be evicted).
2976 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2977 if (refcount_count(&buf->b_refcnt) == 0) {
2978 ASSERT(list_link_active(&buf->b_arc_node));
2980 buf->b_flags &= ~ARC_PREFETCH;
2981 ARCSTAT_BUMP(arcstat_mru_hits);
2983 buf->b_arc_access = now;
2988 * This buffer has been "accessed" only once so far,
2989 * but it is still in the cache. Move it to the MFU state.
2992 if (now > buf->b_arc_access + ARC_MINTIME) {
2994 * More than 125ms have passed since we
2995 * instantiated this buffer. Move it to the
2996 * most frequently used state.
2998 buf->b_arc_access = now;
2999 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
3000 arc_change_state(arc_mfu, buf, hash_lock);
3002 ARCSTAT_BUMP(arcstat_mru_hits);
3003 } else if (buf->b_state == arc_mru_ghost) {
3004 arc_state_t *new_state;
3006 * This buffer has been "accessed" recently, but
3007 * was evicted from the cache. Move it to the MFU state.
3011 if (buf->b_flags & ARC_PREFETCH) {
3012 new_state = arc_mru;
3013 if (refcount_count(&buf->b_refcnt) > 0)
3014 buf->b_flags &= ~ARC_PREFETCH;
3015 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
3017 new_state = arc_mfu;
3018 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
3021 buf->b_arc_access = ddi_get_lbolt();
3022 arc_change_state(new_state, buf, hash_lock);
3024 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
3025 } else if (buf->b_state == arc_mfu) {
3027 * This buffer has been accessed more than once and is
3028 * still in the cache. Keep it in the MFU state.
3030 * NOTE: an add_reference() that occurred when we did
3031 * the arc_read() will have kicked this off the list.
3032 * If it was a prefetch, we will explicitly move it to
3033 * the head of the list now.
3035 if ((buf->b_flags & ARC_PREFETCH) != 0) {
3036 ASSERT(refcount_count(&buf->b_refcnt) == 0);
3037 ASSERT(list_link_active(&buf->b_arc_node));
3039 ARCSTAT_BUMP(arcstat_mfu_hits);
3040 buf->b_arc_access = ddi_get_lbolt();
3041 } else if (buf->b_state == arc_mfu_ghost) {
3042 arc_state_t *new_state = arc_mfu;
3044 * This buffer has been accessed more than once but has
3045 * been evicted from the cache. Move it back to the MFU state.
3049 if (buf->b_flags & ARC_PREFETCH) {
3051 * This is a prefetch access...
3052 * move this block back to the MRU state.
3054 ASSERT0(refcount_count(&buf->b_refcnt));
3055 new_state = arc_mru;
3058 buf->b_arc_access = ddi_get_lbolt();
3059 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
3060 arc_change_state(new_state, buf, hash_lock);
3062 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
3063 } else if (buf->b_state == arc_l2c_only) {
3065 * This buffer is on the 2nd Level ARC.
3068 buf->b_arc_access = ddi_get_lbolt();
3069 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
3070 arc_change_state(arc_mfu, buf, hash_lock);
3072 ASSERT(!"invalid arc state");
3076 /* a generic arc_done_func_t which you can use */
3079 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
3081 if (zio == NULL || zio->io_error == 0)
3082 bcopy(buf->b_data, arg, buf->b_hdr->b_size);
3083 VERIFY(arc_buf_remove_ref(buf, arg));
3086 /* a generic arc_done_func_t */
3088 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
3090 arc_buf_t **bufp = arg;
3091 if (zio && zio->io_error) {
3092 VERIFY(arc_buf_remove_ref(buf, arg));
3096 ASSERT(buf->b_data);
3101 arc_read_done(zio_t *zio)
3105 arc_buf_t *abuf; /* buffer we're assigning to callback */
3106 kmutex_t *hash_lock = NULL;
3107 arc_callback_t *callback_list, *acb;
3108 int freeable = FALSE;
3110 buf = zio->io_private;
3114 * The hdr was inserted into hash-table and removed from lists
3115 * prior to starting I/O. We should find this header, since
3116 * it's in the hash table, and it should be legit since it's
3117 * not possible to evict it during the I/O. The only possible
3118 * reason for it not to be found is if we were freed during the read.
3121 if (HDR_IN_HASH_TABLE(hdr)) {
3122 ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
3123 ASSERT3U(hdr->b_dva.dva_word[0], ==,
3124 BP_IDENTITY(zio->io_bp)->dva_word[0]);
3125 ASSERT3U(hdr->b_dva.dva_word[1], ==,
3126 BP_IDENTITY(zio->io_bp)->dva_word[1]);
3128 arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp,
3131 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) &&
3132 hash_lock == NULL) ||
3134 DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
3135 (found == hdr && HDR_L2_READING(hdr)));
3138 hdr->b_flags &= ~ARC_L2_EVICTED;
3139 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
3140 hdr->b_flags &= ~ARC_L2CACHE;
3142 /* byteswap if necessary */
3143 callback_list = hdr->b_acb;
3144 ASSERT(callback_list != NULL);
3145 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
3146 dmu_object_byteswap_t bswap =
3147 DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
3148 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
3149 byteswap_uint64_array :
3150 dmu_ot_byteswap[bswap].ob_func;
3151 func(buf->b_data, hdr->b_size);
3154 arc_cksum_compute(buf, B_FALSE);
3157 #endif /* illumos */
3159 if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) {
3161 * Only call arc_access on anonymous buffers. This is because
3162 * if we've issued an I/O for an evicted buffer, we've already
3163 * called arc_access (to prevent any simultaneous readers from
3164 * getting confused).
3166 arc_access(hdr, hash_lock);
3169 /* create copies of the data buffer for the callers */
3171 for (acb = callback_list; acb; acb = acb->acb_next) {
3172 if (acb->acb_done) {
3174 ARCSTAT_BUMP(arcstat_duplicate_reads);
3175 abuf = arc_buf_clone(buf);
3177 acb->acb_buf = abuf;
3182 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3183 ASSERT(!HDR_BUF_AVAILABLE(hdr));
3185 ASSERT(buf->b_efunc == NULL);
3186 ASSERT(hdr->b_datacnt == 1);
3187 hdr->b_flags |= ARC_BUF_AVAILABLE;
3190 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
3192 if (zio->io_error != 0) {
3193 hdr->b_flags |= ARC_IO_ERROR;
3194 if (hdr->b_state != arc_anon)
3195 arc_change_state(arc_anon, hdr, hash_lock);
3196 if (HDR_IN_HASH_TABLE(hdr))
3197 buf_hash_remove(hdr);
3198 freeable = refcount_is_zero(&hdr->b_refcnt);
3202 * Broadcast before we drop the hash_lock to avoid the possibility
3203 * that the hdr (and hence the cv) might be freed before we get to
3204 * the cv_broadcast().
3206 cv_broadcast(&hdr->b_cv);
3209 mutex_exit(hash_lock);
3212 * This block was freed while we waited for the read to
3213 * complete. It has been removed from the hash table and
3214 * moved to the anonymous state (so that it won't show up
3217 ASSERT3P(hdr->b_state, ==, arc_anon);
3218 freeable = refcount_is_zero(&hdr->b_refcnt);
3221 /* execute each callback and free its structure */
3222 while ((acb = callback_list) != NULL) {
3224 acb->acb_done(zio, acb->acb_buf, acb->acb_private);
3226 if (acb->acb_zio_dummy != NULL) {
3227 acb->acb_zio_dummy->io_error = zio->io_error;
3228 zio_nowait(acb->acb_zio_dummy);
3231 callback_list = acb->acb_next;
3232 kmem_free(acb, sizeof (arc_callback_t));
3236 arc_hdr_destroy(hdr);
3240 * "Read" the block at the specified DVA (in bp) via the
3241 * cache. If the block is found in the cache, invoke the provided
3242 * callback immediately and return. Note that the `zio' parameter
3243 * in the callback will be NULL in this case, since no IO was
3244 * required. If the block is not in the cache pass the read request
3245 * on to the spa with a substitute callback function, so that the
3246 * requested block will be added to the cache.
3248 * If a read request arrives for a block that has a read in-progress,
3249 * either wait for the in-progress read to complete (and return the
3250 * results); or, if this is a read with a "done" func, add a record
3251 * to the read to invoke the "done" func when the read completes,
3252 * and return; or just return.
3254 * arc_read_done() will invoke all the requested "done" functions
3255 * for readers of this block.
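 *
 * Minimal caller sketch (illustrative; the priority and zio flags
 * shown are typical values and an assumption, not taken from this
 * file):
 *
 *	arc_buf_t *abuf = NULL;
 *	uint32_t aflags = ARC_WAIT;
 *	int err = arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *
 * On success abuf->b_data holds the block; drop the hold with
 * arc_buf_remove_ref(abuf, &abuf) when done.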
3258 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
3259 void *private, zio_priority_t priority, int zio_flags, uint32_t *arc_flags,
3260 const zbookmark_phys_t *zb)
3262 arc_buf_hdr_t *hdr = NULL;
3263 arc_buf_t *buf = NULL;
3264 kmutex_t *hash_lock = NULL;
3266 uint64_t guid = spa_load_guid(spa);
3268 ASSERT(!BP_IS_EMBEDDED(bp) ||
3269 BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
3272 if (!BP_IS_EMBEDDED(bp)) {
3274 * Embedded BP's have no DVA and require no I/O to "read".
3275 * Create an anonymous arc buf to back it.
3277 hdr = buf_hash_find(guid, bp, &hash_lock);
3280 if (hdr != NULL && hdr->b_datacnt > 0) {
3282 *arc_flags |= ARC_CACHED;
3284 if (HDR_IO_IN_PROGRESS(hdr)) {
3286 if (*arc_flags & ARC_WAIT) {
3287 cv_wait(&hdr->b_cv, hash_lock);
3288 mutex_exit(hash_lock);
3291 ASSERT(*arc_flags & ARC_NOWAIT);
3294 arc_callback_t *acb = NULL;
3296 acb = kmem_zalloc(sizeof (arc_callback_t),
3298 acb->acb_done = done;
3299 acb->acb_private = private;
3301 acb->acb_zio_dummy = zio_null(pio,
3302 spa, NULL, NULL, NULL, zio_flags);
3304 ASSERT(acb->acb_done != NULL);
3305 acb->acb_next = hdr->b_acb;
3307 add_reference(hdr, hash_lock, private);
3308 mutex_exit(hash_lock);
3311 mutex_exit(hash_lock);
3315 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
3318 add_reference(hdr, hash_lock, private);
3320 * If this block is already in use, create a new
3321 * copy of the data so that we will be guaranteed
3322 * that arc_release() will always succeed.
3326 ASSERT(buf->b_data);
3327 if (HDR_BUF_AVAILABLE(hdr)) {
3328 ASSERT(buf->b_efunc == NULL);
3329 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3331 buf = arc_buf_clone(buf);
3334 } else if (*arc_flags & ARC_PREFETCH &&
3335 refcount_count(&hdr->b_refcnt) == 0) {
3336 hdr->b_flags |= ARC_PREFETCH;
3338 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
3339 arc_access(hdr, hash_lock);
3340 if (*arc_flags & ARC_L2CACHE)
3341 hdr->b_flags |= ARC_L2CACHE;
3342 if (*arc_flags & ARC_L2COMPRESS)
3343 hdr->b_flags |= ARC_L2COMPRESS;
3344 mutex_exit(hash_lock);
3345 ARCSTAT_BUMP(arcstat_hits);
3346 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
3347 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
3348 data, metadata, hits);
3351 done(NULL, buf, private);
3353 uint64_t size = BP_GET_LSIZE(bp);
3354 arc_callback_t *acb;
3357 boolean_t devw = B_FALSE;
3358 enum zio_compress b_compress = ZIO_COMPRESS_OFF;
3359 uint64_t b_asize = 0;
3362 /* this block is not in the cache */
3363 arc_buf_hdr_t *exists = NULL;
3364 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
3365 buf = arc_buf_alloc(spa, size, private, type);
3367 if (!BP_IS_EMBEDDED(bp)) {
3368 hdr->b_dva = *BP_IDENTITY(bp);
3369 hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
3370 hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
3371 exists = buf_hash_insert(hdr, &hash_lock);
3373 if (exists != NULL) {
3374 /* somebody beat us to the hash insert */
3375 mutex_exit(hash_lock);
3376 buf_discard_identity(hdr);
3377 (void) arc_buf_remove_ref(buf, private);
3378 goto top; /* restart the IO request */
3380 /* if this is a prefetch, we don't have a reference */
3381 if (*arc_flags & ARC_PREFETCH) {
3382 (void) remove_reference(hdr, hash_lock,
3384 hdr->b_flags |= ARC_PREFETCH;
3386 if (*arc_flags & ARC_L2CACHE)
3387 hdr->b_flags |= ARC_L2CACHE;
3388 if (*arc_flags & ARC_L2COMPRESS)
3389 hdr->b_flags |= ARC_L2COMPRESS;
3390 if (BP_GET_LEVEL(bp) > 0)
3391 hdr->b_flags |= ARC_INDIRECT;
3393 /* this block is in the ghost cache */
3394 ASSERT(GHOST_STATE(hdr->b_state));
3395 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3396 ASSERT0(refcount_count(&hdr->b_refcnt));
3397 ASSERT(hdr->b_buf == NULL);
3399 /* if this is a prefetch, we don't have a reference */
3400 if (*arc_flags & ARC_PREFETCH)
3401 hdr->b_flags |= ARC_PREFETCH;
3403 add_reference(hdr, hash_lock, private);
3404 if (*arc_flags & ARC_L2CACHE)
3405 hdr->b_flags |= ARC_L2CACHE;
3406 if (*arc_flags & ARC_L2COMPRESS)
3407 hdr->b_flags |= ARC_L2COMPRESS;
3408 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
3411 buf->b_efunc = NULL;
3412 buf->b_private = NULL;
3415 ASSERT(hdr->b_datacnt == 0);
3417 arc_get_data_buf(buf);
3418 arc_access(hdr, hash_lock);
3421 ASSERT(!GHOST_STATE(hdr->b_state));
3423 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
3424 acb->acb_done = done;
3425 acb->acb_private = private;
3427 ASSERT(hdr->b_acb == NULL);
3429 hdr->b_flags |= ARC_IO_IN_PROGRESS;
3431 if (hdr->b_l2hdr != NULL &&
3432 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
3433 devw = hdr->b_l2hdr->b_dev->l2ad_writing;
3434 addr = hdr->b_l2hdr->b_daddr;
3435 b_compress = hdr->b_l2hdr->b_compress;
3436 b_asize = hdr->b_l2hdr->b_asize;
3438 * Lock out device removal.
3440 if (vdev_is_dead(vd) ||
3441 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
3445 if (hash_lock != NULL)
3446 mutex_exit(hash_lock);
3449 * At this point, we have a level 1 cache miss. Try again in
3450 * L2ARC if possible.
3452 ASSERT3U(hdr->b_size, ==, size);
3453 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
3454 uint64_t, size, zbookmark_phys_t *, zb);
3455 ARCSTAT_BUMP(arcstat_misses);
3456 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
3457 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
3458 data, metadata, misses);
3460 curthread->td_ru.ru_inblock++;
3463 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
3465 * Read from the L2ARC if the following are true:
3466 * 1. The L2ARC vdev was previously cached.
3467 * 2. This buffer still has L2ARC metadata.
3468 * 3. This buffer isn't currently writing to the L2ARC.
3469 * 4. The L2ARC entry wasn't evicted, which may
3470 * also have invalidated the vdev.
3471 * 5. Either this isn't a prefetch, or l2arc_noprefetch isn't set.
3473 if (hdr->b_l2hdr != NULL &&
3474 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
3475 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
3476 l2arc_read_callback_t *cb;
3478 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
3479 ARCSTAT_BUMP(arcstat_l2_hits);
3481 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
3483 cb->l2rcb_buf = buf;
3484 cb->l2rcb_spa = spa;
3487 cb->l2rcb_flags = zio_flags;
3488 cb->l2rcb_compress = b_compress;
3490 ASSERT(addr >= VDEV_LABEL_START_SIZE &&
3491 addr + size < vd->vdev_psize -
3492 VDEV_LABEL_END_SIZE);
3495 * l2arc read. The SCL_L2ARC lock will be
3496 * released by l2arc_read_done().
3497 * Issue a null zio if the underlying buffer
3498 * was squashed to zero size by compression.
3500 if (b_compress == ZIO_COMPRESS_EMPTY) {
3501 rzio = zio_null(pio, spa, vd,
3502 l2arc_read_done, cb,
3503 zio_flags | ZIO_FLAG_DONT_CACHE |
3505 ZIO_FLAG_DONT_PROPAGATE |
3506 ZIO_FLAG_DONT_RETRY);
3508 rzio = zio_read_phys(pio, vd, addr,
3509 b_asize, buf->b_data,
3511 l2arc_read_done, cb, priority,
3512 zio_flags | ZIO_FLAG_DONT_CACHE |
3514 ZIO_FLAG_DONT_PROPAGATE |
3515 ZIO_FLAG_DONT_RETRY, B_FALSE);
3517 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
3519 ARCSTAT_INCR(arcstat_l2_read_bytes, b_asize);
3521 if (*arc_flags & ARC_NOWAIT) {
3526 ASSERT(*arc_flags & ARC_WAIT);
3527 if (zio_wait(rzio) == 0)
3530 /* l2arc read error; goto zio_read() */
3532 DTRACE_PROBE1(l2arc__miss,
3533 arc_buf_hdr_t *, hdr);
3534 ARCSTAT_BUMP(arcstat_l2_misses);
3535 if (HDR_L2_WRITING(hdr))
3536 ARCSTAT_BUMP(arcstat_l2_rw_clash);
3537 spa_config_exit(spa, SCL_L2ARC, vd);
3541 spa_config_exit(spa, SCL_L2ARC, vd);
3542 if (l2arc_ndev != 0) {
3543 DTRACE_PROBE1(l2arc__miss,
3544 arc_buf_hdr_t *, hdr);
3545 ARCSTAT_BUMP(arcstat_l2_misses);
3549 rzio = zio_read(pio, spa, bp, buf->b_data, size,
3550 arc_read_done, buf, priority, zio_flags, zb);
3552 if (*arc_flags & ARC_WAIT)
3553 return (zio_wait(rzio));
3555 ASSERT(*arc_flags & ARC_NOWAIT);
3562 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
3564 ASSERT(buf->b_hdr != NULL);
3565 ASSERT(buf->b_hdr->b_state != arc_anon);
3566 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
3567 ASSERT(buf->b_efunc == NULL);
3568 ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
3570 buf->b_efunc = func;
3571 buf->b_private = private;
3575 * Notify the arc that a block was freed, and thus will never be used again.
3578 arc_freed(spa_t *spa, const blkptr_t *bp)
3581 kmutex_t *hash_lock;
3582 uint64_t guid = spa_load_guid(spa);
3584 ASSERT(!BP_IS_EMBEDDED(bp));
3586 hdr = buf_hash_find(guid, bp, &hash_lock);
3589 if (HDR_BUF_AVAILABLE(hdr)) {
3590 arc_buf_t *buf = hdr->b_buf;
3591 add_reference(hdr, hash_lock, FTAG);
3592 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3593 mutex_exit(hash_lock);
3595 arc_release(buf, FTAG);
3596 (void) arc_buf_remove_ref(buf, FTAG);
3598 mutex_exit(hash_lock);
3604 * Clear the user eviction callback set by arc_set_callback(), first calling
3605 * it if it exists. Because the presence of a callback keeps an arc_buf cached,
3606 * clearing the callback may result in the arc_buf being destroyed. However,
3607 * it will not result in the *last* arc_buf being destroyed, hence the data
3608 * will remain cached in the ARC. We make a copy of the arc buffer here so
3609 * that we can process the callback without holding any locks.
3611 * It's possible that the callback is already in the process of being cleared
3612 * by another thread. In this case we can not clear the callback.
3614 * Returns B_TRUE if the callback was successfully called and cleared.
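 *
 * Illustrative pairing with arc_set_callback() (hypothetical caller;
 * `my_evict_cb' and `arg' are placeholder names, not from this code
 * base):
 *
 *	arc_set_callback(buf, my_evict_cb, arg);
 *	...
 *	if (arc_clear_callback(buf))
 *		-> my_evict_cb(arg) was invoked and the callback cleared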
3617 arc_clear_callback(arc_buf_t *buf)
3620 kmutex_t *hash_lock;
3621 arc_evict_func_t *efunc = buf->b_efunc;
3622 void *private = buf->b_private;
3623 list_t *list, *evicted_list;
3624 kmutex_t *lock, *evicted_lock;
3626 mutex_enter(&buf->b_evict_lock);
3630 * We are in arc_do_user_evicts().
3632 ASSERT(buf->b_data == NULL);
3633 mutex_exit(&buf->b_evict_lock);
3635 } else if (buf->b_data == NULL) {
3637 * We are on the eviction list; process this buffer now
3638 * but let arc_do_user_evicts() do the reaping.
3640 buf->b_efunc = NULL;
3641 mutex_exit(&buf->b_evict_lock);
3642 VERIFY0(efunc(private));
3645 hash_lock = HDR_LOCK(hdr);
3646 mutex_enter(hash_lock);
3648 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3650 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
3651 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
3653 buf->b_efunc = NULL;
3654 buf->b_private = NULL;
3656 if (hdr->b_datacnt > 1) {
3657 mutex_exit(&buf->b_evict_lock);
3658 arc_buf_destroy(buf, FALSE, TRUE);
3660 ASSERT(buf == hdr->b_buf);
3661 hdr->b_flags |= ARC_BUF_AVAILABLE;
3662 mutex_exit(&buf->b_evict_lock);
3665 mutex_exit(hash_lock);
3666 VERIFY0(efunc(private));
3671 * Release this buffer from the cache, making it an anonymous buffer. This
3672 * must be done after a read and prior to modifying the buffer contents.
3673 * If the buffer has more than one reference, we must make
3674 * a new hdr for the buffer.
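 *
 * Typical pattern (illustrative, not from the original source): a
 * writer releases the buffer before dirtying it so that the data
 * block becomes private to the caller:
 *
 *	if (!arc_released(buf))
 *		arc_release(buf, tag);
 *	... buf->b_hdr is now anonymous; buf->b_data may be modified ...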
3677 arc_release(arc_buf_t *buf, void *tag)
3680 kmutex_t *hash_lock = NULL;
3681 l2arc_buf_hdr_t *l2hdr;
3685 * It would be nice to assert that if it's DMU metadata (level >
3686 * 0 || it's the dnode file), then it must be syncing context.
3687 * But we don't know that information at this level.
3690 mutex_enter(&buf->b_evict_lock);
3693 /* this buffer is not on any list */
3694 ASSERT(refcount_count(&hdr->b_refcnt) > 0);
3696 if (hdr->b_state == arc_anon) {
3697 /* this buffer is already released */
3698 ASSERT(buf->b_efunc == NULL);
3700 hash_lock = HDR_LOCK(hdr);
3701 mutex_enter(hash_lock);
3703 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3706 l2hdr = hdr->b_l2hdr;
3708 mutex_enter(&l2arc_buflist_mtx);
3709 arc_buf_l2_cdata_free(hdr);
3710 hdr->b_l2hdr = NULL;
3711 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
3713 buf_size = hdr->b_size;
3716 * Do we have more than one buf?
3718 if (hdr->b_datacnt > 1) {
3719 arc_buf_hdr_t *nhdr;
3721 uint64_t blksz = hdr->b_size;
3722 uint64_t spa = hdr->b_spa;
3723 arc_buf_contents_t type = hdr->b_type;
3724 uint32_t flags = hdr->b_flags;
3726 ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
3728 * Pull the data off of this hdr and attach it to
3729 * a new anonymous hdr.
3731 (void) remove_reference(hdr, hash_lock, tag);
3733 while (*bufp != buf)
3734 bufp = &(*bufp)->b_next;
3735 *bufp = buf->b_next;
3738 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
3739 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
3740 if (refcount_is_zero(&hdr->b_refcnt)) {
3741 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
3742 ASSERT3U(*size, >=, hdr->b_size);
3743 atomic_add_64(size, -hdr->b_size);
3747 * We're releasing a duplicate user data buffer, update
3748 * our statistics accordingly.
3750 if (hdr->b_type == ARC_BUFC_DATA) {
3751 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
3752 ARCSTAT_INCR(arcstat_duplicate_buffers_size,
3755 hdr->b_datacnt -= 1;
3756 arc_cksum_verify(buf);
3758 arc_buf_unwatch(buf);
3759 #endif /* illumos */
3761 mutex_exit(hash_lock);
3763 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
3764 nhdr->b_size = blksz;
3766 nhdr->b_type = type;
3768 nhdr->b_state = arc_anon;
3769 nhdr->b_arc_access = 0;
3770 nhdr->b_flags = flags & ARC_L2_WRITING;
3771 nhdr->b_l2hdr = NULL;
3772 nhdr->b_datacnt = 1;
3773 nhdr->b_freeze_cksum = NULL;
3774 (void) refcount_add(&nhdr->b_refcnt, tag);
3776 mutex_exit(&buf->b_evict_lock);
3777 atomic_add_64(&arc_anon->arcs_size, blksz);
3779 mutex_exit(&buf->b_evict_lock);
3780 ASSERT(refcount_count(&hdr->b_refcnt) == 1);
3781 ASSERT(!list_link_active(&hdr->b_arc_node));
3782 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3783 if (hdr->b_state != arc_anon)
3784 arc_change_state(arc_anon, hdr, hash_lock);
3785 hdr->b_arc_access = 0;
3787 mutex_exit(hash_lock);
3789 buf_discard_identity(hdr);
3792 buf->b_efunc = NULL;
3793 buf->b_private = NULL;
3796 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
3797 vdev_space_update(l2hdr->b_dev->l2ad_vdev,
3798 -l2hdr->b_asize, 0, 0);
3799 trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr,
3801 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
3802 ARCSTAT_INCR(arcstat_l2_size, -buf_size);
3803 mutex_exit(&l2arc_buflist_mtx);
3808 arc_released(arc_buf_t *buf)
3812 mutex_enter(&buf->b_evict_lock);
3813 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
3814 mutex_exit(&buf->b_evict_lock);
3820 arc_referenced(arc_buf_t *buf)
3824 mutex_enter(&buf->b_evict_lock);
3825 referenced = (refcount_count(&buf->b_hdr->b_refcnt));
3826 mutex_exit(&buf->b_evict_lock);
3827 return (referenced);
3832 arc_write_ready(zio_t *zio)
3834 arc_write_callback_t *callback = zio->io_private;
3835 arc_buf_t *buf = callback->awcb_buf;
3836 arc_buf_hdr_t *hdr = buf->b_hdr;
3838 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
3839 callback->awcb_ready(zio, buf, callback->awcb_private);
3842 * If the IO is already in progress, then this is a re-write
3843 * attempt, so we need to thaw and re-compute the cksum.
3844 * It is the responsibility of the callback to handle the
3845 * accounting for any re-write attempt.
3847 if (HDR_IO_IN_PROGRESS(hdr)) {
3848 mutex_enter(&hdr->b_freeze_lock);
3849 if (hdr->b_freeze_cksum != NULL) {
3850 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
3851 hdr->b_freeze_cksum = NULL;
3853 mutex_exit(&hdr->b_freeze_lock);
3855 arc_cksum_compute(buf, B_FALSE);
3856 hdr->b_flags |= ARC_IO_IN_PROGRESS;
3860 * The SPA calls this callback for each physical write that happens on behalf
3861 * of a logical write. See the comment in dbuf_write_physdone() for details.
3864 arc_write_physdone(zio_t *zio)
3866 arc_write_callback_t *cb = zio->io_private;
3867 if (cb->awcb_physdone != NULL)
3868 cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
3872 arc_write_done(zio_t *zio)
3874 arc_write_callback_t *callback = zio->io_private;
3875 arc_buf_t *buf = callback->awcb_buf;
3876 arc_buf_hdr_t *hdr = buf->b_hdr;
3878 ASSERT(hdr->b_acb == NULL);
3880 if (zio->io_error == 0) {
3881 if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
3882 buf_discard_identity(hdr);
3884 hdr->b_dva = *BP_IDENTITY(zio->io_bp);
3885 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
3886 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
3889 ASSERT(BUF_EMPTY(hdr));
3893 * If the block to be written was all-zero or compressed enough to be
3894 * embedded in the BP, no write was performed so there will be no
3895 * dva/birth/checksum. The buffer must therefore remain anonymous (and uncached).
3898 if (!BUF_EMPTY(hdr)) {
3899 arc_buf_hdr_t *exists;
3900 kmutex_t *hash_lock;
3902 ASSERT(zio->io_error == 0);
3904 arc_cksum_verify(buf);
3906 exists = buf_hash_insert(hdr, &hash_lock);
3909 * This can only happen if we overwrite for
3910 * sync-to-convergence, because we remove
3911 * buffers from the hash table when we arc_free().
3913 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
3914 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3915 panic("bad overwrite, hdr=%p exists=%p",
3916 (void *)hdr, (void *)exists);
3917 ASSERT(refcount_is_zero(&exists->b_refcnt));
3918 arc_change_state(arc_anon, exists, hash_lock);
3919 mutex_exit(hash_lock);
3920 arc_hdr_destroy(exists);
3921 exists = buf_hash_insert(hdr, &hash_lock);
3922 ASSERT3P(exists, ==, NULL);
3923 } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
3925 ASSERT(zio->io_prop.zp_nopwrite);
3926 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3927 panic("bad nopwrite, hdr=%p exists=%p",
3928 (void *)hdr, (void *)exists);
3931 ASSERT(hdr->b_datacnt == 1);
3932 ASSERT(hdr->b_state == arc_anon);
3933 ASSERT(BP_GET_DEDUP(zio->io_bp));
3934 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
3937 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3938 /* if it's not anon, we are doing a scrub */
3939 if (!exists && hdr->b_state == arc_anon)
3940 arc_access(hdr, hash_lock);
3941 mutex_exit(hash_lock);
3943 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3946 ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3947 callback->awcb_done(zio, buf, callback->awcb_private);
3949 kmem_free(callback, sizeof (arc_write_callback_t));
3953 arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
3954 blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress,
3955 const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *physdone,
3956 arc_done_func_t *done, void *private, zio_priority_t priority,
3957 int zio_flags, const zbookmark_phys_t *zb)
3959 arc_buf_hdr_t *hdr = buf->b_hdr;
3960 arc_write_callback_t *callback;
3963 ASSERT(ready != NULL);
3964 ASSERT(done != NULL);
3965 ASSERT(!HDR_IO_ERROR(hdr));
3966 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3967 ASSERT(hdr->b_acb == NULL);
3969 hdr->b_flags |= ARC_L2CACHE;
3971 hdr->b_flags |= ARC_L2COMPRESS;
3972 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3973 callback->awcb_ready = ready;
3974 callback->awcb_physdone = physdone;
3975 callback->awcb_done = done;
3976 callback->awcb_private = private;
3977 callback->awcb_buf = buf;
3979 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3980 arc_write_ready, arc_write_physdone, arc_write_done, callback,
3981 priority, zio_flags, zb);
3987 arc_memory_throttle(uint64_t reserve, uint64_t txg)
3990 uint64_t available_memory = ptob(freemem);
3991 static uint64_t page_load = 0;
3992 static uint64_t last_txg = 0;
3994 #if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC)
3996 MIN(available_memory, ptob(vmem_size(heap_arena, VMEM_FREE)));
3999 if (freemem > (uint64_t)physmem * arc_lotsfree_percent / 100)
4002 if (txg > last_txg) {
4007 * If we are in pageout, we know that memory is already tight,
4008 * the arc is already going to be evicting, so we just want to
4009 * continue to let page writes occur as quickly as possible.
4011 if (curproc == pageproc) {
4012 if (page_load > MAX(ptob(minfree), available_memory) / 4)
4013 return (SET_ERROR(ERESTART));
4014 /* Note: reserve is inflated, so we deflate */
4015 page_load += reserve / 8;
4017 } else if (page_load > 0 && arc_reclaim_needed()) {
4018 /* memory is low, delay before restarting */
4019 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
4020 return (SET_ERROR(EAGAIN));
4028 arc_tempreserve_clear(uint64_t reserve)
4030 atomic_add_64(&arc_tempreserve, -reserve);
4031 ASSERT((int64_t)arc_tempreserve >= 0);
4035 arc_tempreserve_space(uint64_t reserve, uint64_t txg)
4040 if (reserve > arc_c/4 && !arc_no_grow) {
4041 arc_c = MIN(arc_c_max, reserve * 4);
4042 DTRACE_PROBE1(arc__set_reserve, uint64_t, arc_c);
4044 if (reserve > arc_c)
4045 return (SET_ERROR(ENOMEM));
4048 * Don't count loaned bufs as in flight dirty data to prevent long
4049 * network delays from blocking transactions that are ready to be
4050 * assigned to a txg.
4052 anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);
4055 * Writes will, almost always, require additional memory allocations
4056 * in order to compress/encrypt/etc the data. We therefore need to
4057 * make sure that there is sufficient available memory for this.
4059 error = arc_memory_throttle(reserve, txg);
4064 * Throttle writes when the amount of dirty data in the cache
4065 * gets too large. We try to keep the cache less than half full
4066 * of dirty blocks so that our sync times don't grow too large.
4067 * Note: if two requests come in concurrently, we might let them
4068 * both succeed, when one of them should fail. Not a huge deal.
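 *
 * Worked example (illustrative, not from the original source): with
 * arc_c = 1 GB, a reservation is pushed back with ERESTART once
 * reserve + arc_tempreserve + anon_size exceeds 512 MB and anon_size
 * alone exceeds 256 MB, matching the two conditions tested below.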
4071 if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
4072 anon_size > arc_c / 4) {
4073 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
4074 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
4075 arc_tempreserve>>10,
4076 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
4077 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
4078 reserve>>10, arc_c>>10);
4079 return (SET_ERROR(ERESTART));
4081 atomic_add_64(&arc_tempreserve, reserve);
4085 static kmutex_t arc_lowmem_lock;
4087 static eventhandler_tag arc_event_lowmem = NULL;
4090 arc_lowmem(void *arg __unused, int howto __unused)
4093 /* Serialize access via arc_lowmem_lock. */
4094 mutex_enter(&arc_lowmem_lock);
4095 mutex_enter(&arc_reclaim_thr_lock);
4097 DTRACE_PROBE(arc__needfree);
4098 cv_signal(&arc_reclaim_thr_cv);
4101 * It is unsafe to block here in arbitrary threads, because we can come
4102 * here from ARC itself and may hold ARC locks and thus risk a deadlock
4103 * with ARC reclaim thread.
4105 if (curproc == pageproc) {
4107 msleep(&needfree, &arc_reclaim_thr_lock, 0, "zfs:lowmem", 0);
4109 mutex_exit(&arc_reclaim_thr_lock);
4110 mutex_exit(&arc_lowmem_lock);
4117 int i, prefetch_tunable_set = 0;
4119 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
4120 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
4121 mutex_init(&arc_lowmem_lock, NULL, MUTEX_DEFAULT, NULL);
4123 /* Convert seconds to clock ticks */
4124 arc_min_prefetch_lifespan = 1 * hz;
4126 /* Start out with 1/8 of all memory */
4127 arc_c = kmem_size() / 8;
4132 * On architectures where the physical memory can be larger
4133 * than the addressable space (intel in 32-bit mode), we may
4134 * need to limit the cache to 1/8 of VM size.
4136 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
4139 /* set min cache to 1/32 of all memory, or 16MB, whichever is more */
4140 arc_c_min = MAX(arc_c / 4, 64<<18);
4141 /* set max to 1/2 of all memory, or all but 1GB, whichever is more */
4142 if (arc_c * 8 >= 1<<30)
4143 arc_c_max = (arc_c * 8) - (1<<30);
4145 arc_c_max = arc_c_min;
4146 arc_c_max = MAX(arc_c * 5, arc_c_max);
4150 * Allow the tunables to override our calculations if they are
4151 * reasonable (i.e. over 16MB).
4153 if (zfs_arc_max > 64<<18 && zfs_arc_max < kmem_size())
4154 arc_c_max = zfs_arc_max;
4155 if (zfs_arc_min > 64<<18 && zfs_arc_min <= arc_c_max)
4156 arc_c_min = zfs_arc_min;
4160 arc_p = (arc_c >> 1);
4162 /* limit meta-data to 1/4 of the arc capacity */
4163 arc_meta_limit = arc_c_max / 4;
4165 /* Allow the tunable to override if it is reasonable */
4166 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
4167 arc_meta_limit = zfs_arc_meta_limit;
4169 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
4170 arc_c_min = arc_meta_limit / 2;
4172 if (zfs_arc_grow_retry > 0)
4173 arc_grow_retry = zfs_arc_grow_retry;
4175 if (zfs_arc_shrink_shift > 0)
4176 arc_shrink_shift = zfs_arc_shrink_shift;
4178 if (zfs_arc_p_min_shift > 0)
4179 arc_p_min_shift = zfs_arc_p_min_shift;
4181 /* if kmem_flags are set, let's try to use less memory */
4182 if (kmem_debugging())
4184 if (arc_c < arc_c_min)
4187 zfs_arc_min = arc_c_min;
4188 zfs_arc_max = arc_c_max;
4190 arc_anon = &ARC_anon;
4192 arc_mru_ghost = &ARC_mru_ghost;
4194 arc_mfu_ghost = &ARC_mfu_ghost;
4195 arc_l2c_only = &ARC_l2c_only;
4198 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) {
4199 mutex_init(&arc_anon->arcs_locks[i].arcs_lock,
4200 NULL, MUTEX_DEFAULT, NULL);
4201 mutex_init(&arc_mru->arcs_locks[i].arcs_lock,
4202 NULL, MUTEX_DEFAULT, NULL);
4203 mutex_init(&arc_mru_ghost->arcs_locks[i].arcs_lock,
4204 NULL, MUTEX_DEFAULT, NULL);
4205 mutex_init(&arc_mfu->arcs_locks[i].arcs_lock,
4206 NULL, MUTEX_DEFAULT, NULL);
4207 mutex_init(&arc_mfu_ghost->arcs_locks[i].arcs_lock,
4208 NULL, MUTEX_DEFAULT, NULL);
4209 mutex_init(&arc_l2c_only->arcs_locks[i].arcs_lock,
4210 NULL, MUTEX_DEFAULT, NULL);
4212 list_create(&arc_mru->arcs_lists[i],
4213 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4214 list_create(&arc_mru_ghost->arcs_lists[i],
4215 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4216 list_create(&arc_mfu->arcs_lists[i],
4217 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4218 list_create(&arc_mfu_ghost->arcs_lists[i],
4219 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4222 list_create(&arc_l2c_only->arcs_lists[i],
4223 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4228 arc_thread_exit = 0;
4229 arc_eviction_list = NULL;
4230 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
4231 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
4233 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
4234 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
4236 if (arc_ksp != NULL) {
4237 arc_ksp->ks_data = &arc_stats;
4238 kstat_install(arc_ksp);
4241 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
4242 TS_RUN, minclsyspri);
4245 arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL,
4246 EVENTHANDLER_PRI_FIRST);
4253 * Calculate maximum amount of dirty data per pool.
4255 * If it has been set by /etc/system, take that.
4256 * Otherwise, use a percentage of physical memory defined by
4257 * zfs_dirty_data_max_percent (default 10%) with a cap at
4258 * zfs_dirty_data_max_max (default 4GB).
4260 if (zfs_dirty_data_max == 0) {
4261 zfs_dirty_data_max = ptob(physmem) *
4262 zfs_dirty_data_max_percent / 100;
4263 zfs_dirty_data_max = MIN(zfs_dirty_data_max,
4264 zfs_dirty_data_max_max);
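/*
 * Worked example (memory size assumed for illustration): with 32 GB of
 * physical memory and the default zfs_dirty_data_max_percent of 10%,
 * zfs_dirty_data_max starts at roughly 3.2 GB, which is below the 4 GB
 * zfs_dirty_data_max_max cap; on a 64 GB machine the cap would apply.
 */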
4268 if (TUNABLE_INT_FETCH("vfs.zfs.prefetch_disable", &zfs_prefetch_disable))
4269 prefetch_tunable_set = 1;
4272 if (prefetch_tunable_set == 0) {
4273 printf("ZFS NOTICE: Prefetch is disabled by default on i386 "
4275 printf(" add \"vfs.zfs.prefetch_disable=0\" "
4276 "to /boot/loader.conf.\n");
4277 zfs_prefetch_disable = 1;
4280 if ((((uint64_t)physmem * PAGESIZE) < (1ULL << 32)) &&
4281 prefetch_tunable_set == 0) {
4282 printf("ZFS NOTICE: Prefetch is disabled by default if less "
4283 "than 4GB of RAM is present;\n"
4284 " to enable, add \"vfs.zfs.prefetch_disable=0\" "
4285 "to /boot/loader.conf.\n");
4286 zfs_prefetch_disable = 1;
4289 /* Warn about ZFS memory and address space requirements. */
4290 if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) {
4291 printf("ZFS WARNING: Recommended minimum RAM size is 512MB; "
4292 "expect unstable behavior.\n");
4294 if (kmem_size() < 512 * (1 << 20)) {
4295 printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; "
4296 "expect unstable behavior.\n");
4297 printf(" Consider tuning vm.kmem_size and "
4298 "vm.kmem_size_max\n");
4299 printf(" in /boot/loader.conf.\n");
4309 mutex_enter(&arc_reclaim_thr_lock);
4310 arc_thread_exit = 1;
4311 cv_signal(&arc_reclaim_thr_cv);
4312 while (arc_thread_exit != 0)
4313 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
4314 mutex_exit(&arc_reclaim_thr_lock);
4320 if (arc_ksp != NULL) {
4321 kstat_delete(arc_ksp);
4325 mutex_destroy(&arc_eviction_mtx);
4326 mutex_destroy(&arc_reclaim_thr_lock);
4327 cv_destroy(&arc_reclaim_thr_cv);
4329 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) {
4330 list_destroy(&arc_mru->arcs_lists[i]);
4331 list_destroy(&arc_mru_ghost->arcs_lists[i]);
4332 list_destroy(&arc_mfu->arcs_lists[i]);
4333 list_destroy(&arc_mfu_ghost->arcs_lists[i]);
4334 list_destroy(&arc_l2c_only->arcs_lists[i]);
4336 mutex_destroy(&arc_anon->arcs_locks[i].arcs_lock);
4337 mutex_destroy(&arc_mru->arcs_locks[i].arcs_lock);
4338 mutex_destroy(&arc_mru_ghost->arcs_locks[i].arcs_lock);
4339 mutex_destroy(&arc_mfu->arcs_locks[i].arcs_lock);
4340 mutex_destroy(&arc_mfu_ghost->arcs_locks[i].arcs_lock);
4341 mutex_destroy(&arc_l2c_only->arcs_locks[i].arcs_lock);
4346 ASSERT(arc_loaned_bytes == 0);
4348 mutex_destroy(&arc_lowmem_lock);
4350 if (arc_event_lowmem != NULL)
4351 EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem);
4358 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
4359 * It uses dedicated storage devices to hold cached data, which are populated
4360 * using large infrequent writes. The main role of this cache is to boost
4361 * the performance of random read workloads. The intended L2ARC devices
4362 * include short-stroked disks, solid state disks, and other media with
4363 * substantially faster read latency than disk.
4365 * +-----------------------+
4367 * +-----------------------+
4370 * l2arc_feed_thread() arc_read()
4374 * +---------------+ |
4376 * +---------------+ |
4381 * +-------+ +-------+
4383 * | cache | | cache |
4384 * +-------+ +-------+
4385 * +=========+ .-----.
4386 * : L2ARC : |-_____-|
4387 * : devices : | Disks |
4388 * +=========+ `-_____-'
4390 * Read requests are satisfied from the following sources, in order:
4391 *
4392 *	1) ARC
4393 *	2) vdev cache of L2ARC devices
4394 *	3) L2ARC devices
4395 *	4) vdev cache of disks
4396 *	5) disks
4398 * Some L2ARC device types exhibit extremely slow write performance.
4399 * To accommodate this, there are some significant differences between
4400 * the L2ARC and traditional cache design:
4402 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
4403 * the ARC behave as usual, freeing buffers and placing headers on ghost
4404 * lists. The ARC does not send buffers to the L2ARC during eviction as
4405 * this would add inflated write latencies for all ARC memory pressure.
4407 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
4408 * It does this by periodically scanning buffers from the eviction-end of
4409 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
4410 * not already there. It scans until a headroom of buffers is satisfied,
4411 * which itself is a buffer for ARC eviction. If a compressible buffer is
4412 * found during scanning and selected for writing to an L2ARC device, we
4413 * temporarily boost scanning headroom during the next scan cycle to make
4414 * sure we adapt to compression effects (which might significantly reduce
4415 * the data volume we write to L2ARC). The thread that does this is
4416 * l2arc_feed_thread(), illustrated below; example sizes are included to
4417 * provide a better sense of ratio than this diagram:
4420 * +---------------------+----------+
4421 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
4422 * +---------------------+----------+ | o L2ARC eligible
4423 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
4424 * +---------------------+----------+ |
4425 * 15.9 Gbytes ^ 32 Mbytes |
4427 * l2arc_feed_thread()
4429 * l2arc write hand <--[oooo]--'
4433 * +==============================+
4434 * L2ARC dev |####|#|###|###| |####| ... |
4435 * +==============================+
4438 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
4439 * evicted, then the L2ARC has cached a buffer much sooner than it probably
4440 * needed to, potentially wasting L2ARC device bandwidth and storage. It is
4441 * safe to say that this is an uncommon case, since buffers at the end of
4442 * the ARC lists have moved there due to inactivity.
4444 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
4445 * then the L2ARC simply misses copying some buffers. This serves as a
4446 * pressure valve to prevent heavy read workloads from both stalling the ARC
4447 * with waits and clogging the L2ARC with writes. This also helps prevent
4448 * the potential for the L2ARC to churn if it attempts to cache content too
4449 * quickly, such as during backups of the entire pool.
4451 * 5. After system boot and before the ARC has filled main memory, there are
4452 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
4453 * lists can remain mostly static. Instead of searching from the tail of these
4454 * lists as pictured, the l2arc_feed_thread() will search from the list heads
4455 * for eligible buffers, greatly increasing its chance of finding them.
4457 * The L2ARC device write speed is also boosted during this time so that
4458 * the L2ARC warms up faster. Since there have been no ARC evictions yet,
4459 * there are no L2ARC reads, and no fear of degrading read performance
4460 * through increased writes.
4462 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
4463 * the vdev queue can aggregate them into larger and fewer writes. Each
4464 * device is written to in a rotor fashion, sweeping writes through
4465 * available space then repeating.
4467 * 7. The L2ARC does not store dirty content. It never needs to flush
4468 * write buffers back to disk based storage.
4470 * 8. If an ARC buffer is written (and dirtied) which also exists in the
4471 * L2ARC, the now stale L2ARC buffer is immediately dropped.
4473 * The performance of the L2ARC can be tweaked by a number of tunables, which
4474 * may be necessary for different workloads:
4476 * l2arc_write_max max write bytes per interval
4477 * l2arc_write_boost extra write bytes during device warmup
4478 * l2arc_noprefetch skip caching prefetched buffers
4479 * l2arc_headroom number of max device writes to precache
4480 * l2arc_headroom_boost when we find compressed buffers during ARC
4481 * scanning, we multiply headroom by this
4482 * percentage factor for the next scan cycle,
4483 * since more compressed buffers are likely to be present in the scanned lists
4485 * l2arc_feed_secs seconds between L2ARC writing
4487 * Tunables may be removed or added as future performance improvements are
4488 * integrated, and also may become zpool properties.
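 *
 * On FreeBSD these are exposed as vfs.zfs.* sysctls and loader tunables; a
 * minimal sketch of /boot/loader.conf entries (values illustrative only):
 *
 *	vfs.zfs.l2arc_write_max=16777216
 *	vfs.zfs.l2arc_noprefetch=0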
4490 * There are three key functions that control how the L2ARC warms up:
4492 * l2arc_write_eligible() check if a buffer is eligible to cache
4493 * l2arc_write_size() calculate how much to write
4494 * l2arc_write_interval() calculate sleep delay between writes
4496 * These three functions determine what to write, how much, and how quickly to write.
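 *
 * A single feed cycle, as driven by l2arc_feed_thread() further below, then
 * looks roughly like this (sketch only; locking, error handling, and the
 * read-only / low-memory checks are omitted):
 *
 *	size = l2arc_write_size();
 *	l2arc_evict(dev, size, B_FALSE);
 *	wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);
 *	next = l2arc_write_interval(begin, size, wrote);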
4501 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
4504 * A buffer is *not* eligible for the L2ARC if it:
4505 * 1. belongs to a different spa.
4506 * 2. is already cached on the L2ARC.
4507 * 3. has an I/O in progress (it may be an incomplete read).
4508 * 4. is flagged not eligible (zfs property).
4510 if (ab->b_spa != spa_guid) {
4511 ARCSTAT_BUMP(arcstat_l2_write_spa_mismatch);
4514 if (ab->b_l2hdr != NULL) {
4515 ARCSTAT_BUMP(arcstat_l2_write_in_l2);
4518 if (HDR_IO_IN_PROGRESS(ab)) {
4519 ARCSTAT_BUMP(arcstat_l2_write_hdr_io_in_progress);
4522 if (!HDR_L2CACHE(ab)) {
4523 ARCSTAT_BUMP(arcstat_l2_write_not_cacheable);
4531 l2arc_write_size(void)
4536 * Make sure our globals have meaningful values in case the user altered them.
4539 size = l2arc_write_max;
4541 cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
4542 "be greater than zero, resetting it to the default (%d)",
4544 size = l2arc_write_max = L2ARC_WRITE_SIZE;
4547 if (arc_warm == B_FALSE)
4548 size += l2arc_write_boost;
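/*
 * With the stock defaults (assumed here for illustration) of
 * l2arc_write_max = 8 MB and l2arc_write_boost = 8 MB, this yields 8 MB per
 * feed interval once the ARC is warm and 16 MB while it is still warming up.
 */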
4555 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
4557 clock_t interval, next, now;
4560 * If the ARC lists are busy, increase our write rate; if the
4561 * lists are stale, idle back. This is achieved by checking
4562 * how much we previously wrote - if it was more than half of
4563 * what we wanted, schedule the next write much sooner.
4565 if (l2arc_feed_again && wrote > (wanted / 2))
4566 interval = (hz * l2arc_feed_min_ms) / 1000;
4568 interval = hz * l2arc_feed_secs;
4570 now = ddi_get_lbolt();
4571 next = MAX(now, MIN(now + interval, began + interval));
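/*
 * Example (defaults assumed for illustration): if wanted = 8 MB and
 * wrote = 6 MB, more than half the target was written, so with
 * l2arc_feed_again enabled (the default) the next feed is scheduled
 * l2arc_feed_min_ms (200 ms by default) after this cycle began; otherwise
 * it is scheduled a full l2arc_feed_secs (1 second) later, clamped so that
 * it is never in the past.
 */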
4577 l2arc_hdr_stat_add(void)
4579 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
4580 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
4584 l2arc_hdr_stat_remove(void)
4586 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
4587 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
4591 * Cycle through L2ARC devices. This is how L2ARC load balances.
4592 * If a device is returned, this also returns holding the spa config lock.
4594 static l2arc_dev_t *
4595 l2arc_dev_get_next(void)
4597 l2arc_dev_t *first, *next = NULL;
4600 * Lock out the removal of spas (spa_namespace_lock), then removal
4601 * of cache devices (l2arc_dev_mtx). Once a device has been selected,
4602 * both locks will be dropped and a spa config lock held instead.
4604 mutex_enter(&spa_namespace_lock);
4605 mutex_enter(&l2arc_dev_mtx);
4607 /* if there are no vdevs, there is nothing to do */
4608 if (l2arc_ndev == 0)
4612 next = l2arc_dev_last;
4614 /* loop around the list looking for a non-faulted vdev */
4616 next = list_head(l2arc_dev_list);
4618 next = list_next(l2arc_dev_list, next);
4620 next = list_head(l2arc_dev_list);
4623 /* if we have come back to the start, bail out */
4626 else if (next == first)
4629 } while (vdev_is_dead(next->l2ad_vdev));
4631 /* if we were unable to find any usable vdevs, return NULL */
4632 if (vdev_is_dead(next->l2ad_vdev))
4635 l2arc_dev_last = next;
4638 mutex_exit(&l2arc_dev_mtx);
4641 * Grab the config lock to prevent the 'next' device from being
4642 * removed while we are writing to it.
4645 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
4646 mutex_exit(&spa_namespace_lock);
4652 * Free buffers that were tagged for destruction.
4655 l2arc_do_free_on_write()
4658 l2arc_data_free_t *df, *df_prev;
4660 mutex_enter(&l2arc_free_on_write_mtx);
4661 buflist = l2arc_free_on_write;
4663 for (df = list_tail(buflist); df; df = df_prev) {
4664 df_prev = list_prev(buflist, df);
4665 ASSERT(df->l2df_data != NULL);
4666 ASSERT(df->l2df_func != NULL);
4667 df->l2df_func(df->l2df_data, df->l2df_size);
4668 list_remove(buflist, df);
4669 kmem_free(df, sizeof (l2arc_data_free_t));
4672 mutex_exit(&l2arc_free_on_write_mtx);
4676 * A write to a cache device has completed. Update all headers to allow
4677 * reads from these buffers to begin.
4680 l2arc_write_done(zio_t *zio)
4682 l2arc_write_callback_t *cb;
4685 arc_buf_hdr_t *head, *ab, *ab_prev;
4686 l2arc_buf_hdr_t *abl2;
4687 kmutex_t *hash_lock;
4688 int64_t bytes_dropped = 0;
4690 cb = zio->io_private;
4692 dev = cb->l2wcb_dev;
4693 ASSERT(dev != NULL);
4694 head = cb->l2wcb_head;
4695 ASSERT(head != NULL);
4696 buflist = dev->l2ad_buflist;
4697 ASSERT(buflist != NULL);
4698 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
4699 l2arc_write_callback_t *, cb);
4701 if (zio->io_error != 0)
4702 ARCSTAT_BUMP(arcstat_l2_writes_error);
4704 mutex_enter(&l2arc_buflist_mtx);
4707 * All writes completed, or an error was hit.
4709 for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
4710 ab_prev = list_prev(buflist, ab);
4714 * Release the temporary compressed buffer as soon as possible.
4716 if (abl2->b_compress != ZIO_COMPRESS_OFF)
4717 l2arc_release_cdata_buf(ab);
4719 hash_lock = HDR_LOCK(ab);
4720 if (!mutex_tryenter(hash_lock)) {
4722 * This buffer misses out. It may be in a stage
4723 * of eviction. Its ARC_L2_WRITING flag will be
4724 * left set, denying reads to this buffer.
4726 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
4730 if (zio->io_error != 0) {
4732 * Error - drop L2ARC entry.
4734 list_remove(buflist, ab);
4735 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4736 bytes_dropped += abl2->b_asize;
4738 trim_map_free(abl2->b_dev->l2ad_vdev, abl2->b_daddr,
4740 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4741 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4745 * Allow ARC to begin reads to this L2ARC entry.
4747 ab->b_flags &= ~ARC_L2_WRITING;
4749 mutex_exit(hash_lock);
4752 atomic_inc_64(&l2arc_writes_done);
4753 list_remove(buflist, head);
4754 kmem_cache_free(hdr_cache, head);
4755 mutex_exit(&l2arc_buflist_mtx);
4757 vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);
4759 l2arc_do_free_on_write();
4761 kmem_free(cb, sizeof (l2arc_write_callback_t));
4765 * A read to a cache device completed. Validate buffer contents before
4766 * handing over to the regular ARC routines.
4769 l2arc_read_done(zio_t *zio)
4771 l2arc_read_callback_t *cb;
4774 kmutex_t *hash_lock;
4777 ASSERT(zio->io_vd != NULL);
4778 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
4780 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
4782 cb = zio->io_private;
4784 buf = cb->l2rcb_buf;
4785 ASSERT(buf != NULL);
4787 hash_lock = HDR_LOCK(buf->b_hdr);
4788 mutex_enter(hash_lock);
4790 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4793 * If the buffer was compressed, decompress it first.
4795 if (cb->l2rcb_compress != ZIO_COMPRESS_OFF)
4796 l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress);
4797 ASSERT(zio->io_data != NULL);
4800 * Check this survived the L2ARC journey.
4802 equal = arc_cksum_equal(buf);
4803 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
4804 mutex_exit(hash_lock);
4805 zio->io_private = buf;
4806 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
4807 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */
4810 mutex_exit(hash_lock);
4812 * Buffer didn't survive caching. Increment stats and
4813 * reissue to the original storage device.
4815 if (zio->io_error != 0) {
4816 ARCSTAT_BUMP(arcstat_l2_io_error);
4818 zio->io_error = SET_ERROR(EIO);
4821 ARCSTAT_BUMP(arcstat_l2_cksum_bad);
4824 * If there's no waiter, issue an async i/o to the primary
4825 * storage now. If there *is* a waiter, the caller must
4826 * issue the i/o in a context where it's OK to block.
4828 if (zio->io_waiter == NULL) {
4829 zio_t *pio = zio_unique_parent(zio);
4831 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
4833 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
4834 buf->b_data, zio->io_size, arc_read_done, buf,
4835 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
4839 kmem_free(cb, sizeof (l2arc_read_callback_t));
4843 * This is the list priority from which the L2ARC will search for pages to
4844 * cache. This is used within loops (0 .. 2 * ARC_BUFC_NUMLISTS - 1) to cycle through lists in the
4845 * desired order. This order can have a significant effect on cache performance.
4848 * Currently the metadata lists are hit first, MFU then MRU, followed by
4849 * the data lists. This function returns a locked list, and also returns the lock.
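 *
 * As a concrete mapping (following the code below): with
 * ARC_BUFC_NUMMETADATALISTS metadata lists per state, list_num 0 selects the
 * first arc_mfu metadata list, list_num ARC_BUFC_NUMMETADATALISTS selects the
 * first arc_mru metadata list, and the remaining values cover the
 * corresponding data lists in the same MFU-then-MRU order.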
4853 l2arc_list_locked(int list_num, kmutex_t **lock)
4855 list_t *list = NULL;
4858 ASSERT(list_num >= 0 && list_num < 2 * ARC_BUFC_NUMLISTS);
4860 if (list_num < ARC_BUFC_NUMMETADATALISTS) {
4862 list = &arc_mfu->arcs_lists[idx];
4863 *lock = ARCS_LOCK(arc_mfu, idx);
4864 } else if (list_num < ARC_BUFC_NUMMETADATALISTS * 2) {
4865 idx = list_num - ARC_BUFC_NUMMETADATALISTS;
4866 list = &arc_mru->arcs_lists[idx];
4867 *lock = ARCS_LOCK(arc_mru, idx);
4868 } else if (list_num < (ARC_BUFC_NUMMETADATALISTS * 2 +
4869 ARC_BUFC_NUMDATALISTS)) {
4870 idx = list_num - ARC_BUFC_NUMMETADATALISTS;
4871 list = &arc_mfu->arcs_lists[idx];
4872 *lock = ARCS_LOCK(arc_mfu, idx);
4874 idx = list_num - ARC_BUFC_NUMLISTS;
4875 list = &arc_mru->arcs_lists[idx];
4876 *lock = ARCS_LOCK(arc_mru, idx);
4879 ASSERT(!(MUTEX_HELD(*lock)));
4885 * Evict buffers from the device write hand to the distance specified in
4886 * bytes. This distance may span populated buffers, or it may span nothing.
4887 * This is clearing a region on the L2ARC device ready for writing.
4888 * If the 'all' boolean is set, every buffer is evicted.
4891 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
4894 l2arc_buf_hdr_t *abl2;
4895 arc_buf_hdr_t *ab, *ab_prev;
4896 kmutex_t *hash_lock;
4898 int64_t bytes_evicted = 0;
4900 buflist = dev->l2ad_buflist;
4902 if (buflist == NULL)
4905 if (!all && dev->l2ad_first) {
4907 * This is the first sweep through the device. There is nothing to evict yet.
4913 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
4915 * When nearing the end of the device, evict to the end
4916 * before the device write hand jumps to the start.
4918 taddr = dev->l2ad_end;
4920 taddr = dev->l2ad_hand + distance;
4922 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
4923 uint64_t, taddr, boolean_t, all);
4926 mutex_enter(&l2arc_buflist_mtx);
4927 for (ab = list_tail(buflist); ab; ab = ab_prev) {
4928 ab_prev = list_prev(buflist, ab);
4930 hash_lock = HDR_LOCK(ab);
4931 if (!mutex_tryenter(hash_lock)) {
4933 * Missed the hash lock. Retry.
4935 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
4936 mutex_exit(&l2arc_buflist_mtx);
4937 mutex_enter(hash_lock);
4938 mutex_exit(hash_lock);
4942 if (HDR_L2_WRITE_HEAD(ab)) {
4944 * We hit a write head node. Leave it for
4945 * l2arc_write_done().
4947 list_remove(buflist, ab);
4948 mutex_exit(hash_lock);
4952 if (!all && ab->b_l2hdr != NULL &&
4953 (ab->b_l2hdr->b_daddr > taddr ||
4954 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
4956 * We've evicted to the target address,
4957 * or the end of the device.
4959 mutex_exit(hash_lock);
4963 if (HDR_FREE_IN_PROGRESS(ab)) {
4965 * Already on the path to destruction.
4967 mutex_exit(hash_lock);
4971 if (ab->b_state == arc_l2c_only) {
4972 ASSERT(!HDR_L2_READING(ab));
4974 * This doesn't exist in the ARC. Destroy.
4975 * arc_hdr_destroy() will call list_remove()
4976 * and decrement arcstat_l2_size.
4978 arc_change_state(arc_anon, ab, hash_lock);
4979 arc_hdr_destroy(ab);
4982 * Invalidate issued or about to be issued
4983 * reads, since we may be about to write
4984 * over this location.
4986 if (HDR_L2_READING(ab)) {
4987 ARCSTAT_BUMP(arcstat_l2_evict_reading);
4988 ab->b_flags |= ARC_L2_EVICTED;
4992 * Tell ARC this no longer exists in L2ARC.
4994 if (ab->b_l2hdr != NULL) {
4996 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4997 bytes_evicted += abl2->b_asize;
5000 * We are destroying l2hdr, so ensure that
5001 * its compressed buffer, if any, is not leaked.
5003 ASSERT(abl2->b_tmp_cdata == NULL);
5004 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
5005 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
5007 list_remove(buflist, ab);
5010 * This may have been leftover after a failed write.
5013 ab->b_flags &= ~ARC_L2_WRITING;
5015 mutex_exit(hash_lock);
5017 mutex_exit(&l2arc_buflist_mtx);
5019 vdev_space_update(dev->l2ad_vdev, -bytes_evicted, 0, 0);
5020 dev->l2ad_evict = taddr;
5024 * Find and write ARC buffers to the L2ARC device.
5026 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
5027 * for reading until they have completed writing.
5028 * The headroom_boost is an in-out parameter used to maintain headroom boost
5029 * state between calls to this function.
5031 * Returns the number of bytes actually written (which may be smaller than
5032 * the delta by which the device hand has changed due to alignment).
5035 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
5036 boolean_t *headroom_boost)
5038 arc_buf_hdr_t *ab, *ab_prev, *head;
5040 uint64_t write_asize, write_psize, write_sz, headroom,
5043 kmutex_t *list_lock;
5045 l2arc_write_callback_t *cb;
5047 uint64_t guid = spa_load_guid(spa);
5048 const boolean_t do_headroom_boost = *headroom_boost;
5051 ASSERT(dev->l2ad_vdev != NULL);
5053 /* Lower the flag now, we might want to raise it again later. */
5054 *headroom_boost = B_FALSE;
5057 write_sz = write_asize = write_psize = 0;
5059 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
5060 head->b_flags |= ARC_L2_WRITE_HEAD;
5062 ARCSTAT_BUMP(arcstat_l2_write_buffer_iter);
5064 * We will want to try to compress buffers that are at least 2x the
5065 * device sector size.
5067 buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift;
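	/*
	 * For example, an assumed vdev_ashift of 9 (512-byte sectors) gives
	 * a 1 KB minimum, while an ashift of 12 (4 KB sectors) gives 8 KB.
	 */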
5070 * Copy buffers for L2ARC writing.
5072 mutex_enter(&l2arc_buflist_mtx);
5073 for (try = 0; try < 2 * ARC_BUFC_NUMLISTS; try++) {
5074 uint64_t passed_sz = 0;
5076 list = l2arc_list_locked(try, &list_lock);
5077 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter);
5080 * L2ARC fast warmup.
5082 * Until the ARC is warm and starts to evict, read from the
5083 * head of the ARC lists rather than the tail.
5085 if (arc_warm == B_FALSE)
5086 ab = list_head(list);
5088 ab = list_tail(list);
5090 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter);
5092 headroom = target_sz * l2arc_headroom * 2 / ARC_BUFC_NUMLISTS;
5093 if (do_headroom_boost)
5094 headroom = (headroom * l2arc_headroom_boost) / 100;
5096 for (; ab; ab = ab_prev) {
5097 l2arc_buf_hdr_t *l2hdr;
5098 kmutex_t *hash_lock;
5101 if (arc_warm == B_FALSE)
5102 ab_prev = list_next(list, ab);
5104 ab_prev = list_prev(list, ab);
5105 ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned, ab->b_size);
5107 hash_lock = HDR_LOCK(ab);
5108 if (!mutex_tryenter(hash_lock)) {
5109 ARCSTAT_BUMP(arcstat_l2_write_trylock_fail);
5111 * Skip this buffer rather than waiting.
5116 passed_sz += ab->b_size;
5117 if (passed_sz > headroom) {
5121 mutex_exit(hash_lock);
5122 ARCSTAT_BUMP(arcstat_l2_write_passed_headroom);
5126 if (!l2arc_write_eligible(guid, ab)) {
5127 mutex_exit(hash_lock);
5131 if ((write_sz + ab->b_size) > target_sz) {
5133 mutex_exit(hash_lock);
5134 ARCSTAT_BUMP(arcstat_l2_write_full);
5140 * Insert a dummy header on the buflist so
5141 * l2arc_write_done() can find where the
5142 * write buffers begin without searching.
5144 list_insert_head(dev->l2ad_buflist, head);
5147 sizeof (l2arc_write_callback_t), KM_SLEEP);
5148 cb->l2wcb_dev = dev;
5149 cb->l2wcb_head = head;
5150 pio = zio_root(spa, l2arc_write_done, cb,
5152 ARCSTAT_BUMP(arcstat_l2_write_pios);
5156 * Create and add a new L2ARC header.
5158 l2hdr = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
5160 ab->b_flags |= ARC_L2_WRITING;
5163 * Temporarily stash the data buffer in b_tmp_cdata.
5164 * The subsequent write step will pick it up from
5165 * there. This is because we can't access ab->b_buf
5166 * without holding the hash_lock, which we in turn
5167 * can't access without holding the ARC list locks
5168 * (which we want to avoid during compression/writing).
5170 l2hdr->b_compress = ZIO_COMPRESS_OFF;
5171 l2hdr->b_asize = ab->b_size;
5172 l2hdr->b_tmp_cdata = ab->b_buf->b_data;
5174 buf_sz = ab->b_size;
5175 ab->b_l2hdr = l2hdr;
5177 list_insert_head(dev->l2ad_buflist, ab);
5180 * Compute and store the buffer cksum before
5181 * writing. On debug the cksum is verified first.
5183 arc_cksum_verify(ab->b_buf);
5184 arc_cksum_compute(ab->b_buf, B_TRUE);
5186 mutex_exit(hash_lock);
5191 mutex_exit(list_lock);
5197 /* No buffers selected for writing? */
5200 mutex_exit(&l2arc_buflist_mtx);
5201 kmem_cache_free(hdr_cache, head);
5206 * Now start writing the buffers. We're starting at the write head
5207 * and work backwards, retracing the course of the buffer selector
5210 for (ab = list_prev(dev->l2ad_buflist, head); ab;
5211 ab = list_prev(dev->l2ad_buflist, ab)) {
5212 l2arc_buf_hdr_t *l2hdr;
5216 * We shouldn't need to lock the buffer here, since we flagged
5217 * it as ARC_L2_WRITING in the previous step, but we must take
5218 * care to only access its L2 cache parameters. In particular,
5219 * ab->b_buf may be invalid by now due to ARC eviction.
5221 l2hdr = ab->b_l2hdr;
5222 l2hdr->b_daddr = dev->l2ad_hand;
5224 if ((ab->b_flags & ARC_L2COMPRESS) &&
5225 l2hdr->b_asize >= buf_compress_minsz) {
5226 if (l2arc_compress_buf(l2hdr)) {
5228 * If compression succeeded, enable headroom
5229 * boost on the next scan cycle.
5231 *headroom_boost = B_TRUE;
5236 * Pick up the buffer data we had previously stashed away
5237 * (and now potentially also compressed).
5239 buf_data = l2hdr->b_tmp_cdata;
5240 buf_sz = l2hdr->b_asize;
5243 * If the data has not been compressed, then clear b_tmp_cdata
5244 * to make sure that it points only to a temporary compression buffer.
5247 if (!L2ARC_IS_VALID_COMPRESS(l2hdr->b_compress))
5248 l2hdr->b_tmp_cdata = NULL;
5250 /* Compression may have squashed the buffer to zero length. */
5254 wzio = zio_write_phys(pio, dev->l2ad_vdev,
5255 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
5256 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
5257 ZIO_FLAG_CANFAIL, B_FALSE);
5259 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
5261 (void) zio_nowait(wzio);
5263 write_asize += buf_sz;
5265 * Keep the clock hand suitably device-aligned.
5267 buf_p_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
5268 write_psize += buf_p_sz;
5269 dev->l2ad_hand += buf_p_sz;
5273 mutex_exit(&l2arc_buflist_mtx);
5275 ASSERT3U(write_asize, <=, target_sz);
5276 ARCSTAT_BUMP(arcstat_l2_writes_sent);
5277 ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize);
5278 ARCSTAT_INCR(arcstat_l2_size, write_sz);
5279 ARCSTAT_INCR(arcstat_l2_asize, write_asize);
5280 vdev_space_update(dev->l2ad_vdev, write_psize, 0, 0);
5283 * Bump device hand to the device start if it is approaching the end.
5284 * l2arc_evict() will already have evicted ahead for this case.
5286 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
5287 dev->l2ad_hand = dev->l2ad_start;
5288 dev->l2ad_evict = dev->l2ad_start;
5289 dev->l2ad_first = B_FALSE;
5292 dev->l2ad_writing = B_TRUE;
5293 (void) zio_wait(pio);
5294 dev->l2ad_writing = B_FALSE;
5296 return (write_asize);
5300 * Compresses an L2ARC buffer.
5301 * The data to be compressed must be prefilled in l2hdr->b_tmp_cdata and its
5302 * size in l2hdr->b_asize. This routine tries to compress the data and
5303 * depending on the compression result there are three possible outcomes:
5304 * *) The buffer was incompressible. The original l2hdr contents were left
5305 * untouched and are ready for writing to an L2 device.
5306 * *) The buffer was all-zeros, so there is no need to write it to an L2
5307 * device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is
5308 * set to zero and b_compress is set to ZIO_COMPRESS_EMPTY.
5309 * *) Compression succeeded and b_tmp_cdata was replaced with a temporary
5310 * data buffer which holds the compressed data to be written, and b_asize
5311 * tells us how much data there is. b_compress is set to the appropriate
5312 * compression algorithm. Once writing is done, invoke
5313 * l2arc_release_cdata_buf on this l2hdr to free this temporary buffer.
5315 * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the
5316 * buffer was incompressible).
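 *
 * "Succeeded" here means the compressed size, rounded up to the device's
 * ashift, is still strictly smaller than the original: with an assumed
 * ashift of 12, a 16 KB buffer that compresses to 5 KB is kept (padded to
 * 8 KB), while one that only compresses to 13 KB is rejected, since it
 * would round back up to 16 KB.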
5319 l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr)
5322 size_t csize, len, rounded;
5324 ASSERT(l2hdr->b_compress == ZIO_COMPRESS_OFF);
5325 ASSERT(l2hdr->b_tmp_cdata != NULL);
5327 len = l2hdr->b_asize;
5328 cdata = zio_data_buf_alloc(len);
5329 csize = zio_compress_data(ZIO_COMPRESS_LZ4, l2hdr->b_tmp_cdata,
5330 cdata, l2hdr->b_asize);
5333 /* zero block, indicate that there's nothing to write */
5334 zio_data_buf_free(cdata, len);
5335 l2hdr->b_compress = ZIO_COMPRESS_EMPTY;
5337 l2hdr->b_tmp_cdata = NULL;
5338 ARCSTAT_BUMP(arcstat_l2_compress_zeros);
5342 rounded = P2ROUNDUP(csize,
5343 (size_t)1 << l2hdr->b_dev->l2ad_vdev->vdev_ashift);
5344 if (rounded < len) {
5346 * Compression succeeded, we'll keep the cdata around for
5347 * writing and release it afterwards.
5349 if (rounded > csize) {
5350 bzero((char *)cdata + csize, rounded - csize);
5353 l2hdr->b_compress = ZIO_COMPRESS_LZ4;
5354 l2hdr->b_asize = csize;
5355 l2hdr->b_tmp_cdata = cdata;
5356 ARCSTAT_BUMP(arcstat_l2_compress_successes);
5360 * Compression failed, release the compressed buffer.
5361 * l2hdr will be left unmodified.
5363 zio_data_buf_free(cdata, len);
5364 ARCSTAT_BUMP(arcstat_l2_compress_failures);
5370 * Decompresses a zio read back from an l2arc device. On success, the
5371 * underlying zio's io_data buffer is overwritten by the uncompressed
5372 * version. On decompression error (corrupt compressed stream), the
5373 * zio->io_error value is set to signal an I/O error.
5375 * Please note that the compressed data stream is not checksummed, so
5376 * if the underlying device is experiencing data corruption, we may feed
5377 * corrupt data to the decompressor, so the decompressor needs to be
5378 * able to handle this situation (LZ4 does).
5381 l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
5383 ASSERT(L2ARC_IS_VALID_COMPRESS(c));
5385 if (zio->io_error != 0) {
5387 * An I/O error has occurred; just restore the original I/O
5388 * size in preparation for a main pool read.
5390 zio->io_orig_size = zio->io_size = hdr->b_size;
5394 if (c == ZIO_COMPRESS_EMPTY) {
5396 * An empty buffer results in a null zio, which means we
5397 * need to fill its io_data after we're done restoring the
5398 * buffer's contents.
5400 ASSERT(hdr->b_buf != NULL);
5401 bzero(hdr->b_buf->b_data, hdr->b_size);
5402 zio->io_data = zio->io_orig_data = hdr->b_buf->b_data;
5404 ASSERT(zio->io_data != NULL);
5406 * We copy the compressed data from the start of the arc buffer
5407 * (the zio_read will have pulled in only what we need, the
5408 * rest is garbage which we will overwrite at decompression)
5409 * and then decompress back to the ARC data buffer. This way we
5410 * can minimize copying by simply decompressing back over the
5411 * original compressed data (rather than decompressing to an
5412 * aux buffer and then copying back the uncompressed buffer,
5413 * which is likely to be much larger).
5418 csize = zio->io_size;
5419 cdata = zio_data_buf_alloc(csize);
5420 bcopy(zio->io_data, cdata, csize);
5421 if (zio_decompress_data(c, cdata, zio->io_data, csize,
5423 zio->io_error = EIO;
5424 zio_data_buf_free(cdata, csize);
5427 /* Restore the expected uncompressed IO size. */
5428 zio->io_orig_size = zio->io_size = hdr->b_size;
5432 * Releases the temporary b_tmp_cdata buffer in an l2arc header structure.
5433 * This buffer serves as a temporary holder of compressed data while
5434 * the buffer entry is being written to an l2arc device. Once that is
5435 * done, we can dispose of it.
5438 l2arc_release_cdata_buf(arc_buf_hdr_t *ab)
5440 l2arc_buf_hdr_t *l2hdr = ab->b_l2hdr;
5442 ASSERT(L2ARC_IS_VALID_COMPRESS(l2hdr->b_compress));
5443 if (l2hdr->b_compress != ZIO_COMPRESS_EMPTY) {
5445 * If the data was compressed, then we've allocated a
5446 * temporary buffer for it, so now we need to release it.
5448 ASSERT(l2hdr->b_tmp_cdata != NULL);
5449 zio_data_buf_free(l2hdr->b_tmp_cdata, ab->b_size);
5450 l2hdr->b_tmp_cdata = NULL;
5452 ASSERT(l2hdr->b_tmp_cdata == NULL);
5457 * This thread feeds the L2ARC at regular intervals. This is the beating
5458 * heart of the L2ARC.
5461 l2arc_feed_thread(void *dummy __unused)
5466 uint64_t size, wrote;
5467 clock_t begin, next = ddi_get_lbolt();
5468 boolean_t headroom_boost = B_FALSE;
5470 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
5472 mutex_enter(&l2arc_feed_thr_lock);
5474 while (l2arc_thread_exit == 0) {
5475 CALLB_CPR_SAFE_BEGIN(&cpr);
5476 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
5477 next - ddi_get_lbolt());
5478 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
5479 next = ddi_get_lbolt() + hz;
5482 * Quick check for L2ARC devices.
5484 mutex_enter(&l2arc_dev_mtx);
5485 if (l2arc_ndev == 0) {
5486 mutex_exit(&l2arc_dev_mtx);
5489 mutex_exit(&l2arc_dev_mtx);
5490 begin = ddi_get_lbolt();
5493 * This selects the next l2arc device to write to, and in
5494 * doing so the next spa to feed from: dev->l2ad_spa. This
5495 * will return NULL if there are now no l2arc devices or if
5496 * they are all faulted.
5498 * If a device is returned, its spa's config lock is also
5499 * held to prevent device removal. l2arc_dev_get_next()
5500 * will grab and release l2arc_dev_mtx.
5502 if ((dev = l2arc_dev_get_next()) == NULL)
5505 spa = dev->l2ad_spa;
5506 ASSERT(spa != NULL);
5509 * If the pool is read-only then force the feed thread to
5510 * sleep a little longer.
5512 if (!spa_writeable(spa)) {
5513 next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
5514 spa_config_exit(spa, SCL_L2ARC, dev);
5519 * Avoid contributing to memory pressure.
5521 if (arc_reclaim_needed()) {
5522 ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
5523 spa_config_exit(spa, SCL_L2ARC, dev);
5527 ARCSTAT_BUMP(arcstat_l2_feeds);
5529 size = l2arc_write_size();
5532 * Evict L2ARC buffers that will be overwritten.
5534 l2arc_evict(dev, size, B_FALSE);
5537 * Write ARC buffers.
5539 wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);
5542 * Calculate interval between writes.
5544 next = l2arc_write_interval(begin, size, wrote);
5545 spa_config_exit(spa, SCL_L2ARC, dev);
5548 l2arc_thread_exit = 0;
5549 cv_broadcast(&l2arc_feed_thr_cv);
5550 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
5555 l2arc_vdev_present(vdev_t *vd)
5559 mutex_enter(&l2arc_dev_mtx);
5560 for (dev = list_head(l2arc_dev_list); dev != NULL;
5561 dev = list_next(l2arc_dev_list, dev)) {
5562 if (dev->l2ad_vdev == vd)
5565 mutex_exit(&l2arc_dev_mtx);
5567 return (dev != NULL);
5571 * Add a vdev for use by the L2ARC. By this point the spa has already
5572 * validated the vdev and opened it.
5575 l2arc_add_vdev(spa_t *spa, vdev_t *vd)
5577 l2arc_dev_t *adddev;
5579 ASSERT(!l2arc_vdev_present(vd));
5581 vdev_ashift_optimize(vd);
5584 * Create a new l2arc device entry.
5586 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
5587 adddev->l2ad_spa = spa;
5588 adddev->l2ad_vdev = vd;
5589 adddev->l2ad_start = VDEV_LABEL_START_SIZE;
5590 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
5591 adddev->l2ad_hand = adddev->l2ad_start;
5592 adddev->l2ad_evict = adddev->l2ad_start;
5593 adddev->l2ad_first = B_TRUE;
5594 adddev->l2ad_writing = B_FALSE;
5597 * This is a list of all ARC buffers that are still valid on the
5600 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
5601 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
5602 offsetof(arc_buf_hdr_t, b_l2node));
5604 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
5607 * Add device to global list
5609 mutex_enter(&l2arc_dev_mtx);
5610 list_insert_head(l2arc_dev_list, adddev);
5611 atomic_inc_64(&l2arc_ndev);
5612 mutex_exit(&l2arc_dev_mtx);
5616 * Remove a vdev from the L2ARC.
5619 l2arc_remove_vdev(vdev_t *vd)
5621 l2arc_dev_t *dev, *nextdev, *remdev = NULL;
5624 * Find the device by vdev
5626 mutex_enter(&l2arc_dev_mtx);
5627 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
5628 nextdev = list_next(l2arc_dev_list, dev);
5629 if (vd == dev->l2ad_vdev) {
5634 ASSERT(remdev != NULL);
5637 * Remove device from global list
5639 list_remove(l2arc_dev_list, remdev);
5640 l2arc_dev_last = NULL; /* may have been invalidated */
5641 atomic_dec_64(&l2arc_ndev);
5642 mutex_exit(&l2arc_dev_mtx);
5645 * Clear all buflists and ARC references. L2ARC device flush.
5647 l2arc_evict(remdev, 0, B_TRUE);
5648 list_destroy(remdev->l2ad_buflist);
5649 kmem_free(remdev->l2ad_buflist, sizeof (list_t));
5650 kmem_free(remdev, sizeof (l2arc_dev_t));
5656 l2arc_thread_exit = 0;
5658 l2arc_writes_sent = 0;
5659 l2arc_writes_done = 0;
5661 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
5662 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
5663 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
5664 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
5665 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
5667 l2arc_dev_list = &L2ARC_dev_list;
5668 l2arc_free_on_write = &L2ARC_free_on_write;
5669 list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
5670 offsetof(l2arc_dev_t, l2ad_node));
5671 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
5672 offsetof(l2arc_data_free_t, l2df_list_node));
5679 * This is called from dmu_fini(), which is called from spa_fini();
5680 * Because of this, we can assume that all l2arc devices have
5681 * already been removed when the pools themselves were removed.
5684 l2arc_do_free_on_write();
5686 mutex_destroy(&l2arc_feed_thr_lock);
5687 cv_destroy(&l2arc_feed_thr_cv);
5688 mutex_destroy(&l2arc_dev_mtx);
5689 mutex_destroy(&l2arc_buflist_mtx);
5690 mutex_destroy(&l2arc_free_on_write_mtx);
5692 list_destroy(l2arc_dev_list);
5693 list_destroy(l2arc_free_on_write);
5699 if (!(spa_mode_global & FWRITE))
5702 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
5703 TS_RUN, minclsyspri);
5709 if (!(spa_mode_global & FWRITE))
5712 mutex_enter(&l2arc_feed_thr_lock);
5713 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
5714 l2arc_thread_exit = 1;
5715 while (l2arc_thread_exit != 0)
5716 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
5717 mutex_exit(&l2arc_feed_thr_lock);