/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/spa_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/zfs_refcount.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/multilist.h>
#include <sys/fm/fs/zfs.h>
#ifdef _KERNEL
#include <sys/shrinker.h>
#include <sys/vmsystm.h>
#include <linux/page_compat.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <zfs_fletcher.h>
#include <sys/arc_impl.h>
#include <sys/trace_zfs.h>
#include <sys/aggsum.h>

/*
 * This is a limit on how many pages the ARC shrinker makes available for
 * eviction in response to one page allocation attempt.  Note that in
 * practice, the kernel's shrinker can ask us to evict up to about 4x this
 * for one allocation attempt.
 *
 * The default limit of 10,000 (in practice, 160MB per allocation attempt
 * with 4K pages) limits the amount of time spent attempting to reclaim ARC
 * memory to less than 100ms per allocation attempt, even with a small
 * average compressed block size of ~8KB.
 *
 * See also the comment in arc_shrinker_count().
 * Set to 0 to disable limit.
 */
int zfs_arc_shrinker_limit = 10000;
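
/*
 * Illustrative arithmetic for the default: 10,000 pages * 4 KiB/page
 * exposes ~40 MiB per shrinker call, and with the kernel retrying up to
 * ~4x per allocation attempt the worst case is the ~160 MiB noted above.
 */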

/*
 * Return a default max arc size based on the amount of physical memory.
 */
uint64_t
arc_default_max(uint64_t min, uint64_t allmem)
{
	/* Default to 1/2 of all memory. */
	return (MAX(allmem / 2, min));
}

#ifdef _KERNEL
/*
 * Return maximum amount of memory that we could possibly use.  Reduced
 * to half of all memory in user space, which is primarily used for testing.
 */
uint64_t
arc_all_memory(void)
{
#ifdef CONFIG_HIGHMEM
	return (ptob(zfs_totalram_pages - zfs_totalhigh_pages));
#else
	return (ptob(zfs_totalram_pages));
#endif /* CONFIG_HIGHMEM */
}

/*
 * Return the amount of memory that is considered free.  In user space,
 * which is primarily used for testing, we pretend that free memory ranges
 * from 0-20% of all memory.
 */
uint64_t
arc_free_memory(void)
{
#ifdef CONFIG_HIGHMEM
	struct sysinfo si;

	si_meminfo(&si);
	return (ptob(si.freeram - si.freehigh));
#else
	return (ptob(nr_free_pages() +
	    nr_inactive_file_pages()));
#endif /* CONFIG_HIGHMEM */
}
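
/*
 * Note on the non-HIGHMEM path above: inactive file pages are counted as
 * free on the assumption that inactive page cache can be reclaimed quickly
 * and cheaply, so treating it as available keeps the ARC from shrinking in
 * response to memory the kernel can easily recover on its own.
 */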

/*
 * Return the amount of memory that can be consumed before reclaim will be
 * needed.  Positive if there is sufficient free memory, negative indicates
 * the amount of memory that needs to be freed up.
 */
int64_t
arc_available_memory(void)
{
	return (arc_free_memory() - arc_sys_free);
}

static uint64_t
arc_evictable_memory(void)
{
	int64_t asize = aggsum_value(&arc_size);
	uint64_t arc_clean =
	    zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) +
	    zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) +
	    zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_DATA]) +
	    zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
	uint64_t arc_dirty = MAX((int64_t)asize - (int64_t)arc_clean, 0);

	/*
	 * Scale reported evictable memory in proportion to page cache, cap
	 * at specified min/max.
	 */
	uint64_t min = (ptob(nr_file_pages()) / 100) * zfs_arc_pc_percent;
	min = MAX(arc_c_min, MIN(arc_c_max, min));

	if (arc_dirty >= min)
		return (arc_clean);

	return (MAX((int64_t)asize - (int64_t)min, 0));
}
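
/*
 * In other words: if the dirty (unevictable) portion of the ARC already
 * exceeds the floor "min" computed above, all clean data is reported as
 * evictable; otherwise only the excess over "min" is reported, so eviction
 * never drives the ARC below that floor.
 */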

/*
 * The _count() function returns the number of free-able objects.
 * The _scan() function returns the number of objects that were freed.
 */
static unsigned long
arc_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/*
	 * __GFP_FS won't be set if we are called from ZFS code (see
	 * kmem_flags_convert(), which removes it).  To avoid a deadlock, we
	 * don't allow evicting in this case.  We return 0 rather than
	 * SHRINK_STOP so that the shrinker logic doesn't accumulate a
	 * deficit against us.
	 */
	if (!(sc->gfp_mask & __GFP_FS)) {
		return (0);
	}

	/*
	 * This code is reached in the "direct reclaim" case, where the
	 * kernel (outside ZFS) is trying to allocate a page, and the system
	 * is low on memory.
	 *
	 * The kernel's shrinker code doesn't understand how many pages the
	 * ARC's callback actually frees, so it may ask the ARC to shrink a
	 * lot for one page allocation.  This is problematic because it may
	 * take a long time, thus delaying the page allocation, and because
	 * it may force the ARC to shrink unnecessarily small.
	 *
	 * Therefore, we limit the amount of data that we say is evictable,
	 * which limits the amount that the shrinker will ask us to evict for
	 * one page allocation attempt.
	 *
	 * In practice, we may be asked to shrink 4x the limit to satisfy one
	 * page allocation, before the kernel's shrinker code gives up on us.
	 * When that happens, we rely on the kernel code to find the pages
	 * that we freed before invoking the OOM killer.  This happens in
	 * __alloc_pages_slowpath(), which retries and finds the pages we
	 * freed when it calls get_page_from_freelist().
	 *
	 * See also the comment above zfs_arc_shrinker_limit.
	 */
	int64_t limit = zfs_arc_shrinker_limit != 0 ?
	    zfs_arc_shrinker_limit : INT64_MAX;
	return (MIN(limit, btop((int64_t)arc_evictable_memory())));
}

static unsigned long
arc_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	ASSERT((sc->gfp_mask & __GFP_FS) != 0);

	/* The arc is considered warm once reclaim has occurred */
	if (unlikely(arc_warm == B_FALSE))
		arc_warm = B_TRUE;

	/*
	 * Evict the requested number of pages by reducing arc_c and waiting
	 * for the requested amount of data to be evicted.
	 */
	arc_reduce_target_size(ptob(sc->nr_to_scan));
	arc_wait_for_eviction(ptob(sc->nr_to_scan));
	if (current->reclaim_state != NULL)
		current->reclaim_state->reclaimed_slab += sc->nr_to_scan;

	/*
	 * We are experiencing memory pressure which the arc_evict_zthr was
	 * unable to keep up with.  Set arc_no_grow to briefly pause arc
	 * growth to avoid compounding the memory pressure.
	 */
	arc_no_grow = B_TRUE;

	/*
	 * When direct reclaim is observed it usually indicates a rapid
	 * increase in memory pressure.  This occurs because the kswapd
	 * threads were unable to asynchronously keep enough free memory
	 * available.
	 */
	if (current_is_kswapd()) {
		ARCSTAT_BUMP(arcstat_memory_indirect_count);
	} else {
		ARCSTAT_BUMP(arcstat_memory_direct_count);
	}

	return (sc->nr_to_scan);
}

SPL_SHRINKER_DECLARE(arc_shrinker,
    arc_shrinker_count, arc_shrinker_scan, DEFAULT_SEEKS);

int
arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
{
	uint64_t free_memory = arc_free_memory();

	if (free_memory > arc_all_memory() * arc_lotsfree_percent / 100)
		return (0);

	if (txg > spa->spa_lowmem_last_txg) {
		spa->spa_lowmem_last_txg = txg;
		spa->spa_lowmem_page_load = 0;
	}
	/*
	 * If we are in pageout, we know that memory is already tight,
	 * the arc is already going to be evicting, so we just want to
	 * continue to let page writes occur as quickly as possible.
	 */
	if (current_is_kswapd()) {
		if (spa->spa_lowmem_page_load >
		    MAX(arc_sys_free / 4, free_memory) / 4) {
			DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
			return (SET_ERROR(ERESTART));
		}
		/* Note: reserve is inflated, so we deflate */
		atomic_add_64(&spa->spa_lowmem_page_load, reserve / 8);
		return (0);
	} else if (spa->spa_lowmem_page_load > 0 && arc_reclaim_needed()) {
		/* memory is low, delay before restarting */
		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
		DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
		return (SET_ERROR(EAGAIN));
	}
	spa->spa_lowmem_page_load = 0;
	return (0);
}
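
/*
 * In brief, the throttle above works as follows: each new txg resets the
 * accumulated page load; kswapd writes charge a deflated share (reserve/8)
 * of their reserve against it and get ERESTART once the load exceeds a
 * quarter of the larger of arc_sys_free/4 and current free memory, while
 * other writers get EAGAIN whenever reclaim is needed and some load has
 * already accumulated.
 */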

void
arc_lowmem_init(void)
{
	uint64_t allmem = arc_all_memory();

	/*
	 * Register a shrinker to support synchronous (direct) memory
	 * reclaim from the arc.  This is done to prevent kswapd from
	 * swapping out pages when it is preferable to shrink the arc.
	 */
	spl_register_shrinker(&arc_shrinker);

	/*
	 * The ARC tries to keep at least this much memory available for the
	 * system.  This gives the ARC time to shrink in response to memory
	 * pressure, before running completely out of memory and invoking the
	 * direct-reclaim ARC shrinker.
	 *
	 * This should be more than twice high_wmark_pages(), so that
	 * arc_wait_for_eviction() will wait until at least the
	 * high_wmark_pages() are free (see arc_evict_state_impl()).
	 *
	 * Note: Even when the system is very low on memory, the kernel's
	 * shrinker code may only ask for one "batch" of pages (512KB) to be
	 * evicted.  If concurrent allocations consume these pages, there may
	 * still be insufficient free pages, and the OOM killer takes action.
	 *
	 * By setting arc_sys_free large enough, and having
	 * arc_wait_for_eviction() wait until there is at least arc_sys_free/2
	 * free memory, it is much less likely that concurrent allocations can
	 * consume all the memory that was evicted before checking for
	 * OOM.
	 *
	 * It's hard to iterate the zones from a linux kernel module, which
	 * makes it difficult to determine the watermark dynamically.  Instead
	 * we compute the maximum high watermark for this system, based
	 * on the amount of memory, assuming default parameters on Linux kernel
	 * version 4.18.
	 */

	/*
	 * Base wmark_low is 4 * the square root of Kbytes of RAM.
	 */
	long wmark = 4 * int_sqrt(allmem/1024) * 1024;

	/*
	 * Clamp to between 128K and 64MB.
	 */
	wmark = MAX(wmark, 128 * 1024);
	wmark = MIN(wmark, 64 * 1024 * 1024);

	/*
	 * watermark_boost can increase the wmark by up to 150%.
	 */
	wmark += wmark * 150 / 100;

	/*
	 * arc_sys_free needs to be more than 2x the watermark, because
	 * arc_wait_for_eviction() waits for half of arc_sys_free.  Bump this
	 * up to 3x to ensure we're above it.
	 */
	arc_sys_free = wmark * 3 + allmem / 32;
}
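
/*
 * Worked example of the computation above, assuming 16 GiB of RAM:
 * int_sqrt(16 GiB / 1024) = int_sqrt(16777216) = 4096, so wmark starts at
 * 4 * 4096 * 1024 = 16 MiB; the clamp leaves it unchanged; the boost
 * raises it to 40 MiB; and arc_sys_free becomes
 * 3 * 40 MiB + 16 GiB / 32 = 632 MiB.
 */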

void
arc_lowmem_fini(void)
{
	spl_unregister_shrinker(&arc_shrinker);
}

int
param_set_arc_long(const char *buf, zfs_kernel_param_t *kp)
{
	int error;

	error = param_set_long(buf, kp);
	if (error < 0)
		return (SET_ERROR(error));

	arc_tuning_update(B_TRUE);

	return (0);
}

int
param_set_arc_int(const char *buf, zfs_kernel_param_t *kp)
{
	int error;

	error = param_set_int(buf, kp);
	if (error < 0)
		return (SET_ERROR(error));

	arc_tuning_update(B_TRUE);

	return (0);
}
#else /* _KERNEL */
int64_t
arc_available_memory(void)
{
	int64_t lowest = INT64_MAX;

	/* Every 100 calls, free a small amount */
	if (spa_get_random(100) == 0)
		lowest = -1024;

	return (lowest);
}

int
arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
{
	return (0);
}

uint64_t
arc_all_memory(void)
{
	return (ptob(physmem) / 2);
}

uint64_t
arc_free_memory(void)
{
	return (spa_get_random(arc_all_memory() * 20 / 100));
}
#endif /* _KERNEL */

/*
 * Helper function for arc_prune_async(); it is responsible for safely
 * handling the execution of a registered arc_prune_func_t.
 */
static void
arc_prune_task(void *ptr)
{
	arc_prune_t *ap = (arc_prune_t *)ptr;
	arc_prune_func_t *func = ap->p_pfunc;

	if (func != NULL)
		func(ap->p_adjust, ap->p_private);

	zfs_refcount_remove(&ap->p_refcnt, func);
}

/*
 * Notify registered consumers they must drop holds on a portion of the ARC
 * buffers they reference.  This provides a mechanism to ensure the ARC can
 * honor the arc_meta_limit and reclaim otherwise pinned ARC buffers.  This
 * is analogous to dnlc_reduce_cache() but more generic.
 *
 * This operation is performed asynchronously so it may be safely called
 * in the context of the arc_reclaim_thread().  A reference is taken here
 * for each registered arc_prune_t and the arc_prune_task() is responsible
 * for releasing it once the registered arc_prune_func_t has completed.
 */
void
arc_prune_async(int64_t adjust)
{
	arc_prune_t *ap;

	mutex_enter(&arc_prune_mtx);
	for (ap = list_head(&arc_prune_list); ap != NULL;
	    ap = list_next(&arc_prune_list, ap)) {

		if (zfs_refcount_count(&ap->p_refcnt) >= 2)
			continue;

		zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc);
		ap->p_adjust = adjust;
		if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
		    ap, TQ_SLEEP) == TASKQID_INVALID) {
			zfs_refcount_remove(&ap->p_refcnt, ap->p_pfunc);
			continue;
		}
		ARCSTAT_BUMP(arcstat_prune);
	}
	mutex_exit(&arc_prune_mtx);
}
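
/*
 * Note on the p_refcnt test above: each arc_prune_t presumably holds one
 * reference for its presence on arc_prune_list, and arc_prune_async() adds
 * a second while a task is in flight.  A count of >= 2 therefore indicates
 * that a prune callback is already pending for that consumer, so
 * dispatching another would be redundant.
 */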

ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, shrinker_limit, INT, ZMOD_RW,
	"Limit on number of pages that ARC shrinker can reclaim at once");