/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>

/*
 * Virtual device read-ahead caching.
 *
 * This file implements a simple LRU read-ahead cache.  When the DMU reads
 * a given block, it will often want other, nearby blocks soon thereafter.
 * We take advantage of this by reading a larger disk region and caching
 * the result.  In the best case, this can turn 256 back-to-back 512-byte
 * reads into a single 128k read followed by 255 cache hits; this reduces
 * latency dramatically.  In the worst case, it can turn an isolated 512-byte
 * read into a 128k read, which doesn't affect latency all that much but is
 * terribly wasteful of bandwidth.  A more intelligent version of the cache
 * could keep track of access patterns and not do read-ahead unless it sees
 * at least two temporally close I/Os to the same region.  It could also
 * take advantage of semantic information about the I/O.  And it could use
 * something faster than an AVL tree; that was chosen solely for convenience.
 *
 * There are five cache operations: allocate, fill, read, write, evict.
 *
 * (1) Allocate.  This reserves a cache entry for the specified region.
 *     We separate the allocate and fill operations so that multiple threads
 *     don't generate I/O for the same cache miss.
 *
 * (2) Fill.  When the I/O for a cache miss completes, the fill routine
 *     places the data in the previously allocated cache entry.
 *
 * (3) Read.  Read data from the cache.
 *
 * (4) Write.  Update cache contents after write completion.
 *
 * (5) Evict.  When allocating a new entry, we evict the oldest (LRU) entry
 *     if the total cache size exceeds zfs_vdev_cache_size.
 */

/*
 * These tunables are for performance analysis.
 */
/*
 * All i/os smaller than zfs_vdev_cache_max will be turned into
 * 1<<zfs_vdev_cache_bshift byte reads by the vdev_cache (aka software
 * track buffer).  At most zfs_vdev_cache_size bytes will be kept in each
 * vdev's vdev_cache.
 */
int zfs_vdev_cache_max = 1<<14;
int zfs_vdev_cache_size = 10ULL << 20;
int zfs_vdev_cache_bshift = 16;

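/*
 * For example, with the defaults above a cacheable 4K read is inflated to a
 * single 64K (1 << 16 byte) cache-line read, and each vdev keeps at most
 * 10MB of such lines before the LRU starts evicting.
 */
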
SYSCTL_DECL(_vfs_zfs_vdev);
SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, cache, CTLFLAG_RW, 0, "ZFS VDEV Cache");
TUNABLE_INT("vfs.zfs.vdev.cache.max", &zfs_vdev_cache_max);
SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, max, CTLFLAG_RDTUN,
    &zfs_vdev_cache_max, 0, "Maximum I/O request size that increases read size");
TUNABLE_INT("vfs.zfs.vdev.cache.size", &zfs_vdev_cache_size);
SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, size, CTLFLAG_RDTUN,
    &zfs_vdev_cache_size, 0, "Size of VDEV cache");

#define	VCBS (1 << zfs_vdev_cache_bshift)

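/*
 * The cache is indexed by two AVL trees: vc_offset_tree, ordered by line
 * offset for lookups, and vc_lastused_tree, ordered by last-use time for
 * LRU eviction.  These are their comparators.
 */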
static int
vdev_cache_offset_compare(const void *a1, const void *a2)
{
        const vdev_cache_entry_t *ve1 = a1;
        const vdev_cache_entry_t *ve2 = a2;

        if (ve1->ve_offset < ve2->ve_offset)
                return (-1);
        if (ve1->ve_offset > ve2->ve_offset)
                return (1);
        return (0);
}

static int
vdev_cache_lastused_compare(const void *a1, const void *a2)
{
        const vdev_cache_entry_t *ve1 = a1;
        const vdev_cache_entry_t *ve2 = a2;

        if (ve1->ve_lastused < ve2->ve_lastused)
                return (-1);
        if (ve1->ve_lastused > ve2->ve_lastused)
                return (1);

        /*
         * Among equally old entries, sort by offset to ensure uniqueness.
         */
        return (vdev_cache_offset_compare(a1, a2));
}

/*
 * Evict the specified entry from the cache.
 */
static void
vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve)
{
        ASSERT(MUTEX_HELD(&vc->vc_lock));
        ASSERT(ve->ve_fill_io == NULL);
        ASSERT(ve->ve_data != NULL);

        dprintf("evicting %p, off %llx, LRU %llu, age %lu, hits %u, stale %u\n",
            vc, ve->ve_offset, ve->ve_lastused, LBOLT - ve->ve_lastused,
            ve->ve_hits, ve->ve_missed_update);

        avl_remove(&vc->vc_lastused_tree, ve);
        avl_remove(&vc->vc_offset_tree, ve);
        zio_buf_free(ve->ve_data, VCBS);
        kmem_free(ve, sizeof (vdev_cache_entry_t));
}

/*
 * Allocate an entry in the cache.  At this point we don't have the data,
 * we're just creating a placeholder so that multiple threads don't all
 * go off and read the same blocks.
 */
static vdev_cache_entry_t *
vdev_cache_allocate(zio_t *zio)
{
        vdev_cache_t *vc = &zio->io_vd->vdev_cache;
        uint64_t offset = P2ALIGN(zio->io_offset, VCBS);
        vdev_cache_entry_t *ve;

        ASSERT(MUTEX_HELD(&vc->vc_lock));

        if (zfs_vdev_cache_size == 0)
                return (NULL);

        /*
         * If adding a new entry would exceed the cache size,
         * evict the oldest entry (LRU).
         */
        if ((avl_numnodes(&vc->vc_lastused_tree) << zfs_vdev_cache_bshift) >
            zfs_vdev_cache_size) {
                ve = avl_first(&vc->vc_lastused_tree);
                if (ve->ve_fill_io != NULL) {
                        dprintf("can't evict in %p, still filling\n", vc);
                        return (NULL);
                }
                ASSERT(ve->ve_hits != 0);
                vdev_cache_evict(vc, ve);
        }

        ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
        ve->ve_offset = offset;
        ve->ve_lastused = LBOLT;
        ve->ve_data = zio_buf_alloc(VCBS);

        avl_add(&vc->vc_offset_tree, ve);
        avl_add(&vc->vc_lastused_tree, ve);

        return (ve);
}

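/*
 * Satisfy a read from an already-filled cache entry: refresh the entry's
 * position in the LRU tree, bump its hit count, and copy the requested
 * range out of the cache line into the caller's buffer.
 */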
static void
vdev_cache_hit(vdev_cache_t *vc, vdev_cache_entry_t *ve, zio_t *zio)
{
        uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);

        ASSERT(MUTEX_HELD(&vc->vc_lock));
        ASSERT(ve->ve_fill_io == NULL);

        if (ve->ve_lastused != LBOLT) {
                avl_remove(&vc->vc_lastused_tree, ve);
                ve->ve_lastused = LBOLT;
                avl_add(&vc->vc_lastused_tree, ve);
        }

        ve->ve_hits++;
        bcopy(ve->ve_data + cache_phase, zio->io_data, zio->io_size);
}

/*
 * Fill a previously allocated cache entry with data.
 */
static void
vdev_cache_fill(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;
        vdev_cache_t *vc = &vd->vdev_cache;
        vdev_cache_entry_t *ve = zio->io_private;
        zio_t *dio;

        ASSERT(zio->io_size == VCBS);

        /*
         * Add data to the cache.
         */
        mutex_enter(&vc->vc_lock);

        ASSERT(ve->ve_fill_io == zio);
        ASSERT(ve->ve_offset == zio->io_offset);
        ASSERT(ve->ve_data == zio->io_data);

        ve->ve_fill_io = NULL;

        /*
         * Even if this cache line was invalidated by a missed write update,
         * any reads that were queued up before the missed update are still
         * valid, so we can satisfy them from this line before we evict it.
         */
        for (dio = zio->io_delegate_list; dio; dio = dio->io_delegate_next)
                vdev_cache_hit(vc, ve, dio);

        if (zio->io_error || ve->ve_missed_update)
                vdev_cache_evict(vc, ve);

        mutex_exit(&vc->vc_lock);

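        /*
         * Hand the delegated reads (already satisfied from the cache line
         * above) back to the ZIO pipeline, propagating any fill error to
         * each of them.
         */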
        while ((dio = zio->io_delegate_list) != NULL) {
                zio->io_delegate_list = dio->io_delegate_next;
                dio->io_delegate_next = NULL;
                dio->io_error = zio->io_error;
                zio_next_stage(dio);
        }
}

/*
 * Read data from the cache.  Returns 0 on cache hit, errno on a miss.
 */
int
vdev_cache_read(zio_t *zio)
{
        vdev_cache_t *vc = &zio->io_vd->vdev_cache;
        vdev_cache_entry_t *ve, ve_search;
        uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
        uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);
        zio_t *fio;

        ASSERT(zio->io_type == ZIO_TYPE_READ);

        if (zio->io_flags & ZIO_FLAG_DONT_CACHE)
                return (EINVAL);

        if (zio->io_size > zfs_vdev_cache_max)
                return (EOVERFLOW);

        /*
         * If the I/O straddles two or more cache blocks, don't cache it.
         */
        if (P2CROSS(zio->io_offset, zio->io_offset + zio->io_size - 1, VCBS))
                return (EXDEV);

        ASSERT(cache_phase + zio->io_size <= VCBS);

        mutex_enter(&vc->vc_lock);

        ve_search.ve_offset = cache_offset;
        ve = avl_find(&vc->vc_offset_tree, &ve_search, NULL);

        if (ve != NULL) {
                if (ve->ve_missed_update) {
                        mutex_exit(&vc->vc_lock);
                        return (ESTALE);
                }

                if ((fio = ve->ve_fill_io) != NULL) {
                        zio->io_delegate_next = fio->io_delegate_list;
                        fio->io_delegate_list = zio;
                        zio_vdev_io_bypass(zio);
                        mutex_exit(&vc->vc_lock);
                        return (0);
                }

                vdev_cache_hit(vc, ve, zio);
                zio_vdev_io_bypass(zio);

                mutex_exit(&vc->vc_lock);
                return (0);
        }

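        /*
         * The offset is not in the cache: reserve a placeholder entry so
         * that concurrent readers of the same cache line wait for our fill
         * instead of issuing duplicate I/O.
         */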
        ve = vdev_cache_allocate(zio);

        if (ve == NULL) {
                mutex_exit(&vc->vc_lock);
                return (ENOMEM);
        }

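        /*
         * Issue a cache-line-sized (VCBS) child read to fill the new entry;
         * the original zio is delegated to the fill I/O and will be
         * completed from the cache when the fill finishes.
         */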
        fio = zio_vdev_child_io(zio, NULL, zio->io_vd, cache_offset,
            ve->ve_data, VCBS, ZIO_TYPE_READ, ZIO_PRIORITY_CACHE_FILL,
            ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_PROPAGATE |
            ZIO_FLAG_DONT_RETRY | ZIO_FLAG_NOBOOKMARK,
            vdev_cache_fill, ve);

        ve->ve_fill_io = fio;
        fio->io_delegate_list = zio;
        zio_vdev_io_bypass(zio);

        mutex_exit(&vc->vc_lock);
        zio_nowait(fio);

        return (0);
}

/*
 * Update cache contents upon write completion.
 */
void
vdev_cache_write(zio_t *zio)
{
        vdev_cache_t *vc = &zio->io_vd->vdev_cache;
        vdev_cache_entry_t *ve, ve_search;
        uint64_t io_start = zio->io_offset;
        uint64_t io_end = io_start + zio->io_size;
        uint64_t min_offset = P2ALIGN(io_start, VCBS);
        uint64_t max_offset = P2ROUNDUP(io_end, VCBS);
        avl_index_t where;

        ASSERT(zio->io_type == ZIO_TYPE_WRITE);

        mutex_enter(&vc->vc_lock);

        ve_search.ve_offset = min_offset;
        ve = avl_find(&vc->vc_offset_tree, &ve_search, &where);

        if (ve == NULL)
                ve = avl_nearest(&vc->vc_offset_tree, where, AVL_AFTER);

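        /*
         * Walk every cache line that overlaps the write and either copy the
         * new data into it or, if a fill is still in flight, mark it stale
         * (missed update) so the fill routine evicts it.
         */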
        while (ve != NULL && ve->ve_offset < max_offset) {
                uint64_t start = MAX(ve->ve_offset, io_start);
                uint64_t end = MIN(ve->ve_offset + VCBS, io_end);

                if (ve->ve_fill_io != NULL) {
                        ve->ve_missed_update = 1;
                } else {
                        bcopy((char *)zio->io_data + start - io_start,
                            ve->ve_data + start - ve->ve_offset, end - start);
                }
                ve = AVL_NEXT(&vc->vc_offset_tree, ve);
        }
        mutex_exit(&vc->vc_lock);
}

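/*
 * Initialize the per-vdev cache: set up the lock and create the
 * offset-indexed and LRU AVL trees.
 */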
void
vdev_cache_init(vdev_t *vd)
{
        vdev_cache_t *vc = &vd->vdev_cache;

        mutex_init(&vc->vc_lock, NULL, MUTEX_DEFAULT, NULL);

        avl_create(&vc->vc_offset_tree, vdev_cache_offset_compare,
            sizeof (vdev_cache_entry_t),
            offsetof(struct vdev_cache_entry, ve_offset_node));

        avl_create(&vc->vc_lastused_tree, vdev_cache_lastused_compare,
            sizeof (vdev_cache_entry_t),
            offsetof(struct vdev_cache_entry, ve_lastused_node));
}

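/*
 * Tear down the per-vdev cache: evict every remaining entry, then destroy
 * the trees and the lock.
 */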
void
vdev_cache_fini(vdev_t *vd)
{
        vdev_cache_t *vc = &vd->vdev_cache;
        vdev_cache_entry_t *ve;

        mutex_enter(&vc->vc_lock);
        while ((ve = avl_first(&vc->vc_offset_tree)) != NULL)
                vdev_cache_evict(vc, ve);
        mutex_exit(&vc->vc_lock);

        avl_destroy(&vc->vc_offset_tree);
        avl_destroy(&vc->vc_lastused_tree);

        mutex_destroy(&vc->vc_lock);
}