/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/dnode.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_zfetch.h>
#include <sys/dmu.h>
#include <sys/dbuf.h>
#include <sys/kstat.h>
/*
 * I'm against tunables, but these should probably exist as tweakable globals
 * until we can get this working the way we want it to.
 */

int zfs_prefetch_disable = 0;
/* max # of streams per zfetch */
uint32_t	zfetch_max_streams = 8;
/* min time before stream reclaim */
uint32_t	zfetch_min_sec_reap = 2;
/* max number of blocks to fetch at a time */
uint32_t	zfetch_block_cap = 256;
/* number of bytes in an array_read at which we stop prefetching (1MB) */
uint64_t	zfetch_array_rd_sz = 1024 * 1024;
SYSCTL_DECL(_vfs_zfs);
SYSCTL_INT(_vfs_zfs, OID_AUTO, prefetch_disable, CTLFLAG_RW,
    &zfs_prefetch_disable, 0, "Disable prefetch");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zfetch, CTLFLAG_RW, 0, "ZFS ZFETCH");
TUNABLE_INT("vfs.zfs.zfetch.max_streams", &zfetch_max_streams);
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_streams, CTLFLAG_RW,
    &zfetch_max_streams, 0, "Max # of streams per zfetch");
TUNABLE_INT("vfs.zfs.zfetch.min_sec_reap", &zfetch_min_sec_reap);
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, min_sec_reap, CTLFLAG_RDTUN,
    &zfetch_min_sec_reap, 0, "Min time before stream reclaim");
TUNABLE_INT("vfs.zfs.zfetch.block_cap", &zfetch_block_cap);
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, block_cap, CTLFLAG_RDTUN,
    &zfetch_block_cap, 0, "Max number of blocks to fetch at a time");
TUNABLE_QUAD("vfs.zfs.zfetch.array_rd_sz", &zfetch_array_rd_sz);
SYSCTL_QUAD(_vfs_zfs_zfetch, OID_AUTO, array_rd_sz, CTLFLAG_RDTUN,
    &zfetch_array_rd_sz, 0,
    "Number of bytes in an array_read at which we stop prefetching");
/* forward decls for static routines */
static boolean_t	dmu_zfetch_colinear(zfetch_t *, zstream_t *);
static void		dmu_zfetch_dofetch(zfetch_t *, zstream_t *);
static uint64_t		dmu_zfetch_fetch(dnode_t *, uint64_t, uint64_t);
static uint64_t		dmu_zfetch_fetchsz(dnode_t *, uint64_t, uint64_t);
static boolean_t	dmu_zfetch_find(zfetch_t *, zstream_t *, int);
static int		dmu_zfetch_stream_insert(zfetch_t *, zstream_t *);
static zstream_t	*dmu_zfetch_stream_reclaim(zfetch_t *);
static void		dmu_zfetch_stream_remove(zfetch_t *, zstream_t *);
static int		dmu_zfetch_streams_equal(zstream_t *, zstream_t *);
typedef struct zfetch_stats {
	kstat_named_t zfetchstat_hits;
	kstat_named_t zfetchstat_misses;
	kstat_named_t zfetchstat_colinear_hits;
	kstat_named_t zfetchstat_colinear_misses;
	kstat_named_t zfetchstat_stride_hits;
	kstat_named_t zfetchstat_stride_misses;
	kstat_named_t zfetchstat_reclaim_successes;
	kstat_named_t zfetchstat_reclaim_failures;
	kstat_named_t zfetchstat_stream_resets;
	kstat_named_t zfetchstat_stream_noresets;
	kstat_named_t zfetchstat_bogus_streams;
} zfetch_stats_t;
static zfetch_stats_t zfetch_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "colinear_hits",		KSTAT_DATA_UINT64 },
	{ "colinear_misses",		KSTAT_DATA_UINT64 },
	{ "stride_hits",		KSTAT_DATA_UINT64 },
	{ "stride_misses",		KSTAT_DATA_UINT64 },
	{ "reclaim_successes",		KSTAT_DATA_UINT64 },
	{ "reclaim_failures",		KSTAT_DATA_UINT64 },
	{ "streams_resets",		KSTAT_DATA_UINT64 },
	{ "streams_noresets",		KSTAT_DATA_UINT64 },
	{ "bogus_streams",		KSTAT_DATA_UINT64 },
};
#define	ZFETCHSTAT_INCR(stat, val) \
	atomic_add_64(&zfetch_stats.stat.value.ui64, (val));

#define	ZFETCHSTAT_BUMP(stat)	ZFETCHSTAT_INCR(stat, 1);

kstat_t		*zfetch_ksp;
/*
 * Given a zfetch structure and a zstream structure, determine whether the
 * blocks to be read are part of a co-linear pair of existing prefetch
 * streams.  If a set is found, coalesce the streams, removing one, and
 * configure the prefetch so it looks for a strided access pattern.
 *
 * In other words: if we find two sequential access streams that are
 * the same length and distance N apart, and this read is N from the
 * last stream, then we are probably in a strided access pattern.  So
 * combine the two sequential streams into a single strided stream.
 *
 * Returns whether co-linear streams were found.
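 *
 * Worked example (illustrative numbers, not from the original source):
 * stream A covers blocks [100, 102) and stream B covers [110, 112), each
 * with len == stride == 2, so diff == 10.  A read at block 120, which is
 * B's offset plus diff, matches below, and the two streams collapse into
 * one strided stream at offset 120 with zst_stride == 10.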
 */
static boolean_t
dmu_zfetch_colinear(zfetch_t *zf, zstream_t *zh)
{
	zstream_t	*z_walk;
	zstream_t	*z_comp;

	if (!rw_tryenter(&zf->zf_rwlock, RW_WRITER))
		return (B_FALSE);

	if (zh == NULL) {
		rw_exit(&zf->zf_rwlock);
		return (B_FALSE);
	}
	for (z_walk = list_head(&zf->zf_stream); z_walk;
	    z_walk = list_next(&zf->zf_stream, z_walk)) {
		for (z_comp = list_next(&zf->zf_stream, z_walk); z_comp;
		    z_comp = list_next(&zf->zf_stream, z_comp)) {
			int64_t		diff;

			if (z_walk->zst_len != z_walk->zst_stride ||
			    z_comp->zst_len != z_comp->zst_stride) {
				continue;
			}
			diff = z_comp->zst_offset - z_walk->zst_offset;
			if (z_comp->zst_offset + diff == zh->zst_offset) {
				z_walk->zst_offset = zh->zst_offset;
				z_walk->zst_direction = diff < 0 ? -1 : 1;
				z_walk->zst_stride =
				    diff * z_walk->zst_direction;
				z_walk->zst_ph_offset =
				    zh->zst_offset + z_walk->zst_stride;
				dmu_zfetch_stream_remove(zf, z_comp);
				mutex_destroy(&z_comp->zst_lock);
				kmem_free(z_comp, sizeof (zstream_t));

				dmu_zfetch_dofetch(zf, z_walk);

				rw_exit(&zf->zf_rwlock);
				return (B_TRUE);
			}
			diff = z_walk->zst_offset - z_comp->zst_offset;
			if (z_walk->zst_offset + diff == zh->zst_offset) {
				z_walk->zst_offset = zh->zst_offset;
				z_walk->zst_direction = diff < 0 ? -1 : 1;
				z_walk->zst_stride =
				    diff * z_walk->zst_direction;
				z_walk->zst_ph_offset =
				    zh->zst_offset + z_walk->zst_stride;
				dmu_zfetch_stream_remove(zf, z_comp);
				mutex_destroy(&z_comp->zst_lock);
				kmem_free(z_comp, sizeof (zstream_t));

				dmu_zfetch_dofetch(zf, z_walk);

				rw_exit(&zf->zf_rwlock);
				return (B_TRUE);
			}
		}
	}

	rw_exit(&zf->zf_rwlock);
	return (B_FALSE);
}
/*
 * Given a zstream_t, determine the bounds of the prefetch.  Then call the
 * routine that actually prefetches the individual blocks.
 */
static void
dmu_zfetch_dofetch(zfetch_t *zf, zstream_t *zs)
{
	uint64_t	prefetch_tail;
	uint64_t	prefetch_limit;
	uint64_t	prefetch_ofst;
	uint64_t	prefetch_len;
	uint64_t	blocks_fetched;

	zs->zst_stride = MAX((int64_t)zs->zst_stride, zs->zst_len);
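	/* Double the per-stream cap on each fetch, up to zfetch_block_cap. */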
	zs->zst_cap = MIN(zfetch_block_cap, 2 * zs->zst_cap);

	prefetch_tail = MAX((int64_t)zs->zst_ph_offset,
	    (int64_t)(zs->zst_offset + zs->zst_stride));
	/*
	 * XXX: use a faster division method?
	 */
	prefetch_limit = zs->zst_offset + zs->zst_len +
	    (zs->zst_cap * zs->zst_stride) / zs->zst_len;
	while (prefetch_tail < prefetch_limit) {
		prefetch_ofst = zs->zst_offset + zs->zst_direction *
		    (prefetch_tail - zs->zst_offset);

		prefetch_len = zs->zst_len;

		/*
		 * Don't prefetch beyond the end of the file, if working
		 * backwards.
		 */
		if ((zs->zst_direction == ZFETCH_BACKWARD) &&
		    (prefetch_ofst > prefetch_tail)) {
			prefetch_len += prefetch_ofst;
			prefetch_ofst = 0;
		}

		/* don't prefetch more than we're supposed to */
		if (prefetch_len > zs->zst_len)
			break;

		blocks_fetched = dmu_zfetch_fetch(zf->zf_dnode,
		    prefetch_ofst, zs->zst_len);

		prefetch_tail += zs->zst_stride;
		/* stop if we've run out of stuff to prefetch */
		if (blocks_fetched < zs->zst_len)
			break;
	}
	zs->zst_ph_offset = prefetch_tail;
	zs->zst_last = ddi_get_lbolt();
}
void
zfetch_init(void)
{
	zfetch_ksp = kstat_create("zfs", 0, "zfetchstats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (zfetch_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (zfetch_ksp != NULL) {
		zfetch_ksp->ks_data = &zfetch_stats;
		kstat_install(zfetch_ksp);
	}
}

void
zfetch_fini(void)
{
	if (zfetch_ksp != NULL) {
		kstat_delete(zfetch_ksp);
		zfetch_ksp = NULL;
	}
}
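
/*
 * Observability note (illustrative, not in the original source): once
 * installed, these counters can be read with kstat(1M) on illumos, e.g.
 * "kstat -m zfs -n zfetchstats", or via the kstat.zfs.misc.zfetchstats
 * sysctl tree on FreeBSD.
 */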
/*
 * This takes a pointer to a zfetch structure and a dnode.  It performs the
 * necessary setup for the zfetch structure, grokking data from the
 * associated dnode.
 */
void
dmu_zfetch_init(zfetch_t *zf, dnode_t *dno)
{
	if (zf == NULL)
		return;

	zf->zf_dnode = dno;
	zf->zf_stream_cnt = 0;
	zf->zf_alloc_fail = 0;

	list_create(&zf->zf_stream, sizeof (zstream_t),
	    offsetof(zstream_t, zst_node));

	rw_init(&zf->zf_rwlock, NULL, RW_DEFAULT, NULL);
}
/*
 * This function computes the actual size, in blocks, that can be prefetched,
 * and fetches it.
 */
static uint64_t
dmu_zfetch_fetch(dnode_t *dn, uint64_t blkid, uint64_t nblks)
{
	uint64_t	fetchsz;
	uint64_t	i;

	fetchsz = dmu_zfetch_fetchsz(dn, blkid, nblks);

	for (i = 0; i < fetchsz; i++) {
		dbuf_prefetch(dn, blkid + i, ZIO_PRIORITY_ASYNC_READ);
	}

	return (fetchsz);
}
/*
 * This function returns the number of blocks that would be prefetched, based
 * upon the supplied dnode, blockid, and nblks.  This is used so that we can
 * update streams in place, and then prefetch with their old value after the
 * fact.  This way, we can delay the prefetch, but subsequent accesses to the
 * stream won't result in the same data being prefetched multiple times.
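 *
 * Worked example (illustrative numbers, not from the original source):
 * with dn_maxblkid == 100, a request for blkid 98, nblks 8 would run past
 * the last block, so it is clamped below to fetchsz == 3 (blocks 98..100).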
 */
static uint64_t
dmu_zfetch_fetchsz(dnode_t *dn, uint64_t blkid, uint64_t nblks)
{
	uint64_t	fetchsz;

	if (blkid > dn->dn_maxblkid) {
		return (0);
	}

	/* compute fetch size */
	if (blkid + nblks + 1 > dn->dn_maxblkid) {
		fetchsz = (dn->dn_maxblkid - blkid) + 1;
		ASSERT(blkid + fetchsz - 1 <= dn->dn_maxblkid);
	} else {
		fetchsz = nblks;
	}

	return (fetchsz);
}
/*
 * Given a zfetch and a zstream structure, see if there is an associated
 * zstream for this block read.  If so, it starts a prefetch for the stream
 * it located and returns true; otherwise it returns false.
 */
static boolean_t
dmu_zfetch_find(zfetch_t *zf, zstream_t *zh, int prefetched)
{
	zstream_t	*zs;
	int64_t		diff;
	int		reset = !prefetched;
	int		rc = 0;
	if (zh == NULL)
		return (0);

	/*
	 * XXX: This locking strategy is a bit coarse; however, its impact has
	 * yet to be tested.  If this turns out to be an issue, it can be
	 * modified in a number of different ways.
	 */

	rw_enter(&zf->zf_rwlock, RW_READER);
top:

	for (zs = list_head(&zf->zf_stream); zs;
	    zs = list_next(&zf->zf_stream, zs)) {

		/*
		 * XXX - should this be an assert?
		 */
		if (zs->zst_len == 0) {
			/* bogus stream */
			ZFETCHSTAT_BUMP(zfetchstat_bogus_streams);
			continue;
		}

		/*
		 * We hit this case when we are in a strided prefetch stream:
		 * we will read "len" blocks before "striding".
		 */
		if (zh->zst_offset >= zs->zst_offset &&
		    zh->zst_offset < zs->zst_offset + zs->zst_len) {
			if (prefetched) {
				/* already fetched */
				ZFETCHSTAT_BUMP(zfetchstat_stride_hits);
				rc = 1;
				goto out;
			} else {
				ZFETCHSTAT_BUMP(zfetchstat_stride_misses);
			}
		}

		/*
		 * This is the forward sequential read case: we increment
		 * len by one each time we hit here, so we will enter this
		 * case on every read.
		 */
		if (zh->zst_offset == zs->zst_offset + zs->zst_len) {

			reset = !prefetched && zs->zst_len > 1;

			if (mutex_tryenter(&zs->zst_lock) == 0) {
				rc = 1;
				goto out;
			}

			if (zh->zst_offset != zs->zst_offset + zs->zst_len) {
				mutex_exit(&zs->zst_lock);
				goto top;
			}
			zs->zst_len += zh->zst_len;
			diff = zs->zst_len - zfetch_block_cap;
			if (diff > 0) {
				zs->zst_offset += diff;
				zs->zst_len = zs->zst_len > diff ?
				    zs->zst_len - diff : 0;
			}
			zs->zst_direction = ZFETCH_FORWARD;

			break;

		/*
		 * Same as above, but reading backwards through the file.
		 */
		} else if (zh->zst_offset == zs->zst_offset - zh->zst_len) {
			/* backwards sequential access */

			reset = !prefetched && zs->zst_len > 1;

			if (mutex_tryenter(&zs->zst_lock) == 0) {
				rc = 1;
				goto out;
			}

			if (zh->zst_offset != zs->zst_offset - zh->zst_len) {
				mutex_exit(&zs->zst_lock);
				goto top;
			}

			zs->zst_offset = zs->zst_offset > zh->zst_len ?
			    zs->zst_offset - zh->zst_len : 0;
			zs->zst_ph_offset = zs->zst_ph_offset > zh->zst_len ?
			    zs->zst_ph_offset - zh->zst_len : 0;
			zs->zst_len += zh->zst_len;

			diff = zs->zst_len - zfetch_block_cap;
			if (diff > 0) {
				zs->zst_ph_offset = zs->zst_ph_offset > diff ?
				    zs->zst_ph_offset - diff : 0;
				zs->zst_len = zs->zst_len > diff ?
				    zs->zst_len - diff : zs->zst_len;
			}
			zs->zst_direction = ZFETCH_BACKWARD;

			break;

		} else if ((zh->zst_offset - zs->zst_offset - zs->zst_stride <
		    zs->zst_len) && (zs->zst_len != zs->zst_stride)) {
			/* strided forward access */

			if (mutex_tryenter(&zs->zst_lock) == 0) {
				rc = 1;
				goto out;
			}

			if ((zh->zst_offset - zs->zst_offset - zs->zst_stride >=
			    zs->zst_len) || (zs->zst_len == zs->zst_stride)) {
				mutex_exit(&zs->zst_lock);
				goto top;
			}

			zs->zst_offset += zs->zst_stride;
			zs->zst_direction = ZFETCH_FORWARD;

			break;

		} else if ((zh->zst_offset - zs->zst_offset + zs->zst_stride <
		    zs->zst_len) && (zs->zst_len != zs->zst_stride)) {
			/* strided reverse access */

			if (mutex_tryenter(&zs->zst_lock) == 0) {
				rc = 1;
				goto out;
			}

			if ((zh->zst_offset - zs->zst_offset + zs->zst_stride >=
			    zs->zst_len) || (zs->zst_len == zs->zst_stride)) {
				mutex_exit(&zs->zst_lock);
				goto top;
			}

			zs->zst_offset = zs->zst_offset > zs->zst_stride ?
			    zs->zst_offset - zs->zst_stride : 0;
			zs->zst_ph_offset = (zs->zst_ph_offset >
			    (2 * zs->zst_stride)) ?
			    (zs->zst_ph_offset - (2 * zs->zst_stride)) : 0;
			zs->zst_direction = ZFETCH_BACKWARD;

			break;
		}
	}

	if (zs) {
		if (reset) {
			zstream_t *remove = zs;

			ZFETCHSTAT_BUMP(zfetchstat_stream_resets);
			rc = 0;
			mutex_exit(&zs->zst_lock);
			rw_exit(&zf->zf_rwlock);
			rw_enter(&zf->zf_rwlock, RW_WRITER);
			/*
			 * Relocate the stream, in case someone removes
			 * it while we were acquiring the WRITER lock.
			 */
			for (zs = list_head(&zf->zf_stream); zs;
			    zs = list_next(&zf->zf_stream, zs)) {
				if (zs == remove) {
					dmu_zfetch_stream_remove(zf, zs);
					mutex_destroy(&zs->zst_lock);
					kmem_free(zs, sizeof (zstream_t));
					break;
				}
			}
		} else {
			ZFETCHSTAT_BUMP(zfetchstat_stream_noresets);
			rc = 1;
			dmu_zfetch_dofetch(zf, zs);
			mutex_exit(&zs->zst_lock);
		}
	}
out:
	rw_exit(&zf->zf_rwlock);
	return (rc);
}
/*
 * Clean-up state associated with a zfetch structure.  This frees allocated
 * structure members, empties the zf_stream list, and generally makes things
 * nice.  This doesn't free the zfetch_t itself, that's left to the caller.
 */
void
dmu_zfetch_rele(zfetch_t *zf)
{
	zstream_t	*zs;
	zstream_t	*zs_next;

	ASSERT(!RW_LOCK_HELD(&zf->zf_rwlock));

	for (zs = list_head(&zf->zf_stream); zs; zs = zs_next) {
		zs_next = list_next(&zf->zf_stream, zs);

		list_remove(&zf->zf_stream, zs);
		mutex_destroy(&zs->zst_lock);
		kmem_free(zs, sizeof (zstream_t));
	}
	list_destroy(&zf->zf_stream);
	rw_destroy(&zf->zf_rwlock);

	zf->zf_dnode = NULL;
}
/*
 * Given a zfetch and zstream structure, insert the zstream structure into the
 * stream list contained within the zfetch structure.  Perform the appropriate
 * book-keeping.  It is possible that another thread has inserted a stream that
 * matches the one we are about to insert, so we must be sure to check for this
 * case.  If one is found, return failure, and let the caller clean up the
 * duplicates.
 */
static int
dmu_zfetch_stream_insert(zfetch_t *zf, zstream_t *zs)
{
	zstream_t	*zs_walk;
	zstream_t	*zs_next;

	ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));

	for (zs_walk = list_head(&zf->zf_stream); zs_walk; zs_walk = zs_next) {
		zs_next = list_next(&zf->zf_stream, zs_walk);

		if (dmu_zfetch_streams_equal(zs_walk, zs)) {
			return (0);
		}
	}

	list_insert_head(&zf->zf_stream, zs);
	zf->zf_stream_cnt++;
	return (1);
}
/*
 * Walk the list of zstreams in the given zfetch, find an old one (by time),
 * and reclaim it for use by the caller.
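 *
 * A stream counts as old once it has been idle for more than
 * zfetch_min_sec_reap seconds; zst_last is recorded in lbolt ticks, hence
 * the division by hz below.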
 */
static zstream_t *
dmu_zfetch_stream_reclaim(zfetch_t *zf)
{
	zstream_t	*zs;

	if (!rw_tryenter(&zf->zf_rwlock, RW_WRITER))
		return (NULL);

	for (zs = list_head(&zf->zf_stream); zs;
	    zs = list_next(&zf->zf_stream, zs)) {

		if (((ddi_get_lbolt() - zs->zst_last) / hz) >
		    zfetch_min_sec_reap)
			break;
	}

	if (zs) {
		dmu_zfetch_stream_remove(zf, zs);
		mutex_destroy(&zs->zst_lock);
		bzero(zs, sizeof (zstream_t));
	} else {
		zf->zf_alloc_fail++;
	}
	rw_exit(&zf->zf_rwlock);

	return (zs);
}
/*
 * Given a zfetch and zstream structure, remove the zstream structure from
 * its container in the zfetch structure.  Perform the appropriate
 * book-keeping.
 */
static void
dmu_zfetch_stream_remove(zfetch_t *zf, zstream_t *zs)
{
	ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));

	list_remove(&zf->zf_stream, zs);
	zf->zf_stream_cnt--;
}
static int
dmu_zfetch_streams_equal(zstream_t *zs1, zstream_t *zs2)
{
	if (zs1->zst_offset != zs2->zst_offset)
		return (0);

	if (zs1->zst_len != zs2->zst_len)
		return (0);

	if (zs1->zst_stride != zs2->zst_stride)
		return (0);

	if (zs1->zst_ph_offset != zs2->zst_ph_offset)
		return (0);

	if (zs1->zst_cap != zs2->zst_cap)
		return (0);

	if (zs1->zst_direction != zs2->zst_direction)
		return (0);

	return (1);
}
/*
 * This is the prefetch entry point.  It calls all of the other dmu_zfetch
 * routines to create, delete, find, or operate upon prefetch streams.
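 *
 * Illustrative call shape (assumed, simplified; the exact caller is not
 * shown in this file): the DMU read path hands each read to this routine
 * as a byte offset/length pair, roughly
 *
 *	dmu_zfetch(&dn->dn_zfetch, blkid << dn->dn_datablkshift,
 *	    nblks << dn->dn_datablkshift, B_TRUE);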
 */
void
dmu_zfetch(zfetch_t *zf, uint64_t offset, uint64_t size, int prefetched)
{
	zstream_t	zst;
	zstream_t	*newstream;
	boolean_t	fetched;
	int		inserted;
	unsigned int	blkshft;
	uint64_t	blksz;

	if (zfs_prefetch_disable)
		return;

	/* files that aren't power-of-2 blocksz are only one block: no work */
	if (!zf->zf_dnode->dn_datablkshift)
		return;

	/* convert offset and size into blockid and nblocks */
	blkshft = zf->zf_dnode->dn_datablkshift;
	blksz = (1 << blkshft);

	bzero(&zst, sizeof (zstream_t));
	zst.zst_offset = offset >> blkshft;
	zst.zst_len = (P2ROUNDUP(offset + size, blksz) -
	    P2ALIGN(offset, blksz)) >> blkshft;
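
	/*
	 * Worked example (illustrative numbers, not from the original
	 * source): with a 128KB block size (blkshft == 17), a read of
	 * 50000 bytes at offset 100000 rounds out to bytes [0, 262144),
	 * so zst_offset == 0 and zst_len == 2 (blocks 0 and 1).
	 */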
	fetched = dmu_zfetch_find(zf, &zst, prefetched);
	if (fetched) {
		ZFETCHSTAT_BUMP(zfetchstat_hits);
	} else {
		ZFETCHSTAT_BUMP(zfetchstat_misses);
		fetched = dmu_zfetch_colinear(zf, &zst);
		if (fetched) {
			ZFETCHSTAT_BUMP(zfetchstat_colinear_hits);
		} else {
			ZFETCHSTAT_BUMP(zfetchstat_colinear_misses);
		}
	}
	if (!fetched) {
		newstream = dmu_zfetch_stream_reclaim(zf);

		/*
		 * We still couldn't find a stream: drop the lock, and
		 * allocate one if possible.  Otherwise, give up and go home.
		 */
		if (newstream) {
			ZFETCHSTAT_BUMP(zfetchstat_reclaim_successes);
		} else {
			uint64_t	maxblocks;
			uint32_t	max_streams;
			uint32_t	cur_streams;

			ZFETCHSTAT_BUMP(zfetchstat_reclaim_failures);
			cur_streams = zf->zf_stream_cnt;
			maxblocks = zf->zf_dnode->dn_maxblkid;

			max_streams = MIN(zfetch_max_streams,
			    (maxblocks / zfetch_block_cap));
			if (max_streams == 0) {
				max_streams++;
			}

			if (cur_streams >= max_streams) {
				return;
			}
			newstream = kmem_zalloc(sizeof (zstream_t), KM_SLEEP);
		}

		newstream->zst_offset = zst.zst_offset;
		newstream->zst_len = zst.zst_len;
		newstream->zst_stride = zst.zst_len;
		newstream->zst_ph_offset = zst.zst_len + zst.zst_offset;
		newstream->zst_cap = zst.zst_len;
		newstream->zst_direction = ZFETCH_FORWARD;
		newstream->zst_last = ddi_get_lbolt();

		mutex_init(&newstream->zst_lock, NULL, MUTEX_DEFAULT, NULL);

		rw_enter(&zf->zf_rwlock, RW_WRITER);
		inserted = dmu_zfetch_stream_insert(zf, newstream);
		rw_exit(&zf->zf_rwlock);

		if (!inserted) {
			mutex_destroy(&newstream->zst_lock);
			kmem_free(newstream, sizeof (zstream_t));
		}
	}
}