 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/zfs_context.h>
#include <sys/dnode.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_zfetch.h>
#include <sys/dbuf.h>
#include <sys/kstat.h>
/*
 * I'm against tunables, but these should probably exist as tweakable globals
 * until we can get this working the way we want it to.
 */

int zfs_prefetch_disable = 0;

/* max # of streams per zfetch */
uint32_t zfetch_max_streams = 8;
/* min time before stream reclaim */
uint32_t zfetch_min_sec_reap = 2;
/* max number of blocks to fetch at a time */
uint32_t zfetch_block_cap = 256;
/* number of bytes in an array_read at which we stop prefetching (1MB) */
uint64_t zfetch_array_rd_sz = 1024 * 1024;
SYSCTL_DECL(_vfs_zfs);
SYSCTL_INT(_vfs_zfs, OID_AUTO, prefetch_disable, CTLFLAG_RW,
    &zfs_prefetch_disable, 0, "Disable prefetch");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zfetch, CTLFLAG_RW, 0, "ZFS ZFETCH");
TUNABLE_INT("vfs.zfs.zfetch.max_streams", &zfetch_max_streams);
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_streams, CTLFLAG_RW,
    &zfetch_max_streams, 0, "Max # of streams per zfetch");
TUNABLE_INT("vfs.zfs.zfetch.min_sec_reap", &zfetch_min_sec_reap);
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, min_sec_reap, CTLFLAG_RDTUN,
    &zfetch_min_sec_reap, 0, "Min time before stream reclaim");
TUNABLE_INT("vfs.zfs.zfetch.block_cap", &zfetch_block_cap);
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, block_cap, CTLFLAG_RDTUN,
    &zfetch_block_cap, 0, "Max number of blocks to fetch at a time");
TUNABLE_QUAD("vfs.zfs.zfetch.array_rd_sz", &zfetch_array_rd_sz);
SYSCTL_UQUAD(_vfs_zfs_zfetch, OID_AUTO, array_rd_sz, CTLFLAG_RDTUN,
    &zfetch_array_rd_sz, 0,
    "Number of bytes in an array_read at which we stop prefetching");
/* forward decls for static routines */
static int dmu_zfetch_colinear(zfetch_t *, zstream_t *);
static void dmu_zfetch_dofetch(zfetch_t *, zstream_t *);
static uint64_t dmu_zfetch_fetch(dnode_t *, uint64_t, uint64_t);
static uint64_t dmu_zfetch_fetchsz(dnode_t *, uint64_t, uint64_t);
static int dmu_zfetch_find(zfetch_t *, zstream_t *, int);
static int dmu_zfetch_stream_insert(zfetch_t *, zstream_t *);
static zstream_t *dmu_zfetch_stream_reclaim(zfetch_t *);
static void dmu_zfetch_stream_remove(zfetch_t *, zstream_t *);
static int dmu_zfetch_streams_equal(zstream_t *, zstream_t *);
typedef struct zfetch_stats {
	kstat_named_t zfetchstat_hits;
	kstat_named_t zfetchstat_misses;
	kstat_named_t zfetchstat_colinear_hits;
	kstat_named_t zfetchstat_colinear_misses;
	kstat_named_t zfetchstat_stride_hits;
	kstat_named_t zfetchstat_stride_misses;
	kstat_named_t zfetchstat_reclaim_successes;
	kstat_named_t zfetchstat_reclaim_failures;
	kstat_named_t zfetchstat_stream_resets;
	kstat_named_t zfetchstat_stream_noresets;
	kstat_named_t zfetchstat_bogus_streams;
} zfetch_stats_t;
static zfetch_stats_t zfetch_stats = {
	{ "hits", KSTAT_DATA_UINT64 },
	{ "misses", KSTAT_DATA_UINT64 },
	{ "colinear_hits", KSTAT_DATA_UINT64 },
	{ "colinear_misses", KSTAT_DATA_UINT64 },
	{ "stride_hits", KSTAT_DATA_UINT64 },
	{ "stride_misses", KSTAT_DATA_UINT64 },
	{ "reclaim_successes", KSTAT_DATA_UINT64 },
	{ "reclaim_failures", KSTAT_DATA_UINT64 },
	{ "streams_resets", KSTAT_DATA_UINT64 },
	{ "streams_noresets", KSTAT_DATA_UINT64 },
	{ "bogus_streams", KSTAT_DATA_UINT64 },
};
#define	ZFETCHSTAT_INCR(stat, val) \
	atomic_add_64(&zfetch_stats.stat.value.ui64, (val))

#define	ZFETCHSTAT_BUMP(stat)	ZFETCHSTAT_INCR(stat, 1)
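
/*
 * Example: ZFETCHSTAT_BUMP(zfetchstat_hits) expands to an atomic_add_64()
 * of 1 on zfetch_stats.zfetchstat_hits.value.ui64, so the counters can be
 * bumped from concurrent contexts without any additional locking.
 */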
/*
 * Given a zfetch structure and a zstream structure, determine whether the
 * blocks to be read are part of a co-linear pair of existing prefetch
 * streams.  If a set is found, coalesce the streams, removing one, and
 * configure the prefetch so it looks for a strided access pattern.
 *
 * In other words: if we find two sequential access streams that are
 * the same length and distance N apart, and this read is N from the
 * last stream, then we are probably in a strided access pattern.  So
 * combine the two sequential streams into a single strided stream.
 *
 * If no co-linear streams are found, return 0.
 */
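
/*
 * Worked example: streams covering blocks [0, 2) and [100, 102) are
 * co-linear (same length, 100 apart).  A read landing at block 200
 * completes the pattern, so the pair is merged into a single stream
 * with zst_stride == 100, anchored at the new read.
 */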
static int
dmu_zfetch_colinear(zfetch_t *zf, zstream_t *zh)
{
	zstream_t *z_walk;
	zstream_t *z_comp;

	if (! rw_tryenter(&zf->zf_rwlock, RW_WRITER))
		return (0);

	if (zh == NULL) {
		rw_exit(&zf->zf_rwlock);
		return (0);
	}
	for (z_walk = list_head(&zf->zf_stream); z_walk;
	    z_walk = list_next(&zf->zf_stream, z_walk)) {
		for (z_comp = list_next(&zf->zf_stream, z_walk); z_comp;
		    z_comp = list_next(&zf->zf_stream, z_comp)) {
			int64_t diff;

			if (z_walk->zst_len != z_walk->zst_stride ||
			    z_comp->zst_len != z_comp->zst_stride) {
				continue;
			}

			diff = z_comp->zst_offset - z_walk->zst_offset;
			if (z_comp->zst_offset + diff == zh->zst_offset) {
				z_walk->zst_offset = zh->zst_offset;
				z_walk->zst_direction = diff < 0 ? -1 : 1;
				z_walk->zst_stride =
				    diff * z_walk->zst_direction;
				z_walk->zst_ph_offset =
				    zh->zst_offset + z_walk->zst_stride;
				dmu_zfetch_stream_remove(zf, z_comp);
				mutex_destroy(&z_comp->zst_lock);
				kmem_free(z_comp, sizeof (zstream_t));

				dmu_zfetch_dofetch(zf, z_walk);

				rw_exit(&zf->zf_rwlock);
				return (1);
			}
			diff = z_walk->zst_offset - z_comp->zst_offset;
			if (z_walk->zst_offset + diff == zh->zst_offset) {
				z_walk->zst_offset = zh->zst_offset;
				z_walk->zst_direction = diff < 0 ? -1 : 1;
				z_walk->zst_stride =
				    diff * z_walk->zst_direction;
				z_walk->zst_ph_offset =
				    zh->zst_offset + z_walk->zst_stride;
				dmu_zfetch_stream_remove(zf, z_comp);
				mutex_destroy(&z_comp->zst_lock);
				kmem_free(z_comp, sizeof (zstream_t));

				dmu_zfetch_dofetch(zf, z_walk);

				rw_exit(&zf->zf_rwlock);
				return (1);
			}
		}
	}

	rw_exit(&zf->zf_rwlock);
	return (0);
}
/*
 * Given a zstream_t, determine the bounds of the prefetch.  Then call the
 * routine that actually prefetches the individual blocks.
 */
static void
dmu_zfetch_dofetch(zfetch_t *zf, zstream_t *zs)
{
	uint64_t prefetch_tail;
	uint64_t prefetch_limit;
	uint64_t prefetch_ofst;
	uint64_t prefetch_len;
	uint64_t blocks_fetched;
	zs->zst_stride = MAX((int64_t)zs->zst_stride, zs->zst_len);
	zs->zst_cap = MIN(zfetch_block_cap, 2 * zs->zst_cap);

	prefetch_tail = MAX((int64_t)zs->zst_ph_offset,
	    (int64_t)(zs->zst_offset + zs->zst_stride));
	/*
	 * XXX: use a faster division method?
	 */
	prefetch_limit = zs->zst_offset + zs->zst_len +
	    (zs->zst_cap * zs->zst_stride) / zs->zst_len;
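	/*
	 * The window reaches zst_cap / zst_len strides ahead; since each
	 * stride fetches zst_len blocks, roughly zst_cap blocks at most
	 * are ever outstanding for this stream.
	 */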
	while (prefetch_tail < prefetch_limit) {
		prefetch_ofst = zs->zst_offset + zs->zst_direction *
		    (prefetch_tail - zs->zst_offset);

		prefetch_len = zs->zst_len;

		/*
		 * Don't prefetch beyond the end of the file, if working
		 * backwards.
		 */
		if ((zs->zst_direction == ZFETCH_BACKWARD) &&
		    (prefetch_ofst > prefetch_tail)) {
			prefetch_len += prefetch_ofst;
			prefetch_ofst = 0;
		}

		/* don't prefetch more than we're supposed to */
		if (prefetch_len > zs->zst_len)
			break;

		blocks_fetched = dmu_zfetch_fetch(zf->zf_dnode,
		    prefetch_ofst, zs->zst_len);

		prefetch_tail += zs->zst_stride;
		/* stop if we've run out of stuff to prefetch */
		if (blocks_fetched < zs->zst_len)
			break;
	}
	zs->zst_ph_offset = prefetch_tail;
	zs->zst_last = ddi_get_lbolt();
}
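
/*
 * The registration below exports the counters above as the "zfetchstats"
 * kstat; the matching kstat_delete() tears it down.
 */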
kstat_t *zfetch_ksp;

void
zfetch_init(void)
{
	zfetch_ksp = kstat_create("zfs", 0, "zfetchstats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (zfetch_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (zfetch_ksp != NULL) {
		zfetch_ksp->ks_data = &zfetch_stats;
		kstat_install(zfetch_ksp);
	}
}

void
zfetch_fini(void)
{
	if (zfetch_ksp != NULL) {
		kstat_delete(zfetch_ksp);
		zfetch_ksp = NULL;
	}
}
/*
 * This takes a pointer to a zfetch structure and a dnode.  It performs the
 * necessary setup for the zfetch structure, grokking data from the
 * associated dnode.
 */
void
dmu_zfetch_init(zfetch_t *zf, dnode_t *dno)
{
	if (zf == NULL)
		return;

	zf->zf_dnode = dno;
	zf->zf_stream_cnt = 0;
	zf->zf_alloc_fail = 0;

	list_create(&zf->zf_stream, sizeof (zstream_t),
	    offsetof(zstream_t, zst_node));

	rw_init(&zf->zf_rwlock, NULL, RW_DEFAULT, NULL);
}
/*
 * This function computes the actual size, in blocks, that can be prefetched,
 * and fetches it.
 */
static uint64_t
dmu_zfetch_fetch(dnode_t *dn, uint64_t blkid, uint64_t nblks)
{
	uint64_t fetchsz;
	uint64_t i;

	fetchsz = dmu_zfetch_fetchsz(dn, blkid, nblks);

	/* issue an asynchronous dbuf prefetch for each block in range */
	for (i = 0; i < fetchsz; i++) {
		dbuf_prefetch(dn, blkid + i);
	}

	return (fetchsz);
}
/*
 * This function returns the number of blocks that would be prefetched, based
 * upon the supplied dnode, blockid, and nblks.  This is used so that we can
 * update streams in place, and then prefetch with their old value after the
 * fact.  This way, we can delay the prefetch, but subsequent accesses to the
 * stream won't result in the same data being prefetched multiple times.
 */
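
/*
 * Example: with dn_maxblkid == 100, a request for blkid 90 and nblks 20
 * clamps to fetchsz == 11, i.e. blocks 90 through 100 inclusive.
 */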
static uint64_t
dmu_zfetch_fetchsz(dnode_t *dn, uint64_t blkid, uint64_t nblks)
{
	uint64_t fetchsz;

	if (blkid > dn->dn_maxblkid) {
		return (0);
	}

	/* compute fetch size */
	if (blkid + nblks + 1 > dn->dn_maxblkid) {
		fetchsz = (dn->dn_maxblkid - blkid) + 1;
		ASSERT(blkid + fetchsz - 1 <= dn->dn_maxblkid);
	} else {
		fetchsz = nblks;
	}

	return (fetchsz);
}
/*
 * Given a zfetch and a zstream structure, see if there is an associated
 * zstream for this block read.  If so, it starts a prefetch for the stream
 * it located and returns true, otherwise it returns false.
 */
static int
dmu_zfetch_find(zfetch_t *zf, zstream_t *zh, int prefetched)
{
	zstream_t *zs;
	int64_t diff;
	int reset = !prefetched;
	int rc = 0;

	if (zh == NULL)
		return (0);

	/*
	 * XXX: This locking strategy is a bit coarse; however, its impact has
	 * yet to be tested.  If this turns out to be an issue, it can be
	 * modified in a number of different ways.
	 */

	rw_enter(&zf->zf_rwlock, RW_READER);
top:
	for (zs = list_head(&zf->zf_stream); zs;
	    zs = list_next(&zf->zf_stream, zs)) {

		/*
		 * XXX - should this be an assert?
		 */
		if (zs->zst_len == 0) {
			/* bogus stream */
			ZFETCHSTAT_BUMP(zfetchstat_bogus_streams);
			continue;
		}

		/*
		 * We hit this case when we are in a strided prefetch stream:
		 * we will read "len" blocks before "striding".
		 */
		if (zh->zst_offset >= zs->zst_offset &&
		    zh->zst_offset < zs->zst_offset + zs->zst_len) {
			if (prefetched) {
				/* already fetched */
				ZFETCHSTAT_BUMP(zfetchstat_stride_hits);
				rc = 1;
				goto out;
			} else {
				ZFETCHSTAT_BUMP(zfetchstat_stride_misses);
			}
		}
		/*
		 * This is the forward sequential read case: we increment
		 * len by one each time we hit here, so we will enter this
		 * case on every read.
		 */
		if (zh->zst_offset == zs->zst_offset + zs->zst_len) {

			reset = !prefetched && zs->zst_len > 1;

			if (mutex_tryenter(&zs->zst_lock) == 0) {
				rc = 1;
				goto out;
			}

			if (zh->zst_offset != zs->zst_offset + zs->zst_len) {
				mutex_exit(&zs->zst_lock);
				goto top;
			}
			zs->zst_len += zh->zst_len;
			diff = zs->zst_len - zfetch_block_cap;
			if (diff > 0) {
				zs->zst_offset += diff;
				zs->zst_len = zs->zst_len > diff ?
				    zs->zst_len - diff : 0;
			}
			zs->zst_direction = ZFETCH_FORWARD;
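			/*
			 * Note the trim above: zst_len never exceeds
			 * zfetch_block_cap; any overshoot is dropped from
			 * the front of the stream.
			 */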

			break;

		/*
		 * Same as above, but reading backwards through the file.
		 */
		} else if (zh->zst_offset == zs->zst_offset - zh->zst_len) {
			/* backwards sequential access */

			reset = !prefetched && zs->zst_len > 1;

			if (mutex_tryenter(&zs->zst_lock) == 0) {
				rc = 1;
				goto out;
			}

			if (zh->zst_offset != zs->zst_offset - zh->zst_len) {
				mutex_exit(&zs->zst_lock);
				goto top;
			}

			zs->zst_offset = zs->zst_offset > zh->zst_len ?
			    zs->zst_offset - zh->zst_len : 0;
			zs->zst_ph_offset = zs->zst_ph_offset > zh->zst_len ?
			    zs->zst_ph_offset - zh->zst_len : 0;
			zs->zst_len += zh->zst_len;

			diff = zs->zst_len - zfetch_block_cap;
			if (diff > 0) {
				zs->zst_ph_offset = zs->zst_ph_offset > diff ?
				    zs->zst_ph_offset - diff : 0;
				zs->zst_len = zs->zst_len > diff ?
				    zs->zst_len - diff : zs->zst_len;
			}
			zs->zst_direction = ZFETCH_BACKWARD;

			break;
		} else if ((zh->zst_offset - zs->zst_offset - zs->zst_stride <
		    zs->zst_len) && (zs->zst_len != zs->zst_stride)) {
			/* strided forward access */

			if (mutex_tryenter(&zs->zst_lock) == 0) {
				rc = 1;
				goto out;
			}

			if ((zh->zst_offset - zs->zst_offset - zs->zst_stride >=
			    zs->zst_len) || (zs->zst_len == zs->zst_stride)) {
				mutex_exit(&zs->zst_lock);
				goto top;
			}

			zs->zst_offset += zs->zst_stride;
			zs->zst_direction = ZFETCH_FORWARD;

			break;
		} else if ((zh->zst_offset - zs->zst_offset + zs->zst_stride <
		    zs->zst_len) && (zs->zst_len != zs->zst_stride)) {
			/* strided reverse access */

			if (mutex_tryenter(&zs->zst_lock) == 0) {
				rc = 1;
				goto out;
			}

			if ((zh->zst_offset - zs->zst_offset + zs->zst_stride >=
			    zs->zst_len) || (zs->zst_len == zs->zst_stride)) {
				mutex_exit(&zs->zst_lock);
				goto top;
			}

			zs->zst_offset = zs->zst_offset > zs->zst_stride ?
			    zs->zst_offset - zs->zst_stride : 0;
			zs->zst_ph_offset = (zs->zst_ph_offset >
			    (2 * zs->zst_stride)) ?
			    (zs->zst_ph_offset - (2 * zs->zst_stride)) : 0;
			zs->zst_direction = ZFETCH_BACKWARD;

			break;
		}
	}
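
	/*
	 * A sequential match with reset set means the read was not already
	 * prefetched, so the pattern guess was likely wrong; the stream is
	 * torn down below instead of being fetched from.
	 */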
	if (zs) {
		if (reset) {
			zstream_t *remove = zs;

			ZFETCHSTAT_BUMP(zfetchstat_stream_resets);
			rc = 0;
			mutex_exit(&zs->zst_lock);
			rw_exit(&zf->zf_rwlock);
			rw_enter(&zf->zf_rwlock, RW_WRITER);
			/*
			 * Relocate the stream, in case someone removes
			 * it while we were acquiring the WRITER lock.
			 */
			for (zs = list_head(&zf->zf_stream); zs;
			    zs = list_next(&zf->zf_stream, zs)) {
				if (zs == remove) {
					dmu_zfetch_stream_remove(zf, zs);
					mutex_destroy(&zs->zst_lock);
					kmem_free(zs, sizeof (zstream_t));
					break;
				}
			}
		} else {
			ZFETCHSTAT_BUMP(zfetchstat_stream_noresets);
			rc = 1;
			dmu_zfetch_dofetch(zf, zs);
			mutex_exit(&zs->zst_lock);
		}
	}
out:
	rw_exit(&zf->zf_rwlock);
	return (rc);
}
/*
 * Clean-up state associated with a zfetch structure.  This frees allocated
 * structure members, empties the zf_stream list, and generally makes things
 * nice.  This doesn't free the zfetch_t itself, that's left to the caller.
 */
void
dmu_zfetch_rele(zfetch_t *zf)
{
	zstream_t *zs;
	zstream_t *zs_next;

	ASSERT(!RW_LOCK_HELD(&zf->zf_rwlock));

	for (zs = list_head(&zf->zf_stream); zs; zs = zs_next) {
		zs_next = list_next(&zf->zf_stream, zs);

		list_remove(&zf->zf_stream, zs);
		mutex_destroy(&zs->zst_lock);
		kmem_free(zs, sizeof (zstream_t));
	}
	list_destroy(&zf->zf_stream);
	rw_destroy(&zf->zf_rwlock);

	zf->zf_dnode = NULL;
}
/*
 * Given a zfetch and zstream structure, insert the zstream structure into the
 * stream list contained within the zfetch structure.  Perform the appropriate
 * book-keeping.  It is possible that another thread has inserted a stream
 * which matches one that we are about to insert, so we must be sure to check
 * for this case.  If one is found, return failure, and let the caller cleanup
 * the duplicates.
 */
static int
dmu_zfetch_stream_insert(zfetch_t *zf, zstream_t *zs)
{
	zstream_t *zs_walk;
	zstream_t *zs_next;

	ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));

	for (zs_walk = list_head(&zf->zf_stream); zs_walk; zs_walk = zs_next) {
		zs_next = list_next(&zf->zf_stream, zs_walk);

		if (dmu_zfetch_streams_equal(zs_walk, zs)) {
			return (0);
		}
	}

	list_insert_head(&zf->zf_stream, zs);
	zf->zf_stream_cnt++;
	return (1);
}
/*
 * Walk the list of zstreams in the given zfetch, find an old one (by time),
 * and reclaim it for use by the caller.
 */
static zstream_t *
dmu_zfetch_stream_reclaim(zfetch_t *zf)
{
	zstream_t *zs;

	if (! rw_tryenter(&zf->zf_rwlock, RW_WRITER))
		return (NULL);

	for (zs = list_head(&zf->zf_stream); zs;
	    zs = list_next(&zf->zf_stream, zs)) {

		if (((ddi_get_lbolt() - zs->zst_last) / hz) >
		    zfetch_min_sec_reap)
			break;
	}

	if (zs) {
		dmu_zfetch_stream_remove(zf, zs);
		mutex_destroy(&zs->zst_lock);
		/* recycle the zeroed stream; the caller re-initializes it */
		bzero(zs, sizeof (zstream_t));
	} else {
		zf->zf_alloc_fail++;
	}
	rw_exit(&zf->zf_rwlock);

	return (zs);
}
/*
 * Given a zfetch and zstream structure, remove the zstream structure from its
 * container in the zfetch structure.  Perform the appropriate book-keeping.
 */
static void
dmu_zfetch_stream_remove(zfetch_t *zf, zstream_t *zs)
{
	ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));

	list_remove(&zf->zf_stream, zs);
	zf->zf_stream_cnt--;
}
static int
dmu_zfetch_streams_equal(zstream_t *zs1, zstream_t *zs2)
{
	if (zs1->zst_offset != zs2->zst_offset)
		return (0);

	if (zs1->zst_len != zs2->zst_len)
		return (0);

	if (zs1->zst_stride != zs2->zst_stride)
		return (0);

	if (zs1->zst_ph_offset != zs2->zst_ph_offset)
		return (0);

	if (zs1->zst_cap != zs2->zst_cap)
		return (0);

	if (zs1->zst_direction != zs2->zst_direction)
		return (0);

	return (1);
}
/*
 * This is the prefetch entry point.  It calls all of the other dmu_zfetch
 * routines to create, delete, find, or operate upon prefetch streams.
 */
void
dmu_zfetch(zfetch_t *zf, uint64_t offset, uint64_t size, int prefetched)
{
	zstream_t zst;
	zstream_t *newstream;
	int fetched;
	int inserted;
	unsigned int blkshft;
	uint64_t blksz;

	if (zfs_prefetch_disable)
		return;

	/* files that aren't ln2 blocksz are only one block -- nothing to do */
	if (!zf->zf_dnode->dn_datablkshift)
		return;

	/* convert offset and size, into blockid and nblocks */
	blkshft = zf->zf_dnode->dn_datablkshift;
	blksz = (1 << blkshft);

	bzero(&zst, sizeof (zstream_t));
	zst.zst_offset = offset >> blkshft;
	zst.zst_len = (P2ROUNDUP(offset + size, blksz) -
	    P2ALIGN(offset, blksz)) >> blkshft;
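	/*
	 * Example: with a 128K block size (blkshft == 17), offset 0x30000
	 * and size 0x40000 cover bytes [192K, 448K), which yields
	 * zst_offset == 1 and zst_len == 3 (blocks 1 through 3).
	 */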
	fetched = dmu_zfetch_find(zf, &zst, prefetched);
	if (fetched) {
		ZFETCHSTAT_BUMP(zfetchstat_hits);
	} else {
		ZFETCHSTAT_BUMP(zfetchstat_misses);
		if ((fetched = dmu_zfetch_colinear(zf, &zst)) != 0) {
			ZFETCHSTAT_BUMP(zfetchstat_colinear_hits);
		} else {
			ZFETCHSTAT_BUMP(zfetchstat_colinear_misses);
		}
	}
	if (!fetched) {
		newstream = dmu_zfetch_stream_reclaim(zf);

		/*
		 * We still couldn't find a stream, drop the lock, and allocate
		 * one if possible.  Otherwise, give up and go home.
		 */
		if (newstream) {
			ZFETCHSTAT_BUMP(zfetchstat_reclaim_successes);
		} else {
			uint64_t maxblocks;
			uint32_t max_streams;
			uint32_t cur_streams;

			ZFETCHSTAT_BUMP(zfetchstat_reclaim_failures);
			cur_streams = zf->zf_stream_cnt;
			maxblocks = zf->zf_dnode->dn_maxblkid;

			max_streams = MIN(zfetch_max_streams,
			    (maxblocks / zfetch_block_cap));
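			/*
			 * Small files get proportionally fewer streams:
			 * at most one per zfetch_block_cap blocks, and
			 * never more than zfetch_max_streams.
			 */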
			if (max_streams == 0) {
				max_streams++;
			}

			if (cur_streams >= max_streams) {
				return;
			}

			newstream = kmem_zalloc(sizeof (zstream_t), KM_SLEEP);
		}
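
		/*
		 * A new stream starts out as plain forward sequential:
		 * stride == cap == len, with the prefetch head just past
		 * the blocks covered by this read.
		 */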
		newstream->zst_offset = zst.zst_offset;
		newstream->zst_len = zst.zst_len;
		newstream->zst_stride = zst.zst_len;
		newstream->zst_ph_offset = zst.zst_len + zst.zst_offset;
		newstream->zst_cap = zst.zst_len;
		newstream->zst_direction = ZFETCH_FORWARD;
		newstream->zst_last = ddi_get_lbolt();

		mutex_init(&newstream->zst_lock, NULL, MUTEX_DEFAULT, NULL);

		rw_enter(&zf->zf_rwlock, RW_WRITER);
		inserted = dmu_zfetch_stream_insert(zf, newstream);
		rw_exit(&zf->zf_rwlock);

		/* the stream already existed; discard our duplicate */
		if (!inserted) {
			mutex_destroy(&newstream->zst_lock);
			kmem_free(newstream, sizeof (zstream_t));
		}
	}
}