1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1993
5  *      The Regents of the University of California.  All rights reserved.
6  * Modifications/enhancements:
7  *      Copyright (c) 1995 John S. Dyson.  All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *      @(#)vfs_cluster.c       8.7 (Berkeley) 2/13/94
34  */
35
36 #include <sys/cdefs.h>
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/proc.h>
41 #include <sys/bio.h>
42 #include <sys/buf.h>
43 #include <sys/vnode.h>
44 #include <sys/malloc.h>
45 #include <sys/mount.h>
46 #include <sys/racct.h>
47 #include <sys/resourcevar.h>
48 #include <sys/rwlock.h>
49 #include <sys/vmmeter.h>
50 #include <vm/vm.h>
51 #include <vm/vm_object.h>
52 #include <vm/vm_page.h>
53 #include <sys/sysctl.h>
54
55 static MALLOC_DEFINE(M_SEGMENT, "cl_savebuf", "cluster_save buffer");
56 static uma_zone_t cluster_pbuf_zone;
57
58 static void cluster_init(void *);
59 static struct cluster_save *cluster_collectbufs(struct vnode *vp,
60             struct buf *last_bp, int gbflags);
61 static struct buf *cluster_rbuild(struct vnode *vp, u_quad_t filesize,
62             daddr_t lbn, daddr_t blkno, long size, int run, int gbflags,
63             struct buf *fbp);
64 static void cluster_callback(struct buf *);
65
66 static int write_behind = 1;
67 SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
68     "Cluster write-behind; 0: disable, 1: enable, 2: backed off");
69
70 static int read_max = 64;
71 SYSCTL_INT(_vfs, OID_AUTO, read_max, CTLFLAG_RW, &read_max, 0,
72     "Cluster read-ahead max block count");
73
74 static int read_min = 1;
75 SYSCTL_INT(_vfs, OID_AUTO, read_min, CTLFLAG_RW, &read_min, 0,
76     "Cluster read min block count");
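/*
 * Descriptive note: vfs.read_max caps how many blocks of read-ahead
 * cluster_read() will issue for a sequential reader, while vfs.read_min
 * puts a floor (in blocks) under the size of the synchronous cluster
 * read; both are applied in cluster_read() below.
 */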
77
78 SYSINIT(cluster, SI_SUB_CPU, SI_ORDER_ANY, cluster_init, NULL);
79
80 static void
81 cluster_init(void *dummy)
82 {
83
84         cluster_pbuf_zone = pbuf_zsecond_create("clpbuf", nswbuf / 2);
85 }
86
87 /*
88  * Read data to a buf, including read-ahead if we find this to be beneficial.
89  * cluster_read replaces bread.
90  */
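/*
 * Illustrative sketch (not part of this file): a filesystem read path
 * that would otherwise bread() the block is expected to call, roughly,
 *
 *	error = cluster_read(vp, ip->i_size, lbn, size, NOCRED,
 *	    uio->uio_resid, seqcount, gbflags, &bp);
 *
 * where the file size, residual transfer length and sequential-access
 * hint come from the caller; see, e.g., ffs_read() for one real caller.
 */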
91 int
92 cluster_read(struct vnode *vp, u_quad_t filesize, daddr_t lblkno, long size,
93     struct ucred *cred, long totread, int seqcount, int gbflags,
94     struct buf **bpp)
95 {
96         struct buf *bp, *rbp, *reqbp;
97         struct bufobj *bo;
98         struct thread *td;
99         daddr_t blkno, origblkno;
100         int maxra, racluster;
101         int error, ncontig;
102         int i;
103
104         error = 0;
105         td = curthread;
106         bo = &vp->v_bufobj;
107         if (!unmapped_buf_allowed)
108                 gbflags &= ~GB_UNMAPPED;
109
110         /*
111          * Try to limit the amount of read-ahead by a few
112          * ad-hoc parameters.  This needs work!!!
113          */
114         racluster = vp->v_mount->mnt_iosize_max / size;
115         maxra = seqcount;
116         maxra = min(read_max, maxra);
117         maxra = min(nbuf/8, maxra);
118         if (((u_quad_t)(lblkno + maxra + 1) * size) > filesize)
119                 maxra = (filesize / size) - lblkno;
120
121         /*
122          * get the requested block
123          */
124         error = getblkx(vp, lblkno, lblkno, size, 0, 0, gbflags, &bp);
125         if (error != 0) {
126                 *bpp = NULL;
127                 return (error);
128         }
129         gbflags &= ~GB_NOSPARSE;
130         origblkno = lblkno;
131         *bpp = reqbp = bp;
132
133         /*
134          * if it is in the cache, then check to see if the reads have been
135          * sequential.  If they have, then try some read-ahead, otherwise
136          * back-off on prospective read-aheads.
137          */
138         if (bp->b_flags & B_CACHE) {
139                 if (!seqcount) {
140                         return 0;
141                 } else if ((bp->b_flags & B_RAM) == 0) {
142                         return 0;
143                 } else {
144                         bp->b_flags &= ~B_RAM;
145                         BO_RLOCK(bo);
146                         for (i = 1; i < maxra; i++) {
147                                 /*
148                                  * Stop if the buffer does not exist or it
149                                  * is invalid (about to go away?)
150                                  */
151                                 rbp = gbincore(&vp->v_bufobj, lblkno+i);
152                                 if (rbp == NULL || (rbp->b_flags & B_INVAL))
153                                         break;
154
155                                 /*
156                                  * Set another read-ahead mark so we know 
157                                  * to check again (if we can lock the
158                                  * buffer without waiting).
159                                  */
160                                 if ((((i % racluster) == (racluster - 1)) ||
161                                     (i == (maxra - 1))) 
162                                     && (0 == BUF_LOCK(rbp, 
163                                         LK_EXCLUSIVE | LK_NOWAIT, NULL))) {
164                                         rbp->b_flags |= B_RAM;
165                                         BUF_UNLOCK(rbp);
166                                 }
167                         }
168                         BO_RUNLOCK(bo);
169                         if (i >= maxra) {
170                                 return 0;
171                         }
172                         lblkno += i;
173                 }
174                 reqbp = bp = NULL;
175         /*
176          * If it isn't in the cache, then get a chunk from
177          * disk if sequential, otherwise just get the block.
178          */
179         } else {
180                 off_t firstread = bp->b_offset;
181                 int nblks;
182                 long minread;
183
184                 KASSERT(bp->b_offset != NOOFFSET,
185                     ("cluster_read: no buffer offset"));
186
187                 ncontig = 0;
188
189                 /*
190                  * Adjust totread if needed
191                  */
192                 minread = read_min * size;
193                 if (minread > totread)
194                         totread = minread;
195
196                 /*
197                  * Compute the total number of blocks that we should read
198                  * synchronously.
199                  */
200                 if (firstread + totread > filesize)
201                         totread = filesize - firstread;
202                 nblks = howmany(totread, size);
203                 if (nblks > racluster)
204                         nblks = racluster;
205
206                 /*
207                  * Now compute the number of contiguous blocks.
208                  */
209                 if (nblks > 1) {
210                         error = VOP_BMAP(vp, lblkno, NULL,
211                                 &blkno, &ncontig, NULL);
212                         /*
213                          * If this failed to map just do the original block.
214                          */
215                         if (error || blkno == -1)
216                                 ncontig = 0;
217                 }
218
219                 /*
220                  * If we have contiguous data available do a cluster
221                  * otherwise just read the requested block.
222                  */
223                 if (ncontig) {
224                         /* Account for our first block. */
225                         ncontig = min(ncontig + 1, nblks);
226                         if (ncontig < nblks)
227                                 nblks = ncontig;
228                         bp = cluster_rbuild(vp, filesize, lblkno,
229                             blkno, size, nblks, gbflags, bp);
230                         lblkno += (bp->b_bufsize / size);
231                 } else {
232                         bp->b_flags |= B_RAM;
233                         bp->b_iocmd = BIO_READ;
234                         lblkno += 1;
235                 }
236         }
237
238         /*
239          * handle the synchronous read so that it is available ASAP.
240          */
241         if (bp) {
242                 if ((bp->b_flags & B_CLUSTER) == 0) {
243                         vfs_busy_pages(bp, 0);
244                 }
245                 bp->b_flags &= ~B_INVAL;
246                 bp->b_ioflags &= ~BIO_ERROR;
247                 if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
248                         BUF_KERNPROC(bp);
249                 bp->b_iooffset = dbtob(bp->b_blkno);
250                 bstrategy(bp);
251 #ifdef RACCT
252                 if (racct_enable) {
253                         PROC_LOCK(td->td_proc);
254                         racct_add_buf(td->td_proc, bp, 0);
255                         PROC_UNLOCK(td->td_proc);
256                 }
257 #endif /* RACCT */
258                 td->td_ru.ru_inblock++;
259         }
260
261         /*
262          * If we have been doing sequential I/O, then do some read-ahead.
263          */
264         while (lblkno < (origblkno + maxra)) {
265                 error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL);
266                 if (error)
267                         break;
268
269                 if (blkno == -1)
270                         break;
271
272                 /*
273                  * We could throttle ncontig here by maxra but we might as
274                  * well read the data if it is contiguous.  We're throttled
275                  * by racluster anyway.
276                  */
277                 if (ncontig) {
278                         ncontig = min(ncontig + 1, racluster);
279                         rbp = cluster_rbuild(vp, filesize, lblkno, blkno,
280                             size, ncontig, gbflags, NULL);
281                         lblkno += (rbp->b_bufsize / size);
282                         if (rbp->b_flags & B_DELWRI) {
283                                 bqrelse(rbp);
284                                 continue;
285                         }
286                 } else {
287                         rbp = getblk(vp, lblkno, size, 0, 0, gbflags);
288                         lblkno += 1;
289                         if (rbp->b_flags & B_DELWRI) {
290                                 bqrelse(rbp);
291                                 continue;
292                         }
293                         rbp->b_flags |= B_ASYNC | B_RAM;
294                         rbp->b_iocmd = BIO_READ;
295                         rbp->b_blkno = blkno;
296                 }
297                 if (rbp->b_flags & B_CACHE) {
298                         rbp->b_flags &= ~B_ASYNC;
299                         bqrelse(rbp);
300                         continue;
301                 }
302                 if ((rbp->b_flags & B_CLUSTER) == 0) {
303                         vfs_busy_pages(rbp, 0);
304                 }
305                 rbp->b_flags &= ~B_INVAL;
306                 rbp->b_ioflags &= ~BIO_ERROR;
307                 if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
308                         BUF_KERNPROC(rbp);
309                 rbp->b_iooffset = dbtob(rbp->b_blkno);
310                 bstrategy(rbp);
311 #ifdef RACCT
312                 if (racct_enable) {
313                         PROC_LOCK(td->td_proc);
314                         racct_add_buf(td->td_proc, rbp, 0);
315                         PROC_UNLOCK(td->td_proc);
316                 }
317 #endif /* RACCT */
318                 td->td_ru.ru_inblock++;
319         }
320
321         if (reqbp) {
322                 /*
323                  * Like bread, always brelse() the buffer when
324                  * returning an error.
325                  */
326                 error = bufwait(reqbp);
327                 if (error != 0) {
328                         brelse(reqbp);
329                         *bpp = NULL;
330                 }
331         }
332         return (error);
333 }
334
335 /*
336  * If blocks are contiguous on disk, use this to provide clustered
337  * read ahead.  We will read as many blocks as possible sequentially
338  * and then parcel them up into logical blocks in the buffer hash table.
339  */
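/*
 * Implementation note: the cluster is issued as a single pbuf (from
 * cluster_pbuf_zone) whose pages are borrowed from the constituent
 * buffers; those buffers are chained on b_cluster.cluster_head and
 * each one is completed by cluster_callback() when the big I/O ends.
 */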
340 static struct buf *
341 cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
342     daddr_t blkno, long size, int run, int gbflags, struct buf *fbp)
343 {
344         struct buf *bp, *tbp;
345         daddr_t bn;
346         off_t off;
347         long tinc, tsize;
348         int i, inc, j, k, toff;
349
350         KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
351             ("cluster_rbuild: size %ld != f_iosize %jd\n",
352             size, (intmax_t)vp->v_mount->mnt_stat.f_iosize));
353
354         /*
355          * avoid a division
356          */
357         while ((u_quad_t) size * (lbn + run) > filesize) {
358                 --run;
359         }
360
361         if (fbp) {
362                 tbp = fbp;
363                 tbp->b_iocmd = BIO_READ; 
364         } else {
365                 tbp = getblk(vp, lbn, size, 0, 0, gbflags);
366                 if (tbp->b_flags & B_CACHE)
367                         return tbp;
368                 tbp->b_flags |= B_ASYNC | B_RAM;
369                 tbp->b_iocmd = BIO_READ;
370         }
371         tbp->b_blkno = blkno;
372         if( (tbp->b_flags & B_MALLOC) ||
373                 ((tbp->b_flags & B_VMIO) == 0) || (run <= 1) )
374                 return tbp;
375
376         bp = uma_zalloc(cluster_pbuf_zone, M_NOWAIT);
377         if (bp == NULL)
378                 return tbp;
379         MPASS((bp->b_flags & B_MAXPHYS) != 0);
380
381         /*
382          * We are synthesizing a buffer out of vm_page_t's, but
383          * if the block size is not page aligned then the starting
384          * address may not be either.  Inherit the b_data offset
385          * from the original buffer.
386          */
387         bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
388         if ((gbflags & GB_UNMAPPED) != 0) {
389                 bp->b_data = unmapped_buf;
390         } else {
391                 bp->b_data = (char *)((vm_offset_t)bp->b_data |
392                     ((vm_offset_t)tbp->b_data & PAGE_MASK));
393         }
394         bp->b_iocmd = BIO_READ;
395         bp->b_iodone = cluster_callback;
396         bp->b_blkno = blkno;
397         bp->b_lblkno = lbn;
398         bp->b_offset = tbp->b_offset;
399         KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
400         pbgetvp(vp, bp);
401
402         TAILQ_INIT(&bp->b_cluster.cluster_head);
403
404         bp->b_bcount = 0;
405         bp->b_bufsize = 0;
406         bp->b_npages = 0;
407
408         inc = btodb(size);
409         for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
410                 if (i == 0) {
411                         vm_object_pip_add(tbp->b_bufobj->bo_object,
412                             tbp->b_npages);
413                         vfs_busy_pages_acquire(tbp);
414                 } else {
415                         if ((bp->b_npages * PAGE_SIZE) +
416                             round_page(size) > vp->v_mount->mnt_iosize_max) {
417                                 break;
418                         }
419
420                         tbp = getblk(vp, lbn + i, size, 0, 0, GB_LOCK_NOWAIT |
421                             (gbflags & GB_UNMAPPED));
422
423                         /* Don't wait around for locked bufs. */
424                         if (tbp == NULL)
425                                 break;
426
427                         /*
428                          * Stop scanning if the buffer is fully valid
429                          * (marked B_CACHE), or locked (may be doing a
430                          * background write), or if the buffer is not
431                          * VMIO backed.  The clustering code can only deal
432                          * with VMIO-backed buffers.  The bo lock is not
433                          * required for the BKGRDINPROG check since it
434                          * can not be set without the buf lock.
435                          */
436                         if ((tbp->b_vflags & BV_BKGRDINPROG) ||
437                             (tbp->b_flags & B_CACHE) ||
438                             (tbp->b_flags & B_VMIO) == 0) {
439                                 bqrelse(tbp);
440                                 break;
441                         }
442
443                         /*
444                          * The buffer must be completely invalid in order to
445                          * take part in the cluster.  If it is partially valid
446                          * then we stop.
447                          */
448                         off = tbp->b_offset;
449                         tsize = size;
450                         for (j = 0; tsize > 0; j++) {
451                                 toff = off & PAGE_MASK;
452                                 tinc = tsize;
453                                 if (toff + tinc > PAGE_SIZE)
454                                         tinc = PAGE_SIZE - toff;
455                                 if (vm_page_trysbusy(tbp->b_pages[j]) == 0)
456                                         break;
457                                 if ((tbp->b_pages[j]->valid &
458                                     vm_page_bits(toff, tinc)) != 0) {
459                                         vm_page_sunbusy(tbp->b_pages[j]);
460                                         break;
461                                 }
462                                 vm_object_pip_add(tbp->b_bufobj->bo_object, 1);
463                                 off += tinc;
464                                 tsize -= tinc;
465                         }
466                         if (tsize > 0) {
467 clean_sbusy:
468                                 vm_object_pip_wakeupn(tbp->b_bufobj->bo_object,
469                                     j);
470                                 for (k = 0; k < j; k++)
471                                         vm_page_sunbusy(tbp->b_pages[k]);
472                                 bqrelse(tbp);
473                                 break;
474                         }
475
476                         /*
477                          * Set a read-ahead mark as appropriate
478                          */
479                         if ((fbp && (i == 1)) || (i == (run - 1)))
480                                 tbp->b_flags |= B_RAM;
481
482                         /*
483                          * Set the buffer up for an async read (XXX should
484                          * we do this only if we do not wind up brelse()ing?).
485                          * Set the block number if it isn't set, otherwise
486                          * if it is make sure it matches the block number we
487                          * expect.
488                          */
489                         tbp->b_flags |= B_ASYNC;
490                         tbp->b_iocmd = BIO_READ;
491                         if (tbp->b_blkno == tbp->b_lblkno) {
492                                 tbp->b_blkno = bn;
493                         } else if (tbp->b_blkno != bn) {
494                                 goto clean_sbusy;
495                         }
496                 }
497                 /*
498                  * XXX fbp from caller may not be B_ASYNC, but we are going
499                  * to biodone() it in cluster_callback() anyway
500                  */
501                 BUF_KERNPROC(tbp);
502                 TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
503                         tbp, b_cluster.cluster_entry);
504                 for (j = 0; j < tbp->b_npages; j += 1) {
505                         vm_page_t m;
506
507                         m = tbp->b_pages[j];
508                         if ((bp->b_npages == 0) ||
509                             (bp->b_pages[bp->b_npages-1] != m)) {
510                                 bp->b_pages[bp->b_npages] = m;
511                                 bp->b_npages++;
512                         }
513                         if (vm_page_all_valid(m))
514                                 tbp->b_pages[j] = bogus_page;
515                 }
516
517                 /*
518                  * Don't inherit tbp->b_bufsize as it may be larger due to
519                  * a non-page-aligned size.  Instead just aggregate using
520                  * 'size'.
521                  */
522                 if (tbp->b_bcount != size)
523                         printf("warning: tbp->b_bcount wrong %ld vs %ld\n", tbp->b_bcount, size);
524                 if (tbp->b_bufsize != size)
525                         printf("warning: tbp->b_bufsize wrong %ld vs %ld\n", tbp->b_bufsize, size);
526                 bp->b_bcount += size;
527                 bp->b_bufsize += size;
528         }
529
530         /*
531          * Fully valid pages in the cluster are already good and do not need
532          * to be re-read from disk.  Replace the page with bogus_page
533          */
534         for (j = 0; j < bp->b_npages; j++) {
535                 if (vm_page_all_valid(bp->b_pages[j]))
536                         bp->b_pages[j] = bogus_page;
537         }
538         if (bp->b_bufsize > bp->b_kvasize)
539                 panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
540                     bp->b_bufsize, bp->b_kvasize);
541
542         if (buf_mapped(bp)) {
543                 pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
544                     (vm_page_t *)bp->b_pages, bp->b_npages);
545         }
546         return (bp);
547 }
548
549 /*
550  * Cleanup after a clustered read or write.
551  * This is complicated by the fact that any of the buffers might have
552  * extra memory (if there were no empty buffer headers at allocbuf time)
553  * that we will need to shift around.
554  */
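/*
 * Any error on the cluster buffer is propagated to every component
 * buffer (BIO_ERROR and b_error) before each is finished with bufdone().
 */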
555 static void
556 cluster_callback(struct buf *bp)
557 {
558         struct buf *nbp, *tbp;
559         int error = 0;
560
561         /*
562          * Must propagate errors to all the components.
563          */
564         if (bp->b_ioflags & BIO_ERROR)
565                 error = bp->b_error;
566
567         if (buf_mapped(bp)) {
568                 pmap_qremove(trunc_page((vm_offset_t) bp->b_data),
569                     bp->b_npages);
570         }
571         /*
572          * Move memory from the large cluster buffer into the component
573          * buffers and mark IO as done on these.
574          */
575         for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
576                 tbp; tbp = nbp) {
577                 nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
578                 if (error) {
579                         tbp->b_ioflags |= BIO_ERROR;
580                         tbp->b_error = error;
581                 } else {
582                         tbp->b_dirtyoff = tbp->b_dirtyend = 0;
583                         tbp->b_flags &= ~B_INVAL;
584                         tbp->b_ioflags &= ~BIO_ERROR;
585                         /*
586                          * XXX the bdwrite()/bqrelse() issued during
587                          * cluster building clears B_RELBUF (see bqrelse()
588                          * comment).  If direct I/O was specified, we have
589                          * to restore it here to allow the buffer and VM
590                          * to be freed.
591                          */
592                         if (tbp->b_flags & B_DIRECT)
593                                 tbp->b_flags |= B_RELBUF;
594                 }
595                 bufdone(tbp);
596         }
597         pbrelvp(bp);
598         uma_zfree(cluster_pbuf_zone, bp);
599 }
600
601 /*
602  *      cluster_wbuild_wb:
603  *
604  *      Implement modified write build for cluster.
605  *
606  *              write_behind = 0        write behind disabled
607  *              write_behind = 1        write behind normal (default)
608  *              write_behind = 2        write behind backed-off
609  */
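/*
 * Note that mode 2 does not write the [start_lbn, start_lbn + len)
 * window itself: it first backs start_lbn up by len blocks, so the
 * cluster pushed out is the one immediately preceding the window.
 */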
610
611 static __inline int
612 cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len,
613     int gbflags)
614 {
615         int r = 0;
616
617         switch (write_behind) {
618         case 2:
619                 if (start_lbn < len)
620                         break;
621                 start_lbn -= len;
622                 /* FALLTHROUGH */
623         case 1:
624                 r = cluster_wbuild(vp, size, start_lbn, len, gbflags);
625                 /* FALLTHROUGH */
626         default:
627                 /* FALLTHROUGH */
628                 break;
629         }
630         return(r);
631 }
632
633 /*
634  * Do clustered write for FFS.
635  *
636  * Four cases:
637  *      1. Write is not sequential (write asynchronously)
638  *      Write is sequential:
639  *      2.      beginning of cluster - begin cluster
640  *      3.      middle of a cluster - add to cluster
641  *      4.      end of a cluster - asynchronously write cluster
642  */
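/*
 * Illustrative sketch (not part of this file): a filesystem write path
 * hands a fully written logical block to cluster_write() instead of
 * calling bdwrite()/bawrite() directly, roughly,
 *
 *	bp->b_flags |= B_CLUSTEROK;
 *	cluster_write(vp, bp, ip->i_size, seqcount, gbflags);
 *
 * The surrounding logic differs per filesystem; see, e.g., ffs_write().
 */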
643 void
644 cluster_write(struct vnode *vp, struct buf *bp, u_quad_t filesize, int seqcount,
645     int gbflags)
646 {
647         daddr_t lbn, pbn;
648         int maxclen, cursize;
649         int lblocksize;
650         int async;
651
652         if (!unmapped_buf_allowed)
653                 gbflags &= ~GB_UNMAPPED;
654
655         if (vp->v_type == VREG) {
656                 async = DOINGASYNC(vp);
657                 lblocksize = vp->v_mount->mnt_stat.f_iosize;
658         } else {
659                 async = 0;
660                 lblocksize = bp->b_bufsize;
661         }
662         lbn = bp->b_lblkno;
663         KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));
664
665         /* Initialize vnode to beginning of file. */
666         if (lbn == 0)
667                 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
668
669         if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
670             (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
671                 maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
672                 if (vp->v_clen != 0) {
673                         /*
674                          * Next block is not sequential.
675                          *
676                          * If we are not writing at end of file, the process
677                          * seeked to another point in the file since its last
678                          * write, or we have reached our maximum cluster size,
679                          * then push the previous cluster. Otherwise try
680                          * reallocating to make it sequential.
681                          *
682                          * Change to algorithm: only push previous cluster if
683                          * it was sequential from the point of view of the
684                          * seqcount heuristic, otherwise leave the buffer 
685                          * intact so we can potentially optimize the I/O
686                          * later on in the buf_daemon or update daemon
687                          * flush.
688                          */
689                         cursize = vp->v_lastw - vp->v_cstart + 1;
690                         if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
691                             lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
692                                 if (!async && seqcount > 0) {
693                                         cluster_wbuild_wb(vp, lblocksize,
694                                             vp->v_cstart, cursize, gbflags);
695                                 }
696                         } else {
697                                 struct buf **bpp, **endbp;
698                                 struct cluster_save *buflist;
699
700                                 buflist = cluster_collectbufs(vp, bp, gbflags);
701                                 if (buflist == NULL) {
702                                         /*
703                                          * Cluster build failed so just write
704                                          * it now.
705                                          */
706                                         bawrite(bp);
707                                         return;
708                                 }
709                                 endbp = &buflist->bs_children
710                                     [buflist->bs_nchildren - 1];
711                                 if (VOP_REALLOCBLKS(vp, buflist)) {
712                                         /*
713                                          * Failed, push the previous cluster
714                                          * if *really* writing sequentially
715                                          * in the logical file (seqcount > 1),
716                                          * otherwise delay it in the hopes that
717                                          * the low level disk driver can
718                                          * optimize the write ordering.
719                                          */
720                                         for (bpp = buflist->bs_children;
721                                              bpp < endbp; bpp++)
722                                                 brelse(*bpp);
723                                         free(buflist, M_SEGMENT);
724                                         if (seqcount > 1) {
725                                                 cluster_wbuild_wb(vp, 
726                                                     lblocksize, vp->v_cstart, 
727                                                     cursize, gbflags);
728                                         }
729                                 } else {
730                                         /*
731                                          * Succeeded, keep building cluster.
732                                          */
733                                         for (bpp = buflist->bs_children;
734                                              bpp <= endbp; bpp++)
735                                                 bdwrite(*bpp);
736                                         free(buflist, M_SEGMENT);
737                                         vp->v_lastw = lbn;
738                                         vp->v_lasta = bp->b_blkno;
739                                         return;
740                                 }
741                         }
742                 }
743                 /*
744                  * Consider beginning a cluster. If at end of file, make
745                  * cluster as large as possible, otherwise find size of
746                  * existing cluster.
747                  */
748                 if ((vp->v_type == VREG) &&
749                         ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
750                     (bp->b_blkno == bp->b_lblkno) &&
751                     (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
752                      bp->b_blkno == -1)) {
753                         pbn = bp->b_blkno;
754                         bawrite(bp);
755                         vp->v_clen = 0;
756                         vp->v_lasta = pbn;
757                         vp->v_cstart = lbn + 1;
758                         vp->v_lastw = lbn;
759                         return;
760                 }
761                 vp->v_clen = maxclen;
762                 pbn = bp->b_blkno;
763                 if (!async && maxclen == 0) {   /* I/O not contiguous */
764                         vp->v_cstart = lbn + 1;
765                         bawrite(bp);
766                 } else {        /* Wait for rest of cluster */
767                         vp->v_cstart = lbn;
768                         bdwrite(bp);
769                 }
770         } else if (lbn == vp->v_cstart + vp->v_clen) {
771                 /*
772                  * At end of cluster, write it out if seqcount tells us we
773                  * are operating sequentially, otherwise let the buf or
774                  * update daemon handle it.
775                  */
776                 pbn = bp->b_blkno;
777                 bdwrite(bp);
778                 if (seqcount > 1) {
779                         cluster_wbuild_wb(vp, lblocksize, vp->v_cstart,
780                             vp->v_clen + 1, gbflags);
781                 }
782                 vp->v_clen = 0;
783                 vp->v_cstart = lbn + 1;
784         } else if (vm_page_count_severe()) {
785                 /*
786                  * We are low on memory, get it going NOW
787                  */
788                 pbn = bp->b_blkno;
789                 bawrite(bp);
790         } else {
791                 /*
792                  * In the middle of a cluster, so just delay the I/O for now.
793                  */
794                 pbn = bp->b_blkno;
795                 bdwrite(bp);
796         }
797         vp->v_lastw = lbn;
798         vp->v_lasta = pbn;
799 }
800
801 /*
802  * This is an awful lot like cluster_rbuild...wish they could be combined.
803  * Starting at start_lbn, scan up to len logical blocks for dirty
804  * (delayed-write) buffers that are contiguous on disk, and push them
805  * out in clustered writes.  Returns the total number of bytes written.
806  */
807 int
808 cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len,
809     int gbflags)
810 {
811         struct buf *bp, *tbp;
812         struct bufobj *bo;
813         int i, j;
814         int totalwritten = 0;
815         int dbsize = btodb(size);
816
817         if (!unmapped_buf_allowed)
818                 gbflags &= ~GB_UNMAPPED;
819
820         bo = &vp->v_bufobj;
821         while (len > 0) {
822                 /*
823                  * If the buffer is not delayed-write (i.e. dirty), or it
824                  * is delayed-write but either locked or inval, it cannot
825                  * partake in the clustered write.
826                  */
827                 BO_LOCK(bo);
828                 if ((tbp = gbincore(&vp->v_bufobj, start_lbn)) == NULL ||
829                     (tbp->b_vflags & BV_BKGRDINPROG)) {
830                         BO_UNLOCK(bo);
831                         ++start_lbn;
832                         --len;
833                         continue;
834                 }
835                 if (BUF_LOCK(tbp,
836                     LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, BO_LOCKPTR(bo))) {
837                         ++start_lbn;
838                         --len;
839                         continue;
840                 }
841                 if ((tbp->b_flags & (B_INVAL | B_DELWRI)) != B_DELWRI) {
842                         BUF_UNLOCK(tbp);
843                         ++start_lbn;
844                         --len;
845                         continue;
846                 }
847                 bremfree(tbp);
848                 tbp->b_flags &= ~B_DONE;
849
850                 /*
851                  * Extra memory in the buffer, punt on this buffer.
852                  * XXX we could handle this in most cases, but we would
853                  * have to push the extra memory down to after our max
854                  * possible cluster size and then potentially pull it back
855                  * up if the cluster was terminated prematurely--too much
856                  * hassle.
857                  */
858                 if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) != 
859                      (B_CLUSTEROK | B_VMIO)) ||
860                   (tbp->b_bcount != tbp->b_bufsize) ||
861                   (tbp->b_bcount != size) ||
862                   (len == 1) ||
863                   ((bp = uma_zalloc(cluster_pbuf_zone, M_NOWAIT)) == NULL)) {
864                         totalwritten += tbp->b_bufsize;
865                         bawrite(tbp);
866                         ++start_lbn;
867                         --len;
868                         continue;
869                 }
870                 MPASS((bp->b_flags & B_MAXPHYS) != 0);
871
872                 /*
873                  * We got a pbuf to make the cluster in,
874                  * so initialize it.
875                  */
876                 TAILQ_INIT(&bp->b_cluster.cluster_head);
877                 bp->b_bcount = 0;
878                 bp->b_bufsize = 0;
879                 bp->b_npages = 0;
880                 if (tbp->b_wcred != NOCRED)
881                         bp->b_wcred = crhold(tbp->b_wcred);
882
883                 bp->b_blkno = tbp->b_blkno;
884                 bp->b_lblkno = tbp->b_lblkno;
885                 bp->b_offset = tbp->b_offset;
886
887                 /*
888                  * We are synthesizing a buffer out of vm_page_t's, but
889                  * if the block size is not page aligned then the starting
890                  * address may not be either.  Inherit the b_data offset
891                  * from the original buffer.
892                  */
893                 if ((gbflags & GB_UNMAPPED) == 0 ||
894                     (tbp->b_flags & B_VMIO) == 0) {
895                         bp->b_data = (char *)((vm_offset_t)bp->b_data |
896                             ((vm_offset_t)tbp->b_data & PAGE_MASK));
897                 } else {
898                         bp->b_data = unmapped_buf;
899                 }
900                 bp->b_flags |= B_CLUSTER | (tbp->b_flags & (B_VMIO |
901                     B_NEEDCOMMIT));
902                 bp->b_iodone = cluster_callback;
903                 pbgetvp(vp, bp);
904                 /*
905                  * From this location in the file, scan forward to see
906                  * if there are buffers with adjacent data that need to
907                  * be written as well.
908                  */
909                 for (i = 0; i < len; ++i, ++start_lbn) {
910                         if (i != 0) { /* If not the first buffer */
911                                 /*
912                                  * If the adjacent data is not even in core it
913                                  * can't need to be written.
914                                  */
915                                 BO_LOCK(bo);
916                                 if ((tbp = gbincore(bo, start_lbn)) == NULL ||
917                                     (tbp->b_vflags & BV_BKGRDINPROG)) {
918                                         BO_UNLOCK(bo);
919                                         break;
920                                 }
921
922                                 /*
923                                  * If it IS in core, but has different
924                                  * characteristics, or is locked (which
925                                  * means it could be undergoing a background
926                                  * I/O or be in a weird state), then don't
927                                  * cluster with it.
928                                  */
929                                 if (BUF_LOCK(tbp,
930                                     LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
931                                     BO_LOCKPTR(bo)))
932                                         break;
933
934                                 if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
935                                     B_INVAL | B_DELWRI | B_NEEDCOMMIT))
936                                     != (B_DELWRI | B_CLUSTEROK |
937                                     (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
938                                     tbp->b_wcred != bp->b_wcred) {
939                                         BUF_UNLOCK(tbp);
940                                         break;
941                                 }
942
943                                 /*
944                                  * Check that the combined cluster
945                                  * would make sense with regard to pages
946                                  * and would not be too large
947                                  */
948                                 if ((tbp->b_bcount != size) ||
949                                   ((bp->b_blkno + (dbsize * i)) !=
950                                     tbp->b_blkno) ||
951                                   ((tbp->b_npages + bp->b_npages) >
952                                     (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
953                                         BUF_UNLOCK(tbp);
954                                         break;
955                                 }
956
957                                 /*
958                                  * Ok, it's passed all the tests,
959                                  * so remove it from the free list
960                                  * and mark it busy. We will use it.
961                                  */
962                                 bremfree(tbp);
963                                 tbp->b_flags &= ~B_DONE;
964                         } /* end of code for non-first buffers only */
965                         /*
966                          * If the IO is via the VM then we do some
967                          * special VM hackery (yuck).  Since the buffer's
968                          * block size may not be page-aligned it is possible
969                          * for a page to be shared between two buffers.  We
970                          * have to get rid of the duplication when building
971                          * the cluster.
972                          */
973                         if (tbp->b_flags & B_VMIO) {
974                                 vm_page_t m;
975
976                                 if (i == 0) {
977                                         vfs_busy_pages_acquire(tbp);
978                                 } else { /* if not first buffer */
979                                         for (j = 0; j < tbp->b_npages; j += 1) {
980                                                 m = tbp->b_pages[j];
981                                                 if (vm_page_trysbusy(m) == 0) {
982                                                         for (j--; j >= 0; j--)
983                                                                 vm_page_sunbusy(
984                                                                     tbp->b_pages[j]);
985                                                         bqrelse(tbp);
986                                                         goto finishcluster;
987                                                 }
988                                         }
989                                 }
990                                 vm_object_pip_add(tbp->b_bufobj->bo_object,
991                                     tbp->b_npages);
992                                 for (j = 0; j < tbp->b_npages; j += 1) {
993                                         m = tbp->b_pages[j];
994                                         if ((bp->b_npages == 0) ||
995                                           (bp->b_pages[bp->b_npages - 1] != m)) {
996                                                 bp->b_pages[bp->b_npages] = m;
997                                                 bp->b_npages++;
998                                         }
999                                 }
1000                         }
1001                         bp->b_bcount += size;
1002                         bp->b_bufsize += size;
1003                         /*
1004                          * If any of the clustered buffers have their
1005                          * B_BARRIER flag set, transfer that request to
1006                          * the cluster.
1007                          */
1008                         bp->b_flags |= (tbp->b_flags & B_BARRIER);
1009                         tbp->b_flags &= ~(B_DONE | B_BARRIER);
1010                         tbp->b_flags |= B_ASYNC;
1011                         tbp->b_ioflags &= ~BIO_ERROR;
1012                         tbp->b_iocmd = BIO_WRITE;
1013                         bundirty(tbp);
1014                         reassignbuf(tbp);               /* put on clean list */
1015                         bufobj_wref(tbp->b_bufobj);
1016                         BUF_KERNPROC(tbp);
1017                         buf_track(tbp, __func__);
1018                         TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
1019                                 tbp, b_cluster.cluster_entry);
1020                 }
1021         finishcluster:
1022                 if (buf_mapped(bp)) {
1023                         pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
1024                             (vm_page_t *)bp->b_pages, bp->b_npages);
1025                 }
1026                 if (bp->b_bufsize > bp->b_kvasize)
1027                         panic(
1028                             "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
1029                             bp->b_bufsize, bp->b_kvasize);
1030                 totalwritten += bp->b_bufsize;
1031                 bp->b_dirtyoff = 0;
1032                 bp->b_dirtyend = bp->b_bufsize;
1033                 bawrite(bp);
1034
1035                 len -= i;
1036         }
1037         return totalwritten;
1038 }
1039
1040 /*
1041  * Collect together all the buffers in a cluster,
1042  * plus one additional buffer (last_bp).
1043  */
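/*
 * The children collected are the buffers for logical blocks v_cstart
 * through v_lastw, with last_bp appended as one extra child.  A NULL
 * return tells the caller (cluster_write()) to give up on clustering
 * and just write last_bp by itself.
 */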
1044 static struct cluster_save *
1045 cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int gbflags)
1046 {
1047         struct cluster_save *buflist;
1048         struct buf *bp;
1049         daddr_t lbn;
1050         int i, j, len, error;
1051
1052         len = vp->v_lastw - vp->v_cstart + 1;
1053         buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
1054             M_SEGMENT, M_WAITOK);
1055         buflist->bs_nchildren = 0;
1056         buflist->bs_children = (struct buf **) (buflist + 1);
1057         for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
1058                 error = bread_gb(vp, lbn, last_bp->b_bcount, NOCRED,
1059                     gbflags, &bp);
1060                 if (error != 0) {
1061                         /*
1062                          * If read fails, release collected buffers
1063                          * and return failure.
1064                          */
1065                         for (j = 0; j < i; j++)
1066                                 brelse(buflist->bs_children[j]);
1067                         free(buflist, M_SEGMENT);
1068                         return (NULL);
1069                 }
1070                 buflist->bs_children[i] = bp;
1071                 if (bp->b_blkno == bp->b_lblkno)
1072                         VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
1073                                 NULL, NULL);
1074         }
1075         buflist->bs_children[i] = bp = last_bp;
1076         if (bp->b_blkno == bp->b_lblkno)
1077                 VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
1078         buflist->bs_nchildren = i + 1;
1079         return (buflist);
1080 }