/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1993
 *      The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 *      Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vfs_cluster.c       8.7 (Berkeley) 2/13/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#if defined(CLUSTERDEBUG)
static int      rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0,
    "Debug VFS clustering code");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cl_savebuf", "cluster_save buffer");

static struct cluster_save *cluster_collectbufs(struct vnode *vp,
            struct buf *last_bp, int gbflags);
static struct buf *cluster_rbuild(struct vnode *vp, u_quad_t filesize,
            daddr_t lbn, daddr_t blkno, long size, int run, int gbflags,
            struct buf *fbp);
static void cluster_callback(struct buf *);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind; 0: disable, 1: enable, 2: backed off");

static int read_max = 64;
SYSCTL_INT(_vfs, OID_AUTO, read_max, CTLFLAG_RW, &read_max, 0,
    "Cluster read-ahead max block count");

static int read_min = 1;
SYSCTL_INT(_vfs, OID_AUTO, read_min, CTLFLAG_RW, &read_min, 0,
    "Cluster read min block count");

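/*
 * Tuning note (illustrative): the knobs above are runtime sysctls,
 * e.g. "sysctl vfs.read_max=128" widens the read-ahead window to 128
 * file system blocks; the value shown is an example, not a
 * recommendation.  read_min inflates small sequential reads so that
 * at least read_min blocks are considered for clustering.
 */
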
/*
 * Read data to a buf, including read-ahead if we find this to be beneficial.
 * cluster_read replaces bread.
 */
int
cluster_read(struct vnode *vp, u_quad_t filesize, daddr_t lblkno, long size,
    struct ucred *cred, long totread, int seqcount, int gbflags,
    struct buf **bpp)
{
        struct buf *bp, *rbp, *reqbp;
        struct bufobj *bo;
        struct thread *td;
        daddr_t blkno, origblkno;
        int maxra, racluster;
        int error, ncontig;
        int i;

        error = 0;
        td = curthread;
        bo = &vp->v_bufobj;
        if (!unmapped_buf_allowed)
                gbflags &= ~GB_UNMAPPED;

        /*
         * Try to limit the amount of read-ahead by a few
         * ad-hoc parameters.  This needs work!!!
         */
        racluster = vp->v_mount->mnt_iosize_max / size;
        maxra = seqcount;
        maxra = min(read_max, maxra);
        maxra = min(nbuf/8, maxra);
        if (((u_quad_t)(lblkno + maxra + 1) * size) > filesize)
                maxra = (filesize / size) - lblkno;

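        /*
         * Worked example (illustrative numbers): with 16kB blocks and
         * an mnt_iosize_max of 128kB, racluster is 8, so each cluster
         * I/O spans at most 8 blocks.  A sequential reader arriving
         * with seqcount 32 starts with maxra 32, which read_max and
         * nbuf/8 may cap lower, and the check above then clips the
         * window so it does not run past end of file.
         */
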
        /*
         * get the requested block
         */
        error = getblkx(vp, lblkno, size, 0, 0, gbflags, &bp);
        if (error != 0) {
                *bpp = NULL;
                return (error);
        }
        gbflags &= ~GB_NOSPARSE;
        origblkno = lblkno;
        *bpp = reqbp = bp;

        /*
         * if it is in the cache, then check to see if the reads have been
         * sequential.  If they have, then try some read-ahead, otherwise
         * back-off on prospective read-aheads.
         */
        if (bp->b_flags & B_CACHE) {
                if (!seqcount) {
                        return (0);
                } else if ((bp->b_flags & B_RAM) == 0) {
                        return (0);
                } else {
                        bp->b_flags &= ~B_RAM;
                        BO_RLOCK(bo);
                        for (i = 1; i < maxra; i++) {
                                /*
                                 * Stop if the buffer does not exist or it
                                 * is invalid (about to go away?)
                                 */
                                rbp = gbincore(&vp->v_bufobj, lblkno+i);
                                if (rbp == NULL || (rbp->b_flags & B_INVAL))
                                        break;

                                /*
                                 * Set another read-ahead mark so we know
                                 * to check again, if we can lock the
                                 * buffer without waiting.
                                 */
                                if (((i % racluster) == (racluster - 1) ||
                                    i == (maxra - 1)) &&
                                    BUF_LOCK(rbp, LK_EXCLUSIVE | LK_NOWAIT,
                                    NULL) == 0) {
                                        rbp->b_flags |= B_RAM;
                                        BUF_UNLOCK(rbp);
                                }
                        }
                        BO_RUNLOCK(bo);
                        if (i >= maxra) {
                                return (0);
                        }
                        lblkno += i;
                }
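                /*
                 * The requested block was found in the cache, so no
                 * synchronous read is needed; forget bp and fall
                 * through with only the read-ahead, which resumes at
                 * the first block found missing above.
                 */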
                reqbp = bp = NULL;
        /*
         * If it isn't in the cache, then get a chunk from
         * disk if sequential, otherwise just get the block.
         */
        } else {
                off_t firstread = bp->b_offset;
                int nblks;
                long minread;

                KASSERT(bp->b_offset != NOOFFSET,
                    ("cluster_read: no buffer offset"));

                ncontig = 0;

                /*
                 * Adjust totread if needed
                 */
                minread = read_min * size;
                if (minread > totread)
                        totread = minread;

                /*
                 * Compute the total number of blocks that we should read
                 * synchronously.
                 */
                if (firstread + totread > filesize)
                        totread = filesize - firstread;
                nblks = howmany(totread, size);
                if (nblks > racluster)
                        nblks = racluster;

                /*
                 * Now compute the number of contiguous blocks.
                 */
                if (nblks > 1) {
                        error = VOP_BMAP(vp, lblkno, NULL,
                                &blkno, &ncontig, NULL);
                        /*
                         * If this failed to map just do the original block.
                         */
                        if (error || blkno == -1)
                                ncontig = 0;
                }

                /*
                 * If we have contiguous data available do a cluster
                 * otherwise just read the requested block.
                 */
                if (ncontig) {
                        /* Account for our first block. */
                        ncontig = min(ncontig + 1, nblks);
                        if (ncontig < nblks)
                                nblks = ncontig;
                        bp = cluster_rbuild(vp, filesize, lblkno,
                            blkno, size, nblks, gbflags, bp);
                        lblkno += (bp->b_bufsize / size);
                } else {
                        bp->b_flags |= B_RAM;
                        bp->b_iocmd = BIO_READ;
                        lblkno += 1;
                }
        }

        /*
         * handle the synchronous read so that it is available ASAP.
         */
        if (bp) {
                if ((bp->b_flags & B_CLUSTER) == 0) {
                        vfs_busy_pages(bp, 0);
                }
                bp->b_flags &= ~B_INVAL;
                bp->b_ioflags &= ~BIO_ERROR;
                if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
                        BUF_KERNPROC(bp);
                bp->b_iooffset = dbtob(bp->b_blkno);
                bstrategy(bp);
#ifdef RACCT
                if (racct_enable) {
                        PROC_LOCK(td->td_proc);
                        racct_add_buf(td->td_proc, bp, 0);
                        PROC_UNLOCK(td->td_proc);
                }
#endif /* RACCT */
                td->td_ru.ru_inblock++;
        }

        /*
         * If we have been doing sequential I/O, then do some read-ahead.
         */
        while (lblkno < (origblkno + maxra)) {
                error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL);
                if (error)
                        break;

                if (blkno == -1)
                        break;

                /*
                 * We could throttle ncontig here by maxra but we might as
                 * well read the data if it is contiguous.  We're throttled
                 * by racluster anyway.
                 */
                if (ncontig) {
                        ncontig = min(ncontig + 1, racluster);
                        rbp = cluster_rbuild(vp, filesize, lblkno, blkno,
                            size, ncontig, gbflags, NULL);
                        lblkno += (rbp->b_bufsize / size);
                        if (rbp->b_flags & B_DELWRI) {
                                bqrelse(rbp);
                                continue;
                        }
                } else {
                        rbp = getblk(vp, lblkno, size, 0, 0, gbflags);
                        lblkno += 1;
                        if (rbp->b_flags & B_DELWRI) {
                                bqrelse(rbp);
                                continue;
                        }
                        rbp->b_flags |= B_ASYNC | B_RAM;
                        rbp->b_iocmd = BIO_READ;
                        rbp->b_blkno = blkno;
                }
                if (rbp->b_flags & B_CACHE) {
                        rbp->b_flags &= ~B_ASYNC;
                        bqrelse(rbp);
                        continue;
                }
                if ((rbp->b_flags & B_CLUSTER) == 0) {
                        vfs_busy_pages(rbp, 0);
                }
                rbp->b_flags &= ~B_INVAL;
                rbp->b_ioflags &= ~BIO_ERROR;
                if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
                        BUF_KERNPROC(rbp);
                rbp->b_iooffset = dbtob(rbp->b_blkno);
                bstrategy(rbp);
#ifdef RACCT
                if (racct_enable) {
                        PROC_LOCK(td->td_proc);
                        racct_add_buf(td->td_proc, rbp, 0);
                        PROC_UNLOCK(td->td_proc);
                }
#endif /* RACCT */
                td->td_ru.ru_inblock++;
        }

        if (reqbp) {
                /*
                 * Like bread, always brelse() the buffer when
                 * returning an error.
                 */
                error = bufwait(reqbp);
                if (error != 0) {
                        brelse(reqbp);
                        *bpp = NULL;
                }
        }
        return (error);
}

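/*
 * Usage sketch (illustrative, modeled on the FFS read path): a
 * filesystem picks cluster_read() over bread() when the file is being
 * read sequentially, passing along the sequential-access hint it
 * derived from the file descriptor:
 *
 *      if (seqcount > 1)
 *              error = cluster_read(vp, ip->i_size, lbn, size, NOCRED,
 *                  blkoffset + uio->uio_resid, seqcount, bflag, &bp);
 *      else
 *              error = bread_gb(vp, lbn, size, NOCRED, bflag, &bp);
 *
 * The "ip", "uio", "blkoffset" and "bflag" names above stand in for
 * the caller's own state and are not defined in this file.
 */
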
/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
    daddr_t blkno, long size, int run, int gbflags, struct buf *fbp)
{
        struct buf *bp, *tbp;
        daddr_t bn;
        off_t off;
        long tinc, tsize;
        int i, inc, j, k, toff;

        KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
            ("cluster_rbuild: size %ld != f_iosize %jd\n",
            size, (intmax_t)vp->v_mount->mnt_stat.f_iosize));

        /*
         * avoid a division
         */
        while ((u_quad_t) size * (lbn + run) > filesize) {
                --run;
        }

        if (fbp) {
                tbp = fbp;
                tbp->b_iocmd = BIO_READ;
        } else {
                tbp = getblk(vp, lbn, size, 0, 0, gbflags);
                if (tbp->b_flags & B_CACHE)
                        return (tbp);
                tbp->b_flags |= B_ASYNC | B_RAM;
                tbp->b_iocmd = BIO_READ;
        }
        tbp->b_blkno = blkno;
        if ((tbp->b_flags & B_MALLOC) != 0 ||
            (tbp->b_flags & B_VMIO) == 0 || run <= 1)
                return (tbp);

        bp = trypbuf(&cluster_pbuf_freecnt);
        if (bp == NULL)
                return (tbp);

        /*
         * We are synthesizing a buffer out of vm_page_t's, but
         * if the block size is not page aligned then the starting
         * address may not be either.  Inherit the b_data offset
         * from the original buffer.
         */
        bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
        if ((gbflags & GB_UNMAPPED) != 0) {
                bp->b_data = unmapped_buf;
        } else {
                bp->b_data = (char *)((vm_offset_t)bp->b_data |
                    ((vm_offset_t)tbp->b_data & PAGE_MASK));
        }
        bp->b_iocmd = BIO_READ;
        bp->b_iodone = cluster_callback;
        bp->b_blkno = blkno;
        bp->b_lblkno = lbn;
        bp->b_offset = tbp->b_offset;
        KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
        pbgetvp(vp, bp);

        TAILQ_INIT(&bp->b_cluster.cluster_head);

        bp->b_bcount = 0;
        bp->b_bufsize = 0;
        bp->b_npages = 0;

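        /*
         * Walk the run of contiguous blocks.  bn is the disk address,
         * advancing by btodb(size) DEV_BSIZE sectors per file system
         * block, while lbn + i tracks the corresponding logical block.
         */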
        inc = btodb(size);
        for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
                if (i == 0) {
                        VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
                        vfs_drain_busy_pages(tbp);
                        vm_object_pip_add(tbp->b_bufobj->bo_object,
                            tbp->b_npages);
                        for (k = 0; k < tbp->b_npages; k++)
                                vm_page_sbusy(tbp->b_pages[k]);
                        VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
                } else {
                        if ((bp->b_npages * PAGE_SIZE) +
                            round_page(size) > vp->v_mount->mnt_iosize_max) {
                                break;
                        }

                        tbp = getblk(vp, lbn + i, size, 0, 0, GB_LOCK_NOWAIT |
                            (gbflags & GB_UNMAPPED));

                        /* Don't wait around for locked bufs. */
                        if (tbp == NULL)
                                break;

                        /*
                         * Stop scanning if the buffer is fully valid
                         * (marked B_CACHE), or locked (may be doing a
                         * background write), or if the buffer is not
                         * VMIO backed.  The clustering code can only deal
                         * with VMIO-backed buffers.  The bo lock is not
                         * required for the BKGRDINPROG check since it
                         * can not be set without the buf lock.
                         */
                        if ((tbp->b_vflags & BV_BKGRDINPROG) ||
                            (tbp->b_flags & B_CACHE) ||
                            (tbp->b_flags & B_VMIO) == 0) {
                                bqrelse(tbp);
                                break;
                        }

                        /*
                         * The buffer must be completely invalid in order to
                         * take part in the cluster.  If it is partially valid
                         * then we stop.
                         */
                        off = tbp->b_offset;
                        tsize = size;
                        VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
                        for (j = 0; tsize > 0; j++) {
                                toff = off & PAGE_MASK;
                                tinc = tsize;
                                if (toff + tinc > PAGE_SIZE)
                                        tinc = PAGE_SIZE - toff;
                                VM_OBJECT_ASSERT_WLOCKED(tbp->b_pages[j]->object);
                                if ((tbp->b_pages[j]->valid &
                                    vm_page_bits(toff, tinc)) != 0)
                                        break;
                                if (vm_page_xbusied(tbp->b_pages[j]))
                                        break;
                                vm_object_pip_add(tbp->b_bufobj->bo_object, 1);
                                vm_page_sbusy(tbp->b_pages[j]);
                                off += tinc;
                                tsize -= tinc;
                        }
                        if (tsize > 0) {
clean_sbusy:
                                vm_object_pip_add(tbp->b_bufobj->bo_object, -j);
                                for (k = 0; k < j; k++)
                                        vm_page_sunbusy(tbp->b_pages[k]);
                                VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
                                bqrelse(tbp);
                                break;
                        }
                        VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);

                        /*
                         * Set a read-ahead mark as appropriate
                         */
                        if ((fbp && (i == 1)) || (i == (run - 1)))
                                tbp->b_flags |= B_RAM;

                        /*
                         * Set the buffer up for an async read (XXX should
                         * we do this only if we do not wind up brelse()ing?).
                         * Set the block number if it isn't set, otherwise
                         * if it is make sure it matches the block number we
                         * expect.
                         */
                        tbp->b_flags |= B_ASYNC;
                        tbp->b_iocmd = BIO_READ;
                        if (tbp->b_blkno == tbp->b_lblkno) {
                                tbp->b_blkno = bn;
                        } else if (tbp->b_blkno != bn) {
                                VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
                                goto clean_sbusy;
                        }
                }
                /*
                 * XXX fbp from caller may not be B_ASYNC, but we are going
                 * to biodone() it in cluster_callback() anyway
                 */
                BUF_KERNPROC(tbp);
                TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
                        tbp, b_cluster.cluster_entry);
                VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
                for (j = 0; j < tbp->b_npages; j += 1) {
                        vm_page_t m;
                        m = tbp->b_pages[j];
                        if ((bp->b_npages == 0) ||
                            (bp->b_pages[bp->b_npages-1] != m)) {
                                bp->b_pages[bp->b_npages] = m;
                                bp->b_npages++;
                        }
                        if (m->valid == VM_PAGE_BITS_ALL)
                                tbp->b_pages[j] = bogus_page;
                }
                VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
                /*
                 * Don't inherit tbp->b_bufsize as it may be larger due to
                 * a non-page-aligned size.  Instead just aggregate using
                 * 'size'.
                 */
                if (tbp->b_bcount != size)
                        printf("warning: tbp->b_bcount wrong %ld vs %ld\n",
                            tbp->b_bcount, size);
                if (tbp->b_bufsize != size)
                        printf("warning: tbp->b_bufsize wrong %ld vs %ld\n",
                            tbp->b_bufsize, size);
                bp->b_bcount += size;
                bp->b_bufsize += size;
        }

        /*
         * Fully valid pages in the cluster are already good and do not need
         * to be re-read from disk.  Replace the page with bogus_page
         */
        VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
        for (j = 0; j < bp->b_npages; j++) {
                VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[j]->object);
                if (bp->b_pages[j]->valid == VM_PAGE_BITS_ALL)
                        bp->b_pages[j] = bogus_page;
        }
        VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
        if (bp->b_bufsize > bp->b_kvasize)
                panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
                    bp->b_bufsize, bp->b_kvasize);

        if (buf_mapped(bp)) {
                pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
                    (vm_page_t *)bp->b_pages, bp->b_npages);
        }
        return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
static void
cluster_callback(struct buf *bp)
{
        struct buf *nbp, *tbp;
        int error = 0;

        /*
         * Must propagate errors to all the components.
         */
        if (bp->b_ioflags & BIO_ERROR)
                error = bp->b_error;

        if (buf_mapped(bp)) {
                pmap_qremove(trunc_page((vm_offset_t) bp->b_data),
                    bp->b_npages);
        }
        /*
         * Move memory from the large cluster buffer into the component
         * buffers and mark IO as done on these.
         */
        for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
                tbp; tbp = nbp) {
                nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
                if (error) {
                        tbp->b_ioflags |= BIO_ERROR;
                        tbp->b_error = error;
                } else {
                        tbp->b_dirtyoff = tbp->b_dirtyend = 0;
                        tbp->b_flags &= ~B_INVAL;
                        tbp->b_ioflags &= ~BIO_ERROR;
                        /*
                         * XXX the bdwrite()/bqrelse() issued during
                         * cluster building clears B_RELBUF (see bqrelse()
                         * comment).  If direct I/O was specified, we have
                         * to restore it here to allow the buffer and VM
                         * to be freed.
                         */
                        if (tbp->b_flags & B_DIRECT)
                                tbp->b_flags |= B_RELBUF;
                }
                bufdone(tbp);
        }
        pbrelvp(bp);
        relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 *      cluster_wbuild_wb:
 *
 *      Implement modified write build for cluster.
 *
 *              write_behind = 0        write behind disabled
 *              write_behind = 1        write behind normal (default)
 *              write_behind = 2        write behind backed-off
 */

static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len,
    int gbflags)
{
        int r = 0;

        switch (write_behind) {
        case 2:
                if (start_lbn < len)
                        break;
                start_lbn -= len;
                /* FALLTHROUGH */
        case 1:
                r = cluster_wbuild(vp, size, start_lbn, len, gbflags);
                /* FALLTHROUGH */
        default:
                /* FALLTHROUGH */
                break;
        }
        return (r);
}
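
/*
 * Example: with write_behind == 2 and a request covering blocks
 * [start_lbn, start_lbn + len), the cluster actually pushed is the
 * previous window [start_lbn - len, start_lbn), so the writes trail
 * the region still being dirtied.
 */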

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *      1. Write is not sequential (write asynchronously)
 *      Write is sequential:
 *      2.      beginning of cluster - begin cluster
 *      3.      middle of a cluster - add to cluster
 *      4.      end of a cluster - asynchronously write cluster
 */
void
cluster_write(struct vnode *vp, struct buf *bp, u_quad_t filesize, int seqcount,
    int gbflags)
{
        daddr_t lbn;
        int maxclen, cursize;
        int lblocksize;
        int async;

        if (!unmapped_buf_allowed)
                gbflags &= ~GB_UNMAPPED;

        if (vp->v_type == VREG) {
                async = DOINGASYNC(vp);
                lblocksize = vp->v_mount->mnt_stat.f_iosize;
        } else {
                async = 0;
                lblocksize = bp->b_bufsize;
        }
        lbn = bp->b_lblkno;
        KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));

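        /*
         * Write clustering state is kept in the vnode: v_cstart is the
         * first logical block of the cluster being built, v_clen its
         * maximum further length, v_lastw the last logical block
         * written, and v_lasta the disk address of that block (used
         * below to check that the next write is physically contiguous).
         */
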
        /* Initialize vnode to beginning of file. */
        if (lbn == 0)
                vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

        if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
            (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
                maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
                if (vp->v_clen != 0) {
                        /*
                         * Next block is not sequential.
                         *
                         * If we are not writing at end of file, the process
                         * seeked to another point in the file since its last
                         * write, or we have reached our maximum cluster size,
                         * then push the previous cluster. Otherwise try
                         * reallocating to make it sequential.
                         *
                         * Change to algorithm: only push previous cluster if
                         * it was sequential from the point of view of the
                         * seqcount heuristic, otherwise leave the buffer
                         * intact so we can potentially optimize the I/O
                         * later on in the buf_daemon or update daemon
                         * flush.
                         */
                        cursize = vp->v_lastw - vp->v_cstart + 1;
                        if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
                            lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
                                if (!async && seqcount > 0) {
                                        cluster_wbuild_wb(vp, lblocksize,
                                            vp->v_cstart, cursize, gbflags);
                                }
                        } else {
                                struct buf **bpp, **endbp;
                                struct cluster_save *buflist;

                                buflist = cluster_collectbufs(vp, bp, gbflags);
                                endbp = &buflist->bs_children
                                    [buflist->bs_nchildren - 1];
                                if (VOP_REALLOCBLKS(vp, buflist)) {
                                        /*
                                         * Failed, push the previous cluster
                                         * if *really* writing sequentially
                                         * in the logical file (seqcount > 1),
                                         * otherwise delay it in the hopes that
                                         * the low level disk driver can
                                         * optimize the write ordering.
                                         */
                                        for (bpp = buflist->bs_children;
                                             bpp < endbp; bpp++)
                                                brelse(*bpp);
                                        free(buflist, M_SEGMENT);
                                        if (seqcount > 1) {
                                                cluster_wbuild_wb(vp,
                                                    lblocksize, vp->v_cstart,
                                                    cursize, gbflags);
                                        }
                                } else {
                                        /*
                                         * Succeeded, keep building cluster.
                                         */
                                        for (bpp = buflist->bs_children;
                                             bpp <= endbp; bpp++)
                                                bdwrite(*bpp);
                                        free(buflist, M_SEGMENT);
                                        vp->v_lastw = lbn;
                                        vp->v_lasta = bp->b_blkno;
                                        return;
                                }
                        }
                }
                /*
                 * Consider beginning a cluster. If at end of file, make
                 * cluster as large as possible, otherwise find size of
                 * existing cluster.
                 */
                if ((vp->v_type == VREG) &&
                        ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
                    (bp->b_blkno == bp->b_lblkno) &&
                    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
                     bp->b_blkno == -1)) {
                        bawrite(bp);
                        vp->v_clen = 0;
                        vp->v_lasta = bp->b_blkno;
                        vp->v_cstart = lbn + 1;
                        vp->v_lastw = lbn;
                        return;
                }
                vp->v_clen = maxclen;
                if (!async && maxclen == 0) {   /* I/O not contiguous */
                        vp->v_cstart = lbn + 1;
                        bawrite(bp);
                } else {        /* Wait for rest of cluster */
                        vp->v_cstart = lbn;
                        bdwrite(bp);
                }
        } else if (lbn == vp->v_cstart + vp->v_clen) {
                /*
                 * At end of cluster, write it out if seqcount tells us we
                 * are operating sequentially, otherwise let the buf or
                 * update daemon handle it.
                 */
                bdwrite(bp);
                if (seqcount > 1) {
                        cluster_wbuild_wb(vp, lblocksize, vp->v_cstart,
                            vp->v_clen + 1, gbflags);
                }
                vp->v_clen = 0;
                vp->v_cstart = lbn + 1;
        } else if (vm_page_count_severe()) {
                /*
                 * We are low on memory, get it going NOW
                 */
                bawrite(bp);
        } else {
                /*
                 * In the middle of a cluster, so just delay the I/O for now.
                 */
                bdwrite(bp);
        }
        vp->v_lastw = lbn;
        vp->v_lasta = bp->b_blkno;
}
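
/*
 * Usage sketch (illustrative, modeled on the FFS write path): after
 * dirtying a full block, a filesystem hands the buffer to
 * cluster_write() instead of bawrite()/bdwrite() when the vnode is
 * being written sequentially:
 *
 *      if (doclusterwrite)
 *              cluster_write(vp, bp, ip->i_size, seqcount, 0);
 *      else
 *              bawrite(bp);
 *
 * "doclusterwrite" and "ip" stand in for the caller's own policy and
 * inode state; they are not defined in this file.
 */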

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * Scan up to len logical blocks starting at start_lbn and gather the
 * dirty, physically contiguous buffers among them into larger writes.
 */
int
cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len,
    int gbflags)
{
        struct buf *bp, *tbp;
        struct bufobj *bo;
        int i, j;
        int totalwritten = 0;
        int dbsize = btodb(size);
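        /*
         * dbsize counts the DEV_BSIZE sectors per file system block;
         * it is used below to verify that a candidate buffer's disk
         * address is exactly i blocks past the start of the cluster.
         */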

        if (!unmapped_buf_allowed)
                gbflags &= ~GB_UNMAPPED;

        bo = &vp->v_bufobj;
        while (len > 0) {
                /*
                 * If the buffer is not delayed-write (i.e. dirty), or it
                 * is delayed-write but either locked or inval, it cannot
                 * partake in the clustered write.
                 */
                BO_LOCK(bo);
                if ((tbp = gbincore(&vp->v_bufobj, start_lbn)) == NULL ||
                    (tbp->b_vflags & BV_BKGRDINPROG)) {
                        BO_UNLOCK(bo);
                        ++start_lbn;
                        --len;
                        continue;
                }
                if (BUF_LOCK(tbp,
                    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, BO_LOCKPTR(bo))) {
                        ++start_lbn;
                        --len;
                        continue;
                }
                if ((tbp->b_flags & (B_INVAL | B_DELWRI)) != B_DELWRI) {
                        BUF_UNLOCK(tbp);
                        ++start_lbn;
                        --len;
                        continue;
                }
                bremfree(tbp);
                tbp->b_flags &= ~B_DONE;

                /*
                 * Extra memory in the buffer, punt on this buffer.
                 * XXX we could handle this in most cases, but we would
                 * have to push the extra memory down to after our max
                 * possible cluster size and then potentially pull it back
                 * up if the cluster was terminated prematurely--too much
                 * hassle.
                 */
                if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
                     (B_CLUSTEROK | B_VMIO)) ||
                  (tbp->b_bcount != tbp->b_bufsize) ||
                  (tbp->b_bcount != size) ||
                  (len == 1) ||
                  ((bp = (vp->v_vflag & VV_MD) != 0 ?
                  trypbuf(&cluster_pbuf_freecnt) :
                  getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
                        totalwritten += tbp->b_bufsize;
                        bawrite(tbp);
                        ++start_lbn;
                        --len;
                        continue;
                }
869                 /*
870                  * We got a pbuf to make the cluster in.
871                  * so initialise it.
872                  */
                TAILQ_INIT(&bp->b_cluster.cluster_head);
                bp->b_bcount = 0;
                bp->b_bufsize = 0;
                bp->b_npages = 0;
                if (tbp->b_wcred != NOCRED)
                        bp->b_wcred = crhold(tbp->b_wcred);

                bp->b_blkno = tbp->b_blkno;
                bp->b_lblkno = tbp->b_lblkno;
                bp->b_offset = tbp->b_offset;

                /*
                 * We are synthesizing a buffer out of vm_page_t's, but
                 * if the block size is not page aligned then the starting
                 * address may not be either.  Inherit the b_data offset
                 * from the original buffer.
                 */
                if ((gbflags & GB_UNMAPPED) == 0 ||
                    (tbp->b_flags & B_VMIO) == 0) {
                        bp->b_data = (char *)((vm_offset_t)bp->b_data |
                            ((vm_offset_t)tbp->b_data & PAGE_MASK));
                } else {
                        bp->b_data = unmapped_buf;
                }
                bp->b_flags |= B_CLUSTER | (tbp->b_flags & (B_VMIO |
                    B_NEEDCOMMIT));
                bp->b_iodone = cluster_callback;
                pbgetvp(vp, bp);
                /*
                 * From this location in the file, scan forward to see
                 * if there are buffers with adjacent data that need to
                 * be written as well.
                 */
                for (i = 0; i < len; ++i, ++start_lbn) {
                        if (i != 0) { /* If not the first buffer */
                                /*
                                 * If the adjacent data is not even in core it
                                 * can't need to be written.
                                 */
                                BO_LOCK(bo);
                                if ((tbp = gbincore(bo, start_lbn)) == NULL ||
                                    (tbp->b_vflags & BV_BKGRDINPROG)) {
                                        BO_UNLOCK(bo);
                                        break;
                                }

                                /*
                                 * If it IS in core, but has different
                                 * characteristics, or is locked (which
                                 * means it could be undergoing a background
                                 * I/O or be in a weird state), then don't
                                 * cluster with it.
                                 */
                                if (BUF_LOCK(tbp,
                                    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
                                    BO_LOCKPTR(bo)))
                                        break;

                                if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
                                    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
                                    != (B_DELWRI | B_CLUSTEROK |
                                    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
                                    tbp->b_wcred != bp->b_wcred) {
                                        BUF_UNLOCK(tbp);
                                        break;
                                }

                                /*
                                 * Check that the combined cluster
                                 * would make sense with regard to pages
                                 * and would not be too large
                                 */
                                if ((tbp->b_bcount != size) ||
                                  ((bp->b_blkno + (dbsize * i)) !=
                                    tbp->b_blkno) ||
                                  ((tbp->b_npages + bp->b_npages) >
                                    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
                                        BUF_UNLOCK(tbp);
                                        break;
                                }

                                /*
                                 * Ok, it's passed all the tests,
                                 * so remove it from the free list
                                 * and mark it busy. We will use it.
                                 */
                                bremfree(tbp);
                                tbp->b_flags &= ~B_DONE;
                        } /* end of code for non-first buffers only */
                        /*
                         * If the IO is via the VM then we do some
                         * special VM hackery (yuck).  Since the buffer's
                         * block size may not be page-aligned it is possible
                         * for a page to be shared between two buffers.  We
                         * have to get rid of the duplication when building
                         * the cluster.
                         */
                        if (tbp->b_flags & B_VMIO) {
                                vm_page_t m;

                                VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
                                if (i == 0) {
                                        vfs_drain_busy_pages(tbp);
                                } else { /* if not first buffer */
                                        for (j = 0; j < tbp->b_npages; j += 1) {
                                                m = tbp->b_pages[j];
                                                if (vm_page_xbusied(m)) {
                                                        VM_OBJECT_WUNLOCK(
                                                            tbp->b_bufobj->bo_object);
                                                        bqrelse(tbp);
                                                        goto finishcluster;
                                                }
                                        }
                                }
                                for (j = 0; j < tbp->b_npages; j += 1) {
                                        m = tbp->b_pages[j];
                                        vm_page_sbusy(m);
                                        vm_object_pip_add(m->object, 1);
                                        if ((bp->b_npages == 0) ||
                                          (bp->b_pages[bp->b_npages - 1] != m)) {
                                                bp->b_pages[bp->b_npages] = m;
                                                bp->b_npages++;
                                        }
                                }
                                VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
                        }
                        bp->b_bcount += size;
                        bp->b_bufsize += size;
                        /*
                         * If any of the clustered buffers have their
                         * B_BARRIER flag set, transfer that request to
                         * the cluster.
                         */
                        bp->b_flags |= (tbp->b_flags & B_BARRIER);
                        tbp->b_flags &= ~(B_DONE | B_BARRIER);
                        tbp->b_flags |= B_ASYNC;
                        tbp->b_ioflags &= ~BIO_ERROR;
                        tbp->b_iocmd = BIO_WRITE;
                        bundirty(tbp);
                        reassignbuf(tbp);               /* put on clean list */
                        bufobj_wref(tbp->b_bufobj);
                        BUF_KERNPROC(tbp);
                        buf_track(tbp, __func__);
                        TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
                                tbp, b_cluster.cluster_entry);
                }
        finishcluster:
                if (buf_mapped(bp)) {
                        pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
                            (vm_page_t *)bp->b_pages, bp->b_npages);
                }
                if (bp->b_bufsize > bp->b_kvasize)
                        panic(
                            "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
                            bp->b_bufsize, bp->b_kvasize);
                totalwritten += bp->b_bufsize;
                bp->b_dirtyoff = 0;
                bp->b_dirtyend = bp->b_bufsize;
                bawrite(bp);

                len -= i;
        }
        return (totalwritten);
}

/*
 * Collect together all the buffers in a cluster, plus the one
 * additional buffer passed in by the caller.
 */
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int gbflags)
{
        struct cluster_save *buflist;
        struct buf *bp;
        daddr_t lbn;
        int i, len;

        len = vp->v_lastw - vp->v_cstart + 1;
        buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
            M_SEGMENT, M_WAITOK);
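        /*
         * A single allocation holds both the cluster_save header and
         * the bs_children pointer array: room for the len buffers
         * collected below plus the caller-supplied last_bp.
         */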
        buflist->bs_nchildren = 0;
        buflist->bs_children = (struct buf **) (buflist + 1);
        for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
                (void)bread_gb(vp, lbn, last_bp->b_bcount, NOCRED,
                    gbflags, &bp);
                buflist->bs_children[i] = bp;
                if (bp->b_blkno == bp->b_lblkno)
                        VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
                                NULL, NULL);
        }
        buflist->bs_children[i] = bp = last_bp;
        if (bp->b_blkno == bp->b_lblkno)
                VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
        buflist->bs_nchildren = i + 1;
        return (buflist);
}