/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1993
 *      The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 *      Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vfs_cluster.c       8.7 (Berkeley) 2/13/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

static MALLOC_DEFINE(M_SEGMENT, "cl_savebuf", "cluster_save buffer");
static uma_zone_t cluster_pbuf_zone;

static void cluster_init(void *);
static struct cluster_save *cluster_collectbufs(struct vnode *vp,
            struct buf *last_bp, int gbflags);
static struct buf *cluster_rbuild(struct vnode *vp, u_quad_t filesize,
            daddr_t lbn, daddr_t blkno, long size, int run, int gbflags,
            struct buf *fbp);
static void cluster_callback(struct buf *);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind; 0: disable, 1: enable, 2: backed off");

static int read_max = 64;
SYSCTL_INT(_vfs, OID_AUTO, read_max, CTLFLAG_RW, &read_max, 0,
    "Cluster read-ahead max block count");

static int read_min = 1;
SYSCTL_INT(_vfs, OID_AUTO, read_min, CTLFLAG_RW, &read_min, 0,
    "Cluster read min block count");
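
/*
 * The knobs above are ordinary run-time tunables under the "vfs" sysctl
 * tree.  For example (illustrative commands, not part of this file),
 * read-ahead can be widened with:
 *
 *      sysctl vfs.read_max=128
 *
 * and write-behind can be disabled entirely with:
 *
 *      sysctl vfs.write_behind=0
 */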

SYSINIT(cluster, SI_SUB_CPU, SI_ORDER_ANY, cluster_init, NULL);

static void
cluster_init(void *dummy)
{

        cluster_pbuf_zone = pbuf_zsecond_create("clpbuf", nswbuf / 2);
}

/*
 * Read data to a buf, including read-ahead if we find this to be beneficial.
 * cluster_read replaces bread.
 */
int
cluster_read(struct vnode *vp, u_quad_t filesize, daddr_t lblkno, long size,
    struct ucred *cred, long totread, int seqcount, int gbflags,
    struct buf **bpp)
{
        struct buf *bp, *rbp, *reqbp;
        struct bufobj *bo;
        struct thread *td;
        daddr_t blkno, origblkno;
        int maxra, racluster;
        int error, ncontig;
        int i;

        error = 0;
        td = curthread;
        bo = &vp->v_bufobj;
        if (!unmapped_buf_allowed)
                gbflags &= ~GB_UNMAPPED;

        /*
         * Try to limit the amount of read-ahead by a few
         * ad-hoc parameters.  This needs work!!!
         */
        racluster = vp->v_mount->mnt_iosize_max / size;
        maxra = seqcount;
        maxra = min(read_max, maxra);
        maxra = min(nbuf/8, maxra);
        if (((u_quad_t)(lblkno + maxra + 1) * size) > filesize)
                maxra = (filesize / size) - lblkno;
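
        /*
         * Illustrative numbers (not normative): with a 128kB
         * mnt_iosize_max and 16kB blocks, racluster is 8; a seqcount of
         * 16 with the default read_max of 64 then yields maxra = 16,
         * further clipped immediately above so the read-ahead window
         * never extends past the last block of the file.
         */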

        /*
         * get the requested block
         */
        error = getblkx(vp, lblkno, lblkno, size, 0, 0, gbflags, &bp);
        if (error != 0) {
                *bpp = NULL;
                return (error);
        }
        gbflags &= ~GB_NOSPARSE;
        origblkno = lblkno;
        *bpp = reqbp = bp;

        /*
         * if it is in the cache, then check to see if the reads have been
         * sequential.  If they have, then try some read-ahead, otherwise
         * back-off on prospective read-aheads.
         */
        if (bp->b_flags & B_CACHE) {
                if (!seqcount) {
                        return (0);
                } else if ((bp->b_flags & B_RAM) == 0) {
                        return (0);
                } else {
                        bp->b_flags &= ~B_RAM;
                        BO_RLOCK(bo);
                        for (i = 1; i < maxra; i++) {
                                /*
                                 * Stop if the buffer does not exist or it
                                 * is invalid (about to go away?)
                                 */
                                rbp = gbincore(&vp->v_bufobj, lblkno+i);
                                if (rbp == NULL || (rbp->b_flags & B_INVAL))
                                        break;

                                /*
                                 * Set another read-ahead mark so we know
                                 * to check again, provided we can lock the
                                 * buffer without waiting.
                                 */
                                if ((i % racluster == racluster - 1 ||
                                    i == maxra - 1) &&
                                    BUF_LOCK(rbp, LK_EXCLUSIVE | LK_NOWAIT,
                                    NULL) == 0) {
                                        rbp->b_flags |= B_RAM;
                                        BUF_UNLOCK(rbp);
                                }
                        }
                        BO_RUNLOCK(bo);
                        if (i >= maxra) {
                                return (0);
                        }
                        lblkno += i;
                }
                reqbp = bp = NULL;
        /*
         * If it isn't in the cache, then get a chunk from
         * disk if sequential, otherwise just get the block.
         */
        } else {
                off_t firstread = bp->b_offset;
                int nblks;
                long minread;

                KASSERT(bp->b_offset != NOOFFSET,
                    ("cluster_read: no buffer offset"));

                ncontig = 0;

                /*
                 * Adjust totread if needed
                 */
                minread = read_min * size;
                if (minread > totread)
                        totread = minread;

                /*
                 * Compute the total number of blocks that we should read
                 * synchronously.
                 */
                if (firstread + totread > filesize)
                        totread = filesize - firstread;
                nblks = howmany(totread, size);
                if (nblks > racluster)
                        nblks = racluster;

                /*
                 * Now compute the number of contiguous blocks.
                 */
                if (nblks > 1) {
                        error = VOP_BMAP(vp, lblkno, NULL,
                                &blkno, &ncontig, NULL);
                        /*
                         * If this failed to map just do the original block.
                         */
                        if (error || blkno == -1)
                                ncontig = 0;
                }

                /*
                 * If we have contiguous data available do a cluster
                 * otherwise just read the requested block.
                 */
                if (ncontig) {
                        /* Account for our first block. */
                        ncontig = min(ncontig + 1, nblks);
                        if (ncontig < nblks)
                                nblks = ncontig;
                        bp = cluster_rbuild(vp, filesize, lblkno,
                            blkno, size, nblks, gbflags, bp);
                        lblkno += (bp->b_bufsize / size);
                } else {
                        bp->b_flags |= B_RAM;
                        bp->b_iocmd = BIO_READ;
                        lblkno += 1;
                }
        }

        /*
         * handle the synchronous read so that it is available ASAP.
         */
        if (bp) {
                if ((bp->b_flags & B_CLUSTER) == 0) {
                        vfs_busy_pages(bp, 0);
                }
                bp->b_flags &= ~B_INVAL;
                bp->b_ioflags &= ~BIO_ERROR;
                if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
                        BUF_KERNPROC(bp);
                bp->b_iooffset = dbtob(bp->b_blkno);
                bstrategy(bp);
#ifdef RACCT
                if (racct_enable) {
                        PROC_LOCK(td->td_proc);
                        racct_add_buf(td->td_proc, bp, 0);
                        PROC_UNLOCK(td->td_proc);
                }
#endif /* RACCT */
                td->td_ru.ru_inblock++;
        }

        /*
         * If we have been doing sequential I/O, then do some read-ahead.
         */
        while (lblkno < (origblkno + maxra)) {
                error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL);
                if (error)
                        break;

                if (blkno == -1)
                        break;

                /*
                 * We could throttle ncontig here by maxra but we might as
                 * well read the data if it is contiguous.  We're throttled
                 * by racluster anyway.
                 */
                if (ncontig) {
                        ncontig = min(ncontig + 1, racluster);
                        rbp = cluster_rbuild(vp, filesize, lblkno, blkno,
                            size, ncontig, gbflags, NULL);
                        lblkno += (rbp->b_bufsize / size);
                        if (rbp->b_flags & B_DELWRI) {
                                bqrelse(rbp);
                                continue;
                        }
                } else {
                        rbp = getblk(vp, lblkno, size, 0, 0, gbflags);
                        lblkno += 1;
                        if (rbp->b_flags & B_DELWRI) {
                                bqrelse(rbp);
                                continue;
                        }
                        rbp->b_flags |= B_ASYNC | B_RAM;
                        rbp->b_iocmd = BIO_READ;
                        rbp->b_blkno = blkno;
                }
                if (rbp->b_flags & B_CACHE) {
                        rbp->b_flags &= ~B_ASYNC;
                        bqrelse(rbp);
                        continue;
                }
                if ((rbp->b_flags & B_CLUSTER) == 0) {
                        vfs_busy_pages(rbp, 0);
                }
                rbp->b_flags &= ~B_INVAL;
                rbp->b_ioflags &= ~BIO_ERROR;
                if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
                        BUF_KERNPROC(rbp);
                rbp->b_iooffset = dbtob(rbp->b_blkno);
                bstrategy(rbp);
#ifdef RACCT
                if (racct_enable) {
                        PROC_LOCK(td->td_proc);
                        racct_add_buf(td->td_proc, rbp, 0);
                        PROC_UNLOCK(td->td_proc);
                }
#endif /* RACCT */
                td->td_ru.ru_inblock++;
        }

        if (reqbp) {
                /*
                 * Like bread, always brelse() the buffer when
                 * returning an error.
                 */
                error = bufwait(reqbp);
                if (error != 0) {
                        brelse(reqbp);
                        *bpp = NULL;
                }
        }
        return (error);
}
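
/*
 * Example: how a filesystem read path might pick between bread_gb() and
 * cluster_read().  This is only an illustrative sketch, loosely modeled on
 * the way ffs_read() drives clustering; "ip", "fs", "lbn", "nextlbn",
 * "size", "uio" and "seqcount" are assumed caller-side variables, not part
 * of this file.
 *
 *      if (lblktosize(fs, nextlbn) >= ip->i_size) {
 *              // Last block of the file: no read-ahead is possible.
 *              error = bread_gb(vp, lbn, size, NOCRED, gbflags, &bp);
 *      } else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
 *              // Sequential access: let cluster_read() issue the
 *              // read-ahead on our behalf.
 *              error = cluster_read(vp, ip->i_size, lbn, size, NOCRED,
 *                  uio->uio_resid, seqcount, gbflags, &bp);
 *      } else {
 *              error = bread_gb(vp, lbn, size, NOCRED, gbflags, &bp);
 *      }
 */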

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
    daddr_t blkno, long size, int run, int gbflags, struct buf *fbp)
{
        struct buf *bp, *tbp;
        daddr_t bn;
        off_t off;
        long tinc, tsize;
        int i, inc, j, k, toff;

        KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
            ("cluster_rbuild: size %ld != f_iosize %jd\n",
            size, (intmax_t)vp->v_mount->mnt_stat.f_iosize));

        /*
         * avoid a division
         */
        while ((u_quad_t) size * (lbn + run) > filesize) {
                --run;
        }

        if (fbp) {
                tbp = fbp;
                tbp->b_iocmd = BIO_READ;
        } else {
                tbp = getblk(vp, lbn, size, 0, 0, gbflags);
                if (tbp->b_flags & B_CACHE)
                        return (tbp);
                tbp->b_flags |= B_ASYNC | B_RAM;
                tbp->b_iocmd = BIO_READ;
        }
        tbp->b_blkno = blkno;
        if ((tbp->b_flags & B_MALLOC) ||
            ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
                return (tbp);

        bp = uma_zalloc(cluster_pbuf_zone, M_NOWAIT);
        if (bp == NULL)
                return (tbp);
        MPASS((bp->b_flags & B_MAXPHYS) != 0);

        /*
         * We are synthesizing a buffer out of vm_page_t's, but
         * if the block size is not page aligned then the starting
         * address may not be either.  Inherit the b_data offset
         * from the original buffer.
         */
        bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
        if ((gbflags & GB_UNMAPPED) != 0) {
                bp->b_data = unmapped_buf;
        } else {
                bp->b_data = (char *)((vm_offset_t)bp->b_data |
                    ((vm_offset_t)tbp->b_data & PAGE_MASK));
        }
        bp->b_iocmd = BIO_READ;
        bp->b_iodone = cluster_callback;
        bp->b_blkno = blkno;
        bp->b_lblkno = lbn;
        bp->b_offset = tbp->b_offset;
        KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
        pbgetvp(vp, bp);

        TAILQ_INIT(&bp->b_cluster.cluster_head);

        bp->b_bcount = 0;
        bp->b_bufsize = 0;
        bp->b_npages = 0;

        inc = btodb(size);
        for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
                if (i == 0) {
                        vm_object_pip_add(tbp->b_bufobj->bo_object,
                            tbp->b_npages);
                        vfs_busy_pages_acquire(tbp);
                } else {
                        if ((bp->b_npages * PAGE_SIZE) +
                            round_page(size) > vp->v_mount->mnt_iosize_max) {
                                break;
                        }

                        tbp = getblk(vp, lbn + i, size, 0, 0, GB_LOCK_NOWAIT |
                            (gbflags & GB_UNMAPPED));

                        /* Don't wait around for locked bufs. */
                        if (tbp == NULL)
                                break;

                        /*
                         * Stop scanning if the buffer is fully valid
                         * (marked B_CACHE), or locked (may be doing a
                         * background write), or if the buffer is not
                         * VMIO backed.  The clustering code can only deal
                         * with VMIO-backed buffers.  The bo lock is not
                         * required for the BKGRDINPROG check since it
                         * can not be set without the buf lock.
                         */
                        if ((tbp->b_vflags & BV_BKGRDINPROG) ||
                            (tbp->b_flags & B_CACHE) ||
                            (tbp->b_flags & B_VMIO) == 0) {
                                bqrelse(tbp);
                                break;
                        }

                        /*
                         * The buffer must be completely invalid in order to
                         * take part in the cluster.  If it is partially valid
                         * then we stop.
                         */
                        off = tbp->b_offset;
                        tsize = size;
                        for (j = 0; tsize > 0; j++) {
                                toff = off & PAGE_MASK;
                                tinc = tsize;
                                if (toff + tinc > PAGE_SIZE)
                                        tinc = PAGE_SIZE - toff;
                                if (vm_page_trysbusy(tbp->b_pages[j]) == 0)
                                        break;
                                if ((tbp->b_pages[j]->valid &
                                    vm_page_bits(toff, tinc)) != 0) {
                                        vm_page_sunbusy(tbp->b_pages[j]);
                                        break;
                                }
                                vm_object_pip_add(tbp->b_bufobj->bo_object, 1);
                                off += tinc;
                                tsize -= tinc;
                        }
                        if (tsize > 0) {
clean_sbusy:
                                vm_object_pip_wakeupn(tbp->b_bufobj->bo_object,
                                    j);
                                for (k = 0; k < j; k++)
                                        vm_page_sunbusy(tbp->b_pages[k]);
                                bqrelse(tbp);
                                break;
                        }

                        /*
                         * Set a read-ahead mark as appropriate
                         */
                        if ((fbp && (i == 1)) || (i == (run - 1)))
                                tbp->b_flags |= B_RAM;

                        /*
                         * Set the buffer up for an async read (XXX should
                         * we do this only if we do not wind up brelse()ing?).
                         * Set the block number if it isn't set, otherwise
                         * if it is make sure it matches the block number we
                         * expect.
                         */
                        tbp->b_flags |= B_ASYNC;
                        tbp->b_iocmd = BIO_READ;
                        if (tbp->b_blkno == tbp->b_lblkno) {
                                tbp->b_blkno = bn;
                        } else if (tbp->b_blkno != bn) {
                                goto clean_sbusy;
                        }
                }
                /*
                 * XXX fbp from caller may not be B_ASYNC, but we are going
                 * to biodone() it in cluster_callback() anyway
                 */
                BUF_KERNPROC(tbp);
                TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
                        tbp, b_cluster.cluster_entry);
                for (j = 0; j < tbp->b_npages; j += 1) {
                        vm_page_t m;

                        m = tbp->b_pages[j];
                        if ((bp->b_npages == 0) ||
                            (bp->b_pages[bp->b_npages-1] != m)) {
                                bp->b_pages[bp->b_npages] = m;
                                bp->b_npages++;
                        }
                        if (vm_page_all_valid(m))
                                tbp->b_pages[j] = bogus_page;
                }

                /*
                 * Don't inherit tbp->b_bufsize as it may be larger due to
                 * a non-page-aligned size.  Instead just aggregate using
                 * 'size'.
                 */
                if (tbp->b_bcount != size)
                        printf("warning: tbp->b_bcount wrong %ld vs %ld\n",
                            tbp->b_bcount, size);
                if (tbp->b_bufsize != size)
                        printf("warning: tbp->b_bufsize wrong %ld vs %ld\n",
                            tbp->b_bufsize, size);
                bp->b_bcount += size;
                bp->b_bufsize += size;
        }

        /*
         * Fully valid pages in the cluster are already good and do not need
         * to be re-read from disk.  Replace the page with bogus_page
         */
        for (j = 0; j < bp->b_npages; j++) {
                if (vm_page_all_valid(bp->b_pages[j]))
                        bp->b_pages[j] = bogus_page;
        }
        if (bp->b_bufsize > bp->b_kvasize)
                panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
                    bp->b_bufsize, bp->b_kvasize);

        if (buf_mapped(bp)) {
                pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
                    (vm_page_t *)bp->b_pages, bp->b_npages);
        }
        return (bp);
}
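
/*
 * Illustrative example (not normative): with 16kB filesystem blocks,
 * btodb(16384) is 32, so a run of 4 contiguous blocks whose first disk
 * address is 1000 yields component buffers at logical blocks lbn, lbn+1,
 * lbn+2, lbn+3 and disk blocks 1000, 1032, 1064 and 1096, all carried by
 * a single 64kB cluster pbuf whose b_pages[] array aliases the component
 * buffers' pages; positions that are already fully valid are pointed at
 * bogus_page so the read does not clobber good cached data.
 */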

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
static void
cluster_callback(struct buf *bp)
{
        struct buf *nbp, *tbp;
        int error = 0;

        /*
         * Must propagate errors to all the components.
         */
        if (bp->b_ioflags & BIO_ERROR)
                error = bp->b_error;

        if (buf_mapped(bp)) {
                pmap_qremove(trunc_page((vm_offset_t) bp->b_data),
                    bp->b_npages);
        }
        /*
         * Move memory from the large cluster buffer into the component
         * buffers and mark IO as done on these.
         */
        for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
                tbp; tbp = nbp) {
                nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
                if (error) {
                        tbp->b_ioflags |= BIO_ERROR;
                        tbp->b_error = error;
                } else {
                        tbp->b_dirtyoff = tbp->b_dirtyend = 0;
                        tbp->b_flags &= ~B_INVAL;
                        tbp->b_ioflags &= ~BIO_ERROR;
                        /*
                         * XXX the bdwrite()/bqrelse() issued during
                         * cluster building clears B_RELBUF (see bqrelse()
                         * comment).  If direct I/O was specified, we have
                         * to restore it here to allow the buffer and VM
                         * to be freed.
                         */
                        if (tbp->b_flags & B_DIRECT)
                                tbp->b_flags |= B_RELBUF;
                }
                bufdone(tbp);
        }
        pbrelvp(bp);
        uma_zfree(cluster_pbuf_zone, bp);
}

/*
 *      cluster_wbuild_wb:
 *
 *      Implement modified write build for cluster.
 *
 *              write_behind = 0        write behind disabled
 *              write_behind = 1        write behind normal (default)
 *              write_behind = 2        write behind backed-off
 */

static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len,
    int gbflags)
{
        int r = 0;

        switch (write_behind) {
        case 2:
                if (start_lbn < len)
                        break;
                start_lbn -= len;
                /* FALLTHROUGH */
        case 1:
                r = cluster_wbuild(vp, size, start_lbn, len, gbflags);
                /* FALLTHROUGH */
        default:
                break;
        }
        return (r);
}
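
/*
 * Illustrative example (not normative): with the default
 * vfs.write_behind=1, a request to push the 32-block cluster starting at
 * logical block 64 writes blocks 64-95.  With vfs.write_behind=2 the same
 * request is backed off by one cluster length and writes blocks 32-63
 * instead, trailing the region that is still being dirtied; with
 * vfs.write_behind=0 nothing is written here at all.
 */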

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *      1. Write is not sequential (write asynchronously)
 *      Write is sequential:
 *      2.      beginning of cluster - begin cluster
 *      3.      middle of a cluster - add to cluster
 *      4.      end of a cluster - asynchronously write cluster
 */
void
cluster_write(struct vnode *vp, struct buf *bp, u_quad_t filesize, int seqcount,
    int gbflags)
{
        daddr_t lbn;
        int maxclen, cursize;
        int lblocksize;
        int async;

        if (!unmapped_buf_allowed)
                gbflags &= ~GB_UNMAPPED;

        if (vp->v_type == VREG) {
                async = DOINGASYNC(vp);
                lblocksize = vp->v_mount->mnt_stat.f_iosize;
        } else {
                async = 0;
                lblocksize = bp->b_bufsize;
        }
        lbn = bp->b_lblkno;
        KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));

        /* Initialize vnode to beginning of file. */
        if (lbn == 0)
                vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

        if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
            (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
                maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
                if (vp->v_clen != 0) {
                        /*
                         * Next block is not sequential.
                         *
                         * If we are not writing at end of file, the process
                         * seeked to another point in the file since its last
                         * write, or we have reached our maximum cluster size,
                         * then push the previous cluster. Otherwise try
                         * reallocating to make it sequential.
                         *
                         * Change to algorithm: only push previous cluster if
                         * it was sequential from the point of view of the
                         * seqcount heuristic, otherwise leave the buffer
                         * intact so we can potentially optimize the I/O
                         * later on in the buf_daemon or update daemon
                         * flush.
                         */
                        cursize = vp->v_lastw - vp->v_cstart + 1;
                        if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
                            lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
                                if (!async && seqcount > 0) {
                                        cluster_wbuild_wb(vp, lblocksize,
                                            vp->v_cstart, cursize, gbflags);
                                }
                        } else {
                                struct buf **bpp, **endbp;
                                struct cluster_save *buflist;

                                buflist = cluster_collectbufs(vp, bp, gbflags);
                                if (buflist == NULL) {
                                        /*
                                         * Cluster build failed so just write
                                         * it now.
                                         */
                                        bawrite(bp);
                                        return;
                                }
                                endbp = &buflist->bs_children
                                    [buflist->bs_nchildren - 1];
                                if (VOP_REALLOCBLKS(vp, buflist)) {
                                        /*
                                         * Failed, push the previous cluster
                                         * if *really* writing sequentially
                                         * in the logical file (seqcount > 1),
                                         * otherwise delay it in the hopes that
                                         * the low level disk driver can
                                         * optimize the write ordering.
                                         */
                                        for (bpp = buflist->bs_children;
                                             bpp < endbp; bpp++)
                                                brelse(*bpp);
                                        free(buflist, M_SEGMENT);
                                        if (seqcount > 1) {
                                                cluster_wbuild_wb(vp,
                                                    lblocksize, vp->v_cstart,
                                                    cursize, gbflags);
                                        }
                                } else {
                                        /*
                                         * Succeeded, keep building cluster.
                                         */
                                        for (bpp = buflist->bs_children;
                                             bpp <= endbp; bpp++)
                                                bdwrite(*bpp);
                                        free(buflist, M_SEGMENT);
                                        vp->v_lastw = lbn;
                                        vp->v_lasta = bp->b_blkno;
                                        return;
                                }
                        }
                }
                /*
                 * Consider beginning a cluster. If at end of file, make
                 * cluster as large as possible, otherwise find size of
                 * existing cluster.
                 */
                if ((vp->v_type == VREG) &&
                    ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
                    (bp->b_blkno == bp->b_lblkno) &&
                    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
                     bp->b_blkno == -1)) {
                        bawrite(bp);
                        vp->v_clen = 0;
                        vp->v_lasta = bp->b_blkno;
                        vp->v_cstart = lbn + 1;
                        vp->v_lastw = lbn;
                        return;
                }
                vp->v_clen = maxclen;
                if (!async && maxclen == 0) {   /* I/O not contiguous */
                        vp->v_cstart = lbn + 1;
                        bawrite(bp);
                } else {        /* Wait for rest of cluster */
                        vp->v_cstart = lbn;
                        bdwrite(bp);
                }
        } else if (lbn == vp->v_cstart + vp->v_clen) {
                /*
                 * At end of cluster, write it out if seqcount tells us we
                 * are operating sequentially, otherwise let the buf or
                 * update daemon handle it.
                 */
                bdwrite(bp);
                if (seqcount > 1) {
                        cluster_wbuild_wb(vp, lblocksize, vp->v_cstart,
                            vp->v_clen + 1, gbflags);
                }
                vp->v_clen = 0;
                vp->v_cstart = lbn + 1;
        } else if (vm_page_count_severe()) {
                /*
                 * We are low on memory, get it going NOW
                 */
                bawrite(bp);
        } else {
                /*
                 * In the middle of a cluster, so just delay the I/O for now.
                 */
                bdwrite(bp);
        }
        vp->v_lastw = lbn;
        vp->v_lasta = bp->b_blkno;
}
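
/*
 * Example: how a filesystem write path might hand a just-filled block to
 * cluster_write().  This is only an illustrative sketch, loosely modeled
 * on ffs_write(); "ip" and "seqcount" are assumed caller-side variables,
 * not part of this file.
 *
 *      if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0 &&
 *          bp->b_bcount == vp->v_mount->mnt_stat.f_iosize) {
 *              // A full logical block was written; let the clustering
 *              // code decide whether to start, extend or flush a cluster.
 *              bp->b_flags |= B_CLUSTEROK;
 *              cluster_write(vp, bp, ip->i_size, seqcount, 0);
 *      } else {
 *              // Partial block or clustering disabled: just schedule an
 *              // ordinary delayed write.
 *              bdwrite(bp);
 *      }
 */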

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * Scan the len logical blocks starting at start_lbn, gathering adjacent
 * delayed-write buffers into clusters and issuing them as asynchronous
 * writes.  Returns the total number of bytes written.
 */
int
cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len,
    int gbflags)
{
        struct buf *bp, *tbp;
        struct bufobj *bo;
        int i, j;
        int totalwritten = 0;
        int dbsize = btodb(size);

        if (!unmapped_buf_allowed)
                gbflags &= ~GB_UNMAPPED;

        bo = &vp->v_bufobj;
        while (len > 0) {
                /*
                 * If the buffer is not delayed-write (i.e. dirty), or it
                 * is delayed-write but either locked or inval, it cannot
                 * partake in the clustered write.
                 */
                BO_LOCK(bo);
                if ((tbp = gbincore(&vp->v_bufobj, start_lbn)) == NULL ||
                    (tbp->b_vflags & BV_BKGRDINPROG)) {
                        BO_UNLOCK(bo);
                        ++start_lbn;
                        --len;
                        continue;
                }
                if (BUF_LOCK(tbp,
                    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, BO_LOCKPTR(bo))) {
                        ++start_lbn;
                        --len;
                        continue;
                }
                if ((tbp->b_flags & (B_INVAL | B_DELWRI)) != B_DELWRI) {
                        BUF_UNLOCK(tbp);
                        ++start_lbn;
                        --len;
                        continue;
                }
                bremfree(tbp);
                tbp->b_flags &= ~B_DONE;

                /*
                 * Extra memory in the buffer, punt on this buffer.
                 * XXX we could handle this in most cases, but we would
                 * have to push the extra memory down to after our max
                 * possible cluster size and then potentially pull it back
                 * up if the cluster was terminated prematurely--too much
                 * hassle.
                 */
                if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
                     (B_CLUSTEROK | B_VMIO)) ||
                    (tbp->b_bcount != tbp->b_bufsize) ||
                    (tbp->b_bcount != size) ||
                    (len == 1) ||
                    ((bp = uma_zalloc(cluster_pbuf_zone, M_NOWAIT)) == NULL)) {
                        totalwritten += tbp->b_bufsize;
                        bawrite(tbp);
                        ++start_lbn;
                        --len;
                        continue;
                }
                MPASS((bp->b_flags & B_MAXPHYS) != 0);

                /*
                 * We got a pbuf to make the cluster in,
                 * so initialize it.
                 */
                TAILQ_INIT(&bp->b_cluster.cluster_head);
                bp->b_bcount = 0;
                bp->b_bufsize = 0;
                bp->b_npages = 0;
                if (tbp->b_wcred != NOCRED)
                        bp->b_wcred = crhold(tbp->b_wcred);

                bp->b_blkno = tbp->b_blkno;
                bp->b_lblkno = tbp->b_lblkno;
                bp->b_offset = tbp->b_offset;

                /*
                 * We are synthesizing a buffer out of vm_page_t's, but
                 * if the block size is not page aligned then the starting
                 * address may not be either.  Inherit the b_data offset
                 * from the original buffer.
                 */
                if ((gbflags & GB_UNMAPPED) == 0 ||
                    (tbp->b_flags & B_VMIO) == 0) {
                        bp->b_data = (char *)((vm_offset_t)bp->b_data |
                            ((vm_offset_t)tbp->b_data & PAGE_MASK));
                } else {
                        bp->b_data = unmapped_buf;
                }
                bp->b_flags |= B_CLUSTER | (tbp->b_flags & (B_VMIO |
                    B_NEEDCOMMIT));
                bp->b_iodone = cluster_callback;
                pbgetvp(vp, bp);
                /*
                 * From this location in the file, scan forward to see
                 * if there are buffers with adjacent data that need to
                 * be written as well.
                 */
                for (i = 0; i < len; ++i, ++start_lbn) {
                        if (i != 0) { /* If not the first buffer */
                                /*
                                 * If the adjacent data is not even in core it
                                 * can't need to be written.
                                 */
                                BO_LOCK(bo);
                                if ((tbp = gbincore(bo, start_lbn)) == NULL ||
                                    (tbp->b_vflags & BV_BKGRDINPROG)) {
                                        BO_UNLOCK(bo);
                                        break;
                                }

                                /*
                                 * If it IS in core, but has different
                                 * characteristics, or is locked (which
                                 * means it could be undergoing a background
                                 * I/O or be in a weird state), then don't
                                 * cluster with it.
                                 */
                                if (BUF_LOCK(tbp,
                                    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
                                    BO_LOCKPTR(bo)))
                                        break;

                                if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
                                    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
                                    != (B_DELWRI | B_CLUSTEROK |
                                    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
                                    tbp->b_wcred != bp->b_wcred) {
                                        BUF_UNLOCK(tbp);
                                        break;
                                }

                                /*
                                 * Check that the combined cluster
                                 * would make sense with regard to pages
                                 * and would not be too large
                                 */
                                if ((tbp->b_bcount != size) ||
                                    ((bp->b_blkno + (dbsize * i)) !=
                                    tbp->b_blkno) ||
                                    ((tbp->b_npages + bp->b_npages) >
                                    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
                                        BUF_UNLOCK(tbp);
                                        break;
                                }

                                /*
                                 * Ok, it's passed all the tests,
                                 * so remove it from the free list
                                 * and mark it busy. We will use it.
                                 */
                                bremfree(tbp);
                                tbp->b_flags &= ~B_DONE;
                        } /* end of code for non-first buffers only */
                        /*
                         * If the IO is via the VM then we do some
                         * special VM hackery (yuck).  Since the buffer's
                         * block size may not be page-aligned it is possible
                         * for a page to be shared between two buffers.  We
                         * have to get rid of the duplication when building
                         * the cluster.
                         */
                        if (tbp->b_flags & B_VMIO) {
                                vm_page_t m;

                                if (i == 0) {
                                        vfs_busy_pages_acquire(tbp);
                                } else { /* if not first buffer */
                                        for (j = 0; j < tbp->b_npages; j += 1) {
                                                m = tbp->b_pages[j];
                                                if (vm_page_trysbusy(m) == 0) {
                                                        for (j--; j >= 0; j--)
                                                                vm_page_sunbusy(
                                                                    tbp->b_pages[j]);
                                                        bqrelse(tbp);
                                                        goto finishcluster;
                                                }
                                        }
                                }
                                vm_object_pip_add(tbp->b_bufobj->bo_object,
                                    tbp->b_npages);
                                for (j = 0; j < tbp->b_npages; j += 1) {
                                        m = tbp->b_pages[j];
                                        if ((bp->b_npages == 0) ||
                                            (bp->b_pages[bp->b_npages - 1] != m)) {
                                                bp->b_pages[bp->b_npages] = m;
                                                bp->b_npages++;
                                        }
                                }
                        }
                        bp->b_bcount += size;
                        bp->b_bufsize += size;
                        /*
                         * If any of the clustered buffers have their
                         * B_BARRIER flag set, transfer that request to
                         * the cluster.
                         */
                        bp->b_flags |= (tbp->b_flags & B_BARRIER);
                        tbp->b_flags &= ~(B_DONE | B_BARRIER);
                        tbp->b_flags |= B_ASYNC;
                        tbp->b_ioflags &= ~BIO_ERROR;
                        tbp->b_iocmd = BIO_WRITE;
                        bundirty(tbp);
                        reassignbuf(tbp);               /* put on clean list */
                        bufobj_wref(tbp->b_bufobj);
                        BUF_KERNPROC(tbp);
                        buf_track(tbp, __func__);
                        TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
                                tbp, b_cluster.cluster_entry);
                }
        finishcluster:
                if (buf_mapped(bp)) {
                        pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
                            (vm_page_t *)bp->b_pages, bp->b_npages);
                }
                if (bp->b_bufsize > bp->b_kvasize)
                        panic(
                            "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
                            bp->b_bufsize, bp->b_kvasize);
                totalwritten += bp->b_bufsize;
                bp->b_dirtyoff = 0;
                bp->b_dirtyend = bp->b_bufsize;
                bawrite(bp);

                len -= i;
        }
        return (totalwritten);
}
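
/*
 * Example: a caller that has located a run of contiguous dirty blocks
 * (much as vfs_bio_awrite() does) can push them through the clustering
 * code in one call.  Illustrative sketch only; "size", "start_lbn" and
 * "ncl" are assumed to come from the caller's own scan:
 *
 *      // Write up to ncl delayed-write blocks starting at start_lbn as
 *      // one or more clusters; the return value is the number of bytes
 *      // actually queued for write.
 *      nwritten = cluster_wbuild(vp, size, start_lbn, ncl, 0);
 */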

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int gbflags)
{
        struct cluster_save *buflist;
        struct buf *bp;
        daddr_t lbn;
        int i, j, len, error;

        len = vp->v_lastw - vp->v_cstart + 1;
        buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
            M_SEGMENT, M_WAITOK);
        buflist->bs_nchildren = 0;
        buflist->bs_children = (struct buf **) (buflist + 1);
        for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
                error = bread_gb(vp, lbn, last_bp->b_bcount, NOCRED,
                    gbflags, &bp);
                if (error != 0) {
                        /*
                         * If read fails, release collected buffers
                         * and return failure.
                         */
                        for (j = 0; j < i; j++)
                                brelse(buflist->bs_children[j]);
                        free(buflist, M_SEGMENT);
                        return (NULL);
                }
                buflist->bs_children[i] = bp;
                if (bp->b_blkno == bp->b_lblkno)
                        VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
                                NULL, NULL);
        }
        buflist->bs_children[i] = bp = last_bp;
        if (bp->b_blkno == bp->b_lblkno)
                VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
        buflist->bs_nchildren = i + 1;
        return (buflist);
}