/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)buf.h	8.9 (Berkeley) 3/30/95
 */

#ifndef _SYS_BUF_H_
#define	_SYS_BUF_H_
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>

struct bio;
struct buf;
struct mount;
struct vnode;
/*
 * To avoid including <ufs/ffs/softdep.h>
 */
LIST_HEAD(workhead, worklist);
/*
 * These are currently used only by the soft dependency code, hence
 * are stored once in a global variable.  If other subsystems wanted
 * to use these hooks, a pointer to a set of bio_ops could be added
 * to each buffer.
 */
extern struct bio_ops {
	void	(*io_start)(struct buf *);
	void	(*io_complete)(struct buf *);
	void	(*io_deallocate)(struct buf *);
	void	(*io_movedeps)(struct buf *, struct buf *);
	int	(*io_countdeps)(struct buf *, int);
} bioops;
struct buf_ops {
	char	*bop_name;
	int	(*bop_write)(struct buf *);
};

extern struct buf_ops buf_ops_bio;

struct vm_object;

typedef unsigned char b_xflags_t;
/*
 * The buffer header describes an I/O operation in the kernel.
 *
 * NOTES:
 *	b_bufsize, b_bcount.  b_bufsize is the allocation size of the
 *	buffer, either DEV_BSIZE or PAGE_SIZE aligned.  b_bcount is the
 *	originally requested buffer size and can serve as a bounds check
 *	against EOF.  For most, but not all uses, b_bcount == b_bufsize.
 *
 *	b_dirtyoff, b_dirtyend.  Buffers support piecemeal, unaligned
 *	ranges of dirty data that need to be written to backing store.
 *	The range is typically clipped at b_bcount (not b_bufsize).
 *
 *	b_resid.  Number of bytes remaining in I/O.  After an I/O operation
 *	completes, b_resid is usually 0 indicating 100% success.
 *
 * All fields are protected by the buffer lock except those marked:
 *	V - Protected by owning vnode lock
 *	Q - Protected by the buf queue lock
 *	D - Protected by a dependency implementation specific lock
 */
struct buf {
	struct bio b_io;		/* "Builtin" I/O request. */
#define	b_bcount	b_io.bio_bcount
#define	b_blkno		b_io.bio_blkno
#define	b_caller1	b_io.bio_caller1
#define	b_caller2	b_io.bio_caller2
#define	b_data		b_io.bio_data
#define	b_dev		b_io.bio_dev
#define	b_driver1	b_io.bio_driver1
#define	b_driver2	b_io.bio_driver2
#define	b_error		b_io.bio_error
#define	b_iocmd		b_io.bio_cmd
#define	b_ioflags	b_io.bio_flags
#define	b_pblkno	b_io.bio_pblkno
#define	b_resid		b_io.bio_resid
	struct buf_ops	*b_op;
	unsigned	b_magic;
#define	B_MAGIC_BIO	0x10b10b10
#define	B_MAGIC_NFS	0x67238234
	void	(*b_iodone)(struct buf *);
	off_t	b_offset;		/* Offset into file. */
	TAILQ_ENTRY(buf) b_vnbufs;	/* (V) Buffer's associated vnode. */
	struct buf	*b_left;	/* (V) splay tree link */
	struct buf	*b_right;	/* (V) splay tree link */
	uint32_t	b_vflags;	/* (V) BV_* flags */
	TAILQ_ENTRY(buf) b_freelist;	/* (Q) Free list position inactive. */
	unsigned short	b_qindex;	/* (Q) buffer queue index */
	uint32_t	b_flags;	/* B_* flags. */
	b_xflags_t	b_xflags;	/* extra flags */
	struct lock	b_lock;		/* Buffer lock */
	long	b_bufsize;		/* Allocated buffer size. */
	long	b_runningbufspace;	/* when I/O is running, pipelining */
	caddr_t	b_kvabase;		/* base kva for buffer */
	int	b_kvasize;		/* size of kva for buffer */
	daddr_t	b_lblkno;		/* Logical block number. */
	struct vnode	*b_vp;		/* Device vnode. */
	struct vm_object *b_object;	/* Object for vp */
	int	b_dirtyoff;		/* Offset in buffer of dirty region. */
	int	b_dirtyend;		/* Offset of end of dirty region. */
	struct ucred	*b_rcred;	/* Read credentials reference. */
	struct ucred	*b_wcred;	/* Write credentials reference. */
	void	*b_saveaddr;		/* Original b_addr for physio. */
	union	pager_info {
		int	pg_reqpage;
	} b_pager;
	union	cluster_info {
		TAILQ_HEAD(cluster_list_head, buf) cluster_head;
		TAILQ_ENTRY(buf) cluster_entry;
	} b_cluster;
	struct vm_page	*b_pages[btoc(MAXPHYS)];
	int	b_npages;		/* Current number of pages. */
	struct workhead	b_dep;		/* (D) List of filesystem dependencies. */
};
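
/*
 * Example (illustrative sketch, not part of the original header): keeping
 * the piecemeal dirty range described above within b_bcount rather than
 * b_bufsize.  The helper name is hypothetical.
 */
static __inline void
buf_example_clip_dirty(struct buf *bp)
{

	/* The dirty range is clipped at b_bcount (not b_bufsize). */
	if (bp->b_dirtyend > bp->b_bcount)
		bp->b_dirtyend = bp->b_bcount;
	if (bp->b_dirtyoff >= bp->b_dirtyend)
		bp->b_dirtyoff = bp->b_dirtyend = 0;	/* nothing dirty */
}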
/*
 * These flags are kept in b_flags.
 *
 * Notes:
 *
 *	B_ASYNC		VOP calls on bp's are usually async whether or not
 *			B_ASYNC is set, but some subsystems, such as NFS, like
 *			to know what is best for the caller so they can
 *			optimize the I/O.
 *
 *	B_PAGING	Indicates that bp is being used by the paging system
 *			and that the bp is not linked into the b_vp's
 *			clean/dirty linked lists or ref counts.
 *			Buffer vp reassignments are illegal in this case.
 *
 *	B_CACHE		This may only be set if the buffer is entirely valid.
 *			The situation where B_DELWRI is set and B_CACHE is
 *			clear MUST be committed to disk by getblk() so
 *			B_DELWRI can also be cleared.  See the comments for
 *			getblk() in kern/vfs_bio.c.  If B_CACHE is clear,
 *			the caller is expected to clear BIO_ERROR and B_INVAL,
 *			set BIO_READ, and initiate an I/O.
 *
 *			The 'entire buffer' is defined to be the range from
 *			0 through b_bcount.
 *
 *	B_MALLOC	Request that the buffer be allocated from the malloc
 *			pool, DEV_BSIZE aligned instead of PAGE_SIZE aligned.
 *
 *	B_CLUSTEROK	This flag is typically set for B_DELWRI buffers
 *			by filesystems that allow clustering when the buffer
 *			is fully dirty and indicates that it may be clustered
 *			with other adjacent dirty buffers.  Note the clustering
 *			may not be used with the stage 1 data write under NFS
 *			but may be used for the commit rpc portion.
 *
 *	B_VMIO		Indicates that the buffer is tied into a VM object.
 *			The buffer's data is always PAGE_SIZE aligned even
 *			if b_bufsize and b_bcount are not.  (b_bufsize is
 *			always at least DEV_BSIZE aligned, though.)
 *
 *	B_DIRECT	Hint that we should attempt to completely free
 *			the pages underlying the buffer.  B_DIRECT is
 *			sticky until the buffer is released and typically
 *			only has an effect when B_RELBUF is also set.
 */
#define	B_AGE		0x00000001	/* Move to age queue when I/O done. */
#define	B_NEEDCOMMIT	0x00000002	/* Append-write in progress. */
#define	B_ASYNC		0x00000004	/* Start I/O, do not wait. */
#define	B_DIRECT	0x00000008	/* direct I/O flag (pls free vmio) */
#define	B_DEFERRED	0x00000010	/* Skipped over for cleaning */
#define	B_CACHE		0x00000020	/* Bread found us in the cache. */
#define	B_VALIDSUSPWRT	0x00000040	/* Valid write during suspension. */
#define	B_DELWRI	0x00000080	/* Delay I/O until buffer reused. */
#define	B_00000100	0x00000100	/* Available flag. */
#define	B_DONE		0x00000200	/* I/O completed. */
#define	B_EINTR		0x00000400	/* I/O was interrupted */
#define	B_00000800	0x00000800	/* Available flag. */
#define	B_00001000	0x00001000	/* Available flag. */
#define	B_INVAL		0x00002000	/* Does not contain valid info. */
#define	B_00004000	0x00004000	/* Available flag. */
#define	B_NOCACHE	0x00008000	/* Do not cache block after use. */
#define	B_MALLOC	0x00010000	/* malloced b_data */
#define	B_CLUSTEROK	0x00020000	/* May be clustered with adjacent bufs. */
#define	B_PHYS		0x00040000	/* I/O to user memory. */
#define	B_00080000	0x00080000	/* Available flag. */
#define	B_00100000	0x00100000	/* Available flag. */
#define	B_DIRTY		0x00200000	/* Needs writing later (in EXT2FS). */
#define	B_RELBUF	0x00400000	/* Release VMIO buffer. */
#define	B_00800000	0x00800000	/* Available flag. */
#define	B_WRITEINPROG	0x01000000	/* Write in progress. */
#define	B_02000000	0x02000000	/* Available flag. */
#define	B_PAGING	0x04000000	/* volatile paging I/O -- bypass VMIO */
#define	B_08000000	0x08000000	/* Available flag. */
#define	B_RAM		0x10000000	/* Read ahead mark (flag) */
#define	B_VMIO		0x20000000	/* VMIO flag */
#define	B_CLUSTER	0x40000000	/* pagein op, so swap() can count it */
#define	B_80000000	0x80000000	/* Available flag. */
#define	PRINT_BUF_FLAGS "\20\40b31\37cluster\36vmio\35ram\34b27" \
	"\33paging\32b25\31writeinprog\30b23\27relbuf\26dirty\25b20" \
	"\24b19\23phys\22clusterok\21malloc\20nocache\17b14\16inval" \
	"\15b12\14b11\13eintr\12done\11b8\10delwri\7validsuspwrt" \
	"\6cache\5deferred\4direct\3async\2needcommit\1age"
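
/*
 * Example (illustrative sketch, not part of the original header): testing
 * the B_CACHE/B_INVAL rules documented above.  A getblk() caller that sees
 * B_CACHE clear must clear B_INVAL, set BIO_READ in b_iocmd, and start an
 * I/O to fill the buffer.  The helper name is hypothetical.
 */
static __inline int
buf_example_needs_read(struct buf *bp)
{

	/* The buffer is fully valid only if B_CACHE is set without B_INVAL. */
	return ((bp->b_flags & (B_CACHE | B_INVAL)) != B_CACHE);
}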
/*
 * These flags are kept in b_xflags.
 */
#define	BX_VNDIRTY	0x00000001	/* On vnode dirty list */
#define	BX_VNCLEAN	0x00000002	/* On vnode clean list */
#define	BX_BKGRDWRITE	0x00000010	/* Do writes in background */
#define	BX_BKGRDMARKER	0x00000020	/* Mark buffer for splay tree */
#define	BX_ALTDATA	0x00000040	/* Holds extended data */
#define	NOOFFSET	(-1LL)		/* No buffer offset calculated yet */

/*
 * These flags are kept in b_vflags.
 */
#define	BV_SCANNED	0x00000001	/* VOP_FSYNC funcs mark written bufs */
#define	BV_BKGRDINPROG	0x00000002	/* Background write in progress */
#define	BV_BKGRDWAIT	0x00000004	/* Background write waiting */
#ifdef _KERNEL

/*
 * Buffer locking
 */
extern const char *buf_wmesg;		/* Default buffer lock message */
#define	BUF_WMESG "bufwait"
#include <sys/proc.h>			/* XXX for curthread */
#include <sys/mutex.h>

/*
 * Initialize a lock.
 */
#define	BUF_LOCKINIT(bp) \
	lockinit(&(bp)->b_lock, PRIBIO + 4, buf_wmesg, 0, 0)
/*
 * Get a lock sleeping non-interruptibly until it becomes available.
 */
static __inline int BUF_LOCK(struct buf *, int, struct mtx *);
static __inline int
BUF_LOCK(struct buf *bp, int locktype, struct mtx *interlock)
{
	int ret;

	mtx_lock(bp->b_lock.lk_interlock);
	locktype |= LK_INTERNAL;
	bp->b_lock.lk_wmesg = buf_wmesg;
	bp->b_lock.lk_prio = PRIBIO + 4;
	ret = lockmgr(&(bp)->b_lock, locktype, interlock, curthread);
	return (ret);
}
/*
 * Get a lock sleeping with specified interruptibility and timeout.
 */
static __inline int BUF_TIMELOCK(struct buf *, int, struct mtx *,
    char *, int, int);
static __inline int
BUF_TIMELOCK(struct buf *bp, int locktype, struct mtx *interlock,
    char *wmesg, int catch, int timo)
{
	int ret;

	mtx_lock(bp->b_lock.lk_interlock);
	locktype |= LK_INTERNAL | LK_TIMELOCK;
	bp->b_lock.lk_wmesg = wmesg;
	bp->b_lock.lk_prio = (PRIBIO + 4) | catch;
	bp->b_lock.lk_timo = timo;
	ret = lockmgr(&(bp)->b_lock, (locktype), interlock, curthread);
	return (ret);
}
/*
 * Release a lock.  Only the acquiring process may free the lock unless
 * it has been handed off to biodone.
 */
static __inline void BUF_UNLOCK(struct buf *);
static __inline void
BUF_UNLOCK(struct buf *bp)
{

	lockmgr(&(bp)->b_lock, LK_RELEASE, NULL, curthread);
}
/*
 * Free a buffer lock.
 */
#define	BUF_LOCKFREE(bp)			\
do {						\
	if (BUF_REFCNT(bp) > 0)			\
		panic("free locked buf");	\
	lockdestroy(&(bp)->b_lock);		\
} while (0)
#ifdef _SYS_PROC_H_	/* Avoid #include <sys/proc.h> pollution */
/*
 * When initiating asynchronous I/O, change ownership of the lock to the
 * kernel.  Once done, the lock may legally be released by biodone.  The
 * original owning process can no longer acquire it recursively, but must
 * wait until the I/O is completed and the lock has been freed by biodone.
 */
static __inline void BUF_KERNPROC(struct buf *);
static __inline void
BUF_KERNPROC(struct buf *bp)
{
	struct thread *td = curthread;

	if ((td != PCPU_GET(idlethread))
	    && bp->b_lock.lk_lockholder == td)
		td->td_locks--;
	bp->b_lock.lk_lockholder = LK_KERNPROC;
}
#endif
/*
 * Find out the number of references to a lock.
 */
static __inline int BUF_REFCNT(struct buf *);
static __inline int
BUF_REFCNT(struct buf *bp)
{
	int ret;

	/*
	 * When the system is panicking, the lock manager grants all lock
	 * requests whether or not the lock is available. To avoid "unlocked
	 * buffer" panics after a crash, we just claim that all buffers
	 * are locked when cleaning up after a system panic.
	 */
	if (panicstr != NULL)
		return (1);
	ret = lockcount(&(bp)->b_lock);
	return (ret);
}
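
/*
 * Example (illustrative sketch, not part of the original header): the
 * typical lifecycle of a buffer lock using the primitives above.
 * LK_EXCLUSIVE comes from <sys/lockmgr.h>; the function name is
 * hypothetical.
 */
static __inline void
buf_example_lock_cycle(struct buf *bp)
{

	BUF_LOCKINIT(bp);			/* once, when the buf is created */
	(void)BUF_LOCK(bp, LK_EXCLUSIVE, NULL);	/* sleep until available */
	/* ... examine or modify the locked buffer ... */
	BUF_UNLOCK(bp);				/* owner releases the lock */
	BUF_LOCKFREE(bp);			/* once, before the buf is freed */
}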
struct buf_queue_head {
	TAILQ_HEAD(buf_queue, buf) queue;
	daddr_t last_pblkno;
	struct	buf *insert_point;
	struct	buf *switch_point;
};
/*
 * This structure describes a clustered I/O.  It is stored in the b_saveaddr
 * field of the buffer on which I/O is done.  At I/O completion, cluster
 * callback uses the structure to parcel I/O's to individual buffers, and
 * then frees this structure.
 */
struct cluster_save {
	long	bs_bcount;		/* Saved b_bcount. */
	long	bs_bufsize;		/* Saved b_bufsize. */
	void	*bs_saveaddr;		/* Saved b_addr. */
	int	bs_nchildren;		/* Number of associated buffers. */
	struct buf **bs_children;	/* List of associated buffers. */
};
#define	BUF_WRITE(bp)					\
	(bp)->b_op->bop_write(bp)
static __inline void
buf_start(struct buf *bp)
{
	if (bioops.io_start)
		(*bioops.io_start)(bp);
}

static __inline void
buf_complete(struct buf *bp)
{
	if (bioops.io_complete)
		(*bioops.io_complete)(bp);
}
static __inline void
buf_deallocate(struct buf *bp)
{
	if (bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);
	BUF_LOCKFREE(bp);
}
static __inline void
buf_movedeps(struct buf *bp, struct buf *bp2)
{
	if (bioops.io_movedeps)
		(*bioops.io_movedeps)(bp, bp2);
}
static __inline int
buf_countdeps(struct buf *bp, int i)
{
	if (bioops.io_countdeps)
		return ((*bioops.io_countdeps)(bp, i));
	else
		return (0);
}
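
/*
 * Example (illustrative sketch, not part of the original header): a
 * dependency subsystem such as soft updates hooks the buffer cache by
 * filling in the global bioops table at initialization time; the hooks
 * are then invoked through the wrappers above.  The handler parameters
 * are hypothetical.
 */
static __inline void
buf_example_install_bioops(void (*start)(struct buf *),
    void (*complete)(struct buf *))
{

	bioops.io_start = start;	/* invoked by buf_start() */
	bioops.io_complete = complete;	/* invoked by buf_complete() */
}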
/*
 * Zero out the buffer's data area.
 */
#define	clrbuf(bp) {							\
	bzero((bp)->b_data, (u_int)(bp)->b_bcount);			\
	(bp)->b_resid = 0;						\
}
/*
 * Flags for getblk's last parameter.
 */
#define	GB_LOCK_NOWAIT	0x0001		/* Fail if we block on a buf lock. */
#define	GB_NOCREAT	0x0002		/* Don't create a buf if not found. */
extern int	nbuf;			/* The number of buffer headers */
extern int	maxswzone;		/* Max KVA for swap structures */
extern int	maxbcache;		/* Max KVA for buffer cache */
extern int	runningbufspace;
extern int	buf_maxio;		/* nominal maximum I/O for buffer */
extern struct	buf *buf;		/* The buffer headers. */
extern char	*buffers;		/* The buffer contents. */
extern int	bufpages;		/* Number of memory pages in the buffer pool. */
extern struct	buf *swbuf;		/* Swap I/O buffer headers. */
extern int	nswbuf;			/* Number of swap I/O buffer headers. */
caddr_t	kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est);

void	bwillwrite(void);
int	buf_dirty_count_severe(void);
void	bremfree(struct buf *);
int	bread(struct vnode *, daddr_t, int, struct ucred *, struct buf **);
int	breadn(struct vnode *, daddr_t, int, daddr_t *, int *, int,
	    struct ucred *, struct buf **);
int	bwrite(struct buf *);
void	bdwrite(struct buf *);
void	bawrite(struct buf *);
void	bdirty(struct buf *);
void	bundirty(struct buf *);
void	brelse(struct buf *);
void	bqrelse(struct buf *);
int	vfs_bio_awrite(struct buf *);
struct buf *getpbuf(int *);
struct buf *incore(struct vnode *, daddr_t);
struct buf *gbincore(struct vnode *, daddr_t);
int	inmem(struct vnode *, daddr_t);
struct buf *getblk(struct vnode *, daddr_t, int, int, int, int);
struct buf *geteblk(int);
int	bufwait(struct buf *);
void	bufdone(struct buf *);
void	bufdonebio(struct bio *);

void	cluster_callback(struct buf *);
int	cluster_read(struct vnode *, u_quad_t, daddr_t, long,
	    struct ucred *, long, int, struct buf **);
int	cluster_wbuild(struct vnode *, long, daddr_t, int);
void	cluster_write(struct buf *, u_quad_t, int);
void	vfs_bio_set_validclean(struct buf *, int base, int size);
void	vfs_bio_clrbuf(struct buf *);
void	vfs_busy_pages(struct buf *, int clear_modify);
void	vfs_unbusy_pages(struct buf *);
void	vwakeup(struct buf *);
int	vmapbuf(struct buf *);
void	vunmapbuf(struct buf *);
void	relpbuf(struct buf *, int *);
void	brelvp(struct buf *);
void	bgetvp(struct vnode *, struct buf *);
void	pbgetvp(struct vnode *, struct buf *);
void	pbrelvp(struct buf *);
int	allocbuf(struct buf *bp, int size);
void	reassignbuf(struct buf *, struct vnode *);
struct buf *trypbuf(int *);
void	bwait(struct buf *, u_char, const char *);
void	bdone(struct buf *);
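
/*
 * Example (illustrative sketch, not part of the original header): the
 * classic read-modify-write cycle built from the interfaces declared
 * above.  NOCRED comes from <sys/ucred.h>; the function name is
 * hypothetical.
 */
static __inline int
buf_example_rmw(struct vnode *vp, daddr_t lblkno, int size)
{
	struct buf *bp;
	int error;

	error = bread(vp, lblkno, size, NOCRED, &bp);
	if (error) {
		brelse(bp);		/* callers release the buffer on error */
		return (error);
	}
	/* ... modify bp->b_data here ... */
	return (bwrite(bp));		/* or bdwrite(bp) for a delayed write */
}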
#endif /* _KERNEL */

#endif /* !_SYS_BUF_H_ */