/*
 * Copyright (c) 1998 Matthew Dillon,
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	New Swap System
 *	Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_zone.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#define SWM_FREE	0x02	/* free, period			*/
#define SWM_POP		0x04	/* pop out			*/

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old swap system.
 */
extern int vm_swap_size;	/* number of free swap blocks, in pages */

int swap_pager_full;		/* swap space exhaustion (task killing)	*/
static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis) */
static int nsw_rcount;		/* free read buffers			*/
static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/

struct blist *swapblist;
static struct swblock **swhash;
static int swhash_mask;
static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
static struct sx sw_alloc_sx;

extern struct vnode *swapdev_vp;
extern struct swdevt *swdevt;

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");

#define BLK2DEVIDX(blk) (nswdev > 1 ? blk / dmmax % nswdev : 0)

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
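
/*
 * Illustrative sketch (not from the original source): how a handle is
 * hashed onto a list.  With NOBJLISTS = 8, a handle whose pointer value
 * is 0x12345670 selects bucket (0x12345670 >> 4) & 7 == 0x1234567 & 7
 * == 7, i.e. &swap_pager_object_list[7].  The >> 4 discards low
 * alignment bits that are identical for most handles.
 */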

static struct mtx sw_alloc_mtx;	/* protect list manipulation */
static struct pagerlst	swap_pager_object_list[NOBJLISTS];
struct pagerlst		swap_pager_un_object_list;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */
static vm_object_t
	swap_pager_alloc __P((void *handle, vm_ooffset_t size,
			      vm_prot_t prot, vm_ooffset_t offset));
static void	swap_pager_dealloc __P((vm_object_t object));
static int	swap_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static void	swap_pager_init __P((void));
static void	swap_pager_unswapped __P((vm_page_t));
static void	swap_pager_strategy __P((vm_object_t, struct bio *));

struct pagerops swappagerops = {
	swap_pager_init,	/* early system initialization of pager	*/
	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	swap_pager_getpages,	/* pagein				*/
	swap_pager_putpages,	/* pageout				*/
	swap_pager_haspage,	/* get backing store status for page	*/
	swap_pager_unswapped,	/* remove swap related to page		*/
	swap_pager_strategy	/* pager strategy call			*/
};

static struct buf *getchainbuf(struct bio *bp, struct vnode *vp, int flags);
static void flushchainbuf(struct buf *nbp);
static void waitchainbuf(struct bio *bp, int count, int done);

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */
static int dmmax;
static int dmmax_mask;
int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */

SYSCTL_INT(_vm, OID_AUTO, dmmax,
	CTLFLAG_RD, &dmmax, 0, "Maximum size of a swap block");
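
/*
 * Worked example (illustrative, not from the original source): how
 * dmmax_mask detects a device-stripe crossing.  With SWB_NPAGES = 16 we
 * get dmmax = 32 and dmmax_mask = ~31.  An allocation of n = 4 pages
 * starting at blk = 30 gives ((blk ^ (blk + n)) & dmmax_mask) != 0, so
 * the I/O crosses a stripe boundary and must be split at
 * j = ((blk + dmmax) & dmmax_mask) - blk = 32 - 30 = 2 pages in.
 */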

static __inline void	swp_sizecheck __P((void));
static void	swp_pager_sync_iodone __P((struct buf *bp));
static void	swp_pager_async_iodone __P((struct buf *bp));

/*
 * Swap bitmap functions
 */
static __inline void	swp_pager_freeswapspace __P((daddr_t blk, int npages));
static __inline daddr_t	swp_pager_getswapspace __P((int npages));

/*
 * Metadata functions
 */
static void swp_pager_meta_build __P((vm_object_t, vm_pindex_t, daddr_t));
static void swp_pager_meta_free __P((vm_object_t, vm_pindex_t, daddr_t));
static void swp_pager_meta_free_all __P((vm_object_t));
static daddr_t swp_pager_meta_ctl __P((vm_object_t, vm_pindex_t, int));

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 *	This routine must be called at splvm()
 */
static __inline void
swp_sizecheck()
{
	if (vm_swap_size < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			printf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (vm_swap_size > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}
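
/*
 * Worked example (illustrative, not from the original source): with the
 * defaults nswap_lowat = 128 and nswap_hiwat = 512, the "almost full"
 * warning latches once fewer than 128 free pages remain and only clears
 * again above 512 pages, so a workload oscillating around one threshold
 * cannot make the console message flap.
 */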

/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */
static void
swap_pager_init()
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	TAILQ_INIT(&swap_pager_un_object_list);
	mtx_init(&sw_alloc_mtx, "swap_pager list", MTX_DEF);

	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */
	dmmax = SWB_NPAGES * 2;
	dmmax_mask = ~(dmmax - 1);
}

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */
void
swap_pager_swap_init()
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the bp->b_pages[]
	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */
	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	mtx_lock(&pbuf_mtx);
	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;
	mtx_unlock(&pbuf_mtx);

	/*
	 * Initialize our zone.  Right now I'm just guessing on the number
	 * we need based on the number of pages in the system.  Each swblock
	 * can hold 16 pages, so this is probably overkill.  This reservation
	 * is typically limited to around 70MB by default.
	 */
	n = cnt.v_page_count;
	if (maxswzone && n > maxswzone / sizeof(struct swblock))
		n = maxswzone / sizeof(struct swblock);
	n2 = n;
	do {
		swap_zone = zinit(
		    "SWAPMETA",
		    sizeof(struct swblock),
		    n,
		    ZONE_INTERRUPT,
		    1);
		if (swap_zone != NULL)
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);
	if (swap_zone == NULL)
		panic("failed to zinit swap_zone.");
	if (n2 != n)
		printf("Swap zone entries reduced from %d to %d.\n", n2, n);
	n2 = n;

	/*
	 * Initialize our meta-data hash table.  The swapper does not need to
	 * be quite as efficient as the VM system, so we do not use an
	 * oversized hash table.
	 *
	 *	n:		size of hash table, must be power of 2
	 *	swhash_mask:	hash table index mask
	 */
	for (n = 1; n < n2 / 8; n *= 2)
		;
	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO);
	swhash_mask = n - 1;
}
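
/*
 * Worked example (illustrative, not from the original source): if the
 * zone ends up with n2 = 32768 swblock entries, the loop above doubles n
 * until n >= n2 / 8 = 4096, so the table gets 4096 buckets and
 * swhash_mask = 4095.  Each swblock maps a run of pages, so the table is
 * deliberately sized well below one bucket per entry.
 */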

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_build().
 *
 *	This routine may block in vm_object_allocate() and create a named
 *	object lookup race, so we must interlock.   We must also run at
 *	splvm() for the object lookup to handle races with interrupts, but
 *	we do not have to maintain splvm() in between the lookup and the
 *	add because (I believe) it is not possible to attempt to create
 *	a new swap object w/handle when a default object with that handle
 *	already exists.
 */
static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
		 vm_ooffset_t offset)
{
	vm_object_t object;

	if (handle) {
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() with regard to the lookup
		 * of the handle.
		 */
		sx_xlock(&sw_alloc_sx);
		object = vm_pager_object_lookup(NOBJLIST(handle), handle);

		if (object != NULL) {
			vm_object_reference(object);
		} else {
			object = vm_object_allocate(OBJT_DEFAULT,
				OFF_TO_IDX(offset + PAGE_MASK + size));
			object->handle = handle;

			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
		}
		sx_xunlock(&sw_alloc_sx);
	} else {
		object = vm_object_allocate(OBJT_DEFAULT,
			OFF_TO_IDX(offset + PAGE_MASK + size));

		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
	}
	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	This routine may block, but no longer does.
 *
 *	The object must be locked or unreferenceable.
 */
static void
swap_pager_dealloc(object)
	vm_object_t object;
{
	int s;

	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */
	mtx_lock(&sw_alloc_mtx);
	if (object->handle == NULL) {
		TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
	} else {
		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
	}
	mtx_unlock(&sw_alloc_mtx);

	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	s = splvm();
	swp_pager_meta_free_all(object);
	splx(s);
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	Must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */
static __inline daddr_t
swp_pager_getswapspace(npages)
	int npages;
{
	daddr_t blk;

	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
		if (swap_pager_full != 2) {
			printf("swap_pager_getswapspace: failed\n");
			swap_pager_full = 2;
			swap_pager_almost_full = 1;
		}
	} else {
		vm_swap_size -= npages;
		/* per-swap area stats */
		swdevt[BLK2DEVIDX(blk)].sw_used += npages;
		swp_sizecheck();
	}
	return(blk);
}

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */
static __inline void
swp_pager_freeswapspace(blk, npages)
	daddr_t blk;
	int npages;
{
	blist_free(swapblist, blk, npages);
	vm_swap_size += npages;
	/* per-swap area stats */
	swdevt[BLK2DEVIDX(blk)].sw_used -= npages;
	swp_sizecheck();
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	This routine may be called at any spl.  We up our spl to splvm
 *	temporarily in order to perform the metadata removal.
 */
void
swap_pager_freespace(object, start, size)
	vm_object_t object;
	vm_pindex_t start;
	vm_size_t size;
{
	int s = splvm();

	swp_pager_meta_free(object, start, size);
	splx(s);
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int s;
	int n = 0;
	daddr_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	s = splvm();
	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg, start - beg);
					splx(s);
					return(-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	splx(s);
	return(0);
}

/*
 * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	This routine can be called at any spl
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be locked or
 *	inaccessible (XXX are they ?)
 */
void
swap_pager_copy(srcobject, dstobject, offset, destroysource)
	vm_object_t srcobject;
	vm_object_t dstobject;
	vm_pindex_t offset;
	int destroysource;
{
	vm_pindex_t i;
	int s;

	s = splvm();

	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */
	if (destroysource) {
		mtx_lock(&sw_alloc_mtx);
		if (srcobject->handle == NULL) {
			TAILQ_REMOVE(
			    &swap_pager_un_object_list,
			    srcobject,
			    pager_object_list
			);
		} else {
			TAILQ_REMOVE(
			    NOBJLIST(srcobject->handle),
			    srcobject,
			    pager_object_list
			);
		}
		mtx_unlock(&sw_alloc_mtx);
	}

	/*
	 * transfer source to destination.
	 */
	for (i = 0; i < dstobject->size; ++i) {
		daddr_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */
		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			daddr_t srcaddr;

			srcaddr = swp_pager_meta_ctl(
			    srcobject,
			    i + offset,
			    SWM_POP
			);

			if (srcaddr != SWAPBLK_NONE)
				swp_pager_meta_build(dstobject, i, srcaddr);
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the sourceblock.
			 */
			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
	 * double-remove the object from the swap queues.
	 */
	if (destroysource) {
		swp_pager_meta_free_all(srcobject);
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		srcobject->type = OBJT_DEFAULT;
	}
	splx(s);
}

/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 */
boolean_t
swap_pager_haspage(object, pindex, before, after)
	vm_object_t object;
	vm_pindex_t pindex;
	int *before;
	int *after;
{
	daddr_t blk0;
	int s;

	/*
	 * do we have good backing store at the requested index ?
	 */
	s = splvm();
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		splx(s);
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */
	if (before != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			if (i > pindex)
				break;
			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk != blk0 - i)
				break;
		}
		*before = (i - 1);
	}

	/*
	 * find forward-looking contiguous good backing store
	 */
	if (after != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk != blk0 + i)
				break;
		}
		*after = (i - 1);
	}
	splx(s);
	return (TRUE);
}

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not block
 *	This routine must be called at splvm()
 */
static void
swap_pager_unswapped(m)
	vm_page_t m;
{
	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}

/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *	This implements the vm_pager_strategy() interface to swap and allows
 *	other parts of the system to directly access swap as backing store
 *	through vm_objects of type OBJT_SWAP.  This is intended to be a
 *	cacheless interface ( i.e. caching occurs at higher levels ).
 *	Therefore we do not maintain any resident pages.  All I/O goes
 *	directly to and from the swap device.
 *
 *	Note that b_blkno is scaled for PAGE_SIZE
 *
 *	We currently attempt to run I/O synchronously or asynchronously as
 *	the caller requests.  This isn't perfect because we lose error
 *	sequencing when we run multiple ops in parallel to satisfy a request.
 *	But this is swap, so we let it all hang out.
 */
static void
swap_pager_strategy(vm_object_t object, struct bio *bp)
{
	vm_pindex_t start;
	int count;
	int s;
	char *data;
	struct buf *nbp = NULL;

	/* XXX: KASSERT instead ? */
	if (bp->bio_bcount & PAGE_MASK) {
		biofinish(bp, NULL, EINVAL);
		printf("swap_pager_strategy: bp %p blk %d size %d, not page bounded\n", bp, (int)bp->bio_pblkno, (int)bp->bio_bcount);
		return;
	}

	/*
	 * Clear error indication, initialize page index, count, data pointer.
	 */
	bp->bio_error = 0;
	bp->bio_flags &= ~BIO_ERROR;
	bp->bio_resid = bp->bio_bcount;
	*(u_int *) &bp->bio_driver1 = 0;

	start = bp->bio_pblkno;
	count = howmany(bp->bio_bcount, PAGE_SIZE);
	data = bp->bio_data;

	s = splvm();

	/*
	 * Deal with BIO_DELETE
	 */
	if (bp->bio_cmd == BIO_DELETE) {
		/*
		 * FREE PAGE(s) - destroy underlying swap that is no longer
		 *		  needed.
		 */
		swp_pager_meta_free(object, start, count);
		splx(s);
		bp->bio_resid = 0;
		biodone(bp);
		return;
	}

	/*
	 * Execute read or write
	 */
	while (count > 0) {
		daddr_t blk;

		/*
		 * Obtain block.  If block not found and writing, allocate a
		 * new block and build it into the object.
		 */
		blk = swp_pager_meta_ctl(object, start, 0);
		if ((blk == SWAPBLK_NONE) && (bp->bio_cmd == BIO_WRITE)) {
			blk = swp_pager_getswapspace(1);
			if (blk == SWAPBLK_NONE) {
				bp->bio_error = ENOMEM;
				bp->bio_flags |= BIO_ERROR;
				break;
			}
			swp_pager_meta_build(object, start, blk);
		}

		/*
		 * Do we have to flush our current collection?  Yes if:
		 *
		 *	- no swap block at this index
		 *	- swap block is not contiguous
		 *	- we cross a physical disk boundary in the
		 *	  stripe.
		 */
		if (
		    nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk ||
		     ((nbp->b_blkno ^ blk) & dmmax_mask)
		    )
		) {
			splx(s);
			if (bp->bio_cmd == BIO_READ) {
				++cnt.v_swapin;
				cnt.v_swappgsin += btoc(nbp->b_bcount);
			} else {
				++cnt.v_swapout;
				cnt.v_swappgsout += btoc(nbp->b_bcount);
				nbp->b_dirtyend = nbp->b_bcount;
			}
			flushchainbuf(nbp);
			s = splvm();
			nbp = NULL;
		}

		/*
		 * Add new swapblk to nbp, instantiating nbp if necessary.
		 * Zero-fill reads are able to take a shortcut.
		 */
		if (blk == SWAPBLK_NONE) {
			/*
			 * We can only get here if we are reading.  Since
			 * we are at splvm() we can safely modify b_resid,
			 * even if chain ops are in progress.
			 */
			bzero(data, PAGE_SIZE);
			bp->bio_resid -= PAGE_SIZE;
		} else {
			if (nbp == NULL) {
				nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);
				nbp->b_blkno = blk;
				nbp->b_bcount = 0;
				nbp->b_data = data;
			}
			nbp->b_bcount += PAGE_SIZE;
		}
		--count;
		++start;
		data += PAGE_SIZE;
	}

	/*
	 * Flush out last buffer
	 */
	splx(s);

	if (nbp) {
		if (nbp->b_iocmd == BIO_READ) {
			++cnt.v_swapin;
			cnt.v_swappgsin += btoc(nbp->b_bcount);
		} else {
			++cnt.v_swapout;
			cnt.v_swappgsout += btoc(nbp->b_bcount);
			nbp->b_dirtyend = nbp->b_bcount;
		}
		flushchainbuf(nbp);
	}

	/*
	 * Wait for completion.
	 */
	waitchainbuf(bp, 0, 1);
}
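
/*
 * Illustrative sketch (an assumption, not code from this file): a caller
 * reading one page of backing store through the strategy interface.
 * "swapobj" and "buffer" are hypothetical names; error handling and
 * completion synchronization are omitted.
 *
 *	struct bio b;
 *
 *	bzero(&b, sizeof(b));
 *	b.bio_cmd = BIO_READ;
 *	b.bio_pblkno = 0;		// page index within swapobj
 *	b.bio_bcount = PAGE_SIZE;	// must be a multiple of PAGE_SIZE
 *	b.bio_data = buffer;
 *	swap_pager_strategy(swapobj, &b);
 *	// normally reached via vm_pager_strategy(swapobj, &b)
 */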

/*
 * SWAP_PAGER_GETPAGES() - bring pages in from swap
 *
 *	Attempt to retrieve (m, count) pages from backing store, but make
 *	sure we retrieve at least m[reqpage].  We try to load in as large
 *	a chunk surrounding m[reqpage] as is contiguous in swap and which
 *	belongs to the same object.
 *
 *	The code is designed for asynchronous operation and
 *	immediate-notification of 'reqpage' but tends not to be
 *	used that way.  Please do not optimize-out this algorithmic
 *	feature, I intend to improve on it in the future.
 *
 *	The parent has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The parent has BUSY'd the pages.  We should return with 'm'
 *	left busy, but the others adjusted.
 */
static int
swap_pager_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int reqpage;
{
	struct buf *bp;
	vm_page_t mreq;
	int s;
	int i;
	int j;
	int k;
	daddr_t blk;
	vm_offset_t kva;
	vm_pindex_t lastpindex;

	mreq = m[reqpage];

	if (mreq->object != object) {
		panic("swap_pager_getpages: object mismatch %p/%p",
		    object,
		    mreq->object
		);
	}
	/*
	 * Calculate range to retrieve.  The pages have already been assigned
	 * their swapblks.  We require a *contiguous* range that falls entirely
	 * within a single device stripe.   If we do not supply it, bad things
	 * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
	 * loops are set up such that the case(s) are handled implicitly.
	 *
	 * The swp_*() calls must be made at splvm().  vm_page_free() does
	 * not need to be, but it will go a little faster if it is.
	 */
	s = splvm();
	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);

	for (i = reqpage - 1; i >= 0; --i) {
		daddr_t iblk;

		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
		if (blk != iblk + (reqpage - i))
			break;
		if ((blk ^ iblk) & dmmax_mask)
			break;
	}
	++i;

	for (j = reqpage + 1; j < count; ++j) {
		daddr_t jblk;

		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
		if (blk != jblk - (j - reqpage))
			break;
		if ((blk ^ jblk) & dmmax_mask)
			break;
	}

	/*
	 * free pages outside our collection range.   Note: we never free
	 * mreq, it must remain busy throughout.
	 */
	for (k = 0; k < i; ++k)
		vm_page_free(m[k]);
	for (k = j; k < count; ++k)
		vm_page_free(m[k]);
	splx(s);

	/*
	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
	 * still busy, but the others unbusied.
	 */
	if (blk == SWAPBLK_NONE)
		return(VM_PAGER_FAIL);

	/*
	 * Get a swap buffer header to perform the IO
	 */
	bp = getpbuf(&nsw_rcount);
	kva = (vm_offset_t) bp->b_data;

	/*
	 * map our page(s) into kva for input
	 *
	 * NOTE: B_PAGING is set by pbgetvp()
	 */
	pmap_qenter(kva, m + i, j - i);

	bp->b_iocmd = BIO_READ;
	bp->b_iodone = swp_pager_async_iodone;
	bp->b_rcred = crhold(proc0.p_ucred);
	bp->b_wcred = crhold(proc0.p_ucred);
	bp->b_data = (caddr_t) kva;
	bp->b_blkno = blk - (reqpage - i);
	bp->b_bcount = PAGE_SIZE * (j - i);
	bp->b_bufsize = PAGE_SIZE * (j - i);
	bp->b_pager.pg_reqpage = reqpage - i;

	for (k = i; k < j; ++k) {
		bp->b_pages[k - i] = m[k];
		vm_page_flag_set(m[k], PG_SWAPINPROG);
	}
	bp->b_npages = j - i;

	pbgetvp(swapdev_vp, bp);

	cnt.v_swapin++;
	cnt.v_swappgsin += bp->b_npages;

	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */
	vm_object_pip_add(mreq->object, bp->b_npages);
	lastpindex = m[j-1]->pindex;

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 *
	 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
	 */
	BUF_KERNPROC(bp);
	BUF_STRATEGY(bp);

	/*
	 * wait for the page we want to complete.  PG_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 */
	s = splvm();
	while ((mreq->flags & PG_SWAPINPROG) != 0) {
		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
		cnt.v_intrans++;
		if (tsleep(mreq, PSWP, "swread", hz*20)) {
			printf(
			    "swap_pager: indefinite wait buffer: device:"
				" %s, blkno: %ld, size: %ld\n",
			    devtoname(bp->b_dev), (long)bp->b_blkno,
			    (long)bp->b_bcount
			);
		}
	}
	splx(s);

	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */
	if (mreq->valid != VM_PAGE_BITS_ALL) {
		return(VM_PAGER_ERROR);
	} else {
		return(VM_PAGER_OK);
	}

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}

/*
 *	swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in VOP_STRATEGY(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */
void
swap_pager_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int i;
	int n = 0;

	if (count && m[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p",
		    object,
		    m[0]->object
		);
	}
	/*
	 * Turn object into OBJT_SWAP
	 * check for bogus sysops
	 * force sync if not pageout process
	 */
	if (object->type != OBJT_SWAP)
		swp_pager_meta_build(object, 0, SWAPBLK_NONE);

	if (curproc != pageproc)
		sync = TRUE;

	/*
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */
	mtx_lock(&pbuf_mtx);
	if (swap_async_max != nsw_wcount_async_max) {
		int n;
		int s;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf / 2)
			n = nswbuf / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;

		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 */
		s = splvm();
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async += n;
			nsw_wcount_async_max += n;
			wakeup(&nsw_wcount_async);
		}
		splx(s);
	}
	mtx_unlock(&pbuf_mtx);

	/*
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */
	for (i = 0; i < count; i += n) {
		int s;
		int j;
		struct buf *bp;
		daddr_t blk;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */
		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		s = splvm();

		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while (
		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
		    n > 4
		) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			splx(s);
			continue;
		}

		/*
		 * The I/O we are constructing cannot cross a physical
		 * disk boundary in the swap stripe.  Note: we are still
		 * at splvm().
		 */
		if ((blk ^ (blk + n)) & dmmax_mask) {
			j = ((blk + dmmax) & dmmax_mask) - blk;
			swp_pager_freeswapspace(blk + j, n - j);
			n = j;
		}

		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 *
		 * NOTE: B_PAGING is set by pbgetvp()
		 */
		if (sync == TRUE) {
			bp = getpbuf(&nsw_wcount_sync);
		} else {
			bp = getpbuf(&nsw_wcount_async);
			bp->b_flags = B_ASYNC;
		}
		bp->b_iocmd = BIO_WRITE;
		bp->b_spc = NULL;	/* not used, but NULL-out anyway */

		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);

		bp->b_rcred = crhold(proc0.p_ucred);
		bp->b_wcred = crhold(proc0.p_ucred);
		bp->b_bcount = PAGE_SIZE * n;
		bp->b_bufsize = PAGE_SIZE * n;
		bp->b_blkno = blk;

		pbgetvp(swapdev_vp, bp);

		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(
			    mreq->object,
			    mreq->pindex,
			    blk + j
			);
			vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			vm_page_flag_set(mreq, PG_SWAPINPROG);
			bp->b_pages[j] = mreq;
		}
		bp->b_npages = n;
		/*
		 * Must set dirty range for NFS to work.
		 */
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bcount;

		cnt.v_swapout++;
		cnt.v_swappgsout += bp->b_npages;
		swapdev_vp->v_numoutput++;

		splx(s);

		/*
		 * asynchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
		 */
		if (sync == FALSE) {
			bp->b_iodone = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			BUF_STRATEGY(bp);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;
			/* restart outer loop */
			continue;
		}

		/*
		 * synchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
		 */
		bp->b_iodone = swp_pager_sync_iodone;
		BUF_STRATEGY(bp);

		/*
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		s = splbio();
		while ((bp->b_flags & B_DONE) == 0) {
			tsleep(bp, PVM, "swwrt", 0);
		}
		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;
		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */
		swp_pager_async_iodone(bp);
		splx(s);
	}
}

/*
 *	swp_pager_sync_iodone:
 *
 *	Completion routine for synchronous reads and writes from/to swap.
 *	We just mark the bp as complete and wake up anyone waiting on it.
 *
 *	This routine may not block.  This routine is called at splbio() or better.
 */
static void
swp_pager_sync_iodone(bp)
	struct buf *bp;
{
	bp->b_flags |= B_DONE;
	bp->b_flags &= ~B_ASYNC;
	wakeup(bp);
}

/*
 *	swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
 *	the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
 *	unbusy all pages except the 'main' request page.  For WRITE
 *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
 *	because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *	This routine may not block.
 *	This routine is called at splbio() or better
 *
 *	We up ourselves to splvm() as required for various vm_page related
 *	calls.
 */
static void
swp_pager_async_iodone(bp)
	struct buf *bp;
{
	int s;
	int i;
	vm_object_t object = NULL;

	bp->b_flags |= B_DONE;

	/*
	 * report error
	 */
	if (bp->b_ioflags & BIO_ERROR) {
		printf(
		    "swap_pager: I/O error - %s failed; blkno %ld,"
			" size %ld, error %d\n",
		    ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
		    (long)bp->b_blkno,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * set object, raise to splvm().
	 */
	if (bp->b_npages)
		object = bp->b_pages[0]->object;
	s = splvm();

	/*
	 * remove the mapping for kernel virtual
	 */
	pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);

	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */
	for (i = 0; i < bp->b_npages; ++i) {
		vm_page_t m = bp->b_pages[i];

		vm_page_flag_clear(m, PG_SWAPINPROG);

		if (bp->b_ioflags & BIO_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */
			if (bp->b_iocmd == BIO_READ) {
				/*
				 * When reading, reqpage needs to stay
				 * locked for the parent, but all other
				 * pages can be freed.  We still want to
				 * wakeup the parent waiting on the page,
				 * though.  ( also: pg_reqpage can be -1 and
				 * not match anything ).
				 *
				 * We have to wake specifically requested pages
				 * up too because we cleared PG_SWAPINPROG and
				 * someone may be waiting for that.
				 *
				 * NOTE: for reads, m->dirty will probably
				 * be overridden by the original caller of
				 * getpages so don't play cute tricks here.
				 *
				 * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE
				 * AS THIS MESSES WITH object->memq, and it is
				 * not legal to mess with object->memq from an
				 * interrupt.
				 */
				m->valid = 0;
				vm_page_flag_clear(m, PG_ZERO);

				if (i != bp->b_pager.pg_reqpage)
					vm_page_free(m);
				else
					vm_page_flash(m);
				/*
				 * If i == bp->b_pager.pg_reqpage, do not wake
				 * the page up.  The caller needs to.
				 */
			} else {
				/*
				 * If a write error occurs, reactivate page
				 * so it doesn't clog the inactive list,
				 * then finish the I/O.
				 */
				vm_page_dirty(m);
				vm_page_activate(m);
				vm_page_io_finish(m);
			}
		} else if (bp->b_iocmd == BIO_READ) {
			/*
			 * For read success, clear dirty bits.  Nobody should
			 * have this page mapped but don't take any chances,
			 * make sure the pmap modify bits are also cleared.
			 *
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 *
			 * clear PG_ZERO in page.
			 *
			 * If not the requested page then deactivate it.
			 *
			 * Note that the requested page, reqpage, is left
			 * busied, but we still have to wake it up.  The
			 * other pages are released (unbusied) by
			 * vm_page_wakeup().  We do not set reqpage's
			 * valid bits here, it is up to the caller.
			 */
			pmap_clear_modify(m);
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
			vm_page_flag_clear(m, PG_ZERO);

			/*
			 * We have to wake specifically requested pages
			 * up too because we cleared PG_SWAPINPROG and
			 * could be waiting for it in getpages.  However,
			 * be sure to not unbusy getpages specifically
			 * requested page - getpages expects it to be
			 * left busy.
			 */
			if (i != bp->b_pager.pg_reqpage) {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_flash(m);
			}
		} else {
			/*
			 * For write success, clear the modify and dirty
			 * status, then finish the I/O ( which decrements the
			 * busy count and possibly wakes waiter's up ).
			 */
			pmap_clear_modify(m);
			vm_page_undirty(m);
			vm_page_io_finish(m);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
				vm_page_protect(m, VM_PROT_READ);
		}
	}

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */
	if (object)
		vm_object_pip_wakeupn(object, bp->b_npages);

	/*
	 * release the physical I/O buffer
	 */
	relpbuf(
	    bp,
	    ((bp->b_iocmd == BIO_READ) ? &nsw_rcount :
		((bp->b_flags & B_ASYNC) ?
		    &nsw_wcount_async :
		    &nsw_wcount_sync
		)
	    )
	);
	splx(s);
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.  All swp_*() routines must be called at
 *	splvm() because swap can be freed up by the low level vm_page
 *	code which might be called from interrupts beyond what splbio() covers.
 *
 *	Swap metadata is implemented with a global hash and not directly
 *	linked into the object.  Instead the object simply contains
 *	appropriate tracking counters.
 */
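
/*
 * For reference, a sketch of the swblock layout this section relies on
 * (the real definition lives in <vm/swap_pager.h>; the field names below
 * are exactly those used by the routines that follow):
 *
 *	struct swblock {
 *		struct swblock	*swb_hnext;	// hash chain link
 *		vm_object_t	swb_object;	// owning object
 *		vm_pindex_t	swb_index;	// base page index, aligned
 *		int		swb_count;	// valid entries in swb_pages[]
 *		daddr_t		swb_pages[SWAP_META_PAGES];
 *	};
 */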

/*
 * SWP_PAGER_HASH() -	hash swap meta data
 *
 *	This is an inline helper function which hashes the swapblk given
 *	the object and page index.  It returns a pointer to a pointer
 *	to the object, or a pointer to a NULL pointer if it could not
 *	find a swapblk.
 *
 *	This routine must be called at splvm().
 */
static __inline struct swblock **
swp_pager_hash(vm_object_t object, vm_pindex_t index)
{
	struct swblock **pswap;
	struct swblock *swap;

	index &= ~SWAP_META_MASK;
	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];

	while ((swap = *pswap) != NULL) {
		if (swap->swb_object == object &&
		    swap->swb_index == index
		) {
			break;
		}
		pswap = &swap->swb_hnext;
	}
	return(pswap);
}
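
/*
 * Worked example (illustrative, not from the original source): with
 * SWAP_META_PAGES = 16 (so SWAP_META_MASK = 15), a lookup for page index
 * 37 first rounds down to the swblock base index 32, then picks bucket
 * (32 ^ (int)(intptr_t)object) & swhash_mask and walks the swb_hnext
 * chain for an exact (object, 32) match; entry 37 - 32 = 5 of
 * swb_pages[] then holds the page's swap block.
 */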

/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 *
 *	This routine must be called at splvm(), except when used to convert
 *	an OBJT_DEFAULT object into an OBJT_SWAP object.
 */
static void
swp_pager_meta_build(
	vm_object_t object,
	vm_pindex_t index,
	daddr_t swapblk
) {
	struct swblock *swap;
	struct swblock **pswap;
	int i;

	/*
	 * Convert default object to swap object if necessary
	 */
	if (object->type != OBJT_SWAP) {
		object->type = OBJT_SWAP;
		object->un_pager.swp.swp_bcount = 0;

		mtx_lock(&sw_alloc_mtx);
		if (object->handle != NULL) {
			TAILQ_INSERT_TAIL(
			    NOBJLIST(object->handle),
			    object,
			    pager_object_list
			);
		} else {
			TAILQ_INSERT_TAIL(
			    &swap_pager_un_object_list,
			    object,
			    pager_object_list
			);
		}
		mtx_unlock(&sw_alloc_mtx);
	}

	/*
	 * Locate hash entry.  If not found create, but if we aren't adding
	 * anything just return.  If we run out of space in the map we wait
	 * and, since the hash table may have changed, retry.
	 */
retry:
	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) == NULL) {
		if (swapblk == SWAPBLK_NONE)
			return;

		swap = *pswap = zalloc(swap_zone);
		if (swap == NULL) {
			VM_WAIT;
			goto retry;
		}
		swap->swb_hnext = NULL;
		swap->swb_object = object;
		swap->swb_index = index & ~SWAP_META_MASK;
		swap->swb_count = 0;

		++object->un_pager.swp.swp_bcount;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
	}

	/*
	 * Delete prior contents of metadata
	 */
	index &= SWAP_META_MASK;

	if (swap->swb_pages[index] != SWAPBLK_NONE) {
		swp_pager_freeswapspace(swap->swb_pages[index], 1);
		--swap->swb_count;
	}

	/*
	 * Enter block into metadata
	 */
	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE)
		++swap->swb_count;
}

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	This routine must be called at splvm()
 */
static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
	if (object->type != OBJT_SWAP)
		return;

	while (count > 0) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);

		if ((swap = *pswap) != NULL) {
			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

			if (v != SWAPBLK_NONE) {
				swp_pager_freeswapspace(v, 1);
				swap->swb_pages[index & SWAP_META_MASK] =
					SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
			--count;
			++index;
		} else {
			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
			count -= n;
			index += n;
		}
	}
}

/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	This routine must be called at splvm()
 */
static void
swp_pager_meta_free_all(vm_object_t object)
{
	daddr_t index = 0;

	if (object->type != OBJT_SWAP)
		return;

	while (object->un_pager.swp.swp_bcount) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);
		if ((swap = *pswap) != NULL) {
			int i;

			for (i = 0; i < SWAP_META_PAGES; ++i) {
				daddr_t v = swap->swb_pages[i];
				if (v != SWAPBLK_NONE) {
					--swap->swb_count;
					swp_pager_freeswapspace(v, 1);
				}
			}
			if (swap->swb_count != 0)
				panic("swap_pager_meta_free_all: swb_count != 0");
			*pswap = swap->swb_hnext;
			zfree(swap_zone, swap);
			--object->un_pager.swp.swp_bcount;
		}
		index += SWAP_META_PAGES;
		if (index > 0x20000000)
			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
	}
}

/*
 * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	This routine must be called at splvm().
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free.. pop it out
 */
static daddr_t
swp_pager_meta_ctl(
	vm_object_t object,
	vm_pindex_t index,
	int flags
) {
	struct swblock **pswap;
	struct swblock *swap;
	daddr_t r1 = SWAPBLK_NONE;

	/*
	 * The meta data only exists if the object is OBJT_SWAP
	 * and even then might not be allocated yet.
	 */
	if (object->type != OBJT_SWAP)
		return(SWAPBLK_NONE);

	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(r1, 1);
				r1 = SWAPBLK_NONE;
			}
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
		}
	}
	return(r1);
}
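
/*
 * Illustrative usage (a sketch mirroring calls made elsewhere in this
 * file):
 *
 *	blk = swp_pager_meta_ctl(object, pindex, 0);	      // lookup only
 *	blk = swp_pager_meta_ctl(object, pindex, SWM_POP);    // remove, keep blk
 *	(void) swp_pager_meta_ctl(object, pindex, SWM_FREE);  // remove and free
 */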

/********************************************************
 *		CHAINING FUNCTIONS			*
 ********************************************************
 *
 *	These functions support recursion of I/O operations
 *	on bp's, typically by chaining one or more 'child' bp's
 *	to the parent.  Synchronous, asynchronous, and semi-synchronous
 *	chaining is possible.
 */

/*
 *	vm_pager_chain_iodone:
 *
 *	io completion routine for child bp.  Currently we fudge a bit
 *	on dealing with b_resid.   Since users of these routines may issue
 *	multiple children simultaneously, sequencing of the error can be lost.
 */
static void
vm_pager_chain_iodone(struct buf *nbp)
{
	struct bio *bp;
	u_int *count;

	bp = nbp->b_caller1;
	count = (u_int *)&(bp->bio_driver1);

	if (nbp->b_ioflags & BIO_ERROR) {
		bp->bio_flags |= BIO_ERROR;
		bp->bio_error = nbp->b_error;
	} else if (nbp->b_resid != 0) {
		bp->bio_flags |= BIO_ERROR;
		bp->bio_error = EINVAL;
	} else {
		bp->bio_resid -= nbp->b_bcount;
	}

	nbp->b_caller1 = NULL;
	--(*count);

	if (bp->bio_flags & BIO_FLAG1) {
		bp->bio_flags &= ~BIO_FLAG1;
		wakeup(bp);
	}
	nbp->b_flags |= B_DONE;
	nbp->b_flags &= ~B_ASYNC;
	relpbuf(nbp, NULL);
}

/*
 *	getchainbuf:
 *
 *	Obtain a physical buffer and chain it to its parent buffer.  When
 *	I/O completes, the parent buffer will be B_SIGNAL'd.  Errors are
 *	automatically propagated to the parent
 */
static struct buf *
getchainbuf(struct bio *bp, struct vnode *vp, int flags)
{
	struct buf *nbp;
	u_int *count;

	nbp = getpbuf(NULL);
	count = (u_int *)&(bp->bio_driver1);

	nbp->b_caller1 = bp;
	++(*count);

	if (*count > 4)
		waitchainbuf(bp, 4, 0);

	nbp->b_iocmd = bp->bio_cmd;
	nbp->b_ioflags = bp->bio_flags & BIO_ORDERED;
	nbp->b_flags = flags;
	nbp->b_rcred = crhold(proc0.p_ucred);
	nbp->b_wcred = crhold(proc0.p_ucred);
	nbp->b_iodone = vm_pager_chain_iodone;

	if (vp)
		pbgetvp(vp, nbp);
	return(nbp);
}

/*
 *	flushchainbuf:
 *
 *	Flush a chained buffer, or finish it immediately if it is empty.
 */
static void
flushchainbuf(struct buf *nbp)
{
	if (nbp->b_bcount) {
		nbp->b_bufsize = nbp->b_bcount;
		if (nbp->b_iocmd == BIO_WRITE)
			nbp->b_dirtyend = nbp->b_bcount;
		BUF_KERNPROC(nbp);
		BUF_STRATEGY(nbp);
	} else {
		bufdone(nbp);
	}
}

/*
 *	waitchainbuf:
 *
 *	Wait for the number of outstanding child buffers to drop to the
 *	specified limit, then finish the parent bio if "done" is set.
 */
static void
waitchainbuf(struct bio *bp, int limit, int done)
{
	int s;
	u_int *count;

	s = splbio();
	count = (u_int *)&(bp->bio_driver1);

	while (*count > limit) {
		bp->bio_flags |= BIO_FLAG1;
		tsleep(bp, PRIBIO + 4, "bpchain", 0);
	}
	if (done) {
		if (bp->bio_resid != 0 && !(bp->bio_flags & BIO_ERROR)) {
			bp->bio_flags |= BIO_ERROR;
			bp->bio_error = EINVAL;
		}
		biodone(bp);
	}
	splx(s);
}