/*
 * Copyright (c) 1998 Matthew Dillon,
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#define SWM_FREE	0x02	/* free, period		*/
#define SWM_POP		0x04	/* pop out		*/
/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old system.
 */
extern int vm_swap_size;	/* number of free swap blocks, in pages */
int swap_pager_full;		/* swap space exhaustion (task killing) */
static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis) */
static int nsw_rcount;		/* free read buffers */
static int nsw_wcount_sync;	/* limit write buffers / synchronous */
static int nsw_wcount_async;	/* limit write buffers / asynchronous */
static int nsw_wcount_async_max; /* assigned maximum */
static int nsw_cluster_max;	/* maximum VOP I/O allowed */

struct blist *swapblist;
static struct swblock **swhash;
static int swhash_mask;
static int swap_async_max = 4;	/* maximum in-progress async I/O's */
static struct sx sw_alloc_sx;

extern struct vnode *swapdev_vp;
extern struct swdevt *swdevt;
SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
    CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");

#define BLK2DEVIDX(blk) (nswdev > 1 ? blk / dmmax % nswdev : 0)
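/*
 * Illustrative example (editorial addition, not part of the original
 * source): with two swap devices (nswdev == 2) and dmmax == 32,
 * BLK2DEVIDX() assigns 32-page stripes to devices round-robin: blocks
 * 0-31 map to device 0, 32-63 to device 1, 64-95 back to device 0,
 * and so on.
 */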
139 * "named" and "unnamed" anon region objects. Try to reduce the overhead
140 * of searching a named list by hashing it just a little.
145 #define NOBJLIST(handle) \
146 (&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
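/*
 * Illustrative example (editorial addition, not part of the original
 * source): the >> 4 assumes handles (kernel pointers) are at least
 * 16-byte aligned, so the low 4 bits carry no information; the next
 * bits select one of the NOBJLISTS lists.  E.g. a handle of 0x1230
 * hashes to ((0x1230 >> 4) & 7) == 3, i.e. &swap_pager_object_list[3].
 */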
static struct mtx sw_alloc_mtx;	/* protect list manipulation */
static struct pagerlst swap_pager_object_list[NOBJLISTS];
struct pagerlst swap_pager_un_object_list;
uma_zone_t swap_zone;
/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */
static vm_object_t
    swap_pager_alloc(void *handle, vm_ooffset_t size,
	vm_prot_t prot, vm_ooffset_t offset);
static void	swap_pager_dealloc(vm_object_t object);
static int	swap_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void	swap_pager_init(void);
static void	swap_pager_unswapped(vm_page_t);
static void	swap_pager_strategy(vm_object_t, struct bio *);
struct pagerops swappagerops = {
    swap_pager_init,	/* early system initialization of pager	*/
    swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
    swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
    swap_pager_getpages, /* pagein				*/
    swap_pager_putpages, /* pageout				*/
    swap_pager_haspage,	/* get backing store status for page	*/
    swap_pager_unswapped, /* remove swap related to page	*/
    swap_pager_strategy	/* pager strategy call			*/
};
static struct buf *getchainbuf(struct bio *bp, struct vnode *vp, int flags);
static void flushchainbuf(struct buf *nbp);
static void waitchainbuf(struct bio *bp, int count, int done);
/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */
static int dmmax;
static int dmmax_mask;
int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */
SYSCTL_INT(_vm, OID_AUTO, dmmax,
    CTLFLAG_RD, &dmmax, 0, "Maximum size of a swap block");

static __inline void	swp_sizecheck(void);
static void	swp_pager_sync_iodone(struct buf *bp);
static void	swp_pager_async_iodone(struct buf *bp);

/*
 * Swap bitmap functions
 */
static __inline void	swp_pager_freeswapspace(daddr_t blk, int npages);
static __inline daddr_t	swp_pager_getswapspace(int npages);

/*
 * Metadata functions
 */
static void swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free(vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free_all(vm_object_t);
static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);
/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 *	This routine must be called at splvm()
 */
static __inline void
swp_sizecheck()
{
    if (vm_swap_size < nswap_lowat) {
	if (swap_pager_almost_full == 0) {
	    printf("swap_pager: out of swap space\n");
	    swap_pager_almost_full = 1;
	}
    } else {
	swap_pager_full = 0;
	if (vm_swap_size > nswap_hiwat)
	    swap_pager_almost_full = 0;
    }
}
/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */
static void
swap_pager_init()
{
    int i;

    /*
     * Initialize object lists
     */
    for (i = 0; i < NOBJLISTS; ++i)
	TAILQ_INIT(&swap_pager_object_list[i]);
    TAILQ_INIT(&swap_pager_un_object_list);
    mtx_init(&sw_alloc_mtx, "swap_pager list", NULL, MTX_DEF);

    /*
     * Device Stripe, in PAGE_SIZE'd blocks
     */
    dmmax = SWB_NPAGES * 2;
    dmmax_mask = ~(dmmax - 1);
}
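/*
 * Worked example (editorial addition, not from the original source):
 * with SWB_NPAGES == 16, dmmax == 32 pages and dmmax_mask == ~31.  Two
 * block numbers a and b lie in the same 32-page device stripe exactly
 * when ((a ^ b) & dmmax_mask) == 0; e.g. blocks 40 and 63 share a
 * stripe while 63 and 64 do not.  The putpages/strategy code below
 * uses this test to keep each I/O inside a single stripe.
 */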
/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */
void
swap_pager_swap_init()
{
    int n, n2;

    /*
     * Number of in-transit swap bp operations.  Don't
     * exhaust the pbufs completely.  Make sure we
     * initialize workable values (0 will work for hysteresis
     * but it isn't very efficient).
     *
     * The nsw_cluster_max is constrained by the bp->b_pages[]
     * array (MAXPHYS/PAGE_SIZE) and our locally defined
     * MAX_PAGEOUT_CLUSTER.  Also be aware that swap ops are
     * constrained by the swap device interleave stripe size.
     *
     * Currently we hardwire nsw_wcount_async to 4.  This limit is
     * designed to prevent other I/O from having high latencies due to
     * our pageout I/O.  The value 4 works well for one or two active swap
     * devices but is probably a little low if you have more.  Even so,
     * a higher value would probably generate only a limited improvement
     * with three or four active swap devices since the system does not
     * typically have to pageout at extreme bandwidths.  We will want
     * at least 2 per swap device, and 4 is a pretty good value if you
     * have one NFS swap device due to the command/ack latency over NFS.
     * So it all works out pretty well.
     */
    nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

    mtx_lock(&pbuf_mtx);
    nsw_rcount = (nswbuf + 1) / 2;
    nsw_wcount_sync = (nswbuf + 3) / 4;
    nsw_wcount_async = 4;
    nsw_wcount_async_max = nsw_wcount_async;
    mtx_unlock(&pbuf_mtx);
    /*
     * Initialize our zone.  Right now I'm just guessing on the number
     * we need based on the number of pages in the system.  Each swblock
     * can hold 16 pages, so this is probably overkill.  This reservation
     * is typically limited to around 32MB by default.
     */
    n = cnt.v_page_count / 2;
    if (maxswzone && n > maxswzone / sizeof(struct swblock))
	n = maxswzone / sizeof(struct swblock);
    n2 = n;
    swap_zone = uma_zcreate("SWAPMETA", sizeof(struct swblock), NULL, NULL,
	NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
    do {
	if (uma_zone_set_obj(swap_zone, NULL, n))
	    break;
	/*
	 * if the allocation failed, try a zone two thirds the
	 * size of the previous attempt.
	 */
	n -= ((n + 2) / 3);
    } while (n > 0);
    if (swap_zone == NULL)
	panic("failed to create swap_zone.");
    if (n2 != n)
	printf("Swap zone entries reduced from %d to %d.\n", n2, n);
    n2 = n;
    /*
     * Initialize our meta-data hash table.  The swapper does not need to
     * be quite as efficient as the VM system, so we do not use an
     * oversized hash table.
     *
     *	n:		size of hash table, must be power of 2
     *	swhash_mask:	hash table index mask
     */
    for (n = 1; n < n2 / 8; n *= 2)
	;
    swhash = malloc(sizeof(struct swblock *) * n,
	M_VMPGDATA, M_WAITOK | M_ZERO);
    swhash_mask = n - 1;
}
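/*
 * Illustrative sizing example (editorial addition, not from the
 * original source): on a system with 131072 physical pages, n2 is
 * roughly 65536 swblock entries, so the loop above selects n == 8192
 * hash buckets (the first power of 2 >= n2/8) and swhash_mask == 8191.
 */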
/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_build().
 *
 *	This routine may block in vm_object_allocate() and create a named
 *	object lookup race, so we must interlock.  We must also run at
 *	splvm() for the object lookup to handle races with interrupts, but
 *	we do not have to maintain splvm() in between the lookup and the
 *	add because (I believe) it is not possible to attempt to create
 *	a new swap object w/handle when a default object with that handle
 *	already exists.
 */
static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset)
{
    vm_object_t object;

    if (handle) {
	/*
	 * Reference existing named region or allocate new one.  There
	 * should not be a race here against swp_pager_meta_build()
	 * as called from vm_page_remove() in regards to the lookup
	 * of the handle.
	 */
	sx_xlock(&sw_alloc_sx);
	object = vm_pager_object_lookup(NOBJLIST(handle), handle);

	if (object != NULL) {
	    vm_object_reference(object);
	} else {
	    object = vm_object_allocate(OBJT_DEFAULT,
		OFF_TO_IDX(offset + PAGE_MASK + size));
	    object->handle = handle;

	    swp_pager_meta_build(object, 0, SWAPBLK_NONE);
	}
	sx_xunlock(&sw_alloc_sx);
    } else {
	object = vm_object_allocate(OBJT_DEFAULT,
	    OFF_TO_IDX(offset + PAGE_MASK + size));

	swp_pager_meta_build(object, 0, SWAPBLK_NONE);
    }
    return (object);
}
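/*
 * Illustrative example (editorial addition, not part of the original
 * source): the object size is rounded up to whole pages by the
 * OFF_TO_IDX(offset + PAGE_MASK + size) expression above; with 4K
 * pages, offset == 0 and size == 10000 yields OFF_TO_IDX(14095) == 3
 * pages.
 */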
/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	This routine may block, but no longer does.
 *
 *	The object must be locked or unreferenceable.
 */
static void
swap_pager_dealloc(object)
    vm_object_t object;
{
    int s;

    /*
     * Remove from list right away so lookups will fail if we block for
     * pageout completion.
     */
    mtx_lock(&sw_alloc_mtx);
    if (object->handle == NULL) {
	TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
    } else {
	TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
    }
    mtx_unlock(&sw_alloc_mtx);

    vm_object_pip_wait(object, "swpdea");

    /*
     * Free all remaining metadata.  We only bother to free it from
     * the swap meta data.  We do not attempt to free swapblk's still
     * associated with vm_page_t's for this object.  We do not care
     * if paging is still in progress on some objects.
     */
    s = splvm();
    swp_pager_meta_free_all(object);
    splx(s);
}
/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/
/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	Must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */
static __inline daddr_t
swp_pager_getswapspace(npages)
    int npages;
{
    daddr_t blk;

    if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
	if (swap_pager_full != 2) {
	    printf("swap_pager_getswapspace: failed\n");
	    swap_pager_full = 2;
	    swap_pager_almost_full = 1;
	}
    } else {
	vm_swap_size -= npages;
	/* per-swap area stats */
	swdevt[BLK2DEVIDX(blk)].sw_used += npages;
	swp_sizecheck();
    }
    return (blk);
}
/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */
static __inline void
swp_pager_freeswapspace(blk, npages)
    daddr_t blk;
    int npages;
{
    blist_free(swapblist, blk, npages);
    vm_swap_size += npages;
    /* per-swap area stats */
    swdevt[BLK2DEVIDX(blk)].sw_used -= npages;
    swp_sizecheck();
}
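/*
 * Usage sketch (editorial addition, not part of the original source):
 * the two routines above are used as a pair, e.g.
 *
 *	daddr_t blk = swp_pager_getswapspace(4);
 *	if (blk != SWAPBLK_NONE)
 *		swp_pager_freeswapspace(blk, 4);
 *
 * which allocates four contiguous page-sized swap blocks and then
 * returns them, leaving vm_swap_size and the per-device sw_used
 * counters unchanged overall.
 */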
/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	This routine may be called at any spl.  We up our spl to splvm
 *	temporarily in order to perform the metadata removal.
 */
void
swap_pager_freespace(object, start, size)
    vm_object_t object;
    vm_pindex_t start;
    vm_size_t size;
{
    int s = splvm();

    swp_pager_meta_free(object, start, size);
    splx(s);
}
/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
    int s;
    int n = 0;
    daddr_t blk = SWAPBLK_NONE;
    vm_pindex_t beg = start;	/* save start index */

    s = splvm();
    while (size) {
	if (n == 0) {
	    n = BLIST_MAX_ALLOC;
	    while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
		n >>= 1;
		if (n == 0) {
		    swp_pager_meta_free(object, beg, start - beg);
		    splx(s);
		    return (-1);
		}
	    }
	}
	swp_pager_meta_build(object, start, blk);
	--size;
	++start;
	++blk;
	--n;
    }
    swp_pager_meta_free(object, start, n);
    splx(s);
    return (0);
}
/*
 * SWAP_PAGER_COPY() -	copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	This routine can be called at any spl
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be locked or
 *	inaccessible (XXX are they ?)
 */
void
swap_pager_copy(srcobject, dstobject, offset, destroysource)
    vm_object_t srcobject;
    vm_object_t dstobject;
    vm_pindex_t offset;
    int destroysource;
{
    vm_pindex_t i;
    int s;

    s = splvm();
    /*
     * If destroysource is set, we remove the source object from the
     * swap_pager internal queue now.
     */
    if (destroysource) {
	mtx_lock(&sw_alloc_mtx);
	if (srcobject->handle == NULL)
	    TAILQ_REMOVE(&swap_pager_un_object_list, srcobject,
		pager_object_list);
	else
	    TAILQ_REMOVE(NOBJLIST(srcobject->handle), srcobject,
		pager_object_list);
	mtx_unlock(&sw_alloc_mtx);
    }

    /*
     * transfer source to destination.
     */
    for (i = 0; i < dstobject->size; ++i) {
	daddr_t dstaddr;

	/*
	 * Locate (without changing) the swapblk on the destination,
	 * unless it is invalid in which case free it silently, or
	 * if the destination is a resident page, in which case the
	 * source is thrown away.
	 */
	dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

	if (dstaddr == SWAPBLK_NONE) {
	    /*
	     * Destination has no swapblk and is not resident,
	     * copy source.
	     */
	    daddr_t srcaddr;

	    srcaddr = swp_pager_meta_ctl(srcobject, i + offset, SWM_POP);

	    if (srcaddr != SWAPBLK_NONE)
		swp_pager_meta_build(dstobject, i, srcaddr);
	} else {
	    /*
	     * Destination has valid swapblk or it is represented
	     * by a resident page.  We destroy the sourceblock.
	     */
	    swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
	}
    }

    /*
     * Free left over swap blocks in source.
     *
     * We have to revert the type to OBJT_DEFAULT so we do not accidentally
     * double-remove the object from the swap queues.
     */
    if (destroysource) {
	swp_pager_meta_free_all(srcobject);
	/*
	 * Reverting the type is not necessary, the caller is going
	 * to destroy srcobject directly, but I'm doing it here
	 * for consistency since we've removed the object from its
	 * queues.
	 */
	srcobject->type = OBJT_DEFAULT;
    }
    splx(s);
}
/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 */
boolean_t
swap_pager_haspage(object, pindex, before, after)
    vm_object_t object;
    vm_pindex_t pindex;
    int *before;
    int *after;
{
    daddr_t blk0;
    int s;

    /*
     * do we have good backing store at the requested index ?
     */
    s = splvm();
    blk0 = swp_pager_meta_ctl(object, pindex, 0);

    if (blk0 == SWAPBLK_NONE) {
	splx(s);
	if (before)
	    *before = 0;
	if (after)
	    *after = 0;
	return (FALSE);
    }

    /*
     * find backwards-looking contiguous good backing store
     */
    if (before != NULL) {
	int i;

	for (i = 1; i < (SWB_NPAGES/2); ++i) {
	    daddr_t blk;

	    blk = swp_pager_meta_ctl(object, pindex - i, 0);
	    if (blk != blk0 - i)
		break;
	}
	*before = (i - 1);
    }

    /*
     * find forward-looking contiguous good backing store
     */
    if (after != NULL) {
	int i;

	for (i = 1; i < (SWB_NPAGES/2); ++i) {
	    daddr_t blk;

	    blk = swp_pager_meta_ctl(object, pindex + i, 0);
	    if (blk != blk0 + i)
		break;
	}
	*after = (i - 1);
    }
    splx(s);
    return (TRUE);
}
/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not block
 *	This routine must be called at splvm()
 */
static void
swap_pager_unswapped(m)
    vm_page_t m;
{
    swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}
/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *	This implements the vm_pager_strategy() interface to swap and allows
 *	other parts of the system to directly access swap as backing store
 *	through vm_objects of type OBJT_SWAP.  This is intended to be a
 *	cacheless interface ( i.e. caching occurs at higher levels ).
 *	Therefore we do not maintain any resident pages.  All I/O goes
 *	directly to and from the swap device.
 *
 *	Note that b_blkno is scaled for PAGE_SIZE
 *
 *	We currently attempt to run I/O synchronously or asynchronously as
 *	the caller requests.  This isn't perfect because we lose error
 *	sequencing when we run multiple ops in parallel to satisfy a request.
 *	But this is swap, so we let it all hang out.
 */
static void
swap_pager_strategy(vm_object_t object, struct bio *bp)
{
    vm_pindex_t start;
    int count;
    int s;
    char *data;
    struct buf *nbp = NULL;

    /* XXX: KASSERT instead ? */
    if (bp->bio_bcount & PAGE_MASK) {
	biofinish(bp, NULL, EINVAL);
	printf("swap_pager_strategy: bp %p blk %d size %d, not page bounded\n", bp, (int)bp->bio_pblkno, (int)bp->bio_bcount);
	return;
    }

    /*
     * Clear error indication, initialize page index, count, data pointer.
     */
    bp->bio_flags &= ~BIO_ERROR;
    bp->bio_resid = bp->bio_bcount;
    *(u_int *) &bp->bio_driver1 = 0;

    start = bp->bio_pblkno;
    count = howmany(bp->bio_bcount, PAGE_SIZE);
    data = bp->bio_data;

    s = splvm();
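    /*
     * Example (editorial addition, not part of the original source): a
     * page-bounded 16384-byte request with 4K pages gives
     * count == howmany(16384, 4096) == 4 page-sized blocks starting at
     * bio_pblkno.
     */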
    /*
     * Deal with BIO_DELETE
     */
    if (bp->bio_cmd == BIO_DELETE) {
	/*
	 * FREE PAGE(s) - destroy underlying swap that is no longer
	 *		  needed.
	 */
	swp_pager_meta_free(object, start, count);
	splx(s);
	bp->bio_resid = 0;
	biodone(bp);
	return;
    }

    /*
     * Execute read or write
     */
    while (count > 0) {
	daddr_t blk;

	/*
	 * Obtain block.  If block not found and writing, allocate a
	 * new block and build it into the object.
	 */
	blk = swp_pager_meta_ctl(object, start, 0);
	if ((blk == SWAPBLK_NONE) && (bp->bio_cmd == BIO_WRITE)) {
	    blk = swp_pager_getswapspace(1);
	    if (blk == SWAPBLK_NONE) {
		bp->bio_error = ENOMEM;
		bp->bio_flags |= BIO_ERROR;
		break;
	    }
	    swp_pager_meta_build(object, start, blk);
	}

	/*
	 * Do we have to flush our current collection?  Yes if:
	 *
	 *	- no swap block at this index
	 *	- swap block is not contiguous
	 *	- we cross a physical disk boundary in the
	 *	  stripe.
	 */
	if (
	    nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk ||
	     ((nbp->b_blkno ^ blk) & dmmax_mask)
	    )
	) {
	    splx(s);
	    if (bp->bio_cmd == BIO_READ) {
		++cnt.v_swapin;
		cnt.v_swappgsin += btoc(nbp->b_bcount);
	    } else {
		++cnt.v_swapout;
		cnt.v_swappgsout += btoc(nbp->b_bcount);
		nbp->b_dirtyend = nbp->b_bcount;
	    }
	    flushchainbuf(nbp);
	    s = splvm();
	    nbp = NULL;
	}

	/*
	 * Add new swapblk to nbp, instantiating nbp if necessary.
	 * Zero-fill reads are able to take a shortcut.
	 */
	if (blk == SWAPBLK_NONE) {
	    /*
	     * We can only get here if we are reading.  Since
	     * we are at splvm() we can safely modify b_resid,
	     * even if chain ops are in progress.
	     */
	    bzero(data, PAGE_SIZE);
	    bp->bio_resid -= PAGE_SIZE;
	} else {
	    if (nbp == NULL) {
		nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);
		nbp->b_blkno = blk;
		nbp->b_bcount = 0;
		nbp->b_data = data;
	    }
	    nbp->b_bcount += PAGE_SIZE;
	}
	--count;
	++start;
	data += PAGE_SIZE;
    }

    /*
     * Flush out last buffer
     */
    splx(s);

    if (nbp) {
	if (nbp->b_iocmd == BIO_READ) {
	    ++cnt.v_swapin;
	    cnt.v_swappgsin += btoc(nbp->b_bcount);
	} else {
	    ++cnt.v_swapout;
	    cnt.v_swappgsout += btoc(nbp->b_bcount);
	    nbp->b_dirtyend = nbp->b_bcount;
	}
	flushchainbuf(nbp);
    }

    /*
     * Wait for completion.
     */
    waitchainbuf(bp, 0, 1);
}
/*
 * SWAP_PAGER_GETPAGES() - bring pages in from swap
 *
 *	Attempt to retrieve (m, count) pages from backing store, but make
 *	sure we retrieve at least m[reqpage].  We try to load in as large
 *	a chunk surrounding m[reqpage] as is contiguous in swap and which
 *	belongs to the same object.
 *
 *	The code is designed for asynchronous operation and
 *	immediate-notification of 'reqpage' but tends not to be
 *	used that way.  Please do not optimize-out this algorithmic
 *	feature, I intend to improve on it in the future.
 *
 *	The parent has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The parent has BUSY'd the pages.  We should return with 'm'
 *	left busy, but the others adjusted.
 */
static int
swap_pager_getpages(object, m, count, reqpage)
    vm_object_t object;
    vm_page_t *m;
    int count, reqpage;
{
    struct buf *bp;
    vm_page_t mreq;
    int s;
    int i;
    int j;
    int k;
    daddr_t blk;
    vm_offset_t kva;
    vm_pindex_t lastpindex;

    mreq = m[reqpage];

    if (mreq->object != object) {
	panic("swap_pager_getpages: object mismatch %p/%p",
	    object, mreq->object);
    }
    /*
     * Calculate range to retrieve.  The pages have already been assigned
     * their swapblks.  We require a *contiguous* range that falls entirely
     * within a single device stripe.  If we do not supply it, bad things
     * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
     * loops are set up such that the case(s) are handled implicitly.
     *
     * The swp_*() calls must be made at splvm().  vm_page_free() does
     * not need to be, but it will go a little faster if it is.
     */
    s = splvm();
    blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);

    for (i = reqpage - 1; i >= 0; --i) {
	daddr_t iblk;

	iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
	if (blk != iblk + (reqpage - i))
	    break;
	if ((blk ^ iblk) & dmmax_mask)
	    break;
    }
    ++i;

    for (j = reqpage + 1; j < count; ++j) {
	daddr_t jblk;

	jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
	if (blk != jblk - (j - reqpage))
	    break;
	if ((blk ^ jblk) & dmmax_mask)
	    break;
    }

    /*
     * free pages outside our collection range.  Note: we never free
     * mreq, it must remain busy throughout.
     */
    vm_page_lock_queues();
    for (k = 0; k < i; ++k)
	vm_page_free(m[k]);
    for (k = j; k < count; ++k)
	vm_page_free(m[k]);
    vm_page_unlock_queues();
    splx(s);

    /*
     * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
     * still busy, but the others unbusied.
     */
    if (blk == SWAPBLK_NONE)
	return (VM_PAGER_FAIL);

    /*
     * Get a swap buffer header to perform the IO
     */
    bp = getpbuf(&nsw_rcount);
    kva = (vm_offset_t) bp->b_data;

    /*
     * map our page(s) into kva for input
     *
     * NOTE: B_PAGING is set by pbgetvp()
     */
    pmap_qenter(kva, m + i, j - i);

    bp->b_iocmd = BIO_READ;
    bp->b_iodone = swp_pager_async_iodone;
    bp->b_rcred = crhold(thread0.td_ucred);
    bp->b_wcred = crhold(thread0.td_ucred);
    bp->b_data = (caddr_t) kva;
    bp->b_blkno = blk - (reqpage - i);
    bp->b_bcount = PAGE_SIZE * (j - i);
    bp->b_bufsize = PAGE_SIZE * (j - i);
    bp->b_pager.pg_reqpage = reqpage - i;

    for (k = i; k < j; ++k) {
	bp->b_pages[k - i] = m[k];
	vm_page_flag_set(m[k], PG_SWAPINPROG);
    }
    bp->b_npages = j - i;

    pbgetvp(swapdev_vp, bp);

    cnt.v_swapin++;
    cnt.v_swappgsin += bp->b_npages;

    /*
     * We still hold the lock on mreq, and our automatic completion routine
     * does not remove it.
     */
    vm_object_pip_add(mreq->object, bp->b_npages);
    lastpindex = m[j-1]->pindex;

    /*
     * perform the I/O.  NOTE!!!  bp cannot be considered valid after
     * this point because we automatically release it on completion.
     * Instead, we look at the one page we are interested in which we
     * still hold a lock on even through the I/O completion.
     *
     * The other pages in our m[] array are also released on completion,
     * so we cannot assume they are valid anymore either.
     *
     * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
     */
    BUF_KERNPROC(bp);
    BUF_STRATEGY(bp);

    /*
     * wait for the page we want to complete.  PG_SWAPINPROG is always
     * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
     * is set in the meta-data.
     */
    s = splvm();
    while ((mreq->flags & PG_SWAPINPROG) != 0) {
	vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
	cnt.v_intrans++;
	if (tsleep(mreq, PSWP, "swread", hz*20)) {
	    printf(
		"swap_pager: indefinite wait buffer: device:"
		" %s, blkno: %ld, size: %ld\n",
		devtoname(bp->b_dev), (long)bp->b_blkno,
		(long)bp->b_bcount
	    );
	}
    }
    splx(s);

    /*
     * mreq is left busied after completion, but all the other pages
     * are freed.  If we had an unrecoverable read error the page will
     * not be valid.
     */
    if (mreq->valid != VM_PAGE_BITS_ALL) {
	return (VM_PAGER_ERROR);
    } else {
	return (VM_PAGER_OK);
    }
    /*
     * A final note: in a low swap situation, we cannot deallocate swap
     * and mark a page dirty here because the caller is likely to mark
     * the page clean when we return, causing the page to possibly revert
     * to all-zero's later.
     */
}
/*
 *	swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in VOP_STRATEGY(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	of concern.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */
void
swap_pager_putpages(object, m, count, sync, rtvals)
    vm_object_t object;
    vm_page_t *m;
    int count;
    boolean_t sync;
    int *rtvals;
{
    int i;
    int n = 0;

    if (count && m[0]->object != object) {
	panic("swap_pager_putpages: object mismatch %p/%p",
	    object, m[0]->object);
    }
    /*
     * Turn object into OBJT_SWAP
     * check for bogus sysops
     * force sync if not pageout process
     */
    if (object->type != OBJT_SWAP)
	swp_pager_meta_build(object, 0, SWAPBLK_NONE);

    if (curproc != pageproc)
	sync = TRUE;

    /*
     * Update nsw parameters from swap_async_max sysctl values.
     * Do not let the sysop crash the machine with bogus numbers.
     */
    mtx_lock(&pbuf_mtx);
    if (swap_async_max != nsw_wcount_async_max) {
	int n;
	int s;

	/*
	 * limit range
	 */
	if ((n = swap_async_max) > nswbuf / 2)
	    n = nswbuf / 2;
	if (n < 1)
	    n = 1;
	swap_async_max = n;

	/*
	 * Adjust difference ( if possible ).  If the current async
	 * count is too low, we may not be able to make the adjustment
	 * at this time.
	 */
	s = splvm();
	n -= nsw_wcount_async_max;
	if (nsw_wcount_async + n >= 0) {
	    nsw_wcount_async += n;
	    nsw_wcount_async_max += n;
	    wakeup(&nsw_wcount_async);
	} else {
	    nsw_wcount_async_max -= n;
	}
	splx(s);
    }
    mtx_unlock(&pbuf_mtx);

    /*
     * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
     * The page is left dirty until the pageout operation completes
     * returning it to the cache.
     */
    for (i = 0; i < count; i += n) {
	int s;
	int j;
	struct buf *bp;
	daddr_t blk;

	/*
	 * Maximum I/O size is limited by a number of factors.
	 */
	n = min(BLIST_MAX_ALLOC, count - i);
	n = min(n, nsw_cluster_max);

	s = splvm();

	/*
	 * Get biggest block of swap we can.  If we fail, fall
	 * back and try to allocate a smaller block.  Don't go
	 * overboard trying to allocate space if it would overly
	 * fragment swap.
	 */
	while (
	    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
	    n > 4
	) {
	    n >>= 1;
	}
	if (blk == SWAPBLK_NONE) {
	    for (j = 0; j < n; ++j)
		rtvals[i+j] = VM_PAGER_FAIL;
	    splx(s);
	    continue;
	}

	/*
	 * The I/O we are constructing cannot cross a physical
	 * disk boundary in the swap stripe.  Note: we are still
	 * at splvm().
	 */
	if ((blk ^ (blk + n)) & dmmax_mask) {
	    j = ((blk + dmmax) & dmmax_mask) - blk;
	    swp_pager_freeswapspace(blk + j, n - j);
	    n = j;
	}
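	/*
	 * Worked example (editorial addition, not part of the original
	 * source): with dmmax == 32, a request of n == 8 blocks
	 * starting at blk == 60 spans the stripe boundary at 64.
	 * j becomes ((60 + 32) & ~31) - 60 == 4, blocks 64-67 are
	 * returned to the bitmap, and the I/O is trimmed to the four
	 * blocks 60-63.
	 */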
	/*
	 * All I/O parameters have been satisfied, build the I/O
	 * request and assign the swap space.
	 *
	 * NOTE: B_PAGING is set by pbgetvp()
	 */
	if (sync == TRUE) {
	    bp = getpbuf(&nsw_wcount_sync);
	} else {
	    bp = getpbuf(&nsw_wcount_async);
	    bp->b_flags = B_ASYNC;
	}
	bp->b_iocmd = BIO_WRITE;
	bp->b_spc = NULL;	/* not used, but NULL-out anyway */

	pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);

	bp->b_rcred = crhold(thread0.td_ucred);
	bp->b_wcred = crhold(thread0.td_ucred);
	bp->b_bcount = PAGE_SIZE * n;
	bp->b_bufsize = PAGE_SIZE * n;
	bp->b_blkno = blk;

	pbgetvp(swapdev_vp, bp);

	for (j = 0; j < n; ++j) {
	    vm_page_t mreq = m[i+j];

	    swp_pager_meta_build(
		mreq->object,
		mreq->pindex,
		blk + j
	    );
	    vm_page_dirty(mreq);
	    rtvals[i+j] = VM_PAGER_OK;

	    vm_page_flag_set(mreq, PG_SWAPINPROG);
	    bp->b_pages[j] = mreq;
	}
	bp->b_npages = n;
	/*
	 * Must set dirty range for NFS to work.
	 */
	bp->b_dirtyoff = 0;
	bp->b_dirtyend = bp->b_bcount;

	cnt.v_swapout++;
	cnt.v_swappgsout += bp->b_npages;
	VI_LOCK(swapdev_vp);
	swapdev_vp->v_numoutput++;
	VI_UNLOCK(swapdev_vp);

	splx(s);

	/*
	 * asynchronous
	 *
	 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
	 */
	if (sync == FALSE) {
	    bp->b_iodone = swp_pager_async_iodone;
	    BUF_KERNPROC(bp);
	    BUF_STRATEGY(bp);

	    for (j = 0; j < n; ++j)
		rtvals[i+j] = VM_PAGER_PEND;
	    /* restart outer loop */
	    continue;
	}

	/*
	 * synchronous
	 *
	 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
	 */
	bp->b_iodone = swp_pager_sync_iodone;
	BUF_STRATEGY(bp);

	/*
	 * Wait for the sync I/O to complete, then update rtvals.
	 * We just set the rtvals[] to VM_PAGER_PEND so we can call
	 * our async completion routine at the end, thus avoiding a
	 * double-free.
	 */
	s = splbio();
	while ((bp->b_flags & B_DONE) == 0) {
	    tsleep(bp, PVM, "swwrt", 0);
	}
	for (j = 0; j < n; ++j)
	    rtvals[i+j] = VM_PAGER_PEND;
	/*
	 * Now that we are through with the bp, we can call the
	 * normal async completion, which frees everything up.
	 */
	swp_pager_async_iodone(bp);
	splx(s);
    }
}
/*
 *	swap_pager_sync_iodone:
 *
 *	Completion routine for synchronous reads and writes from/to swap.
 *	We just mark the bp as complete and wake up anyone waiting on it.
 *
 *	This routine may not block.  This routine is called at splbio() or better.
 */
static void
swp_pager_sync_iodone(bp)
    struct buf *bp;
{
    bp->b_flags |= B_DONE;
    bp->b_flags &= ~B_ASYNC;
    wakeup(bp);
}
/*
 *	swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
 *	the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
 *	unbusy all pages except the 'main' request page.  For WRITE
 *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
 *	because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *	This routine may not block.
 *	This routine is called at splbio() or better
 *
 *	We up ourselves to splvm() as required for various vm_page related
 *	calls.
 */
static void
swp_pager_async_iodone(bp)
    struct buf *bp;
{
    int s;
    int i;
    vm_object_t object = NULL;

    bp->b_flags |= B_DONE;

    /*
     * report error
     */
    if (bp->b_ioflags & BIO_ERROR) {
	printf(
	    "swap_pager: I/O error - %s failed; blkno %ld,"
	    "size %ld, error %d\n",
	    ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
	    (long)bp->b_blkno,
	    (long)bp->b_bcount,
	    bp->b_error
	);
    }

    /*
     * set object, raise to splvm().
     */
    if (bp->b_npages)
	object = bp->b_pages[0]->object;
    s = splvm();

    /*
     * remove the mapping for kernel virtual
     */
    pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);

    vm_page_lock_queues();
    /*
     * cleanup pages.  If an error occurs writing to swap, we are in
     * very serious trouble.  If it happens to be a disk error, though,
     * we may be able to recover by reassigning the swap later on.  So
     * in this case we remove the m->swapblk assignment for the page
     * but do not free it in the rlist.  The erroneous block(s) are thus
     * never reallocated as swap.  Redirty the page and continue.
     */
    for (i = 0; i < bp->b_npages; ++i) {
	vm_page_t m = bp->b_pages[i];

	vm_page_flag_clear(m, PG_SWAPINPROG);

	if (bp->b_ioflags & BIO_ERROR) {
	    /*
	     * If an error occurs I'd love to throw the swapblk
	     * away without freeing it back to swapspace, so it
	     * can never be used again.  But I can't from an
	     * interrupt.
	     */
	    if (bp->b_iocmd == BIO_READ) {
		/*
		 * When reading, reqpage needs to stay
		 * locked for the parent, but all other
		 * pages can be freed.  We still want to
		 * wakeup the parent waiting on the page,
		 * though.  ( also: pg_reqpage can be -1 and
		 * not match anything ).
		 *
		 * We have to wake specifically requested pages
		 * up too because we cleared PG_SWAPINPROG and
		 * someone may be waiting for that.
		 *
		 * NOTE: for reads, m->dirty will probably
		 * be overridden by the original caller of
		 * getpages so don't play cute tricks here.
		 *
		 * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE
		 * AS THIS MESSES WITH object->memq, and it is
		 * not legal to mess with object->memq from an
		 * interrupt.
		 */
		m->valid = 0;
		vm_page_flag_clear(m, PG_ZERO);
		if (i != bp->b_pager.pg_reqpage)
		    vm_page_free(m);
		else
		    vm_page_flash(m);
		/*
		 * If i == bp->b_pager.pg_reqpage, do not wake
		 * the page up.  The caller needs to.
		 */
	    } else {
		/*
		 * If a write error occurs, reactivate page
		 * so it doesn't clog the inactive list,
		 * then finish the I/O.
		 */
		vm_page_dirty(m);
		vm_page_activate(m);
		vm_page_io_finish(m);
	    }
	} else if (bp->b_iocmd == BIO_READ) {
	    /*
	     * For read success, clear dirty bits.  Nobody should
	     * have this page mapped but don't take any chances,
	     * make sure the pmap modify bits are also cleared.
	     *
	     * NOTE: for reads, m->dirty will probably be
	     * overridden by the original caller of getpages so
	     * we cannot set them in order to free the underlying
	     * swap in a low-swap situation.  I don't think we'd
	     * want to do that anyway, but it was an optimization
	     * that existed in the old swapper for a time before
	     * it got ripped out due to precisely this problem.
	     *
	     * clear PG_ZERO in page.
	     *
	     * If not the requested page then deactivate it.
	     *
	     * Note that the requested page, reqpage, is left
	     * busied, but we still have to wake it up.  The
	     * other pages are released (unbusied) by
	     * vm_page_wakeup().  We do not set reqpage's
	     * valid bits here, it is up to the caller.
	     */
	    pmap_clear_modify(m);
	    m->valid = VM_PAGE_BITS_ALL;
	    vm_page_undirty(m);
	    vm_page_flag_clear(m, PG_ZERO);

	    /*
	     * We have to wake specifically requested pages
	     * up too because we cleared PG_SWAPINPROG and
	     * could be waiting for it in getpages.  However,
	     * be sure to not unbusy getpages specifically
	     * requested page - getpages expects it to be
	     * left busy.
	     */
	    if (i != bp->b_pager.pg_reqpage) {
		vm_page_deactivate(m);
		vm_page_wakeup(m);
	    } else {
		vm_page_flash(m);
	    }
	} else {
	    /*
	     * For write success, clear the modify and dirty
	     * status, then finish the I/O ( which decrements the
	     * busy count and possibly wakes waiters up ).
	     */
	    pmap_clear_modify(m);
	    vm_page_undirty(m);
	    vm_page_io_finish(m);
	    if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
		vm_page_protect(m, VM_PROT_READ);
	}
    }
    vm_page_unlock_queues();

    /*
     * adjust pip.  NOTE: the original parent may still have its own
     * pip refs on the object.
     */
    if (object)
	vm_object_pip_wakeupn(object, bp->b_npages);

    /*
     * release the physical I/O buffer
     */
    relpbuf(
	bp,
	((bp->b_iocmd == BIO_READ) ? &nsw_rcount :
	    ((bp->b_flags & B_ASYNC) ?
		&nsw_wcount_async :
		&nsw_wcount_sync
	    )
	)
    );
    splx(s);
}
/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.  All swp_*() routines must be called at
 *	splvm() because swap can be freed up by the low level vm_page
 *	code which might be called from interrupts beyond what splbio() covers.
 *
 *	Swap metadata is implemented with a global hash and not directly
 *	linked into the object.  Instead the object simply contains
 *	appropriate tracking counters.
 */
/*
 * SWP_PAGER_HASH() -	hash swap meta data
 *
 *	This is an inline helper function which hashes the swapblk given
 *	the object and page index.  It returns a pointer to a pointer
 *	to the object, or a pointer to a NULL pointer if it could not
 *	find an object.
 *
 *	This routine must be called at splvm().
 */
static __inline struct swblock **
swp_pager_hash(vm_object_t object, vm_pindex_t index)
{
    struct swblock **pswap;
    struct swblock *swap;

    index &= ~(vm_pindex_t)SWAP_META_MASK;
    pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
    while ((swap = *pswap) != NULL) {
	if (swap->swb_object == object &&
	    swap->swb_index == index
	) {
	    break;
	}
	pswap = &swap->swb_hnext;
    }
    return (pswap);
}
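/*
 * Illustrative example (editorial addition, not part of the original
 * source): per the zone comment in swap_pager_swap_init(), each
 * swblock covers 16 pages, so SWAP_META_MASK is 15.  A lookup of
 * pindex 37 first rounds down to the swblock base index 32 and then
 * indexes the chain head at
 * swhash[(32 ^ (int)(intptr_t)object) & swhash_mask].
 */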
/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 *
 *	This routine must be called at splvm(), except when used to convert
 *	an OBJT_DEFAULT object into an OBJT_SWAP object.
 */
static void
swp_pager_meta_build(
    vm_object_t object,
    vm_pindex_t pindex,
    daddr_t swapblk
) {
    struct swblock *swap;
    struct swblock **pswap;
    int idx;

    /*
     * Convert default object to swap object if necessary
     */
    if (object->type != OBJT_SWAP) {
	object->type = OBJT_SWAP;
	object->un_pager.swp.swp_bcount = 0;

	mtx_lock(&sw_alloc_mtx);
	if (object->handle != NULL) {
	    TAILQ_INSERT_TAIL(
		NOBJLIST(object->handle),
		object, pager_object_list);
	} else {
	    TAILQ_INSERT_TAIL(
		&swap_pager_un_object_list,
		object, pager_object_list);
	}
	mtx_unlock(&sw_alloc_mtx);
    }

    /*
     * Locate hash entry.  If not found create, but if we aren't adding
     * anything just return.  If we run out of space in the map we wait
     * and, since the hash table may have changed, retry.
     */
retry:
    pswap = swp_pager_hash(object, pindex);

    if ((swap = *pswap) == NULL) {
	int i;

	if (swapblk == SWAPBLK_NONE)
	    return;

	swap = *pswap = uma_zalloc(swap_zone, M_NOWAIT);
	if (swap == NULL) {
	    VM_WAIT;
	    goto retry;
	}
	swap->swb_hnext = NULL;
	swap->swb_object = object;
	swap->swb_index = pindex & ~(vm_pindex_t)SWAP_META_MASK;
	swap->swb_count = 0;

	++object->un_pager.swp.swp_bcount;

	for (i = 0; i < SWAP_META_PAGES; ++i)
	    swap->swb_pages[i] = SWAPBLK_NONE;
    }

    /*
     * Delete prior contents of metadata
     */
    idx = pindex & SWAP_META_MASK;

    if (swap->swb_pages[idx] != SWAPBLK_NONE) {
	swp_pager_freeswapspace(swap->swb_pages[idx], 1);
	--swap->swb_count;
    }

    /*
     * Enter block into metadata
     */
    swap->swb_pages[idx] = swapblk;
    if (swapblk != SWAPBLK_NONE)
	++swap->swb_count;
}
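/*
 * Illustrative example (editorial addition, not part of the original
 * source): storing a swap block for pindex 37 lands in the swblock
 * whose swb_index is 32, at swb_pages[37 & SWAP_META_MASK] ==
 * swb_pages[5]; swb_count goes up only when a previously empty
 * (SWAPBLK_NONE) slot is filled.
 */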
/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	This routine must be called at splvm()
 */
static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
    if (object->type != OBJT_SWAP)
	return;

    while (count > 0) {
	struct swblock **pswap;
	struct swblock *swap;

	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) != NULL) {
	    daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

	    if (v != SWAPBLK_NONE) {
		swp_pager_freeswapspace(v, 1);
		swap->swb_pages[index & SWAP_META_MASK] =
		    SWAPBLK_NONE;
		if (--swap->swb_count == 0) {
		    *pswap = swap->swb_hnext;
		    uma_zfree(swap_zone, swap);
		    --object->un_pager.swp.swp_bcount;
		}
	    }
	    --count;
	    ++index;
	} else {
	    int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
	    count -= n;
	    index += n;
	}
    }
}
/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	This routine must be called at splvm()
 */
static void
swp_pager_meta_free_all(vm_object_t object)
{
    daddr_t index = 0;

    if (object->type != OBJT_SWAP)
	return;

    while (object->un_pager.swp.swp_bcount) {
	struct swblock **pswap;
	struct swblock *swap;

	pswap = swp_pager_hash(object, index);
	if ((swap = *pswap) != NULL) {
	    int i;

	    for (i = 0; i < SWAP_META_PAGES; ++i) {
		daddr_t v = swap->swb_pages[i];
		if (v != SWAPBLK_NONE) {
		    --swap->swb_count;
		    swp_pager_freeswapspace(v, 1);
		}
	    }
	    if (swap->swb_count != 0)
		panic("swap_pager_meta_free_all: swb_count != 0");
	    *pswap = swap->swb_hnext;
	    uma_zfree(swap_zone, swap);
	    --object->un_pager.swp.swp_bcount;
	}
	index += SWAP_META_PAGES;
	if (index > 0x20000000)
	    panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
    }
}
/*
 * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	This routine must be called at splvm().
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free.. pop it out
 */
static daddr_t
swp_pager_meta_ctl(
    vm_object_t object,
    vm_pindex_t pindex,
    int flags
) {
    struct swblock **pswap;
    struct swblock *swap;
    daddr_t r1 = SWAPBLK_NONE;
    int idx;

    /*
     * The meta data only exists if the object is OBJT_SWAP
     * and even then might not be allocated yet.
     */
    if (object->type != OBJT_SWAP)
	return (SWAPBLK_NONE);

    pswap = swp_pager_hash(object, pindex);

    if ((swap = *pswap) != NULL) {
	idx = pindex & SWAP_META_MASK;
	r1 = swap->swb_pages[idx];

	if (r1 != SWAPBLK_NONE) {
	    if (flags & SWM_FREE) {
		swp_pager_freeswapspace(r1, 1);
		r1 = SWAPBLK_NONE;
	    }
	    if (flags & (SWM_FREE|SWM_POP)) {
		swap->swb_pages[idx] = SWAPBLK_NONE;
		if (--swap->swb_count == 0) {
		    *pswap = swap->swb_hnext;
		    uma_zfree(swap_zone, swap);
		    --object->un_pager.swp.swp_bcount;
		}
	    }
	}
    }
    return (r1);
}
/********************************************************
 *		CHAINING FUNCTIONS			*
 ********************************************************
 *
 *	These functions support recursion of I/O operations
 *	on bp's, typically by chaining one or more 'child' bp's
 *	to the parent.  Synchronous, asynchronous, and semi-synchronous
 *	chaining is possible.
 */
/*
 *	vm_pager_chain_iodone:
 *
 *	io completion routine for child bp.  Currently we fudge a bit
 *	on dealing with b_resid.  Since users of these routines may issue
 *	multiple children simultaneously, sequencing of the error can be lost.
 */
static void
vm_pager_chain_iodone(struct buf *nbp)
{
    struct bio *bp;
    u_int *count;

    bp = nbp->b_caller1;
    count = (u_int *)&(bp->bio_driver1);
    if (bp != NULL) {
	if (nbp->b_ioflags & BIO_ERROR) {
	    bp->bio_flags |= BIO_ERROR;
	    bp->bio_error = nbp->b_error;
	} else if (nbp->b_resid != 0) {
	    bp->bio_flags |= BIO_ERROR;
	    bp->bio_error = EINVAL;
	} else {
	    bp->bio_resid -= nbp->b_bcount;
	}
	nbp->b_caller1 = NULL;
	--(*count);
	if (bp->bio_flags & BIO_FLAG1) {
	    bp->bio_flags &= ~BIO_FLAG1;
	    wakeup(bp);
	}
    }
    nbp->b_flags |= B_DONE;
    nbp->b_flags &= ~B_ASYNC;
    wakeup(nbp);
}
/*
 *	getchainbuf:
 *
 *	Obtain a physical buffer and chain it to its parent buffer.  When
 *	I/O completes, the parent buffer will be B_SIGNAL'd.  Errors are
 *	automatically propagated to the parent
 */
static struct buf *
getchainbuf(struct bio *bp, struct vnode *vp, int flags)
{
    struct buf *nbp;
    u_int *count;

    nbp = getpbuf(NULL);
    count = (u_int *)&(bp->bio_driver1);

    nbp->b_caller1 = bp;
    ++(*count);

    if (*count > 4)
	waitchainbuf(bp, 4, 0);

    nbp->b_iocmd = bp->bio_cmd;
    nbp->b_flags = flags;
    nbp->b_rcred = crhold(thread0.td_ucred);
    nbp->b_wcred = crhold(thread0.td_ucred);
    nbp->b_iodone = vm_pager_chain_iodone;

    if (vp)
	pbgetvp(vp, nbp);
    return (nbp);
}
static void
flushchainbuf(struct buf *nbp)
{
    if (nbp->b_bcount) {
	nbp->b_bufsize = nbp->b_bcount;
	if (nbp->b_iocmd == BIO_WRITE)
	    nbp->b_dirtyend = nbp->b_bcount;
	BUF_KERNPROC(nbp);
	BUF_STRATEGY(nbp);
    } else {
	bufdone(nbp);
    }
}
static void
waitchainbuf(struct bio *bp, int limit, int done)
{
    int s;
    u_int *count;

    s = splbio();
    count = (u_int *)&(bp->bio_driver1);
    while (*count > limit) {
	bp->bio_flags |= BIO_FLAG1;
	tsleep(bp, PRIBIO + 4, "bpchain", 0);
    }
    if (done) {
	if (bp->bio_resid != 0 && !(bp->bio_flags & BIO_ERROR)) {
	    bp->bio_flags |= BIO_ERROR;
	    bp->bio_error = EINVAL;
	}
	biodone(bp);
    }
    splx(s);
}