/*
 * (c)Copyright 1998, Matthew Dillon.  Terms for use and redistribution
 * are covered by the BSD Copyright as found in /usr/src/COPYRIGHT.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
26 struct vpgqueues vm_page_queues[PQ_COUNT];
27 static struct mtx vm_pageq_mtx[PQ_COUNT];
34 for (i = 0; i < PQ_L2_SIZE; i++) {
35 vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
37 for (i = 0; i < PQ_L2_SIZE; i++) {
38 vm_page_queues[PQ_CACHE+i].cnt = &cnt.v_cache_count;
40 vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
41 vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
42 vm_page_queues[PQ_HOLD].cnt = &cnt.v_active_count;
44 for (i = 0; i < PQ_COUNT; i++) {
45 TAILQ_INIT(&vm_page_queues[i].pl);
46 mtx_init(&vm_pageq_mtx[i], "vm pageq mutex", MTX_DEF);
51 vm_pageq_aquire(int queue)
53 struct vpgqueues *vpq = NULL;
55 if (queue != PQ_NONE) {
56 vpq = &vm_page_queues[queue];
58 mtx_lock(&vm_pageq_mtx[queue]);
65 vm_pageq_release(struct vpgqueues *vpq)
68 mtx_unlock(&vm_pageq_mtx[vpq - &vm_page_queues[0]]);
73 vm_pageq_requeue(vm_page_t m)
76 struct vpgqueues *vpq;
78 vpq = vm_pageq_aquire(queue);
79 TAILQ_REMOVE(&vpq->pl, m, pageq);
80 TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
81 vm_pageq_release(vpq);
89 vm_pageq_enqueue(int queue, vm_page_t m)
91 struct vpgqueues *vpq;
93 vpq = &vm_page_queues[queue];
95 TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
103 * Add a new page to the freelist for use by the system.
104 * Must be called at splhigh().
107 vm_pageq_add_new_page(vm_offset_t pa)
114 m = PHYS_TO_VM_PAGE(pa);
117 m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
118 vm_pageq_enqueue(m->pc + PQ_FREE, m);
123 * vm_pageq_remove_nowakeup:
125 * vm_page_unqueue() without any wakeup
127 * This routine must be called at splhigh().
128 * This routine may not block.
131 vm_pageq_remove_nowakeup(vm_page_t m)
133 int queue = m->queue;
134 struct vpgqueues *pq;
135 if (queue != PQ_NONE) {
136 pq = &vm_page_queues[queue];
138 TAILQ_REMOVE(&pq->pl, m, pageq);
147 * Remove a page from its queue.
149 * This routine must be called at splhigh().
150 * This routine may not block.
153 vm_pageq_remove(vm_page_t m)
155 int queue = m->queue;
156 struct vpgqueues *pq;
159 if (queue != PQ_NONE) {
161 pq = &vm_page_queues[queue];
162 TAILQ_REMOVE(&pq->pl, m, pageq);
165 if ((queue - m->pc) == PQ_CACHE) {
166 if (vm_paging_needed())
/*
 *	_vm_pageq_find:
 *
 *	Find a page on the specified queue with color optimization.
 *
 *	The page coloring optimization attempts to locate a page that does
 *	not overload other nearby pages in the object in the cpu's L1 or
 *	L2 caches.  We need this optimization because cpu caches tend to
 *	be physical caches, while object spaces tend to be virtual.
 *
 *	This routine must be called at splvm().
 *	This routine may not block.
 *
 *	This routine may only be called from the vm_page_list_find() macro
 *	in vm_page.h.
 *
 *	NOTE(review): declarations, braces and `break'/`return' statements
 *	reconstructed; the extraction dropped them.
 */
#if PQ_L2_SIZE > 1
static __inline vm_page_t
_vm_pageq_find(int basequeue, int index)
{
	int i;
	vm_page_t m = NULL;
	struct vpgqueues *pq;

	pq = &vm_page_queues[basequeue];

	/*
	 * Note that for the first loop, index+i and index-i wind up at the
	 * same place.  Even though this is not totally optimal, we've already
	 * blown it by missing the cache case so we do not care.
	 */
	for (i = PQ_L2_SIZE / 2; i > 0; --i) {
		if ((m = TAILQ_FIRST(&pq[(index + i) & PQ_L2_MASK].pl)) != NULL)
			break;
		if ((m = TAILQ_FIRST(&pq[(index - i) & PQ_L2_MASK].pl)) != NULL)
			break;
	}
	return (m);
}
#endif /* PQ_L2_SIZE > 1 */
218 vm_pageq_find(int basequeue, int index, boolean_t prefer_zero)
226 m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl, pglist);
228 m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
231 m = _vm_pageq_find(basequeue, index);
235 m = TAILQ_LAST(&vm_page_queues[basequeue].pl, pglist);
237 m = TAILQ_FIRST(&vm_page_queues[basequeue].pl);