1 /* $NetBSD: t_uvm_physseg.c,v 1.2 2016/12/22 08:15:20 cherry Exp $ */
4 * Copyright (c) 2015, 2016 The NetBSD Foundation, Inc.
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Santhosh N. Raju <santhosh.raju@gmail.com> and
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
/*
 * Userland test harness scaffolding: pulls in libc headers and defines the
 * kernel-ish macros/typedefs the included uvm_physseg.c expects.
 * NOTE(review): interior lines (braces, blank lines, #if/#else delimiters)
 * are missing from this extract; the leading integers on each line are the
 * original file's line numbers left over from extraction.
 */
33 #include <sys/cdefs.h>
34 __RCSID("$NetBSD: t_uvm_physseg.c,v 1.2 2016/12/22 08:15:20 cherry Exp $");
37 * If this line is commented out tests related to uvm_physseg_get_pmseg()
40 * Have a look at machine/uvm_physseg.h for more details.
42 #define __HAVE_PMAP_PHYSSEG
45 * This is a dummy struct used for testing purposes
47 * In reality this struct would exist in the MD part of the code residing in
51 #ifdef __HAVE_PMAP_PHYSSEG
53 int dummy_variable; /* Dummy variable use for testing */
57 /* Testing API - assumes userland */
58 /* Provide Kernel API equivalents */
62 #include <string.h> /* memset(3) et al. */
63 #include <stdio.h> /* printf(3) */
64 #include <stdlib.h> /* malloc(3) */
/* printf(3) format strings for the fake paddr_t/psize_t/vaddr_t/vsize_t
 * (all typedef'd to unsigned long below). */
68 #define PRIxPADDR "lx"
69 #define PRIxPSIZE "lx"
70 #define PRIuPSIZE "lu"
71 #define PRIxVADDR "lx"
72 #define PRIxVSIZE "lx"
73 #define PRIuVSIZE "lu"
75 #define UVM_HOTPLUG /* Enable hotplug with rbtree. */
76 #define PMAP_STEAL_MEMORY
77 #define DEBUG /* Enable debug functionality. */
79 typedef unsigned long vaddr_t;
80 typedef unsigned long paddr_t;
81 typedef unsigned long psize_t;
82 typedef unsigned long vsize_t;
84 #include <uvm/uvm_physseg.h>
85 #include <uvm/uvm_page.h>
/* Non-DEBUG variants: assertions compile to nothing. */
88 #define KASSERTMSG(e, msg, ...) /* NOTHING */
89 #define KASSERT(e) /* NOTHING */
/* DEBUG variants: map kernel assertions onto assert(3). */
91 #define KASSERT(a) assert(a)
/*
 * DEBUG KASSERTMSG: print the message, then assert the expression.
 * Wrapped in do { } while (0) so the macro behaves as a single statement —
 * the original two-statement expansion broke under an unbraced if/else and
 * unconditionally executed the printf as a second, separate statement.
 */
#define KASSERTMSG(exp, ...) do { printf(__VA_ARGS__); assert((exp)); } while (0)
/* Fake MD configuration: freelists, segment strategy, and page geometry. */
95 #define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
97 #define VM_NFREELIST 4
98 #define VM_FREELIST_DEFAULT 0
99 #define VM_FREELIST_FIRST16 3
100 #define VM_FREELIST_FIRST1G 2
101 #define VM_FREELIST_FIRST4G 1
104 * Used in tests when Array implementation is tested
106 #if !defined(VM_PHYSSEG_MAX)
107 #define VM_PHYSSEG_MAX 1
/* 4 KiB pages; atop()/ptoa() convert between byte addresses and PFNs. */
110 #define PAGE_SHIFT 12
111 #define PAGE_SIZE (1 << PAGE_SHIFT)
112 #define PAGE_MASK (PAGE_SIZE - 1)
113 #define atop(x) (((paddr_t)(x)) >> PAGE_SHIFT)
114 #define ptoa(x) (((paddr_t)(x)) << PAGE_SHIFT)
/* Locking is a no-op in this single-threaded userland harness. */
116 #define mutex_enter(l)
117 #define mutex_exit(l)
121 struct uvmexp uvmexp; /* decl */
124 * uvm structure borrowed from uvm.h
126 * Remember this is a dummy structure used within the ATF Tests and
127 * uses only necessary fields from the original uvm struct.
128 * See uvm/uvm.h for the full struct.
132 /* vm_page related parameters */
134 bool page_init_done; /* TRUE if uvm_page_init() finished */
/* malloc(3)-backed stand-ins for the kernel kmem(9)/panic(9) API. */
137 #include <sys/kmem.h>
140 kmem_alloc(size_t size, km_flag_t flags)
146 kmem_zalloc(size_t size, km_flag_t flags)
151 memset(ptr, 0, size);
157 kmem_free(void *mem, size_t size)
163 panic(const char *fmt, ...)
177 uvm_pagefree(struct vm_page *pg)
182 #if defined(UVM_HOTPLUG)
184 uvmpdpol_reinit(void)
188 #endif /* UVM_HOTPLUG */
190 /* end - Provide Kernel API equivalents */
/* Include the unit under test directly so its statics are reachable. */
193 #include "uvm/uvm_physseg.c"
197 #define SIXTYFOUR_KILO (64 * 1024)
198 #define ONETWENTYEIGHT_KILO (128 * 1024)
199 #define TWOFIFTYSIX_KILO (256 * 1024)
200 #define FIVEONETWO_KILO (512 * 1024)
201 #define ONE_MEGABYTE (1024 * 1024)
202 #define TWO_MEGABYTE (2 * 1024 * 1024)
204 /* Sample Page Frame Numbers */
/* Four adjacent 1 MiB test segments expressed as [start, end) PFN ranges.
 * NOTE(review): atop(ONE_MEGABYTE + 1) truncates to the same PFN as
 * atop(ONE_MEGABYTE) — segments 2-4 therefore begin at the previous
 * segment's end PFN; confirm this overlap-by-truncation is intentional. */
205 #define VALID_START_PFN_1 atop(0)
206 #define VALID_END_PFN_1 atop(ONE_MEGABYTE)
207 #define VALID_AVAIL_START_PFN_1 atop(0)
208 #define VALID_AVAIL_END_PFN_1 atop(ONE_MEGABYTE)
210 #define VALID_START_PFN_2 atop(ONE_MEGABYTE + 1)
211 #define VALID_END_PFN_2 atop(ONE_MEGABYTE * 2)
212 #define VALID_AVAIL_START_PFN_2 atop(ONE_MEGABYTE + 1)
213 #define VALID_AVAIL_END_PFN_2 atop(ONE_MEGABYTE * 2)
215 #define VALID_START_PFN_3 atop((ONE_MEGABYTE * 2) + 1)
216 #define VALID_END_PFN_3 atop(ONE_MEGABYTE * 3)
217 #define VALID_AVAIL_START_PFN_3 atop((ONE_MEGABYTE * 2) + 1)
218 #define VALID_AVAIL_END_PFN_3 atop(ONE_MEGABYTE * 3)
220 #define VALID_START_PFN_4 atop((ONE_MEGABYTE * 3) + 1)
221 #define VALID_END_PFN_4 atop(ONE_MEGABYTE * 4)
222 #define VALID_AVAIL_START_PFN_4 atop((ONE_MEGABYTE * 3) + 1)
223 #define VALID_AVAIL_END_PFN_4 atop(ONE_MEGABYTE * 4)
226 * Total number of pages (of 4K size each) should be 256 for 1MB of memory.
228 #define PAGE_COUNT_1M 256
231 * A debug function to print the content of upm.
/* Dumps start/end/avail_start/avail_end of one segment handle via the
 * public accessors.
 * NOTE(review): "%ld" is paired with the accessors' return values even
 * though paddr_t is typedef'd to unsigned long here — PRIxPADDR/"%lu"
 * would be type-correct; confirm before relying on the output. */
234 uvm_physseg_dump_seg(uvm_physseg_t upm)
237 printf("%s: seg->start == %ld\n", __func__,
238 uvm_physseg_get_start(upm));
239 printf("%s: seg->end == %ld\n", __func__,
240 uvm_physseg_get_end(upm));
241 printf("%s: seg->avail_start == %ld\n", __func__,
242 uvm_physseg_get_avail_start(upm));
243 printf("%s: seg->avail_end == %ld\n", __func__,
244 uvm_physseg_get_avail_end(upm));
253 * Private accessor that gets the value of uvm_physseg_graph.nentries
/* Segment count: rbtree node count under UVM_HOTPLUG; the array
 * implementation's branch is not visible in this extract. */
256 uvm_physseg_get_entries(void)
258 #if defined(UVM_HOTPLUG)
259 return uvm_physseg_graph.nentries;
262 #endif /* UVM_HOTPLUG */
265 #if !defined(UVM_HOTPLUG)
/* Array-implementation alloc: hands out the next static vm_physmem slot. */
267 uvm_physseg_alloc(size_t sz)
269 return &vm_physmem[vm_nphysseg++];
/* Per-test fixture: reset pagesize and the boot-time flag before each case. */
274 * Test Fixture SetUp().
279 /* Prerequisites for running certain calls in uvm_physseg */
280 uvmexp.pagesize = PAGE_SIZE;
282 uvm.page_init_done = false;
287 /* <---- Tests for Internal functions ----> */
288 #if defined(UVM_HOTPLUG)
/* Boot-time alloc() with a size that is not a multiple of
 * sizeof(struct uvm_physseg) must abort (KASSERT). */
289 ATF_TC(uvm_physseg_alloc_atboot_mismatch);
290 ATF_TC_HEAD(uvm_physseg_alloc_atboot_mismatch, tc)
292 atf_tc_set_md_var(tc, "descr", "boot time uvm_physseg_alloc() sanity"
293 "size mismatch alloc() test.");
296 ATF_TC_BODY(uvm_physseg_alloc_atboot_mismatch, tc)
298 uvm.page_init_done = false;
300 atf_tc_expect_signal(SIGABRT, "size mismatch alloc()");
302 uvm_physseg_alloc(sizeof(struct uvm_physseg) - 1);
/* Boot-time alloc() past the static VM_PHYSSEG_MAX array must abort. */
305 ATF_TC(uvm_physseg_alloc_atboot_overrun);
306 ATF_TC_HEAD(uvm_physseg_alloc_atboot_overrun, tc)
308 atf_tc_set_md_var(tc, "descr", "boot time uvm_physseg_alloc() sanity"
309 "array overrun alloc() test.");
312 ATF_TC_BODY(uvm_physseg_alloc_atboot_overrun, tc)
314 uvm.page_init_done = false;
316 atf_tc_expect_signal(SIGABRT, "array overrun alloc()");
318 uvm_physseg_alloc((VM_PHYSSEG_MAX + 1) * sizeof(struct uvm_physseg));
/* Requests that are invalid at boot succeed once page_init_done is set,
 * because post-boot allocations go through kmem instead of the array. */
322 ATF_TC(uvm_physseg_alloc_sanity);
323 ATF_TC_HEAD(uvm_physseg_alloc_sanity, tc)
325 atf_tc_set_md_var(tc, "descr", "further uvm_physseg_alloc() sanity checks");
328 ATF_TC_BODY(uvm_physseg_alloc_sanity, tc)
332 uvm.page_init_done = false;
335 ATF_REQUIRE(uvm_physseg_alloc(VM_PHYSSEG_MAX * sizeof(struct uvm_physseg)));
337 /* Retry static alloc()s as dynamic - we expect them to pass */
338 uvm.page_init_done = true;
339 ATF_REQUIRE(uvm_physseg_alloc(sizeof(struct uvm_physseg) - 1));
340 ATF_REQUIRE(uvm_physseg_alloc(2 * VM_PHYSSEG_MAX * sizeof(struct uvm_physseg)));
/* Boot-time free() with a mismatched size must abort. */
343 ATF_TC(uvm_physseg_free_atboot_mismatch);
344 ATF_TC_HEAD(uvm_physseg_free_atboot_mismatch, tc)
346 atf_tc_set_md_var(tc, "descr", "boot time uvm_physseg_free() sanity"
347 "size mismatch free() test.");
350 ATF_TC_BODY(uvm_physseg_free_atboot_mismatch, tc)
352 uvm.page_init_done = false;
354 atf_tc_expect_signal(SIGABRT, "size mismatch free()");
356 uvm_physseg_free(&uvm_physseg[0], sizeof(struct uvm_physseg) - 1);
/* Matched alloc()/free() pairs, both at boot and post-boot, must succeed. */
359 ATF_TC(uvm_physseg_free_sanity);
360 ATF_TC_HEAD(uvm_physseg_free_sanity, tc)
362 atf_tc_set_md_var(tc, "descr", "further uvm_physseg_free() sanity checks");
365 ATF_TC_BODY(uvm_physseg_free_sanity, tc)
369 uvm.page_init_done = false;
371 struct uvm_physseg *seg;
373 #if VM_PHYSSEG_MAX > 1
375 * Note: free()ing the entire array is considered to be an
376 * error. Thus VM_PHYSSEG_MAX - 1.
379 seg = uvm_physseg_alloc((VM_PHYSSEG_MAX - 1) * sizeof(*seg));
380 uvm_physseg_free(seg, (VM_PHYSSEG_MAX - 1) * sizeof(struct uvm_physseg));
383 /* Retry static alloc()s as dynamic - we expect them to pass */
384 uvm.page_init_done = true;
386 seg = uvm_physseg_alloc(sizeof(struct uvm_physseg) - 1);
387 uvm_physseg_free(seg, sizeof(struct uvm_physseg) - 1);
389 seg = uvm_physseg_alloc(2 * VM_PHYSSEG_MAX * sizeof(struct uvm_physseg));
391 uvm_physseg_free(seg, 2 * VM_PHYSSEG_MAX * sizeof(struct uvm_physseg));
/* Partial free() of a boot-time multi-slot alloc leaks the remainder; the
 * next full-size alloc() then overruns the array and must abort. */
394 #if VM_PHYSSEG_MAX > 1
395 ATF_TC(uvm_physseg_atboot_free_leak);
396 ATF_TC_HEAD(uvm_physseg_atboot_free_leak, tc)
398 atf_tc_set_md_var(tc, "descr",
399 "does free() leak at boot ?\n"
400 "This test needs VM_PHYSSEG_MAX > 1)");
403 ATF_TC_BODY(uvm_physseg_atboot_free_leak, tc)
407 uvm.page_init_done = false;
409 /* alloc to array size */
410 struct uvm_physseg *seg;
411 seg = uvm_physseg_alloc(VM_PHYSSEG_MAX * sizeof(*seg));
413 uvm_physseg_free(seg, sizeof(*seg));
415 atf_tc_expect_signal(SIGABRT, "array overrun on alloc() after leak");
417 ATF_REQUIRE(uvm_physseg_alloc(sizeof(struct uvm_physseg)));
419 #endif /* VM_PHYSSEG_MAX */
420 #endif /* UVM_HOTPLUG */
423 * Note: This function replicates verbatim what happens in
424 * uvm_page.c:uvm_page_init().
426 * Please track any changes that happen there.
/* Simulates the boot-time page-array handoff: chomp the caller-supplied
 * slab once, then walk every segment allocating its pgs[] from the slab
 * and initializing it.  Finally flips uvm.page_init_done so later calls
 * take the post-boot (hotplug) paths. */
429 uvm_page_init_fake(struct vm_page *pagearray, psize_t pagecount)
/* chomp_slab is deliberately evaluated once, in the for-init clause. */
434 for (bank = uvm_physseg_get_first(),
435 uvm_physseg_seg_chomp_slab(bank, pagearray, pagecount);
436 uvm_physseg_valid_p(bank);
437 bank = uvm_physseg_get_next(bank)) {
439 n = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
440 uvm_physseg_seg_alloc_from_slab(bank, n);
441 uvm_physseg_init_seg(bank, pagearray);
443 /* set up page array pointers */
448 uvm.page_init_done = true;
/* Exercises uvm_physseg_plug() in three modes: vanilla boot-time plug,
 * post-boot "scavenge" plug that reuses the original slab, and a true
 * hot plug that must allocate a fresh slab. */
451 ATF_TC(uvm_physseg_plug);
452 ATF_TC_HEAD(uvm_physseg_plug, tc)
454 atf_tc_set_md_var(tc, "descr",
455 "Test plug functionality.");
457 /* Note: We only do the second boot time plug if VM_PHYSSEG_MAX > 1 */
458 ATF_TC_BODY(uvm_physseg_plug, tc)
460 int nentries = 0; /* Count of entries via plug done so far */
462 #if VM_PHYSSEG_MAX > 2
466 #if VM_PHYSSEG_MAX > 1
470 psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
471 psize_t npages2 = (VALID_END_PFN_2 - VALID_START_PFN_2);
472 psize_t npages3 = (VALID_END_PFN_3 - VALID_START_PFN_3);
473 psize_t npages4 = (VALID_END_PFN_4 - VALID_START_PFN_4);
/* One backing slab sized for segments 1..3; segment 4 gets its own. */
474 struct vm_page *pgs, *slab = malloc(sizeof(struct vm_page) * (npages1
475 #if VM_PHYSSEG_MAX > 2
480 /* Fake early boot */
484 /* Vanilla plug x 2 */
485 ATF_REQUIRE_EQ(uvm_physseg_plug(VALID_START_PFN_1, npages1, &upm1), true);
486 ATF_REQUIRE_EQ(++nentries, uvm_physseg_get_entries());
487 ATF_REQUIRE_EQ(0, uvmexp.npages);
489 #if VM_PHYSSEG_MAX > 2
490 ATF_REQUIRE_EQ(uvm_physseg_plug(VALID_START_PFN_2, npages2, &upm2), true);
491 ATF_REQUIRE_EQ(++nentries, uvm_physseg_get_entries());
492 ATF_REQUIRE_EQ(0, uvmexp.npages);
494 /* Post boot: Fake all segments and pages accounted for. */
495 uvm_page_init_fake(slab, npages1 + npages2 + npages3);
498 #if VM_PHYSSEG_MAX > 2
502 #if VM_PHYSSEG_MAX > 1
503 /* Scavenge plug - goes into the same slab */
504 ATF_REQUIRE_EQ(uvm_physseg_plug(VALID_START_PFN_3, npages3, &upm3), true);
505 ATF_REQUIRE_EQ(++nentries, uvm_physseg_get_entries());
506 ATF_REQUIRE_EQ(npages1
507 #if VM_PHYSSEG_MAX > 2
510 + npages3, uvmexp.npages);
512 /* Scavenge plug should fit right in the slab */
513 pgs = uvm_physseg_get_pg(upm3, 0);
514 ATF_REQUIRE(pgs > slab && pgs < (slab + npages1 + npages2 + npages3));
516 /* Hot plug - goes into a brand new slab */
517 ATF_REQUIRE_EQ(uvm_physseg_plug(VALID_START_PFN_4, npages4, &upm4), true);
518 /* The hot plug slab should have nothing to do with the original slab */
519 pgs = uvm_physseg_get_pg(upm4, 0);
520 ATF_REQUIRE(pgs < slab || pgs > (slab + npages1
521 #if VM_PHYSSEG_MAX > 2
/* Exercises uvm_physseg_unplug(): middle (fragmenting), tail-swallow,
 * back, front, and the forbidden removal of the final fragment. */
527 ATF_TC(uvm_physseg_unplug);
528 ATF_TC_HEAD(uvm_physseg_unplug, tc)
530 atf_tc_set_md_var(tc, "descr",
531 "Test unplug functionality.");
533 ATF_TC_BODY(uvm_physseg_unplug, tc)
537 psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
538 psize_t npages2 = (VALID_END_PFN_2 - VALID_START_PFN_2);
539 psize_t npages3 = (VALID_END_PFN_3 - VALID_START_PFN_3);
541 struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2 + npages3));
548 /* We start with zero segments */
549 ATF_REQUIRE_EQ(true, uvm_physseg_plug(atop(0), atop(ONE_MEGABYTE), NULL));
550 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
551 /* Do we have an arbitrary offset in there ? */
552 uvm_physseg_find(atop(TWOFIFTYSIX_KILO), &pa);
553 ATF_REQUIRE_EQ(pa, atop(TWOFIFTYSIX_KILO));
554 ATF_REQUIRE_EQ(0, uvmexp.npages); /* Boot time sanity */
556 #if VM_PHYSSEG_MAX == 1
558 * This is the curious case at boot time, of having one
559 * extent(9) static entry per segment, which means that a
560 * fragmenting unplug will fail.
562 atf_tc_expect_signal(SIGABRT, "fragmenting unplug for single segment");
565 * In order to test the fragmenting cases, please set
569 /* Now let's unplug from the middle */
570 ATF_REQUIRE_EQ(true, uvm_physseg_unplug(atop(TWOFIFTYSIX_KILO), atop(FIVEONETWO_KILO)));
571 /* verify that a gap exists at TWOFIFTYSIX_KILO */
573 uvm_physseg_find(atop(TWOFIFTYSIX_KILO), &pa);
574 ATF_REQUIRE_EQ(pa, 0);
576 /* Post boot: Fake all segments and pages accounted for. */
577 uvm_page_init_fake(slab, npages1 + npages2 + npages3);
578 /* Account for the unplug */
579 ATF_CHECK_EQ(atop(FIVEONETWO_KILO), uvmexp.npages);
581 /* Original entry should fragment into two */
582 ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
584 upm = uvm_physseg_find(atop(TWOFIFTYSIX_KILO + FIVEONETWO_KILO), NULL);
586 ATF_REQUIRE(uvm_physseg_valid_p(upm));
588 /* Now unplug the tail fragment - should swallow the complete entry */
589 ATF_REQUIRE_EQ(true, uvm_physseg_unplug(atop(TWOFIFTYSIX_KILO + FIVEONETWO_KILO), atop(TWOFIFTYSIX_KILO)));
591 /* The "swallow" above should have invalidated the handle */
592 ATF_REQUIRE_EQ(false, uvm_physseg_valid_p(upm));
594 /* Only the first one is left now */
595 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
597 /* Unplug from the back */
598 ATF_REQUIRE_EQ(true, uvm_physseg_unplug(atop(ONETWENTYEIGHT_KILO), atop(ONETWENTYEIGHT_KILO)));
599 /* Shouldn't change the number of segments */
600 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
602 /* Unplug from the front */
603 ATF_REQUIRE_EQ(true, uvm_physseg_unplug(0, atop(SIXTYFOUR_KILO)));
604 /* Shouldn't change the number of segments */
605 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
607 /* Unplugging the final fragment should fail */
608 atf_tc_expect_signal(SIGABRT, "Unplugging the last segment");
609 ATF_REQUIRE_EQ(true, uvm_physseg_unplug(atop(SIXTYFOUR_KILO), atop(SIXTYFOUR_KILO)));
613 /* <---- end Tests for Internal functions ----> */
615 /* Tests for functions exported via uvm_physseg.h */
/* Initialization leaves zero segments registered. */
616 ATF_TC(uvm_physseg_init);
617 ATF_TC_HEAD(uvm_physseg_init, tc)
619 atf_tc_set_md_var(tc, "descr", "Tests if the basic uvm_page_init() call\
620 initializes the vm_physmem struct which holds the rb_tree.");
622 ATF_TC_BODY(uvm_physseg_init, tc)
626 ATF_REQUIRE_EQ(0, uvm_physseg_get_entries());
/* Pre-boot (preload) physload: each call registers a segment, returns a
 * valid handle, and allocates no pages yet. */
629 ATF_TC(uvm_page_physload_preload);
630 ATF_TC_HEAD(uvm_page_physload_preload, tc)
632 atf_tc_set_md_var(tc, "descr", "Tests if the basic uvm_page_physload() \
633 call works without a panic() in a preload scenario.");
635 ATF_TC_BODY(uvm_page_physload_preload, tc)
641 upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
642 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
644 /* Should return a valid handle */
645 ATF_REQUIRE(uvm_physseg_valid_p(upm));
647 /* No pages should be allocated yet */
648 ATF_REQUIRE_EQ(0, uvmexp.npages);
650 /* After the first call one segment should exist */
651 ATF_CHECK_EQ(1, uvm_physseg_get_entries());
653 /* Insert more than one segment iff VM_PHYSSEG_MAX > 1 */
654 #if VM_PHYSSEG_MAX > 1
655 upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
656 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
658 /* Should return a valid handle */
659 ATF_REQUIRE(uvm_physseg_valid_p(upm));
661 ATF_REQUIRE_EQ(0, uvmexp.npages);
663 /* After the second call two segments should exist */
664 ATF_CHECK_EQ(2, uvm_physseg_get_entries());
/* uvm_page_physload() after boot (page_init_done) is a programming error:
 * the harness expects the internal panic() to raise SIGABRT; the trailing
 * REQUIREs only run if the expected signal never arrives. */
668 ATF_TC(uvm_page_physload_postboot);
669 ATF_TC_HEAD(uvm_page_physload_postboot, tc)
671 atf_tc_set_md_var(tc, "descr", "Tests if the basic uvm_page_physload() \
672 panic()s in a post boot scenario.");
674 ATF_TC_BODY(uvm_page_physload_postboot, tc)
678 psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
679 psize_t npages2 = (VALID_END_PFN_2 - VALID_START_PFN_2);
681 struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2));
685 upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
686 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
688 /* Should return a valid handle */
689 ATF_REQUIRE(uvm_physseg_valid_p(upm));
691 /* No pages should be allocated yet */
692 ATF_REQUIRE_EQ(0, uvmexp.npages);
694 /* After the first call one segment should exist */
695 ATF_CHECK_EQ(1, uvm_physseg_get_entries());
697 /* Post boot: Fake all segments and pages accounted for. */
698 uvm_page_init_fake(slab, npages1 + npages2);
700 atf_tc_expect_signal(SIGABRT,
701 "uvm_page_physload() called post boot");
703 upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
704 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
706 /* Should return a valid handle */
707 ATF_REQUIRE(uvm_physseg_valid_p(upm));
709 ATF_REQUIRE_EQ(npages1 + npages2, uvmexp.npages);
711 /* After the second call two segments should exist */
712 ATF_CHECK_EQ(2, uvm_physseg_get_entries());
/* A handle obtained before a second out-of-order insert must still refer
 * to the same segment afterwards.  Documented to fail in the static-array
 * implementation, where inserts shuffle slots and invalidate handles. */
715 ATF_TC(uvm_physseg_handle_immutable);
716 ATF_TC_HEAD(uvm_physseg_handle_immutable, tc)
718 atf_tc_set_md_var(tc, "descr", "Tests if the uvm_physseg_t handle is \
721 ATF_TC_BODY(uvm_physseg_handle_immutable, tc)
725 /* We insert the segments in out of order */
729 upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
730 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
732 ATF_REQUIRE_EQ(0, uvmexp.npages);
734 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
/* Only one segment yet, so there is no "previous" neighbour. */
736 ATF_CHECK_EQ(UVM_PHYSSEG_TYPE_INVALID_EMPTY, uvm_physseg_get_prev(upm));
738 /* Insert more than one segment iff VM_PHYSSEG_MAX > 1 */
739 #if VM_PHYSSEG_MAX > 1
740 uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
741 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
743 ATF_REQUIRE_EQ(0, uvmexp.npages);
745 ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
747 /* Fetch Previous, we inserted a lower value */
748 upm = uvm_physseg_get_prev(upm);
750 #if !defined(UVM_HOTPLUG)
752 * This test is going to fail for the Array Implementation but is
753 * expected to pass in the RB Tree implementation.
755 /* Failure can be expected iff there are more than one handles */
756 atf_tc_expect_fail("Mutable handle in static array impl.");
758 ATF_CHECK(UVM_PHYSSEG_TYPE_INVALID_EMPTY != upm);
759 ATF_CHECK_EQ(VALID_START_PFN_1, uvm_physseg_get_start(upm));
760 ATF_CHECK_EQ(VALID_END_PFN_1, uvm_physseg_get_end(upm));
/* Exercises uvm_physseg_seg_chomp_slab() via direct extent(9) alloc/free
 * on the segment's extent, both at boot and post-boot. */
764 ATF_TC(uvm_physseg_seg_chomp_slab);
765 ATF_TC_HEAD(uvm_physseg_seg_chomp_slab, tc)
767 atf_tc_set_md_var(tc, "descr", "The slab import code.()");
770 ATF_TC_BODY(uvm_physseg_seg_chomp_slab, tc)
774 struct uvm_physseg *seg;
775 struct vm_page *slab, *pgs;
776 const size_t npages = UVM_PHYSSEG_BOOT_UNPLUG_MAX; /* Number of pages */
780 /* This is boot time */
781 slab = malloc(sizeof(struct vm_page) * npages * 2);
783 seg = uvm_physseg_alloc(sizeof(struct uvm_physseg));
785 uvm_physseg_seg_chomp_slab(PHYSSEG_NODE_TO_HANDLE(seg), slab, npages * 2);
787 /* Should be able to allocate two 128 * sizeof(*slab) */
788 ATF_REQUIRE_EQ(0, extent_alloc(seg->ext, sizeof(*slab), 1, 0, EX_BOUNDZERO, (void *)&pgs));
789 err = extent_free(seg->ext, (u_long) pgs, sizeof(*slab), EX_BOUNDZERO);
791 #if VM_PHYSSEG_MAX == 1
793 * free() needs an extra region descriptor, but we only have
794 * one! The classic alloc() at free() problem
797 ATF_REQUIRE_EQ(ENOMEM, err);
799 /* Try alloc/free at static time */
800 for (i = 0; i < npages; i++) {
801 ATF_REQUIRE_EQ(0, extent_alloc(seg->ext, sizeof(*slab), 1, 0, EX_BOUNDZERO, (void *)&pgs));
802 err = extent_free(seg->ext, (u_long) pgs, sizeof(*slab), EX_BOUNDZERO);
803 ATF_REQUIRE_EQ(0, err);
807 /* Now setup post boot */
808 uvm.page_init_done = true;
/* Re-chomp the same slab post-boot: extent ops now use dynamic storage. */
810 uvm_physseg_seg_chomp_slab(PHYSSEG_NODE_TO_HANDLE(seg), slab, npages * 2);
812 /* Try alloc/free after uvm_page.c:uvm_page_init() as well */
813 for (i = 0; i < npages; i++) {
814 ATF_REQUIRE_EQ(0, extent_alloc(seg->ext, sizeof(*slab), 1, 0, EX_BOUNDZERO, (void *)&pgs));
815 err = extent_free(seg->ext, (u_long) pgs, sizeof(*slab), EX_BOUNDZERO);
816 ATF_REQUIRE_EQ(0, err);
/* uvm_physseg_seg_alloc_from_slab(): the slab holds 2*npages, so two
 * npages-sized allocations succeed and the third must fail. */
821 ATF_TC(uvm_physseg_alloc_from_slab);
822 ATF_TC_HEAD(uvm_physseg_alloc_from_slab, tc)
824 atf_tc_set_md_var(tc, "descr", "The slab alloc code.()");
827 ATF_TC_BODY(uvm_physseg_alloc_from_slab, tc)
829 struct uvm_physseg *seg;
830 struct vm_page *slab, *pgs;
831 const size_t npages = UVM_PHYSSEG_BOOT_UNPLUG_MAX; /* Number of pages */
835 /* This is boot time */
836 slab = malloc(sizeof(struct vm_page) * npages * 2);
838 seg = uvm_physseg_alloc(sizeof(struct uvm_physseg));
840 uvm_physseg_seg_chomp_slab(PHYSSEG_NODE_TO_HANDLE(seg), slab, npages * 2);
842 pgs = uvm_physseg_seg_alloc_from_slab(PHYSSEG_NODE_TO_HANDLE(seg), npages);
844 ATF_REQUIRE(pgs != NULL);
846 /* Now setup post boot */
847 uvm.page_init_done = true;
849 #if VM_PHYSSEG_MAX > 1
850 pgs = uvm_physseg_seg_alloc_from_slab(PHYSSEG_NODE_TO_HANDLE(seg), npages);
851 ATF_REQUIRE(pgs != NULL);
853 atf_tc_expect_fail("alloc beyond extent");
855 pgs = uvm_physseg_seg_alloc_from_slab(PHYSSEG_NODE_TO_HANDLE(seg), npages);
856 ATF_REQUIRE(pgs != NULL);
/* First init_seg test: drive uvm_physseg_init_seg() directly on a
 * hand-built segment and verify uvmexp.npages accounting. */
859 ATF_TC(uvm_physseg_init_seg);
860 ATF_TC_HEAD(uvm_physseg_init_seg, tc)
862 atf_tc_set_md_var(tc, "descr", "Tests if uvm_physseg_init_seg adds pages to"
865 ATF_TC_BODY(uvm_physseg_init_seg, tc)
867 struct uvm_physseg *seg;
868 struct vm_page *slab, *pgs;
869 const size_t npages = UVM_PHYSSEG_BOOT_UNPLUG_MAX; /* Number of pages */
873 /* This is boot time */
874 slab = malloc(sizeof(struct vm_page) * npages * 2);
876 seg = uvm_physseg_alloc(sizeof(struct uvm_physseg));
878 uvm_physseg_seg_chomp_slab(PHYSSEG_NODE_TO_HANDLE(seg), slab, npages * 2);
880 pgs = uvm_physseg_seg_alloc_from_slab(PHYSSEG_NODE_TO_HANDLE(seg), npages);
882 ATF_REQUIRE_EQ(0, uvmexp.npages);
/* init_seg counts pages in [avail_start, avail_end) into uvmexp.npages. */
887 seg->avail_start = 0;
888 seg->avail_end = npages;
890 uvm_physseg_init_seg(PHYSSEG_NODE_TO_HANDLE(seg), pgs);
892 ATF_REQUIRE_EQ(npages, uvmexp.npages);
/* NOTE(review): this second test case re-uses the ATF_TC name
 * uvm_physseg_init_seg — a duplicate identifier that will collide at
 * compile/registration time; it likely needs its own name (e.g.
 * uvm_physseg_init_seg_post_load).  Confirm against upstream. */
896 ATF_TC(uvm_physseg_init_seg);
897 ATF_TC_HEAD(uvm_physseg_init_seg, tc)
899 atf_tc_set_md_var(tc, "descr", "Tests if the basic uvm_page_physload() \
900 call works without a panic() after Segment is inited.");
902 ATF_TC_BODY(uvm_physseg_init_seg, tc)
905 psize_t npages = (VALID_END_PFN_1 - VALID_START_PFN_1);
906 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
909 upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
910 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
912 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
914 ATF_CHECK_EQ(0, uvmexp.npages);
917 * Boot time physplug needs explicit external init,
918 * Duplicate what uvm_page.c:uvm_page_init() does.
919 * Note: not everything uvm_page_init() does gets done here.
922 /* suck in backing slab, initialise extent. */
923 uvm_physseg_seg_chomp_slab(upm, pgs, npages);
926 * Actual pgs[] allocation, from extent.
/* NOTE(review): every other call site spells this
 * uvm_physseg_seg_alloc_from_slab(); "uvm_physseg_alloc_from_slab" looks
 * like a typo that would not link — confirm against uvm_physseg.c. */
928 uvm_physseg_alloc_from_slab(upm, npages);
930 /* Now we initialize the segment */
931 uvm_physseg_init_seg(upm, pgs);
933 /* Done with boot simulation */
935 uvm.page_init_done = true;
937 /* We have total memory of 1MB */
938 ATF_CHECK_EQ(PAGE_COUNT_1M, uvmexp.npages);
940 upm =uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
941 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
942 ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
944 /* We added another 1MB so PAGE_COUNT_1M + PAGE_COUNT_1M */
945 ATF_CHECK_EQ(PAGE_COUNT_1M + PAGE_COUNT_1M, uvmexp.npages);
/* uvm_physseg_get_start(): returns the start PFN the segment was loaded
 * with; boot-time loads allocate no pages. */
950 ATF_TC(uvm_physseg_get_start);
951 ATF_TC_HEAD(uvm_physseg_get_start, tc)
953 atf_tc_set_md_var(tc, "descr", "Tests if the start PFN is returned \
954 correctly from a segment created via uvm_page_physload().");
956 ATF_TC_BODY(uvm_physseg_get_start, tc)
960 /* Fake early boot */
963 upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
964 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
966 ATF_REQUIRE_EQ(0, uvmexp.npages);
968 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
970 ATF_CHECK_EQ(VALID_START_PFN_1, uvm_physseg_get_start(upm));
972 /* This test will be triggered only if there are 2 or more segments. */
973 #if VM_PHYSSEG_MAX > 1
974 upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
975 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
977 ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
979 ATF_REQUIRE_EQ(0, uvmexp.npages);
981 ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physseg_get_start(upm));
/* get_start() on an invalid handle must yield the (paddr_t)-1 sentinel. */
985 ATF_TC(uvm_physseg_get_start_invalid);
986 ATF_TC_HEAD(uvm_physseg_get_start_invalid, tc)
988 atf_tc_set_md_var(tc, "descr", "Tests the invalid / error conditions \
989 correctly when uvm_physseg_get_start() is called with invalid \
992 ATF_TC_BODY(uvm_physseg_get_start_invalid, tc)
994 /* Check for pgs == NULL */
996 uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
997 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
999 /* Force other check conditions */
1000 uvm.page_init_done = true;
1002 ATF_REQUIRE_EQ(0, uvmexp.npages);
1004 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
1006 ATF_REQUIRE_EQ(true, uvm.page_init_done);
1008 /* Invalid uvm_physseg_t */
1009 ATF_CHECK_EQ((paddr_t) -1,
1010 uvm_physseg_get_start(UVM_PHYSSEG_TYPE_INVALID));
/* uvm_physseg_get_end(): mirrors the get_start tests for the end PFN. */
1013 ATF_TC(uvm_physseg_get_end);
1014 ATF_TC_HEAD(uvm_physseg_get_end, tc)
1016 atf_tc_set_md_var(tc, "descr", "Tests if the end PFN is returned \
1017 correctly from a segment created via uvm_page_physload().");
1019 ATF_TC_BODY(uvm_physseg_get_end, tc)
1024 upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1025 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1027 ATF_REQUIRE_EQ(0, uvmexp.npages);
1029 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
1031 ATF_CHECK_EQ(VALID_END_PFN_1, uvm_physseg_get_end(upm));
1033 /* This test will be triggered only if there are 2 or more segments. */
1034 #if VM_PHYSSEG_MAX > 1
1035 upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
1036 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
1038 ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
1040 ATF_REQUIRE_EQ(0, uvmexp.npages);
1042 ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physseg_get_end(upm));
/* get_end() on an invalid handle must yield the (paddr_t)-1 sentinel. */
1046 ATF_TC(uvm_physseg_get_end_invalid);
1047 ATF_TC_HEAD(uvm_physseg_get_end_invalid, tc)
1049 atf_tc_set_md_var(tc, "descr", "Tests the invalid / error conditions \
1050 correctly when uvm_physseg_get_end() is called with invalid \
1051 parameter values.");
1053 ATF_TC_BODY(uvm_physseg_get_end_invalid, tc)
1055 /* Check for pgs == NULL */
1057 uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1058 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1060 /* Force other check conditions */
1061 uvm.page_init_done = true;
1063 ATF_REQUIRE_EQ(0, uvmexp.npages);
1065 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
1067 ATF_REQUIRE_EQ(true, uvm.page_init_done);
1069 /* Invalid uvm_physseg_t */
1070 ATF_CHECK_EQ((paddr_t) -1,
1071 uvm_physseg_get_end(UVM_PHYSSEG_TYPE_INVALID));
/* uvm_physseg_get_avail_start(): same shape as the get_start tests, for
 * the available-range lower bound. */
1074 ATF_TC(uvm_physseg_get_avail_start);
1075 ATF_TC_HEAD(uvm_physseg_get_avail_start, tc)
1077 atf_tc_set_md_var(tc, "descr", "Tests if the avail_start PFN is \
1078 returned correctly from a segment created via uvm_page_physload().");
1080 ATF_TC_BODY(uvm_physseg_get_avail_start, tc)
1085 upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1086 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1088 ATF_REQUIRE_EQ(0, uvmexp.npages);
1090 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
1092 ATF_CHECK_EQ(VALID_AVAIL_START_PFN_1, uvm_physseg_get_avail_start(upm));
1094 /* This test will be triggered only if there are 2 or more segments. */
1095 #if VM_PHYSSEG_MAX > 1
1096 upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
1097 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
1099 ATF_REQUIRE_EQ(0, uvmexp.npages);
1101 ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
1103 ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2, uvm_physseg_get_avail_start(upm));
/* get_avail_start() on an invalid handle yields the (paddr_t)-1 sentinel. */
1107 ATF_TC(uvm_physseg_get_avail_start_invalid);
1108 ATF_TC_HEAD(uvm_physseg_get_avail_start_invalid, tc)
1110 atf_tc_set_md_var(tc, "descr", "Tests the invalid / error conditions \
1111 correctly when uvm_physseg_get_avail_start() is called with invalid\
1112 parameter values.");
1114 ATF_TC_BODY(uvm_physseg_get_avail_start_invalid, tc)
1116 /* Check for pgs == NULL */
1118 uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1119 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1121 /* Force other check conditions */
1122 uvm.page_init_done = true;
1124 ATF_REQUIRE_EQ(0, uvmexp.npages);
1126 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
1128 ATF_REQUIRE_EQ(true, uvm.page_init_done);
1130 /* Invalid uvm_physseg_t */
1131 ATF_CHECK_EQ((paddr_t) -1,
1132 uvm_physseg_get_avail_start(UVM_PHYSSEG_TYPE_INVALID));
/* uvm_physseg_get_avail_end(): same shape, for the available-range upper
 * bound. */
1135 ATF_TC(uvm_physseg_get_avail_end);
1136 ATF_TC_HEAD(uvm_physseg_get_avail_end, tc)
1138 atf_tc_set_md_var(tc, "descr", "Tests if the avail_end PFN is \
1139 returned correctly from a segment created via uvm_page_physload().");
1141 ATF_TC_BODY(uvm_physseg_get_avail_end, tc)
1146 upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1147 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1149 ATF_REQUIRE_EQ(0, uvmexp.npages);
1151 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
1153 ATF_CHECK_EQ(VALID_AVAIL_END_PFN_1, uvm_physseg_get_avail_end(upm));
1155 /* This test will be triggered only if there are 2 or more segments. */
1156 #if VM_PHYSSEG_MAX > 1
1157 upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
1158 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
1160 ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
1162 ATF_REQUIRE_EQ(0, uvmexp.npages);
1164 ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2, uvm_physseg_get_avail_end(upm));
/* get_avail_end() on an invalid handle yields the (paddr_t)-1 sentinel. */
1168 ATF_TC(uvm_physseg_get_avail_end_invalid);
1169 ATF_TC_HEAD(uvm_physseg_get_avail_end_invalid, tc)
1171 atf_tc_set_md_var(tc, "descr", "Tests the invalid / error conditions \
1172 correctly when uvm_physseg_get_avail_end() is called with invalid\
1173 parameter values.");
1175 ATF_TC_BODY(uvm_physseg_get_avail_end_invalid, tc)
1177 /* Check for pgs == NULL */
1179 uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1180 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1182 /* Force other check conditions */
1183 uvm.page_init_done = true;
1185 ATF_REQUIRE_EQ(0, uvmexp.npages);
1187 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
1189 ATF_REQUIRE_EQ(true, uvm.page_init_done);
1191 /* Invalid uvm_physseg_t */
1192 ATF_CHECK_EQ((paddr_t) -1,
1193 uvm_physseg_get_avail_end(UVM_PHYSSEG_TYPE_INVALID));
/*
 * Test case: uvm_physseg_get_next() walks segments in ascending PFN order,
 * and advancing past the last segment yields the OVERFLOW sentinel.
 */
1196 ATF_TC(uvm_physseg_get_next);
1197 ATF_TC_HEAD(uvm_physseg_get_next, tc)
1199 atf_tc_set_md_var(tc, "descr", "Tests the pointer values for next \
1200 segment using the uvm_physseg_get_next() call.");
1202 ATF_TC_BODY(uvm_physseg_get_next, tc)
1205 #if VM_PHYSSEG_MAX > 1
1206 uvm_physseg_t upm_next;
1209 /* We insert the segments in ascending order */
1212 upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1213 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1215 ATF_REQUIRE_EQ(0, uvmexp.npages);
1217 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
/* Only one segment loaded: stepping off its end must overflow */
1219 ATF_CHECK_EQ(UVM_PHYSSEG_TYPE_INVALID_OVERFLOW,
1220 uvm_physseg_get_next(upm));
1222 /* This test will be triggered only if there are 2 or more segments. */
1223 #if VM_PHYSSEG_MAX > 1
1224 upm_next = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
1225 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
1227 ATF_REQUIRE_EQ(0, uvmexp.npages);
1229 ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
1231 upm = uvm_physseg_get_next(upm); /* Fetch Next */
/* Next of the first segment must be the second one we just loaded */
1233 ATF_CHECK_EQ(upm_next, upm);
1234 ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physseg_get_start(upm));
1235 ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physseg_get_end(upm));
1238 /* This test will be triggered only if there are 3 or more segments. */
1239 #if VM_PHYSSEG_MAX > 2
1240 upm_next = uvm_page_physload(VALID_START_PFN_3, VALID_END_PFN_3,
1241 VALID_AVAIL_START_PFN_3, VALID_AVAIL_END_PFN_3, VM_FREELIST_DEFAULT);
1243 ATF_REQUIRE_EQ(0, uvmexp.npages);
1245 ATF_REQUIRE_EQ(3, uvm_physseg_get_entries());
1247 upm = uvm_physseg_get_next(upm); /* Fetch Next */
1249 ATF_CHECK_EQ(upm_next, upm);
1250 ATF_CHECK_EQ(VALID_START_PFN_3, uvm_physseg_get_start(upm));
1251 ATF_CHECK_EQ(VALID_END_PFN_3, uvm_physseg_get_end(upm));
/*
 * Test case: uvm_physseg_get_next() on an invalid handle must hand the
 * invalid handle straight back rather than dereferencing it.
 */
1255 ATF_TC(uvm_physseg_get_next_invalid);
1256 ATF_TC_HEAD(uvm_physseg_get_next_invalid, tc)
1258 atf_tc_set_md_var(tc, "descr", "Tests the invalid / error conditions \
1259 correctly when uvm_physseg_get_next() is called with invalid \
1260 parameter values.");
1262 ATF_TC_BODY(uvm_physseg_get_next_invalid, tc)
1264 uvm_physseg_t upm = UVM_PHYSSEG_TYPE_INVALID;
1266 ATF_CHECK_EQ(UVM_PHYSSEG_TYPE_INVALID, uvm_physseg_get_next(upm));
/*
 * Test case: uvm_physseg_get_prev() walks segments backwards, and stepping
 * before the first segment yields the EMPTY sentinel.
 */
1269 ATF_TC(uvm_physseg_get_prev);
1270 ATF_TC_HEAD(uvm_physseg_get_prev, tc)
1272 atf_tc_set_md_var(tc, "descr", "Tests the pointer values for previous \
1273 segment using the uvm_physseg_get_prev() call.");
1275 ATF_TC_BODY(uvm_physseg_get_prev, tc)
1277 #if VM_PHYSSEG_MAX > 1
1280 uvm_physseg_t upm_prev;
1284 upm_prev = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1285 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1287 ATF_REQUIRE_EQ(0, uvmexp.npages);
1289 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
/* Single segment: there is nothing before it */
1291 ATF_CHECK_EQ(UVM_PHYSSEG_TYPE_INVALID_EMPTY,
1292 uvm_physseg_get_prev(upm_prev));
1294 /* This test will be triggered only if there are 2 or more segments. */
1295 #if VM_PHYSSEG_MAX > 1
1296 upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
1297 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
1299 ATF_REQUIRE_EQ(0, uvmexp.npages);
1301 ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
1303 /* Fetch Previous, we inserted a lower value */
1304 upm = uvm_physseg_get_prev(upm);
1306 ATF_CHECK_EQ(upm_prev, upm);
1307 ATF_CHECK_EQ(VALID_START_PFN_1, uvm_physseg_get_start(upm));
1308 ATF_CHECK_EQ(VALID_END_PFN_1, uvm_physseg_get_end(upm));
1311 /* This test will be triggered only if there are 3 or more segments. */
1312 #if VM_PHYSSEG_MAX > 2
/* Segment 3 sorts above the cursor, so it must not affect get_prev() */
1313 uvm_page_physload(VALID_START_PFN_3, VALID_END_PFN_3,
1314 VALID_AVAIL_START_PFN_3, VALID_AVAIL_END_PFN_3, VM_FREELIST_DEFAULT);
1316 ATF_REQUIRE_EQ(0, uvmexp.npages);
1318 ATF_REQUIRE_EQ(3, uvm_physseg_get_entries());
1321 * This will return a UVM_PHYSSEG_TYPE_INVALID_EMPTY we are at the
1324 upm = uvm_physseg_get_prev(upm);
1326 ATF_CHECK_EQ(UVM_PHYSSEG_TYPE_INVALID_EMPTY, upm);
/*
 * Test case: uvm_physseg_get_prev() on an invalid handle must return the
 * invalid handle unchanged.
 */
1330 ATF_TC(uvm_physseg_get_prev_invalid);
1331 ATF_TC_HEAD(uvm_physseg_get_prev_invalid, tc)
1333 atf_tc_set_md_var(tc, "descr", "Tests the invalid / error conditions \
1334 correctly when uvm_physseg_get_prev() is called with invalid \
1335 parameter values.");
1337 ATF_TC_BODY(uvm_physseg_get_prev_invalid, tc)
1339 uvm_physseg_t upm = UVM_PHYSSEG_TYPE_INVALID;
1341 ATF_CHECK_EQ(UVM_PHYSSEG_TYPE_INVALID, uvm_physseg_get_prev(upm));
/*
 * Test case: uvm_physseg_get_first() always returns the segment with the
 * lowest PFN range, tracking insertions made both below and above it.
 */
1344 ATF_TC(uvm_physseg_get_first);
1345 ATF_TC_HEAD(uvm_physseg_get_first, tc)
1347 atf_tc_set_md_var(tc, "descr", "Tests the pointer values for first \
1348 segment (lowest node) using the uvm_physseg_get_first() call.");
1350 ATF_TC_BODY(uvm_physseg_get_first, tc)
1352 uvm_physseg_t upm = UVM_PHYSSEG_TYPE_INVALID_EMPTY;
1353 uvm_physseg_t upm_first;
1355 /* Fake early boot */
1358 /* No nodes exist */
1359 ATF_CHECK_EQ(upm, uvm_physseg_get_first());
/* Deliberately load segment 2 first so segment 1 can later displace it */
1361 upm_first = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
1362 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
1364 ATF_REQUIRE_EQ(0, uvmexp.npages);
1366 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
1368 /* Pointer to first should be the least valued node */
1369 upm = uvm_physseg_get_first();
1370 ATF_CHECK_EQ(upm_first, upm);
1371 ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physseg_get_start(upm));
1372 ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physseg_get_end(upm));
1373 ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2, uvm_physseg_get_avail_start(upm));
1374 ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2, uvm_physseg_get_avail_end(upm));
1376 /* This test will be triggered only if there are 2 or more segments. */
1377 #if VM_PHYSSEG_MAX > 1
1378 /* Insert a node of lesser value */
1379 upm_first = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1380 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1382 ATF_CHECK_EQ(0, uvmexp.npages);
1384 ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
1386 /* Pointer to first should be the least valued node */
1387 upm = uvm_physseg_get_first();
1388 ATF_CHECK_EQ(upm_first, upm);
1389 ATF_CHECK_EQ(VALID_START_PFN_1, uvm_physseg_get_start(upm));
1390 ATF_CHECK_EQ(VALID_END_PFN_1, uvm_physseg_get_end(upm));
1391 ATF_CHECK_EQ(VALID_AVAIL_START_PFN_1, uvm_physseg_get_avail_start(upm));
1392 ATF_CHECK_EQ(VALID_AVAIL_END_PFN_1, uvm_physseg_get_avail_end(upm));
1395 /* This test will be triggered only if there are 3 or more segments. */
1396 #if VM_PHYSSEG_MAX > 2
1397 /* Insert a node of higher value */
1398 upm_first =uvm_page_physload(VALID_START_PFN_3, VALID_END_PFN_3,
1399 VALID_AVAIL_START_PFN_3, VALID_AVAIL_END_PFN_3, VM_FREELIST_DEFAULT);
1401 ATF_CHECK_EQ(0, uvmexp.npages);
1403 ATF_REQUIRE_EQ(3, uvm_physseg_get_entries());
1405 /* Pointer to first should be the least valued node */
1406 upm = uvm_physseg_get_first();
/* A higher-valued insertion must NOT become the first node */
1407 ATF_CHECK(upm_first != upm);
1408 ATF_CHECK_EQ(VALID_START_PFN_1, uvm_physseg_get_start(upm));
1409 ATF_CHECK_EQ(VALID_END_PFN_1, uvm_physseg_get_end(upm));
1410 ATF_CHECK_EQ(VALID_AVAIL_START_PFN_1, uvm_physseg_get_avail_start(upm));
1411 ATF_CHECK_EQ(VALID_AVAIL_END_PFN_1, uvm_physseg_get_avail_end(upm));
/*
 * Test case: uvm_physseg_get_last() always returns the segment with the
 * highest PFN range, tracking each higher-valued insertion.
 */
1415 ATF_TC(uvm_physseg_get_last);
1416 ATF_TC_HEAD(uvm_physseg_get_last, tc)
1418 atf_tc_set_md_var(tc, "descr", "Tests the pointer values for last \
1419 segment using the uvm_physseg_get_last() call.");
1421 ATF_TC_BODY(uvm_physseg_get_last, tc)
1423 uvm_physseg_t upm = UVM_PHYSSEG_TYPE_INVALID_EMPTY;
1424 uvm_physseg_t upm_last;
1428 /* No nodes exist */
1429 ATF_CHECK_EQ(upm, uvm_physseg_get_last());
1431 upm_last = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1432 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1434 ATF_REQUIRE_EQ(0, uvmexp.npages);
1436 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
1438 /* Pointer to last should be the most valued node */
1439 upm = uvm_physseg_get_last();
1440 ATF_CHECK_EQ(upm_last, upm);
1441 ATF_CHECK_EQ(VALID_START_PFN_1, uvm_physseg_get_start(upm));
1442 ATF_CHECK_EQ(VALID_END_PFN_1, uvm_physseg_get_end(upm));
1443 ATF_CHECK_EQ(VALID_AVAIL_START_PFN_1, uvm_physseg_get_avail_start(upm));
1444 ATF_CHECK_EQ(VALID_AVAIL_END_PFN_1, uvm_physseg_get_avail_end(upm));
1446 /* This test will be triggered only if there are 2 or more segments. */
1447 #if VM_PHYSSEG_MAX > 1
1448 /* Insert node of greater value */
1449 upm_last = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
1450 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
1452 ATF_REQUIRE_EQ(0, uvmexp.npages);
1454 ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
1456 /* Pointer to last should be the most valued node */
1457 upm = uvm_physseg_get_last();
1458 ATF_CHECK_EQ(upm_last, upm);
1459 ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physseg_get_start(upm));
1460 ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physseg_get_end(upm));
1461 ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2, uvm_physseg_get_avail_start(upm));
1462 ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2, uvm_physseg_get_avail_end(upm));
1465 /* This test will be triggered only if there are 3 or more segments. */
1466 #if VM_PHYSSEG_MAX > 2
1467 /* Insert node of greater value */
1468 upm_last = uvm_page_physload(VALID_START_PFN_3, VALID_END_PFN_3,
1469 VALID_AVAIL_START_PFN_3, VALID_AVAIL_END_PFN_3, VM_FREELIST_DEFAULT);
1471 ATF_REQUIRE_EQ(0, uvmexp.npages);
1473 ATF_REQUIRE_EQ(3, uvm_physseg_get_entries());
1475 /* Pointer to last should be the most valued node */
1476 upm = uvm_physseg_get_last();
1477 ATF_CHECK_EQ(upm_last, upm);
1478 ATF_CHECK_EQ(VALID_START_PFN_3, uvm_physseg_get_start(upm));
1479 ATF_CHECK_EQ(VALID_END_PFN_3, uvm_physseg_get_end(upm));
1480 ATF_CHECK_EQ(VALID_AVAIL_START_PFN_3, uvm_physseg_get_avail_start(upm));
1481 ATF_CHECK_EQ(VALID_AVAIL_END_PFN_3, uvm_physseg_get_avail_end(upm));
/*
 * Test case: a segment becomes valid (uvm_physseg_valid_p() == true) once
 * uvm_physseg_init_seg() has attached its vm_page array.
 */
1485 ATF_TC(uvm_physseg_valid);
1486 ATF_TC_HEAD(uvm_physseg_valid, tc)
1488 atf_tc_set_md_var(tc, "descr", "Tests the pointer value for current \
1489 segment is valid using the uvm_physseg_valid_p() call.");
1491 ATF_TC_BODY(uvm_physseg_valid, tc)
1493 psize_t npages = (VALID_END_PFN_1 - VALID_START_PFN_1);
/* Backing vm_page array for init_seg; freed implicitly at test exit */
1495 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
1500 upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1501 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1503 ATF_REQUIRE_EQ(0, uvmexp.npages);
1505 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
1507 uvm_physseg_init_seg(upm, pgs);
/* init_seg accounts the pages: segment 1 spans PAGE_COUNT_1M pages */
1509 ATF_REQUIRE_EQ(PAGE_COUNT_1M, uvmexp.npages);
1511 ATF_CHECK_EQ(true, uvm_physseg_valid_p(upm));
/*
 * Test case: uvm_physseg_valid_p() must report false both for an invalid
 * handle and for a loaded-but-uninitialized segment once page init is done.
 */
1514 ATF_TC(uvm_physseg_valid_invalid);
1515 ATF_TC_HEAD(uvm_physseg_valid_invalid, tc)
1517 atf_tc_set_md_var(tc, "descr", "Tests the pointer value for current \
1518 segment is invalid using the uvm_physseg_valid_p() call.");
1520 ATF_TC_BODY(uvm_physseg_valid_invalid, tc)
1525 upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1526 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1528 /* Force other check conditions */
1529 uvm.page_init_done = true;
1531 ATF_REQUIRE_EQ(true, uvm.page_init_done);
1533 /* Invalid uvm_physseg_t */
1534 ATF_CHECK_EQ(false, uvm_physseg_valid_p(UVM_PHYSSEG_TYPE_INVALID));
1537 * Without any pages initialized for segment, it is considered
1540 ATF_CHECK_EQ(false, uvm_physseg_valid_p(upm));
/*
 * Test case: uvm_physseg_get_highest_frame() returns the highest available
 * PFN across all segments (avail_end is exclusive, hence the "- 1"),
 * regardless of the order segments were loaded in.
 */
1543 ATF_TC(uvm_physseg_get_highest);
1544 ATF_TC_HEAD(uvm_physseg_get_highest, tc)
1546 atf_tc_set_md_var(tc, "descr", "Tests if the returned PFN matches \
1547 the highest PFN in use by the system.");
1549 ATF_TC_BODY(uvm_physseg_get_highest, tc)
1552 uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1553 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1555 /* Only one segment so highest is the current */
1556 ATF_CHECK_EQ(VALID_AVAIL_END_PFN_1 - 1, uvm_physseg_get_highest_frame());
1558 /* This test will be triggered only if there are 2 or more segments. */
1559 #if VM_PHYSSEG_MAX > 1
/* Load segment 3 (the highest range) before segment 2 on purpose */
1560 uvm_page_physload(VALID_START_PFN_3, VALID_END_PFN_3,
1561 VALID_AVAIL_START_PFN_3, VALID_AVAIL_END_PFN_3, VM_FREELIST_DEFAULT);
1564 ATF_CHECK_EQ(VALID_AVAIL_END_PFN_3 - 1, uvm_physseg_get_highest_frame());
1567 /* This test will be triggered only if there are 3 or more segments. */
1568 #if VM_PHYSSEG_MAX > 2
/* Adding the middle segment must not change the highest frame */
1569 uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
1570 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
1573 ATF_CHECK_EQ(VALID_AVAIL_END_PFN_3 - 1, uvm_physseg_get_highest_frame());
/*
 * Test case: uvm_physseg_get_free_list() must echo the free-list identifier
 * each segment was physloaded with (DEFAULT / FIRST16 / FIRST1G).
 */
1577 ATF_TC(uvm_physseg_get_free_list);
1578 ATF_TC_HEAD(uvm_physseg_get_free_list, tc)
1580 atf_tc_set_md_var(tc, "descr", "Tests if the returned Free List type \
1581 of a segment matches the one returned from \
1582 uvm_physseg_get_free_list() call.");
1584 ATF_TC_BODY(uvm_physseg_get_free_list, tc)
1588 /* Fake early boot */
1591 /* Insertions are made in ascending order */
1592 upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1593 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1595 ATF_CHECK_EQ(VM_FREELIST_DEFAULT, uvm_physseg_get_free_list(upm));
1597 /* This test will be triggered only if there are 2 or more segments. */
1598 #if VM_PHYSSEG_MAX > 1
1599 upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
1600 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_FIRST16);
1602 ATF_CHECK_EQ(VM_FREELIST_FIRST16, uvm_physseg_get_free_list(upm));
1605 /* This test will be triggered only if there are 3 or more segments. */
1606 #if VM_PHYSSEG_MAX > 2
1607 upm = uvm_page_physload(VALID_START_PFN_3, VALID_END_PFN_3,
1608 VALID_AVAIL_START_PFN_3, VALID_AVAIL_END_PFN_3, VM_FREELIST_FIRST1G);
1610 ATF_CHECK_EQ(VM_FREELIST_FIRST1G, uvm_physseg_get_free_list(upm));
/*
 * Test case: a freshly physloaded segment has a zero start_hint (no hint is
 * set during segment creation).
 */
1614 ATF_TC(uvm_physseg_get_start_hint);
1615 ATF_TC_HEAD(uvm_physseg_get_start_hint, tc)
1617 atf_tc_set_md_var(tc, "descr", "Tests if the returned start_hint value \
1618 of a segment matches the one returned from \
1619 uvm_physseg_get_start_hint() call.");
1621 ATF_TC_BODY(uvm_physseg_get_start_hint, tc)
1626 upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1627 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1629 /* Will be Zero since no specific value is set during init */
1630 ATF_CHECK_EQ(0, uvm_physseg_get_start_hint(upm));
/*
 * Test case: uvm_physseg_set_start_hint() on a valid, initialized segment
 * succeeds and the value round-trips via uvm_physseg_get_start_hint().
 */
1633 ATF_TC(uvm_physseg_set_start_hint);
1634 ATF_TC_HEAD(uvm_physseg_set_start_hint, tc)
1636 atf_tc_set_md_var(tc, "descr", "Tests if the returned start_hint value \
1637 of a segment matches the one set by the \
1638 uvm_physseg_set_start_hint() call.");
1640 ATF_TC_BODY(uvm_physseg_set_start_hint, tc)
1642 psize_t npages = (VALID_END_PFN_1 - VALID_START_PFN_1);
1644 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
1649 upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1650 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1652 uvm_physseg_init_seg(upm, pgs);
/* atop(128) converts a byte count to a page count for the hint */
1654 ATF_CHECK_EQ(true, uvm_physseg_set_start_hint(upm, atop(128)));
1656 /* Will be atop(128) since no specific value is set above */
1657 ATF_CHECK_EQ(atop(128), uvm_physseg_get_start_hint(upm));
/*
 * Test case: once page_init_done is set, uvm_physseg_set_start_hint() must
 * fail on an uninitialized segment, and the subsequent get on the stale
 * handle is expected to abort with SIGABRT.
 */
1660 ATF_TC(uvm_physseg_set_start_hint_invalid);
1661 ATF_TC_HEAD(uvm_physseg_set_start_hint_invalid, tc)
1663 atf_tc_set_md_var(tc, "descr", "Tests if the returned value is false \
1664 when an invalid segment matches the one trying to set by the \
1665 uvm_physseg_set_start_hint() call.");
1667 ATF_TC_BODY(uvm_physseg_set_start_hint_invalid, tc)
1672 upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1673 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1675 /* Force other check conditions */
1676 uvm.page_init_done = true;
1678 ATF_REQUIRE_EQ(true, uvm.page_init_done);
1680 ATF_CHECK_EQ(false, uvm_physseg_set_start_hint(upm, atop(128)));
1683 * Will be Zero since no specific value is set after the init
/* The access below is expected to trip a KASSERT and raise SIGABRT */
1686 atf_tc_expect_signal(SIGABRT, "invalid uvm_physseg_t handle");
1688 ATF_CHECK_EQ(0, uvm_physseg_get_start_hint(upm));
/*
 * Test case: uvm_physseg_get_pg() returns the vm_page at a given index in
 * the segment; phys_addr of page n is n * PAGE_SIZE for this segment
 * (segment 1 starts at physical address 0 — see the PFN constants earlier
 * in this file).
 */
1691 ATF_TC(uvm_physseg_get_pg);
1692 ATF_TC_HEAD(uvm_physseg_get_pg, tc)
1694 atf_tc_set_md_var(tc, "descr", "Tests if the returned vm_page struct \
1695 is correct when fetched by uvm_physseg_get_pg() call.");
1697 ATF_TC_BODY(uvm_physseg_get_pg, tc)
1699 psize_t npages = (VALID_END_PFN_1 - VALID_START_PFN_1);
1701 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
1703 struct vm_page *extracted_pg = NULL;
1708 upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1709 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1711 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
1713 ATF_REQUIRE_EQ(0, uvmexp.npages);
1715 /* Now we initialize the segment */
1716 uvm_physseg_init_seg(upm, pgs);
1718 ATF_REQUIRE_EQ(PAGE_COUNT_1M, uvmexp.npages);
1720 ATF_REQUIRE_EQ(NULL, extracted_pg);
1722 /* Try fetching the 5th Page in the Segment */
1723 extracted_pg = uvm_physseg_get_pg(upm, 5);
1725 /* Values of phys_addr is n * PAGE_SIZE where n is the page number */
1726 ATF_CHECK_EQ(5 * PAGE_SIZE, extracted_pg->phys_addr);
1728 /* Try fetching the 113th Page in the Segment */
1729 extracted_pg = uvm_physseg_get_pg(upm, 113);
1731 ATF_CHECK_EQ(113 * PAGE_SIZE, extracted_pg->phys_addr);
/*
 * Test case (only when __HAVE_PMAP_PHYSSEG is defined; see the dummy
 * struct pmap_physseg near the top of this file): uvm_physseg_get_pmseg()
 * must return a stable, writable per-segment pmap_physseg — a value
 * written through one fetch must be visible through a later fetch.
 */
1734 #ifdef __HAVE_PMAP_PHYSSEG
1735 ATF_TC(uvm_physseg_get_pmseg);
1736 ATF_TC_HEAD(uvm_physseg_get_pmseg, tc)
1738 atf_tc_set_md_var(tc, "descr", "Tests if the returned pmap_physseg \
1739 struct is correct when fetched by uvm_physseg_get_pmseg() call.");
1741 ATF_TC_BODY(uvm_physseg_get_pmseg, tc)
1743 psize_t npages = (VALID_END_PFN_1 - VALID_START_PFN_1);
1745 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
1747 struct pmap_physseg pmseg = { true };
1749 struct pmap_physseg *extracted_pmseg = NULL;
1754 upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1755 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1757 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
1759 ATF_REQUIRE_EQ(0, uvmexp.npages);
1761 /* Now we initialize the segment */
1762 uvm_physseg_init_seg(upm, pgs);
1764 ATF_REQUIRE_EQ(PAGE_COUNT_1M, uvmexp.npages);
1766 ATF_REQUIRE_EQ(NULL, extracted_pmseg);
1768 ATF_REQUIRE_EQ(true, pmseg.dummy_variable);
1770 /* Extract the current pmseg */
1771 extracted_pmseg = uvm_physseg_get_pmseg(upm);
1774 * We can only check if it is not NULL
1775 * We do not know the value it contains
1777 ATF_CHECK(NULL != extracted_pmseg);
/* Write true into the segment's pmseg, then flip the local copy ... */
1779 extracted_pmseg->dummy_variable = pmseg.dummy_variable;
1781 /* Invert value to ensure test integrity */
1782 pmseg.dummy_variable = false;
1784 ATF_REQUIRE_EQ(false, pmseg.dummy_variable);
/* ... and re-fetch: the stored value must persist independently */
1786 extracted_pmseg = uvm_physseg_get_pmseg(upm);
1788 ATF_CHECK(NULL != extracted_pmseg);
1790 ATF_CHECK_EQ(true, extracted_pmseg->dummy_variable);
/*
 * Test case: uvm_physseg_find() maps a PFN to its containing segment and,
 * when the offset out-parameter is non-NULL, also reports the page offset
 * of that PFN from the segment's start.
 */
1794 ATF_TC(vm_physseg_find);
1795 ATF_TC_HEAD(vm_physseg_find, tc)
1797 atf_tc_set_md_var(tc, "descr", "Tests if the returned segment number \
1798 is correct when an PFN is passed into uvm_physseg_find() call. \
1799 In addition to this the offset of the PFN from the start of \
1800 segment is also set if the parameter is passed in as not NULL.");
1802 ATF_TC_BODY(vm_physseg_find, tc)
1804 psize_t offset = (psize_t) -1;
1806 uvm_physseg_t upm_first, result;
1807 #if VM_PHYSSEG_MAX > 1
1808 uvm_physseg_t upm_second;
1813 upm_first = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1814 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1816 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
1818 ATF_REQUIRE_EQ(0, uvmexp.npages);
1820 /* This test will be triggered only if there are 2 or more segments. */
1821 #if VM_PHYSSEG_MAX > 1
1822 upm_second = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
1823 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
1825 ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
1827 ATF_REQUIRE_EQ(0, uvmexp.npages);
1830 /* Under ONE_MEGABYTE is segment upm_first */
1831 result = uvm_physseg_find(atop(ONE_MEGABYTE - 1024), NULL);
1832 ATF_CHECK_EQ(upm_first, result);
1833 ATF_CHECK_EQ(uvm_physseg_get_start(upm_first),
1834 uvm_physseg_get_start(result));
1835 ATF_CHECK_EQ(uvm_physseg_get_end(upm_first),
1836 uvm_physseg_get_end(result));
1837 ATF_CHECK_EQ(uvm_physseg_get_avail_start(upm_first),
1838 uvm_physseg_get_avail_start(result));
1839 ATF_CHECK_EQ(uvm_physseg_get_avail_end(upm_first),
1840 uvm_physseg_get_avail_end(result));
/* offset was passed as NULL above, so the local must remain untouched */
1842 ATF_REQUIRE_EQ((psize_t) -1, offset);
1844 /* This test will be triggered only if there are 2 or more segments. */
1845 #if VM_PHYSSEG_MAX > 1
1846 /* Over ONE_MEGABYTE is segment upm_second */
1847 result = uvm_physseg_find(atop(ONE_MEGABYTE + 8192), &offset);
1848 ATF_CHECK_EQ(upm_second, result);
1849 ATF_CHECK_EQ(uvm_physseg_get_start(upm_second),
1850 uvm_physseg_get_start(result));
1851 ATF_CHECK_EQ(uvm_physseg_get_end(upm_second),
1852 uvm_physseg_get_end(result));
1853 ATF_CHECK_EQ(uvm_physseg_get_avail_start(upm_second),
1854 uvm_physseg_get_avail_start(result));
1855 ATF_CHECK_EQ(uvm_physseg_get_avail_end(upm_second),
1856 uvm_physseg_get_avail_end(result));
1858 /* Offset is calculated based on PAGE_SIZE */
1859 /* atop(ONE_MEGABYTE + (2 * PAGE_SIZE)) - VALID_START_PFN_2 = 2 */
1860 ATF_CHECK_EQ(2, offset);
1862 /* Under ONE_MEGABYTE is segment upm_first */
1863 result = uvm_physseg_find(atop(ONE_MEGABYTE - 12288), &offset);
1864 ATF_CHECK_EQ(upm_first, result);
1865 ATF_CHECK_EQ(uvm_physseg_get_start(upm_first),
1866 uvm_physseg_get_start(result));
1867 ATF_CHECK_EQ(uvm_physseg_get_end(upm_first),
1868 uvm_physseg_get_end(result));
1869 ATF_CHECK_EQ(uvm_physseg_get_avail_start(upm_first),
1870 uvm_physseg_get_avail_start(result));
1871 ATF_CHECK_EQ(uvm_physseg_get_avail_end(upm_first),
1872 uvm_physseg_get_avail_end(result));
1874 /* Offset is calculated based on PAGE_SIZE */
1875 /* atop(ONE_MEGABYTE - (3 * PAGE_SIZE)) - VALID_START_PFN_1 = 253 */
1876 ATF_CHECK_EQ(253, offset);
/*
 * Test case: uvm_physseg_find() on a PFN outside every loaded segment must
 * return UVM_PHYSSEG_TYPE_INVALID and must not modify the offset
 * out-parameter.
 */
1880 ATF_TC(vm_physseg_find_invalid);
1881 ATF_TC_HEAD(vm_physseg_find_invalid, tc)
1883 atf_tc_set_md_var(tc, "descr", "Tests if the returned segment number \
1884 is (paddr_t) -1 when a non existant PFN is passed into \
1885 uvm_physseg_find() call.");
1887 ATF_TC_BODY(vm_physseg_find_invalid, tc)
1889 psize_t offset = (psize_t) -1;
1892 uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
1893 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
1895 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
1897 ATF_REQUIRE_EQ(0, uvmexp.npages);
1899 /* No segments over 3 MB exists at the moment */
1900 ATF_CHECK_EQ(UVM_PHYSSEG_TYPE_INVALID,
1901 uvm_physseg_find(atop(ONE_MEGABYTE * 3), NULL));
1903 ATF_REQUIRE_EQ((psize_t) -1, offset);
1905 /* No segments over 3 MB exists at the moment */
1906 ATF_CHECK_EQ(UVM_PHYSSEG_TYPE_INVALID,
1907 uvm_physseg_find(atop(ONE_MEGABYTE * 3), &offset));
/* Even with a non-NULL offset pointer, a failed find leaves it alone */
1909 ATF_CHECK_EQ((psize_t) -1, offset);
/*
 * Test case: when start == avail_start, uvm_page_physunload() takes the
 * page at the front of the segment, advancing both start and avail_start.
 */
1912 ATF_TC(uvm_page_physunload_start);
1913 ATF_TC_HEAD(uvm_page_physunload_start, tc)
1915 atf_tc_set_md_var(tc, "descr", "Tests if the basic uvm_page_physunload()\
1916 call works without a panic(). Unloads from Start of the segment.");
1918 ATF_TC_BODY(uvm_page_physunload_start, tc)
1921 * Would uvmexp.npages reduce every time an uvm_page_physunload is called?
1923 psize_t npages = (VALID_END_PFN_2 - VALID_START_PFN_2);
1925 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
1932 upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
1933 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
1935 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
1937 ATF_REQUIRE_EQ(0, uvmexp.npages);
1939 uvm_physseg_init_seg(upm, pgs);
/* p receives the physical address of the unloaded page */
1941 ATF_CHECK_EQ(true, uvm_page_physunload(upm, VM_FREELIST_DEFAULT, &p));
1944 * When called for first time, uvm_page_physunload() removes the first PFN
1946 * New avail start will be VALID_AVAIL_START_PFN_2 + 1
1948 ATF_CHECK_EQ(VALID_START_PFN_2, atop(p));
1950 ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2 + 1,
1951 uvm_physseg_get_avail_start(upm));
1953 ATF_CHECK_EQ(VALID_START_PFN_2 + 1, uvm_physseg_get_start(upm));
1955 /* Rest of the stuff should remain the same */
1956 ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physseg_get_end(upm));
1957 ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2, uvm_physseg_get_avail_end(upm));
/*
 * Test case: when start != avail_start (front pinned), uvm_page_physunload()
 * must take the page from the END of the segment instead, shrinking end
 * and avail_end.
 */
1960 ATF_TC(uvm_page_physunload_end);
1961 ATF_TC_HEAD(uvm_page_physunload_end, tc)
1963 atf_tc_set_md_var(tc, "descr", "Tests if the basic uvm_page_physunload()\
1964 call works without a panic(). Unloads from End of the segment.");
1966 ATF_TC_BODY(uvm_page_physunload_end, tc)
1969 * Would uvmexp.npages reduce every time an uvm_page_physunload is called?
1976 /* Note: start != avail_start to remove from end. */
1977 upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
1978 VALID_AVAIL_START_PFN_2 + 1, VALID_AVAIL_END_PFN_2,
1979 VM_FREELIST_DEFAULT);
1983 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
1985 ATF_REQUIRE_EQ(0, uvmexp.npages);
/* Precondition for the unload-from-end path */
1988 uvm_physseg_get_avail_start(upm) != uvm_physseg_get_start(upm));
1990 ATF_CHECK_EQ(true, uvm_page_physunload(upm, VM_FREELIST_DEFAULT, &p));
1993 * Remember if X is the upper limit the actual valid pointer is X - 1
1995 * For example if 256 is the upper limit for 1MB memory, last valid
1996 * pointer is 256 - 1 = 255
1999 ATF_CHECK_EQ(VALID_END_PFN_2 - 1, atop(p));
2002 * When called for second time, uvm_page_physunload() removes the last PFN
2004 * New avail end will be VALID_AVAIL_END_PFN_2 - 1
2005 * New end will be VALID_END_PFN_2 - 1
2008 ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2 - 1, uvm_physseg_get_avail_end(upm));
2010 ATF_CHECK_EQ(VALID_END_PFN_2 - 1, uvm_physseg_get_end(upm));
2012 /* Rest of the stuff should remain the same */
2013 ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2 + 1,
2014 uvm_physseg_get_avail_start(upm));
2015 ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physseg_get_start(upm));
/*
 * Test case: when BOTH ends of the segment are pinned (start != avail_start
 * and end != avail_end), uvm_page_physunload() must refuse to unload and
 * return false, leaving the segment boundaries untouched.
 */
2018 ATF_TC(uvm_page_physunload_none);
2019 ATF_TC_HEAD(uvm_page_physunload_none, tc)
2021 atf_tc_set_md_var(tc, "descr", "Tests if the basic uvm_page_physunload()\
2022 call works without a panic(). Does not unload from start or end \
2023 because of non-aligned start / avail_start and end / avail_end \
2026 ATF_TC_BODY(uvm_page_physunload_none, tc)
2028 psize_t npages = (VALID_END_PFN_2 - VALID_START_PFN_2);
2030 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
2038 * Note: start != avail_start and end != avail_end.
2040 * This prevents any unload from occurring.
2042 upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
2043 VALID_AVAIL_START_PFN_2 + 1, VALID_AVAIL_END_PFN_2 - 1,
2044 VM_FREELIST_DEFAULT);
2048 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
2050 ATF_REQUIRE_EQ(0, uvmexp.npages);
2053 uvm_physseg_get_avail_start(upm) != uvm_physseg_get_start(upm));
2055 uvm_physseg_init_seg(upm, pgs);
2057 ATF_CHECK_EQ(false, uvm_page_physunload(upm, VM_FREELIST_DEFAULT, &p));
2059 /* uvm_page_physunload() will no longer unload memory */
2062 /* Rest of the stuff should remain the same */
2063 ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2 + 1,
2064 uvm_physseg_get_avail_start(upm));
2065 ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2 - 1,
2066 uvm_physseg_get_avail_end(upm));
2067 ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physseg_get_start(upm));
2068 ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physseg_get_end(upm));
/*
 * Test case: unloading the only page of a one-page segment (built at the
 * very end of range 1) deletes the segment entirely; with VM_PHYSSEG_MAX
 * == 1 the kernel instead panics, which the test expects as SIGABRT.
 */
2071 ATF_TC(uvm_page_physunload_delete_start);
2072 ATF_TC_HEAD(uvm_page_physunload_delete_start, tc)
2074 atf_tc_set_md_var(tc, "descr", "Tests if the uvm_page_physunload() \
2075 works when the segment gets small enough to be deleted scenario. \
2076 NOTE: This one works deletes from start.");
2078 ATF_TC_BODY(uvm_page_physunload_delete_start, tc)
2081 * Would uvmexp.npages reduce every time an uvm_page_physunload is called?
2090 * Setup the Nuke from Starting point
/* One-page segment: [END_PFN_1 - 1, END_PFN_1) */
2093 upm = uvm_page_physload(VALID_END_PFN_1 - 1, VALID_END_PFN_1,
2094 VALID_AVAIL_END_PFN_1 - 1, VALID_AVAIL_END_PFN_1,
2095 VM_FREELIST_DEFAULT);
2097 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
2099 ATF_REQUIRE_EQ(0, uvmexp.npages);
2101 /* Insert more than one segment iff VM_PHYSSEG_MAX > 1 */
2102 #if VM_PHYSSEG_MAX > 1
2103 uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
2104 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
2106 ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
2109 #if VM_PHYSSEG_MAX == 1
2110 atf_tc_expect_signal(SIGABRT,
2111 "cannot uvm_page_physunload() the last segment");
2114 ATF_CHECK_EQ(true, uvm_page_physunload(upm, VM_FREELIST_DEFAULT, &p));
2116 ATF_CHECK_EQ(VALID_END_PFN_1 - 1, atop(p));
/* The emptied segment must have been removed from the tracking structure */
2118 ATF_CHECK_EQ(1, uvm_physseg_get_entries());
2120 /* The only node now is the one we inserted second. */
2121 upm = uvm_physseg_get_first();
2123 ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physseg_get_start(upm));
2124 ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physseg_get_end(upm));
2125 ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2, uvm_physseg_get_avail_start(upm));
2126 ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2, uvm_physseg_get_avail_end(upm));
/*
 * Test case: repeatedly unloading from the END of a tiny segment (front
 * pinned via avail_start > start) shrinks it until it is deleted; with
 * VM_PHYSSEG_MAX == 1 the final unload panics (expected SIGABRT).
 */
2129 ATF_TC(uvm_page_physunload_delete_end);
2130 ATF_TC_HEAD(uvm_page_physunload_delete_end, tc)
2132 atf_tc_set_md_var(tc, "descr", "Tests if the uvm_page_physunload() \
2133 works when the segment gets small enough to be deleted scenario. \
2134 NOTE: This one works deletes from end.");
2136 ATF_TC_BODY(uvm_page_physunload_delete_end, tc)
2139 * Would uvmexp.npages reduce every time an uvm_page_physunload is called?
2149 * Setup the Nuke from Ending point
/* Two avail pages at the end of a 3-page segment; front page is pinned */
2152 upm = uvm_page_physload(VALID_START_PFN_1, VALID_START_PFN_1 + 2,
2153 VALID_AVAIL_START_PFN_1 + 1, VALID_AVAIL_START_PFN_1 + 2,
2154 VM_FREELIST_DEFAULT);
2156 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
2158 ATF_REQUIRE_EQ(0, uvmexp.npages);
2160 /* Insert more than one segment iff VM_PHYSSEG_MAX > 1 */
2161 #if VM_PHYSSEG_MAX > 1
2162 uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
2163 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
2165 ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
2168 #if VM_PHYSSEG_MAX == 1
2169 atf_tc_expect_signal(SIGABRT,
2170 "cannot uvm_page_physunload() the last segment");
/* Two unloads from the end empty the available range and delete the node */
2173 ATF_CHECK_EQ(true, uvm_page_physunload(upm, VM_FREELIST_DEFAULT, &p));
2177 ATF_CHECK_EQ(true, uvm_page_physunload(upm, VM_FREELIST_DEFAULT, &p));
2179 ATF_CHECK_EQ(VALID_START_PFN_1 + 2, atop(p));
2181 ATF_CHECK_EQ(1, uvm_physseg_get_entries());
2183 /* The only node now is the one we inserted second. */
2184 upm = uvm_physseg_get_first();
2186 ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physseg_get_start(upm));
2187 ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physseg_get_end(upm));
2188 ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2, uvm_physseg_get_avail_start(upm));
2189 ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2, uvm_physseg_get_avail_end(upm));
/*
 * Test case: uvm_page_physunload() must return false when asked to unload
 * from a free list the segment does not belong to (FIRST4G vs. DEFAULT).
 */
2192 ATF_TC(uvm_page_physunload_invalid);
2193 ATF_TC_HEAD(uvm_page_physunload_invalid, tc)
2195 atf_tc_set_md_var(tc, "descr", "Tests if the uvm_page_physunload() \
2196 fails when then Free list does not match.");
2198 ATF_TC_BODY(uvm_page_physunload_invalid, tc)
2200 psize_t npages = (VALID_END_PFN_2 - VALID_START_PFN_2);
2202 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
2209 upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
2210 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
2212 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
2214 ATF_REQUIRE_EQ(0, uvmexp.npages);
2216 uvm_physseg_init_seg(upm, pgs);
/* Segment is on DEFAULT; asking for FIRST4G must be rejected */
2218 ATF_CHECK_EQ(false, uvm_page_physunload(upm, VM_FREELIST_FIRST4G, &p));
/*
 * Test case: uvm_page_physunload_force() unloads pages one at a time from
 * avail_start upward; draining every available page deletes the segment.
 * A second segment is kept loaded because unloading the last segment in
 * the system panics.
 */
2221 ATF_TC(uvm_page_physunload_force);
2222 ATF_TC_HEAD(uvm_page_physunload_force, tc)
2224 atf_tc_set_md_var(tc, "descr", "Tests if the basic \
2225 uvm_page_physunload_force() including delete works without.");
2227 ATF_TC_BODY(uvm_page_physunload_force, tc)
2230 * Would uvmexp.npages reduce every time an uvm_page_physunload is called?
2237 upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
2238 VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
2240 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
2242 ATF_REQUIRE_EQ(0, uvmexp.npages);
2244 /* Insert more than one segment iff VM_PHYSSEG_MAX > 1 */
2245 #if VM_PHYSSEG_MAX > 1
2247 * We have couple of physloads done this is because of the fact that if
2248 * we physunload all the PFs from a given range and we have only one
2249 * segment in total a panic() is called
2251 uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
2252 VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
2254 ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
2257 #if VM_PHYSSEG_MAX == 1
2258 atf_tc_expect_signal(SIGABRT,
2259 "cannot uvm_page_physunload() the last segment");
2262 ATF_REQUIRE_EQ(VALID_AVAIL_START_PFN_1,
2263 uvm_physseg_get_avail_start(upm));
/* Drain the whole available range page by page, checking the cursor */
2265 for(paddr_t i = VALID_AVAIL_START_PFN_1;
2266 i < VALID_AVAIL_END_PFN_1; i++) {
2268 uvm_page_physunload_force(upm, VM_FREELIST_DEFAULT, &p));
2269 ATF_CHECK_EQ(i, atop(p));
2271 if(i + 1 < VALID_AVAIL_END_PFN_1)
2272 ATF_CHECK_EQ(i + 1, uvm_physseg_get_avail_start(upm));
2276 * Now we try to retrieve the segment, which has been removed
2277 * from the system through force unloading all the pages inside it.
2279 upm = uvm_physseg_find(VALID_AVAIL_END_PFN_1 - 1, NULL);
2281 /* It should no longer exist */
/*
 * NOTE(review): comparing a uvm_physseg_t to NULL assumes the handle is
 * pointer-typed (the UVM_HOTPLUG representation) — confirm; the
 * UVM_PHYSSEG_TYPE_INVALID sentinel would be representation-neutral.
 */
2282 ATF_CHECK_EQ(NULL, upm);
2284 ATF_CHECK_EQ(1, uvm_physseg_get_entries());
/*
 * Negative test: uvm_page_physunload_force() on a segment with no
 * available pages must fail.
 */
ATF_TC(uvm_page_physunload_force_invalid);
ATF_TC_HEAD(uvm_page_physunload_force_invalid, tc)
{
	/*
	 * Fixed ungrammatical descr that also named the test case itself
	 * ("uvm_page_physunload_force_invalid()") instead of the API under test.
	 */
	atf_tc_set_md_var(tc, "descr", "Tests the invalid conditions for \
	uvm_page_physunload_force().");
}
2293 ATF_TC_BODY(uvm_page_physunload_force_invalid, tc)
2300 upm = uvm_page_physload(VALID_START_PFN_2, VALID_START_PFN_2+ 1,
2301 VALID_START_PFN_2, VALID_START_PFN_2, VM_FREELIST_DEFAULT);
2303 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
2305 ATF_REQUIRE_EQ(0, uvmexp.npages);
2308 uvm_page_physunload_force(upm, VM_FREELIST_DEFAULT, &p));
2315 #if defined(UVM_HOTPLUG)
2317 ATF_TP_ADD_TC(tp, uvm_physseg_alloc_atboot_mismatch);
2318 ATF_TP_ADD_TC(tp, uvm_physseg_alloc_atboot_overrun);
2319 ATF_TP_ADD_TC(tp, uvm_physseg_alloc_sanity);
2320 ATF_TP_ADD_TC(tp, uvm_physseg_free_atboot_mismatch);
2321 ATF_TP_ADD_TC(tp, uvm_physseg_free_sanity);
2322 #if VM_PHYSSEG_MAX > 1
2323 ATF_TP_ADD_TC(tp, uvm_physseg_atboot_free_leak);
2325 #endif /* UVM_HOTPLUG */
2327 ATF_TP_ADD_TC(tp, uvm_physseg_plug);
2328 ATF_TP_ADD_TC(tp, uvm_physseg_unplug);
2331 ATF_TP_ADD_TC(tp, uvm_physseg_init);
2332 ATF_TP_ADD_TC(tp, uvm_page_physload_preload);
2333 ATF_TP_ADD_TC(tp, uvm_page_physload_postboot);
2334 ATF_TP_ADD_TC(tp, uvm_physseg_handle_immutable);
2335 ATF_TP_ADD_TC(tp, uvm_physseg_seg_chomp_slab);
2336 ATF_TP_ADD_TC(tp, uvm_physseg_alloc_from_slab);
2337 ATF_TP_ADD_TC(tp, uvm_physseg_init_seg);
2338 ATF_TP_ADD_TC(tp, uvm_physseg_get_start);
2339 ATF_TP_ADD_TC(tp, uvm_physseg_get_start_invalid);
2340 ATF_TP_ADD_TC(tp, uvm_physseg_get_end);
2341 ATF_TP_ADD_TC(tp, uvm_physseg_get_end_invalid);
2342 ATF_TP_ADD_TC(tp, uvm_physseg_get_avail_start);
2343 ATF_TP_ADD_TC(tp, uvm_physseg_get_avail_start_invalid);
2344 ATF_TP_ADD_TC(tp, uvm_physseg_get_avail_end);
2345 ATF_TP_ADD_TC(tp, uvm_physseg_get_avail_end_invalid);
2346 ATF_TP_ADD_TC(tp, uvm_physseg_get_next);
2347 ATF_TP_ADD_TC(tp, uvm_physseg_get_next_invalid);
2348 ATF_TP_ADD_TC(tp, uvm_physseg_get_prev);
2349 ATF_TP_ADD_TC(tp, uvm_physseg_get_prev_invalid);
2350 ATF_TP_ADD_TC(tp, uvm_physseg_get_first);
2351 ATF_TP_ADD_TC(tp, uvm_physseg_get_last);
2352 ATF_TP_ADD_TC(tp, uvm_physseg_valid);
2353 ATF_TP_ADD_TC(tp, uvm_physseg_valid_invalid);
2354 ATF_TP_ADD_TC(tp, uvm_physseg_get_highest);
2355 ATF_TP_ADD_TC(tp, uvm_physseg_get_free_list);
2356 ATF_TP_ADD_TC(tp, uvm_physseg_get_start_hint);
2357 ATF_TP_ADD_TC(tp, uvm_physseg_set_start_hint);
2358 ATF_TP_ADD_TC(tp, uvm_physseg_set_start_hint_invalid);
2359 ATF_TP_ADD_TC(tp, uvm_physseg_get_pg);
2361 #ifdef __HAVE_PMAP_PHYSSEG
2362 ATF_TP_ADD_TC(tp, uvm_physseg_get_pmseg);
2364 ATF_TP_ADD_TC(tp, vm_physseg_find);
2365 ATF_TP_ADD_TC(tp, vm_physseg_find_invalid);
2367 ATF_TP_ADD_TC(tp, uvm_page_physunload_start);
2368 ATF_TP_ADD_TC(tp, uvm_page_physunload_end);
2369 ATF_TP_ADD_TC(tp, uvm_page_physunload_none);
2370 ATF_TP_ADD_TC(tp, uvm_page_physunload_delete_start);
2371 ATF_TP_ADD_TC(tp, uvm_page_physunload_delete_end);
2372 ATF_TP_ADD_TC(tp, uvm_page_physunload_invalid);
2373 ATF_TP_ADD_TC(tp, uvm_page_physunload_force);
2374 ATF_TP_ADD_TC(tp, uvm_page_physunload_force_invalid);
2376 return atf_no_error();