1 /* $NetBSD: t_uvm_physseg_load.c,v 1.2 2016/12/22 08:15:20 cherry Exp $ */
4 * Copyright (c) 2015, 2016 The NetBSD Foundation, Inc.
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Santhosh N. Raju <santhosh.raju@gmail.com> and
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __RCSID("$NetBSD: t_uvm_physseg_load.c,v 1.2 2016/12/22 08:15:20 cherry Exp $");
37 * If this line is commented out tests related to uvm_physseg_get_pmseg()
40 * Have a look at machine/uvm_physseg.h for more details.
42 #define __HAVE_PMAP_PHYSSEG
45 * This is a dummy struct used for testing purposes
47 * In reality this struct would exist in the MD part of the code residing in
51 #ifdef __HAVE_PMAP_PHYSSEG
53 int dummy_variable; /* Dummy variable used for testing */
57 /* Testing API - assumes userland */
58 /* Provide Kernel API equivalents */
61 #include <string.h> /* memset(3) et. al */
62 #include <stdio.h> /* printf(3) */
63 #include <stdlib.h> /* malloc(3) */
/* printf(3) format specifiers matching the userland typedefs below. */
68 #define PRIxPADDR "lx"
69 #define PRIxPSIZE "lx"
70 #define PRIuPSIZE "lu"
71 #define PRIxVADDR "lx"
72 #define PRIxVSIZE "lx"
73 #define PRIuVSIZE "lu"
/* Feature switches for the implementation under test (see the #include
 * of uvm_physseg.c further down). */
75 #define UVM_HOTPLUG /* Enable hotplug with rbtree. */
76 #define PMAP_STEAL_MEMORY
77 #define DEBUG /* Enable debug functionality. */
/* Userland stand-ins for the kernel's VM address/size types. */
79 typedef unsigned long vaddr_t;
80 typedef unsigned long paddr_t;
81 typedef unsigned long psize_t;
82 typedef unsigned long vsize_t;
84 #include <uvm/uvm_physseg.h>
85 #include <uvm/uvm_page.h>
/* KASSERT/KASSERTMSG: one configuration compiles them out, the other maps
 * them to assert(3)/printf(3); the selecting #if is not shown here. */
88 #define KASSERTMSG(e, msg, ...) /* NOTHING */
89 #define KASSERT(e) /* NOTHING */
91 #define KASSERT(a) assert(a)
92 #define KASSERTMSG(exp, ...) printf(__VA_ARGS__); assert((exp))
95 #define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
/* Freelist configuration mirroring a typical MD setup. */
97 #define VM_NFREELIST 4
98 #define VM_FREELIST_DEFAULT 0
99 #define VM_FREELIST_FIRST16 3
100 #define VM_FREELIST_FIRST1G 2
101 #define VM_FREELIST_FIRST4G 1
104 * Used in tests when Array implementation is tested
106 #if !defined(VM_PHYSSEG_MAX)
107 #define VM_PHYSSEG_MAX 32
110 #define PAGE_SIZE 4096
111 #define PAGE_SHIFT 12
112 #define atop(x) (((paddr_t)(x)) >> PAGE_SHIFT)
/* Locking is a no-op in this single-threaded test harness. */
114 #define mutex_enter(l)
115 #define mutex_exit(l)
117 #define _SYS_KMEM_H_ /* Disallow the real kmem API (see below) */
118 /* free(p) XXX: pgs management need more thought */
/* Route kernel kmem(9) allocations to malloc(3)/free(3).
 * NOTE(review): kmem_zalloc maps to plain malloc(3) and therefore does
 * NOT zero the memory -- confirm the code under test never relies on
 * kmem_zalloc's zeroing guarantee. */
119 #define kmem_alloc(size, flags) malloc(size)
120 #define kmem_zalloc(size, flags) malloc(size)
121 #define kmem_free(p, size) free(p)
/* Global state normally provided by the kernel's uvm(9) machinery. */
125 struct uvmexp uvmexp; /* decl */
128 * uvm structure borrowed from uvm.h
130 * Remember this is a dummy structure used within the ATF Tests and
131 * uses only necessary fields from the original uvm struct.
132 * See uvm/uvm.h for the full struct.
136 /* vm_page related parameters */
138 bool page_init_done; /* TRUE if uvm_page_init() finished */
/* Minimal userland replacements for kernel routines that the included
 * uvm_physseg.c references. */
142 panic(const char *fmt, ...)
156 uvm_pagefree(struct vm_page *pg)
161 #if defined(UVM_HOTPLUG)
163 uvmpdpol_reinit(void)
167 #endif /* UVM_HOTPLUG */
169 /* end - Provide Kernel API equivalents */
/* Compile the implementation under test directly into this file so the
 * tests can reach its internals (e.g. uvm_physseg_graph below). */
171 #include "uvm/uvm_physseg.c"
/*
 * One megabyte, in bytes.  Parenthesized so the macro expands as a
 * single unit in any expression context (e.g. "x % ONE_MEGABYTE");
 * all existing uses (atop(ONE_MEGABYTE * n) etc.) are unaffected.
 */
#define ONE_MEGABYTE (1024 * 1024)

/* Sample Page Frame Numbers */
#define VALID_START_PFN_1 atop(0)
#define VALID_END_PFN_1 atop(ONE_MEGABYTE)
#define VALID_AVAIL_START_PFN_1 atop(0)
#define VALID_AVAIL_END_PFN_1 atop(ONE_MEGABYTE)

/* Note: ranges 2, 4 and 5 share the same start PFN: atop(1MB + 1). */
#define VALID_START_PFN_2 atop(ONE_MEGABYTE + 1)
#define VALID_END_PFN_2 atop(ONE_MEGABYTE * 2)
#define VALID_AVAIL_START_PFN_2 atop(ONE_MEGABYTE + 1)
#define VALID_AVAIL_END_PFN_2 atop(ONE_MEGABYTE * 2)

#define VALID_START_PFN_3 atop((ONE_MEGABYTE * 2) + 1)
#define VALID_END_PFN_3 atop(ONE_MEGABYTE * 3)
#define VALID_AVAIL_START_PFN_3 atop((ONE_MEGABYTE * 2) + 1)
#define VALID_AVAIL_END_PFN_3 atop(ONE_MEGABYTE * 3)

#define VALID_START_PFN_4 atop(ONE_MEGABYTE + 1)
#define VALID_END_PFN_4 atop(ONE_MEGABYTE * 128)
#define VALID_AVAIL_START_PFN_4 atop(ONE_MEGABYTE + 1)
#define VALID_AVAIL_END_PFN_4 atop(ONE_MEGABYTE * 128)

#define VALID_START_PFN_5 atop(ONE_MEGABYTE + 1)
#define VALID_END_PFN_5 atop(ONE_MEGABYTE * 256)
#define VALID_AVAIL_START_PFN_5 atop(ONE_MEGABYTE + 1)
#define VALID_AVAIL_END_PFN_5 atop(ONE_MEGABYTE * 256)

/*
 * Total number of pages (of 4K size each) should be 256 for 1MB of memory.
 */
#define PAGE_COUNT_1M 256
209 * The number of Page Frames to allot per segment
214 * A debug function to print the content of upm.
217 uvm_physseg_dump_seg(uvm_physseg_t upm)
/* NOTE(review): %ld is used for values of type paddr_t (unsigned long
 * here); PRIxPADDR defined above would be the matching specifier --
 * confirm before changing the debug output format. */
220 printf("%s: seg->start == %ld\n", __func__,
221 uvm_physseg_get_start(upm));
222 printf("%s: seg->end == %ld\n", __func__,
223 uvm_physseg_get_end(upm));
224 printf("%s: seg->avail_start == %ld\n", __func__,
225 uvm_physseg_get_avail_start(upm));
226 printf("%s: seg->avail_end == %ld\n", __func__,
227 uvm_physseg_get_avail_end(upm));
236 * Private accessor that gets the value of vm_physmem.nentries
239 uvm_physseg_get_entries(void)
241 #if defined(UVM_HOTPLUG)
/* Hotplug backend keeps the segment count in the rbtree graph. */
242 return uvm_physseg_graph.nentries;
245 #endif /* UVM_HOTPLUG */
249 * Note: This function replicates verbatim what happens in
250 * uvm_page.c:uvm_page_init().
252 * Please track any changes that happen there.
255 uvm_page_init_fake(struct vm_page *pagearray, psize_t pagecount)
/* The slab is attached once, to the first bank, via chomp_slab in the
 * for-init clause; each bank then carves its own pages out of it. */
260 for (bank = uvm_physseg_get_first(),
261 uvm_physseg_seg_chomp_slab(bank, pagearray, pagecount);
262 uvm_physseg_valid_p(bank);
263 bank = uvm_physseg_get_next(bank)) {
/* n = number of page frames spanned by this bank. */
265 n = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
266 uvm_physseg_seg_alloc_from_slab(bank, n);
267 uvm_physseg_init_seg(bank, pagearray);
269 /* set up page array pointers */
/* Signal that page initialization is complete (mirrors uvm_page_init()). */
274 uvm.page_init_done = true;
278 * PHYS_TO_VM_PAGE: find vm_page for a PA. used by MI code to get vm_pages
279 * back from an I/O mapping (ugh!). used in some MD code as well.
281 static struct vm_page *
282 uvm_phys_to_vm_page(paddr_t pa)
/* Convert byte address to a page frame number, then look it up. */
284 paddr_t pf = atop(pa);
288 psi = uvm_physseg_find(pf, &off);
289 if (psi != UVM_PHYSSEG_TYPE_INVALID)
290 return uvm_physseg_get_pg(psi, off);
/* NOTE(review): the fall-through (pf not in any segment) presumably
 * returns NULL -- the return statement is not visible here; confirm. */
295 //uvm_vm_page_to_phys(const struct vm_page *pg)
298 // return pg->phys_addr;
302 * XXX: To do, write control test cases for uvm_vm_page_to_phys().
305 /* #define VM_PAGE_TO_PHYS(entry) uvm_vm_page_to_phys(entry) */
307 #define PHYS_TO_VM_PAGE(pa) uvm_phys_to_vm_page(pa)
310 * Test Fixture SetUp().
315 /* Prerequisites for running certain calls in uvm_physseg */
316 uvmexp.pagesize = PAGE_SIZE;
/* Start each test in the pre-boot state; hotplug tests flip this via
 * uvm_page_init_fake(). */
318 uvm.page_init_done = false;
322 ATF_TC(uvm_physseg_100);
323 ATF_TC_HEAD(uvm_physseg_100, tc)
325 atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
326 100 calls, VM_PHYSSEG_MAX is 32.");
328 ATF_TC_BODY(uvm_physseg_100, tc)
/* Load the first 1MB as VM_PHYSSEG_MAX segments of PF_STEP frames each. */
334 for(paddr_t i = VALID_START_PFN_1;
335 i < VALID_END_PFN_1; i += PF_STEP) {
336 uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
337 VM_FREELIST_DEFAULT);
340 ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
/* Per the descr, each iteration feeds a random in-range PA to
 * uvm_phys_to_vm_page(). */
342 srandom((unsigned)time(NULL));
343 for(int i = 0; i < 100; i++) {
344 pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
/* Completing the loop without a crash or assert is the pass criterion. */
348 ATF_CHECK_EQ(true, true);
351 ATF_TC(uvm_physseg_1K);
352 ATF_TC_HEAD(uvm_physseg_1K, tc)
354 atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
355 1000 calls, VM_PHYSSEG_MAX is 32.");
357 ATF_TC_BODY(uvm_physseg_1K, tc)
/* Load the first 1MB as VM_PHYSSEG_MAX segments of PF_STEP frames each. */
363 for(paddr_t i = VALID_START_PFN_1;
364 i < VALID_END_PFN_1; i += PF_STEP) {
365 uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
366 VM_FREELIST_DEFAULT);
369 ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
/* Per the descr, each iteration feeds a random in-range PA to
 * uvm_phys_to_vm_page(). */
371 srandom((unsigned)time(NULL));
372 for(int i = 0; i < 1000; i++) {
373 pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
/* Completing the loop without a crash or assert is the pass criterion. */
377 ATF_CHECK_EQ(true, true);
380 ATF_TC(uvm_physseg_10K);
381 ATF_TC_HEAD(uvm_physseg_10K, tc)
383 atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
384 10,000 calls, VM_PHYSSEG_MAX is 32.");
386 ATF_TC_BODY(uvm_physseg_10K, tc)
/* Load the first 1MB as VM_PHYSSEG_MAX segments of PF_STEP frames each. */
392 for(paddr_t i = VALID_START_PFN_1;
393 i < VALID_END_PFN_1; i += PF_STEP) {
394 uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
395 VM_FREELIST_DEFAULT);
398 ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
/* Per the descr, each iteration feeds a random in-range PA to
 * uvm_phys_to_vm_page(). */
400 srandom((unsigned)time(NULL));
401 for(int i = 0; i < 10000; i++) {
402 pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
/* Completing the loop without a crash or assert is the pass criterion. */
406 ATF_CHECK_EQ(true, true);
409 ATF_TC(uvm_physseg_100K);
410 ATF_TC_HEAD(uvm_physseg_100K, tc)
412 atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
413 100,000 calls, VM_PHYSSEG_MAX is 32.");
415 ATF_TC_BODY(uvm_physseg_100K, tc)
/* Load the first 1MB as VM_PHYSSEG_MAX segments of PF_STEP frames each. */
421 for(paddr_t i = VALID_START_PFN_1;
422 i < VALID_END_PFN_1; i += PF_STEP) {
423 uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
424 VM_FREELIST_DEFAULT);
427 ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
/* Per the descr, each iteration feeds a random in-range PA to
 * uvm_phys_to_vm_page(). */
429 srandom((unsigned)time(NULL));
430 for(int i = 0; i < 100000; i++) {
431 pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
/* Completing the loop without a crash or assert is the pass criterion. */
435 ATF_CHECK_EQ(true, true);
438 ATF_TC(uvm_physseg_1M);
439 ATF_TC_HEAD(uvm_physseg_1M, tc)
441 atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
442 1,000,000 calls, VM_PHYSSEG_MAX is 32.");
444 ATF_TC_BODY(uvm_physseg_1M, tc)
/* Load the first 1MB as VM_PHYSSEG_MAX segments of PF_STEP frames each. */
450 for(paddr_t i = VALID_START_PFN_1;
451 i < VALID_END_PFN_1; i += PF_STEP) {
452 uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
453 VM_FREELIST_DEFAULT);
456 ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
/* Per the descr, each iteration feeds a random in-range PA to
 * uvm_phys_to_vm_page(). */
458 srandom((unsigned)time(NULL));
459 for(int i = 0; i < 1000000; i++) {
460 pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
/* Completing the loop without a crash or assert is the pass criterion. */
464 ATF_CHECK_EQ(true, true);
467 ATF_TC(uvm_physseg_10M);
468 ATF_TC_HEAD(uvm_physseg_10M, tc)
470 atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
471 10,000,000 calls, VM_PHYSSEG_MAX is 32.");
473 ATF_TC_BODY(uvm_physseg_10M, tc)
/* Load the first 1MB as VM_PHYSSEG_MAX segments of PF_STEP frames each. */
479 for(paddr_t i = VALID_START_PFN_1;
480 i < VALID_END_PFN_1; i += PF_STEP) {
481 uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
482 VM_FREELIST_DEFAULT);
485 ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
/* Per the descr, each iteration feeds a random in-range PA to
 * uvm_phys_to_vm_page(). */
487 srandom((unsigned)time(NULL));
488 for(int i = 0; i < 10000000; i++) {
489 pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
/* Completing the loop without a crash or assert is the pass criterion. */
493 ATF_CHECK_EQ(true, true);
496 ATF_TC(uvm_physseg_100M);
497 ATF_TC_HEAD(uvm_physseg_100M, tc)
499 atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
500 100,000,000 calls, VM_PHYSSEG_MAX is 32.");
502 ATF_TC_BODY(uvm_physseg_100M, tc)
/* Load the first 1MB as VM_PHYSSEG_MAX segments of PF_STEP frames each. */
508 for(paddr_t i = VALID_START_PFN_1;
509 i < VALID_END_PFN_1; i += PF_STEP) {
510 uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
511 VM_FREELIST_DEFAULT);
514 ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
/* Per the descr, each iteration feeds a random in-range PA to
 * uvm_phys_to_vm_page(). */
516 srandom((unsigned)time(NULL));
517 for(int i = 0; i < 100000000; i++) {
518 pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
/* Completing the loop without a crash or assert is the pass criterion. */
522 ATF_CHECK_EQ(true, true);
525 ATF_TC(uvm_physseg_1MB);
526 ATF_TC_HEAD(uvm_physseg_1MB, tc)
528 atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
529 10,000,000 calls, VM_PHYSSEG_MAX is 32 on 1 MB Segment.");
531 ATF_TC_BODY(uvm_physseg_1MB, t)
537 psize_t pf_chunk_size = 0;
539 psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
541 psize_t npages2 = (VALID_END_PFN_2 - VALID_START_PFN_2);
/* NOTE(review): malloc(3) result is used unchecked and no matching
 * free is visible -- tolerable in a test process, but confirm. */
543 struct vm_page *slab = malloc(sizeof(struct vm_page) *
544 (npages1 + npages2));
548 /* We start with zero segments */
549 ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
550 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
552 /* Post boot: Fake all segments and pages accounted for. */
553 uvm_page_init_fake(slab, npages1 + npages2);
555 ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_2, npages2, NULL));
556 ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
/* Punch randomly sized holes (1..PF_STEP-1 frames) into segment 2. */
558 srandom((unsigned)time(NULL));
559 for(pf = VALID_START_PFN_2; pf < VALID_END_PFN_2; pf += PF_STEP) {
560 pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
561 uvm_physseg_unplug(pf, pf_chunk_size);
/* Random PAs, remapped into segment 2's address range, drive the
 * lookup named in the descr. */
564 for(int i = 0; i < 10000000; i++) {
565 pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_2);
566 if(pa < ctob(VALID_START_PFN_2))
567 pa += ctob(VALID_START_PFN_2);
/* Completing the loop without a crash or assert is the pass criterion. */
571 ATF_CHECK_EQ(true, true);
574 ATF_TC(uvm_physseg_64MB);
575 ATF_TC_HEAD(uvm_physseg_64MB, tc)
577 atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
578 10,000,000 calls, VM_PHYSSEG_MAX is 32 on 64 MB Segment.");
580 ATF_TC_BODY(uvm_physseg_64MB, t)
586 psize_t pf_chunk_size = 0;
588 psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
590 psize_t npages2 = (VALID_END_PFN_3 - VALID_START_PFN_3);
/* NOTE(review): malloc(3) result is used unchecked and no matching
 * free is visible -- tolerable in a test process, but confirm. */
592 struct vm_page *slab = malloc(sizeof(struct vm_page) *
593 (npages1 + npages2));
597 /* We start with zero segments */
598 ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
599 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
601 /* Post boot: Fake all segments and pages accounted for. */
602 uvm_page_init_fake(slab, npages1 + npages2);
604 ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_3, npages2, NULL));
605 ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
/* Punch randomly sized holes (1..PF_STEP-1 frames) into segment 3. */
607 srandom((unsigned)time(NULL));
608 for(pf = VALID_START_PFN_3; pf < VALID_END_PFN_3; pf += PF_STEP) {
609 pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
610 uvm_physseg_unplug(pf, pf_chunk_size);
/* Random PAs, remapped into segment 3's address range, drive the
 * lookup named in the descr. */
613 for(int i = 0; i < 10000000; i++) {
614 pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_3);
615 if(pa < ctob(VALID_START_PFN_3))
616 pa += ctob(VALID_START_PFN_3);
/* Completing the loop without a crash or assert is the pass criterion. */
620 ATF_CHECK_EQ(true, true);
623 ATF_TC(uvm_physseg_128MB);
624 ATF_TC_HEAD(uvm_physseg_128MB, tc)
626 atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
627 10,000,000 calls, VM_PHYSSEG_MAX is 32 on 128 MB Segment.");
629 ATF_TC_BODY(uvm_physseg_128MB, t)
635 psize_t pf_chunk_size = 0;
637 psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
639 psize_t npages2 = (VALID_END_PFN_4 - VALID_START_PFN_4);
641 struct vm_page *slab = malloc(sizeof(struct vm_page)
642 * (npages1 + npages2));
646 /* We start with zero segments */
647 ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
648 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
650 /* Post boot: Fake all segments and pages accounted for. */
651 uvm_page_init_fake(slab, npages1 + npages2);
/*
 * Plug the 128MB range using its own constants.  The previous code
 * passed VALID_START_PFN_2 here; that is numerically identical to
 * VALID_START_PFN_4 (both atop(1MB + 1)), but every other reference
 * in this body uses the _4 range, so name it consistently (compare
 * the 64MB test, which uses _3 throughout).
 */
653 ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_4, npages2, NULL));
654 ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
/* Punch randomly sized holes (1..PF_STEP-1 frames) into the 128MB segment. */
656 srandom((unsigned)time(NULL));
657 for(pf = VALID_START_PFN_4; pf < VALID_END_PFN_4; pf += PF_STEP) {
658 pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
659 uvm_physseg_unplug(pf, pf_chunk_size);
/* Random PAs, remapped into the 128MB segment's address range, drive
 * the lookup named in the descr. */
662 for(int i = 0; i < 10000000; i++) {
663 pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_4);
664 if(pa < ctob(VALID_START_PFN_4))
665 pa += ctob(VALID_START_PFN_4);
/* Completing the loop without a crash or assert is the pass criterion. */
669 ATF_CHECK_EQ(true, true);
672 ATF_TC(uvm_physseg_256MB);
673 ATF_TC_HEAD(uvm_physseg_256MB, tc)
675 atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
676 10,000,000 calls, VM_PHYSSEG_MAX is 32 on 256 MB Segment.");
678 ATF_TC_BODY(uvm_physseg_256MB, t)
684 psize_t pf_chunk_size = 0;
686 psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
688 psize_t npages2 = (VALID_END_PFN_5 - VALID_START_PFN_5);
690 struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2));
694 /* We start with zero segments */
695 ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
696 ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
698 /* Post boot: Fake all segments and pages accounted for. */
699 uvm_page_init_fake(slab, npages1 + npages2);
/*
 * Plug the 256MB range using its own constants.  The previous code
 * passed VALID_START_PFN_2; that is numerically identical to
 * VALID_START_PFN_5 (both atop(1MB + 1)), but the _5 name matches
 * every other reference in this body.
 */
701 ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_5, npages2, NULL));
702 ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
/* Punch randomly sized holes (1..PF_STEP-1 frames) into the 256MB segment. */
704 srandom((unsigned)time(NULL));
705 for(pf = VALID_START_PFN_5; pf < VALID_END_PFN_5; pf += PF_STEP) {
706 pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
707 uvm_physseg_unplug(pf, pf_chunk_size);
/*
 * Remap random PAs into the 256MB segment's address range.  Bug fix:
 * the wrap check previously compared against ctob(VALID_END_PFN_5),
 * which is true for every pa drawn below (pa = random % ctob(END)),
 * so the start offset was always added and pa could land past the
 * end of the plugged range.  The sibling 1MB/64MB/128MB tests all
 * compare against the segment START; do the same here.
 */
710 for(int i = 0; i < 10000000; i++) {
711 pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_5);
712 if(pa < ctob(VALID_START_PFN_5))
713 pa += ctob(VALID_START_PFN_5);
/* Completing the loop without a crash or assert is the pass criterion. */
717 ATF_CHECK_EQ(true, true);
722 /* Fixed memory size tests. */
723 ATF_TP_ADD_TC(tp, uvm_physseg_100);
724 ATF_TP_ADD_TC(tp, uvm_physseg_1K);
725 ATF_TP_ADD_TC(tp, uvm_physseg_10K);
726 ATF_TP_ADD_TC(tp, uvm_physseg_100K);
727 ATF_TP_ADD_TC(tp, uvm_physseg_1M);
728 ATF_TP_ADD_TC(tp, uvm_physseg_10M);
729 ATF_TP_ADD_TC(tp, uvm_physseg_100M);
/* The plug/unplug tests are only meaningful with the hotplug (rbtree)
 * backend enabled above. */
731 #if defined(UVM_HOTPLUG)
732 /* Variable memory size tests. */
733 ATF_TP_ADD_TC(tp, uvm_physseg_1MB);
734 ATF_TP_ADD_TC(tp, uvm_physseg_64MB);
735 ATF_TP_ADD_TC(tp, uvm_physseg_128MB);
736 ATF_TP_ADD_TC(tp, uvm_physseg_256MB);
737 #endif /* UVM_HOTPLUG */
739 return atf_no_error();