/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/sf_buf.h>
#include <sys/unistd.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/sysarch.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>
#ifndef NSFBUFS
#define NSFBUFS		(512 + maxusers * 16)
#endif
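/*
 * Sizing note: with the stock maxusers of 32, for example, NSFBUFS
 * evaluates to 512 + 32 * 16 = 1024 buffers; the pool size can be
 * overridden at boot with the kern.ipc.nsfbufs tunable, which
 * sf_buf_init() below fetches via TUNABLE_INT_FETCH().
 */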
#ifndef ARM_USE_SMALL_ALLOC
static void	sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

#define SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;
#endif
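/*
 * Sketch of how the sf_buf machinery is used by a consumer such as
 * sendfile(2); this is illustrative, not code from this file:
 *
 *	sf = sf_buf_alloc(m, SFB_CATCH);    // map vm_page m into KVA
 *	... copy to/from (void *)sf_buf_kva(sf) ...
 *	sf_buf_free(sf);                    // drop the reference
 *
 * A buffer keeps its page mapped while sitting on the freelist, so a
 * subsequent allocation for the same page is a cheap hash-table hit.
 */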
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(register struct thread *td1, register struct proc *p2,
    struct thread *td2, int flags)
{
	struct pcb *pcb2;
	struct trapframe *tf;
	struct switchframe *sf;
	struct mdproc *mdp2;

	if ((flags & RFPROC) == 0)
		return;
	pcb2 = (struct pcb *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1;
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE);
#endif
#endif
	td2->td_pcb = pcb2;
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
	mdp2 = &p2->p_md;
	bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2));
	pcb2->un_32.pcb32_und_sp = td2->td_kstack + USPACE_UNDEF_STACK_TOP;
	pcb2->un_32.pcb32_sp = td2->td_kstack +
	    USPACE_SVC_STACK_TOP - sizeof(*pcb2);
	pmap_activate(td2);
	td2->td_frame = tf =
	    (struct trapframe *)pcb2->un_32.pcb32_sp - 1;
	*tf = *td1->td_frame;

	/*
	 * fork_trampoline() picks up r4/r5 from the switchframe, so the
	 * child resumes in fork_return(td2, frame) when first scheduled.
	 */
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td2;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	pcb2->un_32.pcb32_sp = (u_int)sf;

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_cspr = 0;
	td2->td_md.md_tp = *(uint32_t **)ARM_TP_ADDRESS;
}
void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}
/*
 * Detach mapped page and release resources back to the system.
 */
void
sf_buf_free(struct sf_buf *sf)
{
#ifndef ARM_USE_SMALL_ALLOC
	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
		if (sf_buf_alloc_want > 0)
			wakeup_one(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
#endif
}
#ifndef ARM_USE_SMALL_ALLOC
/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
	/* Reserve KVA only; pages are entered later by sf_buf_alloc(). */
	sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
/*
 * Get an sf_buf from the freelist. Will block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
#ifdef ARM_USE_SMALL_ALLOC
	return ((struct sf_buf *)m);
#else
	struct sf_head *hash_list;
	struct sf_buf *sf;
	int error;

	/* First look for an existing mapping of this page. */
	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
			goto done;
		}
	}
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		mbstat.sf_allocwait++;
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	pmap_kenter(sf->kva, VM_PAGE_TO_PHYS(sf->m));
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
#endif
}
/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall. Put enough state in the new thread's PCB to get it to go back
 * to userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that are from other
 * sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct trapframe *tf;
	struct switchframe *sf;

	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
	bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));
	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
	td->td_pcb->un_32.pcb32_und_sp = td->td_kstack + USPACE_UNDEF_STACK_TOP;

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_cspr = 0;
}
/*
 * Set the machine state for performing an upcall that has to
 * be done in thread_userret() so that those upcalls generated
 * in thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
	stack_t *stack)
{
	struct trapframe *tf = td->td_frame;

	/* Keep the user stack pointer 8-byte aligned, as the AAPCS requires. */
	tf->tf_usr_sp = ((int)stack->ss_sp + stack->ss_size
	    - sizeof(struct trapframe)) & ~7;
	tf->tf_pc = (int)entry;
	tf->tf_r0 = (int)arg;
	tf->tf_spsr = PSR_USR32_MODE;
}
int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

	if (td != curthread)
		td->td_md.md_tp = tls_base;
	else {
		critical_enter();
		*(void **)ARM_TP_ADDRESS = tls_base;
		critical_exit();
	}
	return (0);
}
void
cpu_thread_exit(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages *
	    PAGE_SIZE) - 1;
	td->td_frame = (struct trapframe *)
	    ((u_int)td->td_kstack + USPACE_SVC_STACK_TOP - sizeof(struct pcb)) - 1;
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td->td_kstack, td->td_kstack_pages * PAGE_SIZE);
#endif
#endif
}
void
cpu_thread_free(struct thread *td)
{
}

void
cpu_thread_clean(struct thread *td)
{
}
/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
	struct switchframe *sf;
	struct trapframe *tf;

	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)func;
	sf->sf_r5 = (u_int)arg;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
}
/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

	if (busdma_swi_pending)
		busdma_swi();
}

void
cpu_exit(struct thread *td)
{
}
#define BITS_PER_INT	(8 * sizeof(int))
vm_offset_t arm_nocache_startaddr;
static int arm_nocache_allocated[ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE *
	BITS_PER_INT)];
/*
 * Functions to map and unmap memory non-cached into KVA that the kernel won't
 * try to allocate. The goal is to provide uncached memory to busdma, to honor
 * the BUS_DMA_COHERENT flag.
 * We can allocate at most ARM_NOCACHE_KVA_SIZE bytes.
 * The allocator is deliberately simple: each page is represented by a bit in
 * a bitfield, 0 meaning the page is not allocated, 1 meaning it is.
 * As soon as it finds enough contiguous pages to satisfy the request,
 * it returns the address.
 */
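/*
 * Worked example of the bitfield search below: a 3-page request
 * (size rounded to 3 * PAGE_SIZE) scans for 3 consecutive clear bits;
 * if bits 5..7 are the first free run, the caller gets
 * arm_nocache_startaddr + 5 * PAGE_SIZE and bits 5..7 are then set.
 */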
void *
arm_remap_nocache(void *addr, vm_size_t size)
{
	int i, j;

	size = round_page(size);
	/* Find a run of enough consecutive free pages in the bitfield. */
	for (i = 0; i < ARM_NOCACHE_KVA_SIZE / PAGE_SIZE; i++) {
		if (!(arm_nocache_allocated[i / BITS_PER_INT] & (1 << (i %
		    BITS_PER_INT)))) {
			for (j = i; j < i + (size / (PAGE_SIZE)); j++)
				if (arm_nocache_allocated[j / BITS_PER_INT] &
				    (1 << (j % BITS_PER_INT)))
					break;
			if (j == i + (size / (PAGE_SIZE)))
				break;
		}
	}
	if (i < ARM_NOCACHE_KVA_SIZE / PAGE_SIZE) {
		vm_offset_t tomap = arm_nocache_startaddr + i * PAGE_SIZE;
		void *ret = (void *)tomap;
		vm_paddr_t physaddr = vtophys((vm_offset_t)addr);
		vm_offset_t vaddr = (vm_offset_t)addr;

		vaddr = vaddr & ~PAGE_MASK;
		for (; tomap < (vm_offset_t)ret + size; tomap += PAGE_SIZE,
		    vaddr += PAGE_SIZE, physaddr += PAGE_SIZE, i++) {
			/* Flush the cached alias before entering the
			 * uncached mapping, and mark the page allocated. */
			cpu_idcache_wbinv_range(vaddr, PAGE_SIZE);
			cpu_l2cache_wbinv_range(vaddr, PAGE_SIZE);
			pmap_kenter_nocache(tomap, physaddr);
			cpu_tlb_flushID_SE(vaddr);
			arm_nocache_allocated[i / BITS_PER_INT] |= 1 << (i %
			    BITS_PER_INT);
		}
		return (ret);
	}
	return (NULL);
}
void
arm_unmap_nocache(void *addr, vm_size_t size)
{
	vm_offset_t raddr = (vm_offset_t)addr;
	int i;

	size = round_page(size);
	i = (raddr - arm_nocache_startaddr) / (PAGE_SIZE);
	for (; size > 0; size -= PAGE_SIZE, i++)
		arm_nocache_allocated[i / BITS_PER_INT] &= ~(1 << (i %
		    BITS_PER_INT));
}
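/*
 * Hypothetical usage sketch (not code from this file): a driver that
 * needs a coherent view of an existing buffer might do
 *
 *	nc = arm_remap_nocache(buf, len);
 *	... DMA to/from nc ...
 *	arm_unmap_nocache(nc, len);
 *
 * where 'buf' is an existing cached kernel mapping; the remap writes
 * back and invalidates the cached lines before handing out the
 * uncached alias.
 */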
#ifdef ARM_USE_SMALL_ALLOC

static TAILQ_HEAD(,arm_small_page) pages_normal =
	TAILQ_HEAD_INITIALIZER(pages_normal);
static TAILQ_HEAD(,arm_small_page) pages_wt =
	TAILQ_HEAD_INITIALIZER(pages_wt);
static TAILQ_HEAD(,arm_small_page) free_pgdesc =
	TAILQ_HEAD_INITIALIZER(free_pgdesc);

extern uma_zone_t l2zone;

struct mtx smallalloc_mtx;

MALLOC_DEFINE(M_VMSMALLALLOC, "vm_small_alloc", "VM Small alloc data");

vm_offset_t alloc_firstaddr;

#ifdef ARM_HAVE_SUPERSECTIONS
#define S_FRAME	L1_SUP_FRAME
#define S_SIZE	L1_SUP_SIZE
#else
#define S_FRAME	L1_S_FRAME
#define S_SIZE	L1_S_SIZE
#endif
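/*
 * S_FRAME/S_SIZE pick the granularity of the direct map built below:
 * 16MB supersections when the MMU supports them, otherwise plain 1MB
 * L1 sections. dump_avail[] regions are rounded out to that size.
 */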
vm_offset_t
arm_ptovirt(vm_paddr_t pa)
{
	int i;
	vm_offset_t addr = alloc_firstaddr;

	KASSERT(alloc_firstaddr != 0, ("arm_ptovirt called too early ?"));
	for (i = 0; dump_avail[i + 1]; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			break;
		addr += (dump_avail[i + 1] & S_FRAME) + S_SIZE -
		    (dump_avail[i] & S_FRAME);
	}
	KASSERT(dump_avail[i + 1] != 0, ("Trying to access invalid physical address"));
	return (addr + (pa - (dump_avail[i] & S_FRAME)));
}
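/*
 * Example of the pa => va math above, under a hypothetical layout:
 * with dump_avail[] = { 0x20000000, 0x24000000, 0, 0 } (64MB of RAM)
 * and 1MB sections, the whole range is mapped linearly starting at
 * alloc_firstaddr, so pa 0x20042000 yields alloc_firstaddr + 0x42000.
 */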
void
arm_init_smallalloc(void)
{
	vm_offset_t to_map = 0, mapaddr;
	int i;

	/*
	 * We need to use dump_avail and not phys_avail, since we want to
	 * map the whole memory and not just the memory available to the VM
	 * to be able to do a pa => va association for any address.
	 */
	for (i = 0; dump_avail[i + 1]; i += 2) {
		to_map += (dump_avail[i + 1] & S_FRAME) + S_SIZE -
		    (dump_avail[i] & S_FRAME);
	}
	alloc_firstaddr = mapaddr = KERNBASE - to_map;
	for (i = 0; dump_avail[i + 1]; i += 2) {
		vm_offset_t size = (dump_avail[i + 1] & S_FRAME) +
		    S_SIZE - (dump_avail[i] & S_FRAME);
		vm_offset_t did = 0;

		while (size > 0) {
#ifdef ARM_HAVE_SUPERSECTIONS
			pmap_kenter_supersection(mapaddr,
			    (dump_avail[i] & L1_SUP_FRAME) + did,
			    SECTION_CACHE);
#else
			pmap_kenter_section(mapaddr,
			    (dump_avail[i] & L1_S_FRAME) + did, SECTION_CACHE);
#endif
			mapaddr += S_SIZE;
			did += S_SIZE;
			size -= S_SIZE;
		}
	}
}
void
arm_add_smallalloc_pages(void *list, void *mem, int bytes, int pagetable)
{
	struct arm_small_page *pg;

	bytes &= ~PAGE_MASK;
	while (bytes > 0) {
		pg = (struct arm_small_page *)list;
		pg->addr = mem;
		if (pagetable)
			TAILQ_INSERT_HEAD(&pages_wt, pg, pg_list);
		else
			TAILQ_INSERT_HEAD(&pages_normal, pg, pg_list);
		list = (char *)list + sizeof(*pg);
		mem = (char *)mem + PAGE_SIZE;
		bytes -= PAGE_SIZE;
	}
}
void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	void *ret;
	struct arm_small_page *sp;
	TAILQ_HEAD(,arm_small_page) *head;
	static vm_pindex_t color;
	vm_page_t m;

	*flags = UMA_SLAB_PRIV;
	/*
	 * For CPUs where we set up page tables as write-back, there's no
	 * need to maintain two separate pools.
	 */
	if (zone == l2zone && pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt)
		head = (void *)&pages_wt;
	else
		head = (void *)&pages_normal;

	mtx_lock(&smallalloc_mtx);
	sp = TAILQ_FIRST(head);

	if (!sp) {
		int pflags;

		/* The descriptor pool is empty; fall back on the VM. */
		mtx_unlock(&smallalloc_mtx);
		if (zone == l2zone &&
		    pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt) {
			*flags = UMA_SLAB_KMEM;
			ret = ((void *)kmem_malloc(kmem_map, bytes, M_NOWAIT));
			return (ret);
		}
		if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
			pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
		else
			pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
		if (wait & M_ZERO)
			pflags |= VM_ALLOC_ZERO;
		for (;;) {
			m = vm_page_alloc(NULL, color++,
			    pflags | VM_ALLOC_NOOBJ);
			if (m == NULL) {
				if (wait & M_NOWAIT)
					return (NULL);
				VM_WAIT;
			} else
				break;
		}
		ret = (void *)arm_ptovirt(VM_PAGE_TO_PHYS(m));
		if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
			bzero(ret, PAGE_SIZE);
		return (ret);
	}
	TAILQ_REMOVE(head, sp, pg_list);
	TAILQ_INSERT_HEAD(&free_pgdesc, sp, pg_list);
	ret = sp->addr;
	mtx_unlock(&smallalloc_mtx);
	if ((wait & M_ZERO))
		bzero(ret, bytes);
	return (ret);
}
void
uma_small_free(void *mem, int size, u_int8_t flags)
{
	pd_entry_t *pd;
	pt_entry_t *pt;

	if (flags & UMA_SLAB_KMEM)
		kmem_free(kmem_map, (vm_offset_t)mem, size);
	else {
		struct arm_small_page *sp;

		if ((vm_offset_t)mem >= KERNBASE) {
			mtx_lock(&smallalloc_mtx);
			sp = TAILQ_FIRST(&free_pgdesc);
			KASSERT(sp != NULL, ("No more free page descriptor ?"));
			TAILQ_REMOVE(&free_pgdesc, sp, pg_list);
			sp->addr = mem;
			/* Return the page to the pool that matches its
			 * current cache mode. */
			pmap_get_pde_pte(kernel_pmap, (vm_offset_t)mem, &pd,
			    &pt);
			if ((*pd & pte_l1_s_cache_mask) ==
			    pte_l1_s_cache_mode_pt &&
			    pte_l1_s_cache_mode_pt != pte_l1_s_cache_mode)
				TAILQ_INSERT_HEAD(&pages_wt, sp, pg_list);
			else
				TAILQ_INSERT_HEAD(&pages_normal, sp, pg_list);
			mtx_unlock(&smallalloc_mtx);
		} else {
			vm_paddr_t pa = vtophys((vm_offset_t)mem);
			vm_page_t m;

			m = PHYS_TO_VM_PAGE(pa);
			m->wire_count--;
			vm_page_free(m);
			atomic_subtract_int(&cnt.v_wire_count, 1);
		}
	}
}

#endif /* ARM_USE_SMALL_ALLOC */