/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/sf_buf.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/unistd.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/sysarch.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

#ifndef NSFBUFS
#define NSFBUFS		(512 + maxusers * 16)
#endif
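
/*
 * The default sendfile(2) buffer pool size scales with maxusers; it can
 * be overridden at boot time through the kern.ipc.nsfbufs tunable, which
 * sf_buf_init() below fetches before sizing the pool.
 */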

#ifndef ARM_USE_SMALL_ALLOC
static void sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

#define SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;
#endif /* ARM_USE_SMALL_ALLOC */

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(register struct thread *td1, register struct proc *p2,
    struct thread *td2, int flags)
{
	struct pcb *pcb2;
	struct trapframe *tf;
	struct switchframe *sf;
	struct mdproc *mdp2;

	if ((flags & RFPROC) == 0)
		return;
	pcb2 = (struct pcb *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1;
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE);
#endif
#endif
	td2->td_pcb = pcb2;
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
	mdp2 = &p2->p_md;
	bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2));
	pcb2->un_32.pcb32_und_sp = td2->td_kstack + USPACE_UNDEF_STACK_TOP;
	pcb2->un_32.pcb32_sp = td2->td_kstack +
	    USPACE_SVC_STACK_TOP - sizeof(*pcb2);
	pmap_activate(td2);
	td2->td_frame = tf =
	    (struct trapframe *)pcb2->un_32.pcb32_sp - 1;
	*tf = *td1->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td2;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	pcb2->un_32.pcb32_sp = (u_int)sf;

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_cspr = 0;
	td2->td_md.md_tp = *(uint32_t **)ARM_TP_ADDRESS;
}
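
/*
 * Machine-dependent hooks invoked when a thread's kernel stack is
 * swapped in or out.  Nothing to do on ARM.
 */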
void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

/*
 * Detach mapped page and release resources back to the system.
 */
void
sf_buf_free(struct sf_buf *sf)
{
#ifndef ARM_USE_SMALL_ALLOC
	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
		if (sf_buf_alloc_want > 0)
			wakeup_one(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
#endif
}

#ifndef ARM_USE_SMALL_ALLOC
/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
	sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
#endif

/*
 * Get an sf_buf from the freelist.  Will block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
#ifdef ARM_USE_SMALL_ALLOC
	return ((struct sf_buf *)m);
#else
	struct sf_head *hash_list;
	struct sf_buf *sf;
	int error;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
			goto done;
		}
	}
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		mbstat.sf_allocwait++;
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	pmap_kenter(sf->kva, VM_PAGE_TO_PHYS(sf->m));
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
#endif
}
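
/*
 * Illustrative use only (not code from this file): MI consumers such as
 * the sendfile(2) path pair the two functions above to get a temporary
 * KVA mapping for a vm_page, roughly as in:
 *
 *	struct sf_buf *sf;
 *
 *	sf = sf_buf_alloc(m, SFB_CATCH);
 *	if (sf != NULL) {
 *		... copy through (void *)sf_buf_kva(sf) ...
 *		sf_buf_free(sf);
 *	}
 */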
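
/*
 * Store the syscall result in the trapframe that will be restored when
 * returning to user mode.  The FreeBSD/arm convention is that a set
 * carry bit (PSR_C_bit) signals an error, with the errno value in r0;
 * on success the carry bit is cleared and r0/r1 hold the return value.
 * ERESTART backs the pc up by one instruction so the swi is re-executed.
 */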
void
cpu_set_syscall_retval(struct thread *td, int error)
{
	struct trapframe *frame;
	int fixup;
#ifdef __ARMEB__
	uint32_t insn;
#endif

	frame = td->td_frame;
	fixup = 0;

#ifdef __ARMEB__
	insn = *(u_int32_t *)(frame->tf_pc - INSN_SIZE);
	if ((insn & 0x000fffff) == SYS___syscall) {
		register_t *ap = &frame->tf_r0;
		register_t code = ap[_QUAD_LOWWORD];
		if (td->td_proc->p_sysent->sv_mask)
			code &= td->td_proc->p_sysent->sv_mask;
		fixup = (code != SYS_freebsd6_lseek && code != SYS_lseek)
		    ? 1 : 0;
	}
#endif

	switch (error) {
	case 0:
		if (fixup) {
			frame->tf_r0 = 0;
			frame->tf_r1 = td->td_retval[0];
		} else {
			frame->tf_r0 = td->td_retval[0];
			frame->tf_r1 = td->td_retval[1];
		}
		frame->tf_spsr &= ~PSR_C_bit;	/* carry bit */
		break;
	case ERESTART:
		/*
		 * Reconstruct the pc to point at the swi.
		 */
		frame->tf_pc -= INSN_SIZE;
		break;
	case EJUSTRETURN:
		/* nothing to do */
		break;
	default:
		frame->tf_r0 = error;
		frame->tf_spsr |= PSR_C_bit;	/* carry bit */
		break;
	}
}

/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall.  Put enough state in the new thread's PCB to get it to go back
 * to userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that are from other
 * sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct trapframe *tf;
	struct switchframe *sf;

	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
	bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));
	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
	td->td_pcb->un_32.pcb32_und_sp = td->td_kstack + USPACE_UNDEF_STACK_TOP;

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_cspr = 0;
}

/*
 * Set the machine state for performing an upcall that has to
 * be done in thread_userret() so that those upcalls generated
 * in thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
	stack_t *stack)
{
	struct trapframe *tf = td->td_frame;

	/* Keep the user stack 8-byte aligned, as the AAPCS requires. */
	tf->tf_usr_sp = ((int)stack->ss_sp + stack->ss_size
	    - sizeof(struct trapframe)) & ~7;
	tf->tf_pc = (int)entry;
	tf->tf_r0 = (int)arg;
	tf->tf_spsr = PSR_USR32_MODE;
}
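
/*
 * Set the thread's TLS pointer.  The value is mirrored at the magic
 * ARM_TP_ADDRESS location, where userland can fetch it without a
 * system call; for the current thread that word is updated immediately,
 * inside a critical section so a context switch cannot intervene.
 */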
int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

	if (td != curthread)
		td->td_md.md_tp = tls_base;
	else {
		critical_enter();
		*(void **)ARM_TP_ADDRESS = tls_base;
		critical_exit();
	}
	return (0);
}

void
cpu_thread_exit(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages *
	    PAGE_SIZE) - 1;
	td->td_frame = (struct trapframe *)
	    ((u_int)td->td_kstack + USPACE_SVC_STACK_TOP - sizeof(struct pcb)) - 1;
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td->td_kstack, td->td_kstack_pages * PAGE_SIZE);
#endif
#endif
}

void
cpu_thread_free(struct thread *td)
{
}

void
cpu_thread_clean(struct thread *td)
{
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
	struct switchframe *sf;
	struct trapframe *tf;

	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)func;
	sf->sf_r5 = (u_int)arg;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
}
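
/*
 * kproc_create() and kthread_add() are typical callers of the function
 * above: the new kernel thread then starts execution in func(arg), by
 * way of fork_trampoline(), instead of returning to user mode.
 */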

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

	if (busdma_swi_pending)
		busdma_swi();
}

void
cpu_exit(struct thread *td)
{
}

#define BITS_PER_INT (8 * sizeof(int))
vm_offset_t arm_nocache_startaddr;
static int arm_nocache_allocated[ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE *
    BITS_PER_INT)];

/*
 * Functions to map and unmap memory non-cached into KVA that the kernel
 * won't try to allocate.  The goal is to provide uncached memory to
 * busdma, to honor BUS_DMA_COHERENT.
 * We can allocate at most ARM_NOCACHE_KVA_SIZE bytes.
 * The allocator is rather simple-minded: each page is represented by a
 * bit in a bitfield, 0 meaning the page is not allocated, 1 meaning it
 * is.  As soon as it finds enough contiguous pages to satisfy the
 * request, it returns the address.
 */
void *
arm_remap_nocache(void *addr, vm_size_t size)
{
	int i, j;

	size = round_page(size);
	for (i = 0; i < ARM_NOCACHE_KVA_SIZE / PAGE_SIZE; i++) {
		if (!(arm_nocache_allocated[i / BITS_PER_INT] & (1 << (i %
		    BITS_PER_INT)))) {
			for (j = i; j < i + (size / (PAGE_SIZE)); j++)
				if (arm_nocache_allocated[j / BITS_PER_INT] &
				    (1 << (j % BITS_PER_INT)))
					break;
			if (j == i + (size / (PAGE_SIZE)))
				break;
		}
	}
	if (i < ARM_NOCACHE_KVA_SIZE / PAGE_SIZE) {
		vm_offset_t tomap = arm_nocache_startaddr + i * PAGE_SIZE;
		void *ret = (void *)tomap;
		vm_paddr_t physaddr = vtophys((vm_offset_t)addr);
		vm_offset_t vaddr = (vm_offset_t)addr;

		vaddr = vaddr & ~PAGE_MASK;
		for (; tomap < (vm_offset_t)ret + size; tomap += PAGE_SIZE,
		    vaddr += PAGE_SIZE, physaddr += PAGE_SIZE, i++) {
			cpu_idcache_wbinv_range(vaddr, PAGE_SIZE);
			cpu_l2cache_wbinv_range(vaddr, PAGE_SIZE);
			pmap_kenter_nocache(tomap, physaddr);
			cpu_tlb_flushID_SE(vaddr);
			arm_nocache_allocated[i / BITS_PER_INT] |= 1 << (i %
			    BITS_PER_INT);
		}
		return (ret);
	}
	return (NULL);
}
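
/*
 * Undo arm_remap_nocache(): clear the allocation bit for each page in
 * the range.  The nocache KVA itself stays mapped; only the allocator's
 * bookkeeping is released.
 */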
void
arm_unmap_nocache(void *addr, vm_size_t size)
{
	vm_offset_t raddr = (vm_offset_t)addr;
	int i;

	size = round_page(size);
	i = (raddr - arm_nocache_startaddr) / (PAGE_SIZE);
	for (; size > 0; size -= PAGE_SIZE, i++)
		arm_nocache_allocated[i / BITS_PER_INT] &= ~(1 << (i %
		    BITS_PER_INT));
}
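
/*
 * Illustrative use only (hypothetical driver code): busdma relies on
 * this pair to satisfy BUS_DMA_COHERENT allocations, roughly as in:
 *
 *	void *uncached = arm_remap_nocache(buf, size);
 *
 *	if (uncached != NULL) {
 *		... let the device DMA to/from 'uncached' ...
 *		arm_unmap_nocache(uncached, size);
 *	}
 */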

#ifdef ARM_USE_SMALL_ALLOC

static TAILQ_HEAD(,arm_small_page) pages_normal =
	TAILQ_HEAD_INITIALIZER(pages_normal);
static TAILQ_HEAD(,arm_small_page) pages_wt =
	TAILQ_HEAD_INITIALIZER(pages_wt);
static TAILQ_HEAD(,arm_small_page) free_pgdesc =
	TAILQ_HEAD_INITIALIZER(free_pgdesc);

extern uma_zone_t l2zone;

struct mtx smallalloc_mtx;

MALLOC_DEFINE(M_VMSMALLALLOC, "vm_small_alloc", "VM Small alloc data");

vm_offset_t alloc_firstaddr;

#ifdef ARM_HAVE_SUPERSECTIONS
#define S_FRAME	L1_SUP_FRAME
#define S_SIZE	L1_SUP_SIZE
#else
#define S_FRAME	L1_S_FRAME
#define S_SIZE	L1_S_SIZE
#endif
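
/*
 * Translate a physical address into a virtual address inside the linear
 * mapping that arm_init_smallalloc() established just below KERNBASE:
 * walk dump_avail[] accumulating the size of each section-aligned chunk
 * until the one containing 'pa' is found.
 */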
vm_offset_t
arm_ptovirt(vm_paddr_t pa)
{
	int i;
	vm_offset_t addr = alloc_firstaddr;

	KASSERT(alloc_firstaddr != 0, ("arm_ptovirt called too early?"));
	for (i = 0; dump_avail[i + 1]; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			break;
		addr += (dump_avail[i + 1] & S_FRAME) + S_SIZE -
		    (dump_avail[i] & S_FRAME);
	}
	KASSERT(dump_avail[i + 1] != 0,
	    ("Trying to access invalid physical address"));
	return (addr + (pa - (dump_avail[i] & S_FRAME)));
}

void
arm_init_smallalloc(void)
{
	vm_offset_t to_map = 0, mapaddr;
	int i;

	/*
	 * We need to use dump_avail and not phys_avail, since we want to
	 * map the whole of memory and not just the memory available to
	 * the VM, to be able to do a pa => va association for any address.
	 */
	for (i = 0; dump_avail[i + 1]; i += 2) {
		to_map += (dump_avail[i + 1] & S_FRAME) + S_SIZE -
		    (dump_avail[i] & S_FRAME);
	}
	alloc_firstaddr = mapaddr = KERNBASE - to_map;
	for (i = 0; dump_avail[i + 1]; i += 2) {
		vm_offset_t size = (dump_avail[i + 1] & S_FRAME) +
		    S_SIZE - (dump_avail[i] & S_FRAME);
		vm_offset_t did = 0;

		while (size > 0) {
#ifdef ARM_HAVE_SUPERSECTIONS
			pmap_kenter_supersection(mapaddr,
			    (dump_avail[i] & L1_SUP_FRAME) + did,
			    SECTION_CACHE);
#else
			pmap_kenter_section(mapaddr,
			    (dump_avail[i] & L1_S_FRAME) + did, SECTION_CACHE);
#endif
			mapaddr += S_SIZE;
			did += S_SIZE;
			size -= S_SIZE;
		}
	}
}
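
/*
 * Seed the small-page pools with pre-allocated memory: carve 'mem' into
 * PAGE_SIZE chunks, record each one in an arm_small_page descriptor
 * taken from 'list', and queue it on the write-through pool (for page
 * table pages) or on the normal pool.
 */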
void
arm_add_smallalloc_pages(void *list, void *mem, int bytes, int pagetable)
{
	struct arm_small_page *pg;

	bytes &= ~PAGE_MASK;
	while (bytes > 0) {
		pg = (struct arm_small_page *)list;
		pg->addr = mem;
		if (pagetable)
			TAILQ_INSERT_HEAD(&pages_wt, pg, pg_list);
		else
			TAILQ_INSERT_HEAD(&pages_normal, pg, pg_list);
		list = (char *)list + sizeof(*pg);
		mem = (char *)mem + PAGE_SIZE;
		bytes -= PAGE_SIZE;
	}
}
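
/*
 * UMA backend page allocator.  Try the pre-loaded pools first; if they
 * are empty, fall back to kmem_malloc() (for L2 page table pages that
 * need a write-through mapping) or to vm_page_alloc() plus the linear
 * mapping set up in arm_init_smallalloc().
 */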
void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	void *ret;
	struct arm_small_page *sp;
	TAILQ_HEAD(,arm_small_page) *head;
	static vm_pindex_t color;
	vm_page_t m;

	*flags = UMA_SLAB_PRIV;
	/*
	 * For CPUs where we set up page tables as write back, there's no
	 * need to maintain two separate pools.
	 */
	if (zone == l2zone && pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt)
		head = (void *)&pages_wt;
	else
		head = (void *)&pages_normal;

	mtx_lock(&smallalloc_mtx);
	sp = TAILQ_FIRST(head);

	if (!sp) {
		int pflags;

		mtx_unlock(&smallalloc_mtx);
		if (zone == l2zone &&
		    pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt) {
			*flags = UMA_SLAB_KMEM;
			ret = ((void *)kmem_malloc(kmem_map, bytes, M_NOWAIT));
			return (ret);
		}
		if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
			pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
		else
			pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
		if (wait & M_ZERO)
			pflags |= VM_ALLOC_ZERO;
		for (;;) {
			m = vm_page_alloc(NULL, color++,
			    pflags | VM_ALLOC_NOOBJ);
			if (m == NULL) {
				if (wait & M_NOWAIT)
					return (NULL);
				VM_WAIT;
			} else
				break;
		}
		ret = (void *)arm_ptovirt(VM_PAGE_TO_PHYS(m));
		if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
			bzero(ret, PAGE_SIZE);
		return (ret);
	}
	TAILQ_REMOVE(head, sp, pg_list);
	TAILQ_INSERT_HEAD(&free_pgdesc, sp, pg_list);
	ret = sp->addr;
	mtx_unlock(&smallalloc_mtx);
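	if ((wait & M_ZERO))
		bzero(ret, bytes);
	return (ret);
}

/*
 * Release memory obtained from uma_small_alloc().  UMA_SLAB_KMEM slabs
 * go back to the kernel map; addresses at or above KERNBASE return to
 * the pre-loaded pools, sorted by the cacheability of their mapping;
 * anything below KERNBASE lives in the linear map, so its backing page
 * is unwired and freed directly.
 */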
void
uma_small_free(void *mem, int size, u_int8_t flags)
{
	pd_entry_t *pd;
	pt_entry_t *pt;

	if (flags & UMA_SLAB_KMEM)
		kmem_free(kmem_map, (vm_offset_t)mem, size);
	else {
		struct arm_small_page *sp;

		if ((vm_offset_t)mem >= KERNBASE) {
			mtx_lock(&smallalloc_mtx);
			sp = TAILQ_FIRST(&free_pgdesc);
			KASSERT(sp != NULL,
			    ("No more free page descriptors?"));
			TAILQ_REMOVE(&free_pgdesc, sp, pg_list);
			sp->addr = mem;
			pmap_get_pde_pte(kernel_pmap, (vm_offset_t)mem, &pd,
			    &pt);
			if ((*pd & pte_l1_s_cache_mask) ==
			    pte_l1_s_cache_mode_pt &&
			    pte_l1_s_cache_mode_pt != pte_l1_s_cache_mode)
				TAILQ_INSERT_HEAD(&pages_wt, sp, pg_list);
			else
				TAILQ_INSERT_HEAD(&pages_normal, sp, pg_list);
			mtx_unlock(&smallalloc_mtx);
		} else {
			vm_page_t m;
			vm_paddr_t pa = vtophys((vm_offset_t)mem);

			m = PHYS_TO_VM_PAGE(pa);
			m->wire_count--;
			vm_page_free(m);
			atomic_subtract_int(&cnt.v_wire_count, 1);
		}
	}
}

#endif /* ARM_USE_SMALL_ALLOC */