/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/sf_buf.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/unistd.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/sysarch.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

/*
 * struct switchframe and trapframe must both be a multiple of 8
 * for correct stack alignment.
 */
CTASSERT(sizeof(struct switchframe) == 24);
CTASSERT(sizeof(struct trapframe) == 80);

#ifndef ARM_USE_SMALL_ALLOC

#ifndef NSFBUFS
#define NSFBUFS		(512 + maxusers * 16)
#endif

static int nsfbufs;
static int nsfbufspeak;
static int nsfbufsused;

SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0,
    "Maximum number of sendfile(2) sf_bufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0,
    "Number of sendfile(2) sf_bufs at peak usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufsused, CTLFLAG_RD, &nsfbufsused, 0,
    "Number of sendfile(2) sf_bufs in use");

static void	sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;
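
/*
 * Hash a page into a bucket by its index in vm_page_array, masked down
 * to the table size; physically adjacent pages land in adjacent buckets.
 */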
#define	SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;
#endif /* !ARM_USE_SMALL_ALLOC */

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(register struct thread *td1, register struct proc *p2,
    struct thread *td2, int flags)
{
	struct pcb *pcb2;
	struct trapframe *tf;
	struct switchframe *sf;
	struct mdproc *mdp2;

	if ((flags & RFPROC) == 0)
		return;
	pcb2 = (struct pcb *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1;
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE);
#endif
#endif
	td2->td_pcb = pcb2;
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
	mdp2 = &p2->p_md;
	bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2));
	pcb2->un_32.pcb32_und_sp = td2->td_kstack + USPACE_UNDEF_STACK_TOP;
	pcb2->un_32.pcb32_sp = td2->td_kstack +
	    USPACE_SVC_STACK_TOP - sizeof(*pcb2);

	td2->td_frame = tf = (struct trapframe *)STACKALIGN(
	    pcb2->un_32.pcb32_sp - sizeof(struct trapframe));
	*tf = *td1->td_frame;
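
	/*
	 * Build a switchframe below the trapframe: when cpu_switch() first
	 * runs the child it "returns" into fork_trampoline, which calls the
	 * function in r4 (fork_return) with the argument in r5 (td2).
	 * Clearing the carry bit and zeroing r0/r1 makes the child observe
	 * a successful fork() that returned 0.
	 */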
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td2;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	pcb2->un_32.pcb32_sp = (u_int)sf;
	KASSERT((pcb2->un_32.pcb32_sp & 7) == 0,
	    ("cpu_fork: Incorrect stack alignment"));

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_cspr = 0;
#ifdef ARM_TP_ADDRESS
	td2->td_md.md_tp = *(register_t *)ARM_TP_ADDRESS;
#else
	td2->td_md.md_tp = (register_t) get_tls();
#endif
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

/*
 * Detach mapped page and release resources back to the system.
 */
void
sf_buf_free(struct sf_buf *sf)
{
#ifndef ARM_USE_SMALL_ALLOC
	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
		pmap_kremove(sf->kva);
		sf->m = NULL;
		LIST_REMOVE(sf, list_entry);
		if (sf_buf_alloc_want > 0)
			wakeup(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
#endif
}

#ifndef ARM_USE_SMALL_ALLOC

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
	sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
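	/*
	 * Carve one page of kernel virtual address space out of sf_base for
	 * each buffer; the physical page is only installed later, by
	 * pmap_kenter() in sf_buf_alloc().
	 */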
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}

/*
 * Get an sf_buf from the freelist. Will block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
#ifdef ARM_USE_SMALL_ALLOC
	return ((struct sf_buf *)m);
#else
	struct sf_head *hash_list;
	struct sf_buf *sf;
	int error;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
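	/*
	 * If this page already has an sf_buf, reuse it: take another
	 * reference, pulling the buffer off the free list if this is the
	 * first live reference.
	 */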
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
			goto done;
		}
	}
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		SFSTAT_INC(sf_allocwait);
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	pmap_kenter(sf->kva, VM_PAGE_TO_PHYS(sf->m));
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
#endif
}

void
cpu_set_syscall_retval(struct thread *td, int error)
{
	struct trapframe *frame;
	int fixup;
#ifdef __ARMEB__
	u_int32_t insn;
#endif

	frame = td->td_frame;
	fixup = 0;

#ifdef __ARMEB__
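	/*
	 * On big-endian ARM, __syscall(2) returns a 64-bit off_t, so a
	 * 32-bit result must be delivered in r1 (the low word) rather
	 * than r0.  Decode the syscall number from the swi instruction to
	 * see whether this return needs that fixup; the lseek-family
	 * calls really do return an off_t and are left alone.
	 */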
	insn = *(u_int32_t *)(frame->tf_pc - INSN_SIZE);
	if ((insn & 0x000fffff) == SYS___syscall) {
		register_t *ap = &frame->tf_r0;
		register_t code = ap[_QUAD_LOWWORD];
		if (td->td_proc->p_sysent->sv_mask)
			code &= td->td_proc->p_sysent->sv_mask;
		fixup = (code != SYS_freebsd6_lseek && code != SYS_lseek)
		    ? 1 : 0;
	}
#endif
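
	/*
	 * The ARM syscall convention reports errors through the PSR carry
	 * bit: clear means success (results in r0/r1), set means r0 holds
	 * an errno value.
	 */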
	switch (error) {
	case 0:
		if (fixup) {
			frame->tf_r0 = 0;
			frame->tf_r1 = td->td_retval[0];
		} else {
			frame->tf_r0 = td->td_retval[0];
			frame->tf_r1 = td->td_retval[1];
		}
		frame->tf_spsr &= ~PSR_C_bit;	/* carry bit */
		break;
	case ERESTART:
		/*
		 * Reconstruct the pc to point at the swi.
		 */
		frame->tf_pc -= INSN_SIZE;
		break;
	case EJUSTRETURN:
		/* nothing to do */
		break;
	default:
		frame->tf_r0 = error;
		frame->tf_spsr |= PSR_C_bit;	/* carry bit */
		break;
	}
}

/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall.  Put enough state in the new thread's PCB to get it to go back to
 * userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that are from other
 * sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct trapframe *tf;
	struct switchframe *sf;

	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
	bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));
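	/*
	 * Set up a switchframe just as cpu_fork() does, so the new thread
	 * first resumes in fork_trampoline and enters user mode through
	 * fork_return() with a zero return value.
	 */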
	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
	td->td_pcb->un_32.pcb32_und_sp = td->td_kstack + USPACE_UNDEF_STACK_TOP;
	KASSERT((td->td_pcb->un_32.pcb32_sp & 7) == 0,
	    ("cpu_set_upcall: Incorrect stack alignment"));

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_cspr = 0;
}

/*
 * Set the machine state for performing an upcall that has to
 * be done in thread_userret() so that those upcalls generated
 * in thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
	stack_t *stack)
{
	struct trapframe *tf = td->td_frame;
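
	/*
	 * Point the thread straight at its upcall: pc = entry, r0 = arg,
	 * and a user stack pointer carved from the supplied stack, kept
	 * 8-byte aligned as the EABI requires.
	 */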
	tf->tf_usr_sp = STACKALIGN((int)stack->ss_sp + stack->ss_size
	    - sizeof(struct trapframe));
	tf->tf_pc = (int)entry;
	tf->tf_r0 = (int)arg;
	tf->tf_spsr = PSR_USR32_MODE;
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

	td->td_md.md_tp = (register_t)tls_base;
	if (td == curthread) {
		critical_enter();
#ifdef ARM_TP_ADDRESS
		*(register_t *)ARM_TP_ADDRESS = (register_t)tls_base;
#else
		set_tls((void *)tls_base);
#endif
		critical_exit();
	}
	return (0);
}

void
cpu_thread_exit(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages *
	    PAGE_SIZE) - 1;
	/*
	 * Ensure td_frame is aligned to an 8 byte boundary as it will be
	 * placed into the stack pointer which must be 8 byte aligned in
	 * the ARM EABI.
	 */
	td->td_frame = (struct trapframe *)STACKALIGN((u_int)td->td_kstack +
	    USPACE_SVC_STACK_TOP - sizeof(struct pcb) -
	    sizeof(struct trapframe));
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td->td_kstack, td->td_kstack_pages * PAGE_SIZE);
#endif
#endif
}

void
cpu_thread_free(struct thread *td)
{
}

void
cpu_thread_clean(struct thread *td)
{
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
	struct switchframe *sf;
	struct trapframe *tf;

	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)func;
	sf->sf_r5 = (u_int)arg;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
	KASSERT((td->td_pcb->un_32.pcb32_sp & 7) == 0,
	    ("cpu_set_fork_handler: Incorrect stack alignment"));
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

	if (busdma_swi_pending)
		busdma_swi();
}

void
cpu_exit(struct thread *td)
{
}

#ifdef ARM_USE_SMALL_ALLOC

static TAILQ_HEAD(,arm_small_page) pages_normal =
	TAILQ_HEAD_INITIALIZER(pages_normal);
static TAILQ_HEAD(,arm_small_page) pages_wt =
	TAILQ_HEAD_INITIALIZER(pages_wt);
static TAILQ_HEAD(,arm_small_page) free_pgdesc =
	TAILQ_HEAD_INITIALIZER(free_pgdesc);

extern uma_zone_t l2zone;

struct mtx smallalloc_mtx;

vm_offset_t alloc_firstaddr;

#ifdef ARM_HAVE_SUPERSECTIONS
#define S_FRAME	L1_SUP_FRAME
#define S_SIZE	L1_SUP_SIZE
#else
#define S_FRAME	L1_S_FRAME
#define S_SIZE	L1_S_SIZE
#endif
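
/*
 * Translate a physical address to the virtual address at which
 * arm_init_smallalloc() mapped it, by walking the dump_avail ranges and
 * accumulating the section-rounded size of every range below it.
 */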
vm_offset_t
arm_ptovirt(vm_paddr_t pa)
{
	int i;
	vm_offset_t addr = alloc_firstaddr;

	KASSERT(alloc_firstaddr != 0, ("arm_ptovirt called too early ?"));
	for (i = 0; dump_avail[i + 1]; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			break;
		addr += (dump_avail[i + 1] & S_FRAME) + S_SIZE -
		    (dump_avail[i] & S_FRAME);
	}
	KASSERT(dump_avail[i + 1] != 0, ("Trying to access invalid physical address"));
	return (addr + (pa - (dump_avail[i] & S_FRAME)));
}

void
arm_init_smallalloc(void)
{
	vm_offset_t to_map = 0, mapaddr;
	int i;

	/*
	 * We need to use dump_avail and not phys_avail, since we want to
	 * map the whole memory and not just the memory available to the VM
	 * to be able to do a pa => va association for any address.
	 */

	for (i = 0; dump_avail[i + 1]; i += 2) {
		to_map += (dump_avail[i + 1] & S_FRAME) + S_SIZE -
		    (dump_avail[i] & S_FRAME);
	}
	alloc_firstaddr = mapaddr = KERNBASE - to_map;
	for (i = 0; dump_avail[i + 1]; i += 2) {
		vm_offset_t size = (dump_avail[i + 1] & S_FRAME) +
		    S_SIZE - (dump_avail[i] & S_FRAME);
		vm_offset_t did = 0;

		while (size > 0) {
#ifdef ARM_HAVE_SUPERSECTIONS
			pmap_kenter_supersection(mapaddr,
			    (dump_avail[i] & L1_SUP_FRAME) + did,
			    SECTION_CACHE);
#else
			pmap_kenter_section(mapaddr,
			    (dump_avail[i] & L1_S_FRAME) + did, SECTION_CACHE);
#endif
			mapaddr += S_SIZE;
			did += S_SIZE;
			size -= S_SIZE;
		}
	}
}

void
arm_add_smallalloc_pages(void *list, void *mem, int bytes, int pagetable)
{
	struct arm_small_page *pg;

	bytes &= ~PAGE_MASK;
	while (bytes > 0) {
		pg = (struct arm_small_page *)list;
		pg->addr = mem;
		if (pagetable)
			TAILQ_INSERT_HEAD(&pages_wt, pg, pg_list);
		else
			TAILQ_INSERT_HEAD(&pages_normal, pg, pg_list);
		list = (char *)list + sizeof(*pg);
		mem = (char *)mem + PAGE_SIZE;
		bytes -= PAGE_SIZE;
	}
}

void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	void *ret;
	struct arm_small_page *sp;
	TAILQ_HEAD(,arm_small_page) *head;
	vm_page_t m;

	*flags = UMA_SLAB_PRIV;
	/*
	 * For CPUs where we set up page tables as write back, there's no
	 * need to maintain two separate pools.
	 */
	if (zone == l2zone && pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt)
		head = (void *)&pages_wt;
	else
		head = (void *)&pages_normal;

	mtx_lock(&smallalloc_mtx);
	sp = TAILQ_FIRST(head);
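
	/*
	 * If the preloaded pool is empty, fall back to the VM: pages for
	 * the L2 page-table zone come from kmem when page tables need
	 * special cache attributes; everything else gets a raw wired page
	 * reached through the direct map built by arm_init_smallalloc().
	 */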
	if (!sp) {
		int pflags;

		mtx_unlock(&smallalloc_mtx);
		if (zone == l2zone &&
		    pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt) {
			*flags = UMA_SLAB_KMEM;
			ret = ((void *)kmem_malloc(kmem_arena, bytes,
			    M_NOWAIT));
			return (ret);
		}
		pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
		for (;;) {
			m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
			if (m == NULL) {
				if (wait & M_NOWAIT)
					return (NULL);
				VM_WAIT;
			} else
				break;
		}
		ret = (void *)arm_ptovirt(VM_PAGE_TO_PHYS(m));
		if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
			bzero(ret, PAGE_SIZE);
		return (ret);
	}
	TAILQ_REMOVE(head, sp, pg_list);
	TAILQ_INSERT_HEAD(&free_pgdesc, sp, pg_list);
	ret = sp->addr;
	mtx_unlock(&smallalloc_mtx);
	if ((wait & M_ZERO))
		bzero(ret, bytes);
	return (ret);
}

void
uma_small_free(void *mem, int size, u_int8_t flags)
{
	pd_entry_t *pd;
	pt_entry_t *pt;

	if (flags & UMA_SLAB_KMEM)
		kmem_free(kmem_arena, (vm_offset_t)mem, size);
	else {
		struct arm_small_page *sp;
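
		/*
		 * Addresses above KERNBASE come from the preloaded pool:
		 * recycle a page descriptor and return the page to the
		 * proper queue based on the cacheability of its current
		 * section mapping.  Anything else is an ordinary wired
		 * page: free it and drop the wire count.
		 */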
		if ((vm_offset_t)mem >= KERNBASE) {
			mtx_lock(&smallalloc_mtx);
			sp = TAILQ_FIRST(&free_pgdesc);
			KASSERT(sp != NULL, ("No more free page descriptor ?"));
			TAILQ_REMOVE(&free_pgdesc, sp, pg_list);
			sp->addr = mem;
			pmap_get_pde_pte(kernel_pmap, (vm_offset_t)mem, &pd,
			    &pt);
			if ((*pd & pte_l1_s_cache_mask) ==
			    pte_l1_s_cache_mode_pt &&
			    pte_l1_s_cache_mode_pt != pte_l1_s_cache_mode)
				TAILQ_INSERT_HEAD(&pages_wt, sp, pg_list);
			else
				TAILQ_INSERT_HEAD(&pages_normal, sp, pg_list);
			mtx_unlock(&smallalloc_mtx);
		} else {
			vm_page_t m;
			vm_paddr_t pa = vtophys((vm_offset_t)mem);

			m = PHYS_TO_VM_PAGE(pa);
			m->wire_count--;
			vm_page_free(m);
			atomic_subtract_int(&cnt.v_wire_count, 1);
		}
	}
}

#endif /* ARM_USE_SMALL_ALLOC */