/*-
 * Copyright (c) 2001 Jake Burkholder.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef	_MACHINE_SMP_H_
#define	_MACHINE_SMP_H_

#define	CPU_CLKSYNC	1
#define	CPU_INIT	2
#define	CPU_BOOTSTRAP	3

#ifndef	LOCORE

#include <machine/intr_machdep.h>
#include <machine/tte.h>
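
/* Interrupt dispatch status bits: dispatch in progress / dispatch NACKed. */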
#define	IDR_BUSY	(1<<0)
#define	IDR_NACK	(1<<1)
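
/* IPIs are delivered at the PIL of the matching interrupt level. */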
#define	IPI_AST		PIL_AST
#define	IPI_RENDEZVOUS	PIL_RENDEZVOUS
#define	IPI_STOP	PIL_STOP
#define	IPI_PREEMPT	PIL_PREEMPT

#define	IPI_RETRIES	5000
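
/*
 * State handed off to a CPU being started; csa_state advances through the
 * CPU_* handshake stages defined above.
 */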
struct cpu_start_args {
	u_int	csa_count;
	u_int	csa_state;
	vm_offset_t csa_pcpu;
	u_int	csa_cpuid;
};
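
/*
 * Argument blocks passed (by address) to the trap-level IPI handlers; the
 * leading word of each doubles as the acknowledgement mask.
 */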
struct ipi_cache_args {
	u_int	ica_mask;
	vm_paddr_t ica_pa;
};

struct ipi_tlb_args {
	u_int	ita_mask;
	struct	pmap *ita_pmap;
	u_long	ita_start;
	u_long	ita_end;
};
#define	ita_va	ita_start

struct pcpu;
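
/*
 * Machine-dependent MP interface: CPU bootstrap/shutdown, the low-level
 * IPI send routines, and the handlers run on the target CPUs.
 */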
void	cpu_mp_bootstrap(struct pcpu *pc);
void	cpu_mp_shutdown(void);

void	cpu_ipi_selected(int cpus, uint16_t *cpulist, u_long d0, u_long d1,
	    u_long d2, uint64_t *ackmask);
void	cpu_ipi_send(u_int mid, u_long d0, u_long d1, u_long d2);

void	cpu_ipi_ast(struct trapframe *tf);
void	cpu_ipi_stop(struct trapframe *tf);
void	cpu_ipi_preempt(struct trapframe *tf);

void	ipi_selected(u_int cpus, u_int ipi);
void	ipi_all_but_self(u_int ipi);

vm_offset_t mp_tramp_alloc(void);
void	mp_set_tsb_desc_ra(vm_paddr_t);
void	mp_add_nucleus_mapping(vm_offset_t, uint64_t);

extern	struct mtx ipi_mtx;
extern	struct ipi_cache_args ipi_cache_args;
extern	struct ipi_tlb_args ipi_tlb_args;
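
/*
 * MP startup trampoline (presumably copied into place by mp_tramp_alloc()
 * and patched with the TTE slots, TSB descriptor real address, and entry
 * function before a secondary CPU jumps to it).
 */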
extern	vm_offset_t mp_tramp;
extern	char *mp_tramp_code;
extern	u_long mp_tramp_code_len;
extern	u_long mp_tramp_tte_slots;
extern	u_long mp_tramp_tsb_desc_ra;
extern	u_long mp_tramp_func;

extern	void mp_startup(void);
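
/*
 * Trap-level entry points for incoming IPIs; the tl_invl* handlers do the
 * actual TLB invalidation work.
 */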
extern	char tl_ipi_level[];
extern	char tl_invltlb[];
extern	char tl_invlctx[];
extern	char tl_invlpg[];
extern	char tl_invlrng[];
extern	char tl_tsbupdate[];
extern	char tl_ttehashupdate[];

/* Referenced by the inline TLB demap wrappers below. */
extern	char tl_ipi_tlb_context_demap[];
extern	char tl_ipi_tlb_page_demap[];
extern	char tl_ipi_tlb_range_demap[];

#ifdef SMP

#if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)
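
/*
 * Inline IPI wrappers used by the pmap layer.  Each returns an opaque
 * cookie: a pointer to the acknowledgement mask if IPIs were sent, or
 * NULL if there was nothing to do.  The cookie must be handed to
 * ipi_wait() once the local operation has completed.  A hypothetical
 * caller (the local demap routine named here is illustrative only):
 *
 *	void *cookie;
 *
 *	cookie = ipi_tlb_page_demap(pm, va);	demap on the other CPUs
 *	tlb_page_demap(pm, va);			demap locally
 *	ipi_wait(cookie);			wait for acks, drop ipi_mtx
 *
 * XXX: the calls below still pass the four-argument cpu_ipi_selected()
 * form rather than the six-argument prototype declared above.
 */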
static __inline void *
ipi_dcache_page_inval(void *func, vm_paddr_t pa)
{
	struct ipi_cache_args *ica;

	if (smp_cpus == 1)
		return (NULL);
	ica = &ipi_cache_args;
	mtx_lock_spin(&ipi_mtx);
	ica->ica_mask = all_cpus;
	ica->ica_pa = pa;
	cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
	return (&ica->ica_mask);
}

static __inline void *
ipi_icache_page_inval(void *func, vm_paddr_t pa)
{
	struct ipi_cache_args *ica;

	if (smp_cpus == 1)
		return (NULL);
	ica = &ipi_cache_args;
	mtx_lock_spin(&ipi_mtx);
	ica->ica_mask = all_cpus;
	ica->ica_pa = pa;
	cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
	return (&ica->ica_mask);
}
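
/*
 * TLB demap IPIs: sent only to the other CPUs that have the pmap active;
 * the acknowledgement mask also includes the sending CPU, which clears
 * its own bit in ipi_wait().
 */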
static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{
	struct ipi_tlb_args *ita;
	u_int cpus;

	if (smp_cpus == 1)
		return (NULL);
	if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
		return (NULL);
	ita = &ipi_tlb_args;
	mtx_lock_spin(&ipi_mtx);
	ita->ita_mask = cpus | PCPU_GET(cpumask);
	ita->ita_pmap = pm;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
	    (u_long)ita);
	return (&ita->ita_mask);
}

static __inline void *
ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
	struct ipi_tlb_args *ita;
	u_int cpus;

	if (smp_cpus == 1)
		return (NULL);
	if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
		return (NULL);
	ita = &ipi_tlb_args;
	mtx_lock_spin(&ipi_mtx);
	ita->ita_mask = cpus | PCPU_GET(cpumask);
	ita->ita_pmap = pm;
	ita->ita_va = va;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
	return (&ita->ita_mask);
}

static __inline void *
ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{
	struct ipi_tlb_args *ita;
	u_int cpus;

	if (smp_cpus == 1)
		return (NULL);
	if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
		return (NULL);
	ita = &ipi_tlb_args;
	mtx_lock_spin(&ipi_mtx);
	ita->ita_mask = cpus | PCPU_GET(cpumask);
	ita->ita_pmap = pm;
	ita->ita_start = start;
	ita->ita_end = end;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_range_demap, (u_long)ita);
	return (&ita->ita_mask);
}
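
/*
 * Acknowledge an IPI cookie: clear our own bit from the mask, spin until
 * every other CPU has cleared its bit, then release the IPI mutex.  A
 * NULL cookie (no IPI was sent) is accepted and ignored.
 */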
static __inline void
ipi_wait(void *cookie)
{
	volatile u_int *mask;

	if ((mask = cookie) != NULL) {
		atomic_clear_int(mask, PCPU_GET(cpumask));
		while (*mask != 0)
			;
		mtx_unlock_spin(&ipi_mtx);
	}
}

#endif /* _MACHINE_PMAP_H_ && _SYS_MUTEX_H_ */

#else
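
/* UP kernels: no other CPUs to notify, so these all collapse to no-ops. */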
static __inline void *
ipi_dcache_page_inval(void *func, vm_paddr_t pa)
{

	return (NULL);
}

static __inline void *
ipi_icache_page_inval(void *func, vm_paddr_t pa)
{

	return (NULL);
}

static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{

	return (NULL);
}

static __inline void *
ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
{

	return (NULL);
}

static __inline void *
ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{

	return (NULL);
}

static __inline void
ipi_wait(void *cookie)
{

}

#endif /* SMP */

#endif /* !LOCORE */

#endif /* !_MACHINE_SMP_H_ */