2 * Copyright (c) 2001 Jake Burkholder.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
/*
 * sparc64 SMP support constants.
 * NOTE(review): this is a partial extract; the leading integer on each
 * line is a line number fused in by extraction, not part of the code,
 * and several original lines are missing between entries.
 */
29 #ifndef _MACHINE_SMP_H_
30 #define _MACHINE_SMP_H_
/*
 * Per-CPU startup synchronization state flags (distinct bit values;
 * presumably stages of AP bring-up — tick/stick timer sync, then
 * bootstrap — TODO confirm against the cpu_start_args users).
 */
34 #define CPU_TICKSYNC 1
35 #define CPU_STICKSYNC 2
37 #define CPU_BOOTSTRAP 4
42 #include <sys/sched.h>
44 #include <machine/intr_machdep.h>
45 #include <machine/pcb.h>
46 #include <machine/tte.h>
/*
 * Interrupt dispatch status register (IDR) bits.  NOTE(review): the
 * Cheetah "all busy/nack" masks alternate busy/nack bit pairs per
 * target; verify against the UltraSPARC-III manuals.
 */
48 #define IDR_BUSY 0x0000000000000001ULL
49 #define IDR_NACK 0x0000000000000002ULL
50 #define IDR_CHEETAH_ALL_BUSY 0x5555555555555555ULL
51 #define IDR_CHEETAH_ALL_NACK (~IDR_CHEETAH_ALL_BUSY)
52 #define IDR_CHEETAH_MAX_BN_PAIRS 32
53 #define IDR_JALAPENO_MAX_BN_PAIRS 4
/* Interrupt dispatch command field shifts (target ID / busy-nack pair). */
55 #define IDC_ITID_SHIFT 14
56 #define IDC_BN_SHIFT 24
/*
 * IPI numbers are simply the PIL (processor interrupt level) at which
 * the corresponding handler runs; IPI_STOP_HARD is aliased to IPI_STOP.
 */
58 #define IPI_AST PIL_AST
59 #define IPI_RENDEZVOUS PIL_RENDEZVOUS
60 #define IPI_PREEMPT PIL_PREEMPT
61 #define IPI_STOP PIL_STOP
62 #define IPI_STOP_HARD PIL_STOP
/* Retry budget when an IPI dispatch is NACKed (used by the senders). */
64 #define IPI_RETRIES 5000
/*
 * Arguments handed to a starting (application) CPU.
 * NOTE(review): members between the opening brace and csa_ttes, and the
 * closing brace, are missing from this extract.
 */
66 struct cpu_start_args {
/* TTEs mapping the per-CPU pages for the starting CPU. */
74 struct tte csa_ttes[PCPU_PAGES];
/*
 * Shared argument blocks passed by pointer to the IPI trap handlers.
 * NOTE(review): most members are missing from this extract; ita_pmap
 * belongs to struct ipi_tlb_args, whose declaration line is not shown.
 */
77 struct ipi_cache_args {
/* Target pmap for a TLB demap IPI (member of struct ipi_tlb_args). */
84 struct pmap *ita_pmap;
/* Page demap reuses the range-demap start field as the single VA. */
88 #define ita_va ita_start
/* Saved PCBs of CPUs parked by IPI_STOP (indexed by CPU id, presumably). */
92 extern struct pcb stoppcbs[];
/* AP entry point after the trampoline, and MP shutdown hook. */
94 void cpu_mp_bootstrap(struct pcpu *pc);
95 void cpu_mp_shutdown(void);
/*
 * Low-level IPI dispatch: (cpu mask/id, data0, data1, data2).  Set to a
 * CPU-model-specific implementation at runtime via the function pointer.
 */
97 typedef void cpu_ipi_selected_t(u_int, u_long, u_long, u_long);
98 extern cpu_ipi_selected_t *cpu_ipi_selected;
100 void mp_init(u_int cpu_impl);
/* Spin mutex serializing IPI argument blocks below. */
102 extern struct mtx ipi_mtx;
103 extern struct ipi_cache_args ipi_cache_args;
104 extern struct ipi_tlb_args ipi_tlb_args;
/* MP boot trampoline code blob and its relocation parameters. */
106 extern char *mp_tramp_code;
107 extern u_long mp_tramp_code_len;
108 extern u_long mp_tramp_tlb_slots;
109 extern u_long mp_tramp_func;
111 extern void mp_startup(void);
/* Trap-level IPI handlers; addresses are passed as IPI data words. */
113 extern char tl_ipi_cheetah_dcache_page_inval[];
114 extern char tl_ipi_spitfire_dcache_page_inval[];
115 extern char tl_ipi_spitfire_icache_page_inval[];
117 extern char tl_ipi_level[];
118 extern char tl_ipi_tlb_context_demap[];
119 extern char tl_ipi_tlb_page_demap[];
120 extern char tl_ipi_tlb_range_demap[];
/*
 * Send the PIL-level IPI `ipi` to every CPU except the caller, using the
 * tl_ipi_level handler.  NOTE(review): the return type line, braces and
 * any locking around the dispatch are missing from this extract.
 */
123 ipi_all_but_self(u_int ipi)
126 cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)tl_ipi_level, ipi);
/*
 * Send the PIL-level IPI `ipi` to the CPUs in mask `cpus` via the
 * tl_ipi_level handler.  NOTE(review): return type line and braces are
 * missing from this extract.
 */
130 ipi_selected(u_int cpus, u_int ipi)
133 cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
/*
 * The inline IPI helpers below need pmap and mutex definitions, so they
 * are only compiled once both headers have been included.
 */
136 #if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)
/*
 * Ask the other CPUs to invalidate the D-cache lines for physical page
 * `pa`, using handler `func`.  Locks ipi_mtx (released later by
 * ipi_wait()), records the participating CPUs in the shared
 * ipi_cache_args block, and returns a cookie (&ica->ica_mask) for
 * ipi_wait().  NOTE(review): the lines storing `pa` into the argument
 * block and any early-out for the UP case are missing from this extract.
 */
138 static __inline void *
139 ipi_dcache_page_inval(void *func, vm_paddr_t pa)
141 struct ipi_cache_args *ica;
146 ica = &ipi_cache_args;
147 mtx_lock_spin(&ipi_mtx);
148 ica->ica_mask = all_cpus;
150 cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
151 return (&ica->ica_mask);
/*
 * Same protocol as ipi_dcache_page_inval(), but for I-cache invalidation
 * of physical page `pa`: lock ipi_mtx, publish the CPU mask in
 * ipi_cache_args, dispatch `func` to the other CPUs and return the
 * ipi_wait() cookie.  NOTE(review): the `pa` store into the argument
 * block is missing from this extract.
 */
154 static __inline void *
155 ipi_icache_page_inval(void *func, vm_paddr_t pa)
157 struct ipi_cache_args *ica;
162 ica = &ipi_cache_args;
163 mtx_lock_spin(&ipi_mtx);
164 ica->ica_mask = all_cpus;
166 cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
167 return (&ica->ica_mask);
/*
 * Demap the whole TLB context of `pm` on the other CPUs that have it
 * active.  If no other CPU runs this pmap, the IPI is skipped
 * (NOTE(review): the body of that early-out branch is missing from this
 * extract).  Otherwise: lock ipi_mtx, publish the waiter mask (remote
 * CPUs plus ourselves), dispatch tl_ipi_tlb_context_demap, and return
 * the ipi_wait() cookie.  The ita_pmap store and closing lines are also
 * missing here.
 */
170 static __inline void *
171 ipi_tlb_context_demap(struct pmap *pm)
173 struct ipi_tlb_args *ita;
179 if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) {
184 mtx_lock_spin(&ipi_mtx);
185 ita->ita_mask = cpus | PCPU_GET(cpumask);
187 cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
189 return (&ita->ita_mask);
/*
 * Demap the single page at `va` in pmap `pm` on the other CPUs that have
 * the pmap active; early-out when no remote CPU does (branch body
 * missing from this extract).  Locks ipi_mtx, publishes the waiter mask
 * and dispatches tl_ipi_tlb_page_demap with the shared argument block;
 * returns the ipi_wait() cookie.  NOTE(review): the stores of `pm` and
 * `va` (via the ita_va alias) are missing here.
 */
192 static __inline void *
193 ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
195 struct ipi_tlb_args *ita;
201 if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) {
206 mtx_lock_spin(&ipi_mtx);
207 ita->ita_mask = cpus | PCPU_GET(cpumask);
210 cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
211 return (&ita->ita_mask);
/*
 * Demap the VA range [start, end) of pmap `pm` on the other CPUs that
 * have it active; early-out when none do (branch body missing from this
 * extract).  Locks ipi_mtx, publishes the waiter mask, records the range
 * start (the `end` store is missing here), dispatches
 * tl_ipi_tlb_range_demap and returns the ipi_wait() cookie.
 */
214 static __inline void *
215 ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
217 struct ipi_tlb_args *ita;
223 if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) {
228 mtx_lock_spin(&ipi_mtx);
229 ita->ita_mask = cpus | PCPU_GET(cpumask);
231 ita->ita_start = start;
233 cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_range_demap, (u_long)ita);
234 return (&ita->ita_mask);
/*
 * Complete an IPI transaction started by one of the helpers above.
 * `cookie` is the &...->_mask pointer they returned (may be NULL for the
 * early-out case).  Clears our own CPU bit from the shared mask and
 * releases ipi_mtx; NOTE(review): the loop that presumably spins until
 * the remote CPUs clear their bits is missing from this extract —
 * confirm against the full source.
 */
238 ipi_wait(void *cookie)
240 volatile u_int *mask;
242 if ((mask = cookie) != NULL) {
243 atomic_clear_int(mask, PCPU_GET(cpumask));
246 mtx_unlock_spin(&ipi_mtx);
251 #endif /* _MACHINE_PMAP_H_ && _SYS_MUTEX_H_ */
/*
 * Uniprocessor (non-SMP) stubs: same signatures as the SMP versions but
 * with every parameter unused.  NOTE(review): the stub bodies
 * (presumably `return (NULL);` / empty) are missing from this extract.
 */
259 static __inline void *
260 ipi_dcache_page_inval(void *func __unused, vm_paddr_t pa __unused)
266 static __inline void *
267 ipi_icache_page_inval(void *func __unused, vm_paddr_t pa __unused)
273 static __inline void *
274 ipi_tlb_context_demap(struct pmap *pm __unused)
280 static __inline void *
281 ipi_tlb_page_demap(struct pmap *pm __unused, vm_offset_t va __unused)
287 static __inline void *
288 ipi_tlb_range_demap(struct pmap *pm __unused, vm_offset_t start __unused,
289 __unused vm_offset_t end)
/* UP ipi_wait(): nothing to wait for (body missing from this extract). */
296 ipi_wait(void *cookie)
/*
 * UP placeholders for the trap-level cache-invalidation handlers.
 * NOTE(review): return-type lines and bodies are missing from this
 * extract — presumably empty inline stubs; confirm against full source.
 */
302 tl_ipi_cheetah_dcache_page_inval(void)
308 tl_ipi_spitfire_dcache_page_inval(void)
314 tl_ipi_spitfire_icache_page_inval(void)
323 #endif /* !_MACHINE_SMP_H_ */