2 * Copyright (c) 2001 Jake Burkholder.
3 * Copyright (c) 2007 - 2011 Marius Strobl <marius@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
/*
 * NOTE(review): this file appears to be a line-numbered, partially sampled
 * paste of FreeBSD's sparc64 <machine/smp.h>; the stray leading integer on
 * each line is not valid C and many lines are missing.  Reconcile against
 * the full upstream header before making code changes.
 */
30 #ifndef _MACHINE_SMP_H_
31 #define _MACHINE_SMP_H_
/*
 * Per-CPU bring-up progress flags (bit values — presumably OR'd into a
 * status word during AP startup; confirm against the full header).
 */
35 #define CPU_TICKSYNC 1
36 #define CPU_STICKSYNC 2
38 #define CPU_BOOTSTRAP 4
42 #include <sys/param.h>
43 #include <sys/cpuset.h>
45 #include <sys/mutex.h>
47 #include <sys/sched.h>
50 #include <machine/atomic.h>
51 #include <machine/intr_machdep.h>
52 #include <machine/tte.h>
/*
 * Interrupt Dispatch Register (IDR) status bits for a single target:
 * BUSY — dispatch still in progress; NACK — target refused delivery.
 */
54 #define IDR_BUSY 0x0000000000000001ULL
55 #define IDR_NACK 0x0000000000000002ULL
/*
 * Cheetah-class CPUs report one BUSY/NACK bit pair per target; these masks
 * cover every pair position (0x5555... selects all BUSY bits, its
 * complement all NACK bits).
 */
56 #define IDR_CHEETAH_ALL_BUSY 0x5555555555555555ULL
57 #define IDR_CHEETAH_ALL_NACK (~IDR_CHEETAH_ALL_BUSY)
58 #define IDR_CHEETAH_MAX_BN_PAIRS 32
/* Jalapeno-class CPUs track far fewer simultaneous targets. */
59 #define IDR_JALAPENO_MAX_BN_PAIRS 4
/* Bit positions of the target ID and busy/nack pair index in a dispatch. */
61 #define IDC_ITID_SHIFT 14
62 #define IDC_BN_SHIFT 24
/*
 * IPI numbers map directly onto processor interrupt levels (PILs);
 * note IPI_STOP_HARD deliberately reuses PIL_STOP.
 */
64 #define IPI_AST PIL_AST
65 #define IPI_RENDEZVOUS PIL_RENDEZVOUS
66 #define IPI_PREEMPT PIL_PREEMPT
67 #define IPI_HARDCLOCK PIL_HARDCLOCK
68 #define IPI_STOP PIL_STOP
69 #define IPI_STOP_HARD PIL_STOP
/* Retry budget for a dispatch before the sender gives up/panics. */
71 #define IPI_RETRIES 5000
/*
 * Argument block handed to a starting CPU.
 * NOTE(review): most members (and the closing brace) are missing from this
 * sampled view; only the TTE array used to map the per-CPU pages survives.
 */
73 struct cpu_start_args {
81 struct tte csa_ttes[PCPU_PAGES];
/*
 * Argument block for cache-invalidation IPIs.
 * NOTE(review): members missing here; the visible ita_pmap/ita_va lines
 * below presumably belong to a separate ipi_tlb_args struct — confirm.
 */
84 struct ipi_cache_args {
96 struct pmap *ita_pmap;
/* Single-page demap reuses the range-start slot as the VA argument. */
100 #define ita_va ita_start
/* Saved PCBs for CPUs parked by IPI_STOP (one slot per CPU). */
105 extern struct pcb stoppcbs[];
/* AP entry/teardown hooks implemented in mp_machdep.c. */
107 void cpu_mp_bootstrap(struct pcpu *pc);
108 void cpu_mp_shutdown(void);
/*
 * IPI dispatch is indirected through function pointers so the correct
 * implementation (spitfire/cheetah/jalapeno) can be selected at boot.
 */
110 typedef void cpu_ipi_selected_t(cpuset_t, u_long, u_long, u_long);
111 extern cpu_ipi_selected_t *cpu_ipi_selected;
112 typedef void cpu_ipi_single_t(u_int, u_long, u_long, u_long);
113 extern cpu_ipi_single_t *cpu_ipi_single;
/*
 * ipi_mtx serializes IPI senders; the shared *_args blocks below are
 * only valid while it is held (see the inline senders further down).
 */
117 extern struct mtx ipi_mtx;
118 extern struct ipi_cache_args ipi_cache_args;
119 extern struct ipi_rd_args ipi_rd_args;
120 extern struct ipi_tlb_args ipi_tlb_args;
/* AP startup trampoline image and its patchable parameters. */
122 extern char *mp_tramp_code;
123 extern u_long mp_tramp_code_len;
124 extern u_long mp_tramp_tlb_slots;
125 extern u_long mp_tramp_func;
127 extern void mp_startup(void);
/*
 * Trap-level IPI handlers (assembly entry points); their addresses are
 * passed as the dispatch data of an IPI.
 */
129 extern char tl_ipi_cheetah_dcache_page_inval[];
130 extern char tl_ipi_spitfire_dcache_page_inval[];
131 extern char tl_ipi_spitfire_icache_page_inval[];
133 extern char tl_ipi_level[];
135 extern char tl_ipi_stick_rd[];
136 extern char tl_ipi_tick_rd[];
138 extern char tl_ipi_tlb_context_demap[];
139 extern char tl_ipi_tlb_page_demap[];
140 extern char tl_ipi_tlb_range_demap[];
/*
 * Deliver IPI `ipi` to every CPU except the caller.
 * NOTE(review): the `static __inline void` header, the cpuset local
 * (presumably `cpus = all_cpus;`), the early-return body and the closing
 * brace are missing from this sampled view.
 */
143 ipi_all_but_self(u_int ipi)
/* No-op until SMP startup has completed. */
147 if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
/* Drop the sending CPU from the target set. */
151 CPU_CLR(PCPU_GET(cpuid), &cpus);
/* Senders are serialized; lock is released before returning here. */
152 mtx_lock_spin(&ipi_mtx);
153 cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
154 mtx_unlock_spin(&ipi_mtx);
/*
 * Deliver IPI `ipi` to the CPUs named in `cpus`.
 * NOTE(review): function header, the continuation of the guard condition
 * (presumably `CPU_EMPTY(&cpus)`), the return, and the closing brace are
 * missing from this sampled view.
 */
159 ipi_selected(cpuset_t cpus, u_int ipi)
/* Skip before SMP is up or (presumably) when the target set is empty. */
162 if (__predict_false(atomic_load_acq_int(&smp_started) == 0 ||
165 mtx_lock_spin(&ipi_mtx);
166 cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
167 mtx_unlock_spin(&ipi_mtx);
/*
 * Deliver IPI `ipi` to the single CPU `cpu`.
 * NOTE(review): function header, early-return body, and closing brace are
 * missing from this sampled view.
 */
171 ipi_cpu(int cpu, u_int ipi)
174 if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
176 mtx_lock_spin(&ipi_mtx);
/* Single-target dispatch path; same PIL-based handler as the multicast. */
177 cpu_ipi_single(cpu, 0, (u_long)tl_ipi_level, ipi);
178 mtx_unlock_spin(&ipi_mtx);
/* The cache/TLB IPI helpers need pmap and mutex definitions. */
181 #if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)
/*
 * Ask all other CPUs to invalidate the D-cache lines for physical page
 * `pa`, running handler `func`.  Returns a cookie for ipi_wait(); the
 * caller MUST pass it to ipi_wait(), which spins until every target
 * clears itself from the mask and then releases ipi_mtx — note the lock
 * is intentionally still held when this function returns.
 * NOTE(review): the early-return (presumably `return (NULL)`) and the
 * `ica->ica_pa = pa` assignment are missing from this sampled view.
 */
183 static __inline void *
184 ipi_dcache_page_inval(void *func, vm_paddr_t pa)
186 struct ipi_cache_args *ica;
188 if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
191 ica = &ipi_cache_args;
192 mtx_lock_spin(&ipi_mtx);
/* Target every CPU but ourselves; targets clear their bit when done. */
193 ica->ica_mask = all_cpus;
194 CPU_CLR(PCPU_GET(cpuid), &ica->ica_mask);
196 cpu_ipi_selected(ica->ica_mask, 0, (u_long)func, (u_long)ica);
197 return (&ica->ica_mask);
/*
 * I-cache counterpart of ipi_dcache_page_inval(); identical protocol:
 * returns an ipi_wait() cookie with ipi_mtx still held.
 * NOTE(review): early-return and the `ica->ica_pa = pa` assignment are
 * missing from this sampled view.
 */
200 static __inline void *
201 ipi_icache_page_inval(void *func, vm_paddr_t pa)
203 struct ipi_cache_args *ica;
205 if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
208 ica = &ipi_cache_args;
209 mtx_lock_spin(&ipi_mtx);
210 ica->ica_mask = all_cpus;
211 CPU_CLR(PCPU_GET(cpuid), &ica->ica_mask);
213 cpu_ipi_selected(ica->ica_mask, 0, (u_long)func, (u_long)ica);
214 return (&ica->ica_mask);
/*
 * Ask CPU `cpu` to run `func` (a tl_ipi_*_rd handler) and store the
 * register it reads into `*val`.  Returns an ipi_wait() cookie with
 * ipi_mtx held, NULL-style early exit before SMP is up.
 * NOTE(review): the early-return, the `ira = &ipi_rd_args` and
 * `ira->ira_val = val` assignments are missing from this sampled view.
 */
217 static __inline void *
218 ipi_rd(u_int cpu, void *func, u_long *val)
220 struct ipi_rd_args *ira;
222 if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
226 mtx_lock_spin(&ipi_mtx);
/* Exactly one target: the mask is set to just `cpu`. */
227 CPU_SETOF(cpu, &ira->ira_mask);
229 cpu_ipi_single(cpu, 0, (u_long)func, (u_long)ira);
230 return (&ira->ira_mask);
/*
 * Ask every other CPU with `pm` active to demap its whole TLB context.
 * Returns an ipi_wait() cookie (ipi_mtx held) or — presumably — NULL from
 * the empty-set branch, whose body is missing from this sampled view
 * along with the cpuset declaration, `ita = &ipi_tlb_args`, the
 * `ita->ita_pmap = pm` assignment, and the closing brace.
 */
233 static __inline void *
234 ipi_tlb_context_demap(struct pmap *pm)
236 struct ipi_tlb_args *ita;
239 if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
/* Only CPUs that currently have this pmap active need to act. */
242 cpus = pm->pm_active;
243 CPU_AND(&cpus, &all_cpus);
244 CPU_CLR(PCPU_GET(cpuid), &cpus);
245 if (CPU_EMPTY(&cpus)) {
250 mtx_lock_spin(&ipi_mtx);
251 ita->ita_mask = cpus;
253 cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
255 return (&ita->ita_mask);
/*
 * Ask every other CPU with `pm` active to demap the single page at `va`.
 * Same cookie/lock protocol as ipi_tlb_context_demap().
 * NOTE(review): the empty-set branch body, the `ita = &ipi_tlb_args`,
 * `ita->ita_pmap = pm` and `ita->ita_va = va` assignments, and the
 * closing brace are missing from this sampled view.
 */
258 static __inline void *
259 ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
261 struct ipi_tlb_args *ita;
264 if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
267 cpus = pm->pm_active;
268 CPU_AND(&cpus, &all_cpus);
269 CPU_CLR(PCPU_GET(cpuid), &cpus);
270 if (CPU_EMPTY(&cpus)) {
275 mtx_lock_spin(&ipi_mtx);
276 ita->ita_mask = cpus;
279 cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
280 return (&ita->ita_mask);
/*
 * Ask every other CPU with `pm` active to demap the VA range
 * [start, end).  Same cookie/lock protocol as the other demap helpers.
 * NOTE(review): the empty-set branch body, `ita = &ipi_tlb_args`, the
 * `ita->ita_pmap = pm` and `ita->ita_end = end` assignments, and the
 * closing brace are missing from this sampled view.
 */
283 static __inline void *
284 ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
286 struct ipi_tlb_args *ita;
289 if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
292 cpus = pm->pm_active;
293 CPU_AND(&cpus, &all_cpus);
294 CPU_CLR(PCPU_GET(cpuid), &cpus);
295 if (CPU_EMPTY(&cpus)) {
300 mtx_lock_spin(&ipi_mtx);
301 ita->ita_mask = cpus;
303 ita->ita_start = start;
305 cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_range_demap,
307 return (&ita->ita_mask);
/*
 * Second half of the cookie protocol: spin until every target CPU has
 * cleared itself from the mask returned by an ipi_* sender, then release
 * ipi_mtx (which that sender left held).  A NULL cookie means no IPI was
 * sent, so there is nothing to wait for and no lock to drop.
 * NOTE(review): the function header, the spin-loop body (presumably a
 * bare `;`), and closing braces are missing from this sampled view.
 */
311 ipi_wait(void *cookie)
/* volatile: the mask is cleared by other CPUs while we spin on it. */
313 volatile cpuset_t *mask;
315 if (__predict_false((mask = cookie) != NULL)) {
316 while (!CPU_EMPTY(mask))
318 mtx_unlock_spin(&ipi_mtx);
323 #endif /* _MACHINE_PMAP_H_ && _SYS_MUTEX_H_ */
/*
 * Uniprocessor (non-SMP) stubs: same signatures as the real senders so
 * callers compile unchanged; presumably each returns NULL / does nothing.
 * NOTE(review): all stub bodies (and the surrounding #else/#endif of the
 * SMP conditional) are missing from this sampled view.
 */
331 static __inline void *
332 ipi_dcache_page_inval(void *func __unused, vm_paddr_t pa __unused)
338 static __inline void *
339 ipi_icache_page_inval(void *func __unused, vm_paddr_t pa __unused)
345 static __inline void *
346 ipi_rd(u_int cpu __unused, void *func __unused, u_long *val __unused)
352 static __inline void *
353 ipi_tlb_context_demap(struct pmap *pm __unused)
359 static __inline void *
360 ipi_tlb_page_demap(struct pmap *pm __unused, vm_offset_t va __unused)
366 static __inline void *
367 ipi_tlb_range_demap(struct pmap *pm __unused, vm_offset_t start __unused,
368 __unused vm_offset_t end)
375 ipi_wait(void *cookie __unused)
/* UP no-op versions of the trap-level handlers. */
381 tl_ipi_cheetah_dcache_page_inval(void)
387 tl_ipi_spitfire_dcache_page_inval(void)
393 tl_ipi_spitfire_icache_page_inval(void)
402 #endif /* !_MACHINE_SMP_H_ */