/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2001 Jake Burkholder.
 * Copyright (c) 2007 - 2011 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
32 #ifndef _MACHINE_SMP_H_
33 #define _MACHINE_SMP_H_
37 #define CPU_TICKSYNC 1
38 #define CPU_STICKSYNC 2
40 #define CPU_BOOTSTRAP 4
#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <machine/atomic.h>
#include <machine/intr_machdep.h>
#include <machine/tte.h>
/*
 * Interrupt dispatch status register (IDR) bits: per-target busy/nack
 * pairs.  The Cheetah ALL_BUSY/ALL_NACK masks cover every busy (even) or
 * nack (odd) bit of the up-to-32 pair registers.
 */
#define	IDR_BUSY			0x0000000000000001ULL
#define	IDR_NACK			0x0000000000000002ULL
#define	IDR_CHEETAH_ALL_BUSY		0x5555555555555555ULL
#define	IDR_CHEETAH_ALL_NACK		(~IDR_CHEETAH_ALL_BUSY)
#define	IDR_CHEETAH_MAX_BN_PAIRS	32
#define	IDR_JALAPENO_MAX_BN_PAIRS	4

/* Interrupt dispatch command: target CPU (ITID) and busy/nack pair fields. */
#define	IDC_ITID_SHIFT			14
#define	IDC_BN_SHIFT			24
/*
 * IPIs are delivered as interrupts at the PIL of the corresponding
 * interrupt level; there is no separate hard-stop mechanism, so
 * IPI_STOP_HARD aliases IPI_STOP.
 */
#define	IPI_AST		PIL_AST
#define	IPI_RENDEZVOUS	PIL_RENDEZVOUS
#define	IPI_PREEMPT	PIL_PREEMPT
#define	IPI_HARDCLOCK	PIL_HARDCLOCK
#define	IPI_STOP	PIL_STOP
#define	IPI_STOP_HARD	PIL_STOP

/* Maximum number of dispatch attempts before an IPI is declared lost. */
#define	IPI_RETRIES	5000
75 struct cpu_start_args {
83 struct tte csa_ttes[PCPU_PAGES];
86 struct ipi_cache_args {
98 struct pmap *ita_pmap;
102 #define ita_va ita_start
107 extern struct pcb stoppcbs[];
109 void cpu_mp_bootstrap(struct pcpu *pc);
110 void cpu_mp_shutdown(void);
112 typedef void cpu_ipi_selected_t(cpuset_t, u_long, u_long, u_long);
113 extern cpu_ipi_selected_t *cpu_ipi_selected;
114 typedef void cpu_ipi_single_t(u_int, u_long, u_long, u_long);
115 extern cpu_ipi_single_t *cpu_ipi_single;
119 extern struct mtx ipi_mtx;
120 extern struct ipi_cache_args ipi_cache_args;
121 extern struct ipi_rd_args ipi_rd_args;
122 extern struct ipi_tlb_args ipi_tlb_args;
124 extern char *mp_tramp_code;
125 extern u_long mp_tramp_code_len;
126 extern u_long mp_tramp_tlb_slots;
127 extern u_long mp_tramp_func;
129 extern void mp_startup(void);
131 extern char tl_ipi_cheetah_dcache_page_inval[];
132 extern char tl_ipi_spitfire_dcache_page_inval[];
133 extern char tl_ipi_spitfire_icache_page_inval[];
135 extern char tl_ipi_level[];
137 extern char tl_ipi_stick_rd[];
138 extern char tl_ipi_tick_rd[];
140 extern char tl_ipi_tlb_context_demap[];
141 extern char tl_ipi_tlb_page_demap[];
142 extern char tl_ipi_tlb_range_demap[];
145 ipi_all_but_self(u_int ipi)
149 if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
153 CPU_CLR(PCPU_GET(cpuid), &cpus);
154 mtx_lock_spin(&ipi_mtx);
155 cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
156 mtx_unlock_spin(&ipi_mtx);
161 ipi_selected(cpuset_t cpus, u_int ipi)
164 if (__predict_false(atomic_load_acq_int(&smp_started) == 0 ||
167 mtx_lock_spin(&ipi_mtx);
168 cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
169 mtx_unlock_spin(&ipi_mtx);
173 ipi_cpu(int cpu, u_int ipi)
176 if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
178 mtx_lock_spin(&ipi_mtx);
179 cpu_ipi_single(cpu, 0, (u_long)tl_ipi_level, ipi);
180 mtx_unlock_spin(&ipi_mtx);
183 #if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)
185 static __inline void *
186 ipi_dcache_page_inval(void *func, vm_paddr_t pa)
188 struct ipi_cache_args *ica;
190 if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
193 ica = &ipi_cache_args;
194 mtx_lock_spin(&ipi_mtx);
195 ica->ica_mask = all_cpus;
196 CPU_CLR(PCPU_GET(cpuid), &ica->ica_mask);
198 cpu_ipi_selected(ica->ica_mask, 0, (u_long)func, (u_long)ica);
199 return (&ica->ica_mask);
202 static __inline void *
203 ipi_icache_page_inval(void *func, vm_paddr_t pa)
205 struct ipi_cache_args *ica;
207 if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
210 ica = &ipi_cache_args;
211 mtx_lock_spin(&ipi_mtx);
212 ica->ica_mask = all_cpus;
213 CPU_CLR(PCPU_GET(cpuid), &ica->ica_mask);
215 cpu_ipi_selected(ica->ica_mask, 0, (u_long)func, (u_long)ica);
216 return (&ica->ica_mask);
219 static __inline void *
220 ipi_rd(u_int cpu, void *func, u_long *val)
222 struct ipi_rd_args *ira;
224 if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
228 mtx_lock_spin(&ipi_mtx);
229 CPU_SETOF(cpu, &ira->ira_mask);
231 cpu_ipi_single(cpu, 0, (u_long)func, (u_long)ira);
232 return (&ira->ira_mask);
235 static __inline void *
236 ipi_tlb_context_demap(struct pmap *pm)
238 struct ipi_tlb_args *ita;
241 if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
244 cpus = pm->pm_active;
245 CPU_AND(&cpus, &all_cpus);
246 CPU_CLR(PCPU_GET(cpuid), &cpus);
247 if (CPU_EMPTY(&cpus)) {
252 mtx_lock_spin(&ipi_mtx);
253 ita->ita_mask = cpus;
255 cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
257 return (&ita->ita_mask);
260 static __inline void *
261 ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
263 struct ipi_tlb_args *ita;
266 if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
269 cpus = pm->pm_active;
270 CPU_AND(&cpus, &all_cpus);
271 CPU_CLR(PCPU_GET(cpuid), &cpus);
272 if (CPU_EMPTY(&cpus)) {
277 mtx_lock_spin(&ipi_mtx);
278 ita->ita_mask = cpus;
281 cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
282 return (&ita->ita_mask);
285 static __inline void *
286 ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
288 struct ipi_tlb_args *ita;
291 if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
294 cpus = pm->pm_active;
295 CPU_AND(&cpus, &all_cpus);
296 CPU_CLR(PCPU_GET(cpuid), &cpus);
297 if (CPU_EMPTY(&cpus)) {
302 mtx_lock_spin(&ipi_mtx);
303 ita->ita_mask = cpus;
305 ita->ita_start = start;
307 cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_range_demap,
309 return (&ita->ita_mask);
313 ipi_wait(void *cookie)
315 volatile cpuset_t *mask;
317 if (__predict_false((mask = cookie) != NULL)) {
318 while (!CPU_EMPTY(mask))
320 mtx_unlock_spin(&ipi_mtx);
325 #endif /* _MACHINE_PMAP_H_ && _SYS_MUTEX_H_ */
333 static __inline void *
334 ipi_dcache_page_inval(void *func __unused, vm_paddr_t pa __unused)
340 static __inline void *
341 ipi_icache_page_inval(void *func __unused, vm_paddr_t pa __unused)
347 static __inline void *
348 ipi_rd(u_int cpu __unused, void *func __unused, u_long *val __unused)
354 static __inline void *
355 ipi_tlb_context_demap(struct pmap *pm __unused)
361 static __inline void *
362 ipi_tlb_page_demap(struct pmap *pm __unused, vm_offset_t va __unused)
368 static __inline void *
369 ipi_tlb_range_demap(struct pmap *pm __unused, vm_offset_t start __unused,
370 __unused vm_offset_t end)
377 ipi_wait(void *cookie __unused)
/* UP stubs for the trap-level IPI cache handlers; nothing to do. */
static __inline void
tl_ipi_cheetah_dcache_page_inval(void)
{

}

static __inline void
tl_ipi_spitfire_dcache_page_inval(void)
{

}

static __inline void
tl_ipi_spitfire_icache_page_inval(void)
{

}
404 #endif /* !_MACHINE_SMP_H_ */