2 * Copyright (c) 2001 Jake Burkholder.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #ifndef _MACHINE_SMP_H_
30 #define _MACHINE_SMP_H_
34 #define CPU_BOOTSTRAP 3
38 #include <machine/intr_machdep.h>
39 #include <machine/tte.h>
41 #define IDR_BUSY (1<<0)
42 #define IDR_NACK (1<<1)
44 #define IPI_AST PIL_AST
45 #define IPI_RENDEZVOUS PIL_RENDEZVOUS
46 #define IPI_STOP PIL_STOP
47 #define IPI_STOP_HARD PIL_STOP
48 #define IPI_PREEMPT PIL_PREEMPT
51 #define IPI_RETRIES 5000
/*
 * NOTE(review): garbled extraction — the leading integers below are line
 * numbers fused in from a numbered listing, and the bodies of these
 * structures are missing from this view.  Code text preserved
 * byte-for-byte; only comments added.
 */
/* Argument block handed to a secondary CPU while it is starting up. */
53 struct cpu_start_args {
/* Arguments for the D$/I$ page-invalidation IPI handlers. */
60 struct ipi_cache_args {
/* Member of struct ipi_tlb_args (its declaration line is not visible). */
67 struct pmap *ita_pmap;
/* Single-page demap reuses the range-start field as the VA. */
71 #define ita_va ita_start
75 void cpu_mp_bootstrap(struct pcpu *pc);
76 void cpu_mp_shutdown(void);
78 void cpu_ipi_selected(int cpu_count, uint16_t *cpulist, u_long d0, u_long d1, u_long d2, uint64_t *ackmask);
79 void cpu_ipi_send(u_int mid, u_long d0, u_long d1, u_long d2);
81 void cpu_ipi_ast(struct trapframe *tf);
82 void cpu_ipi_stop(struct trapframe *tf);
83 void cpu_ipi_preempt(struct trapframe *tf);
85 void ipi_all_but_self(u_int ipi);
86 void ipi_cpu(int cpu, u_int ipi);
87 void ipi_selected(cpumask_t cpus, u_int ipi);
89 vm_offset_t mp_tramp_alloc(void);
90 void mp_set_tsb_desc_ra(vm_paddr_t);
91 void mp_add_nucleus_mapping(vm_offset_t, uint64_t);
92 extern struct mtx ipi_mtx;
93 extern struct ipi_cache_args ipi_cache_args;
94 extern struct ipi_tlb_args ipi_tlb_args;
96 extern vm_offset_t mp_tramp;
97 extern char *mp_tramp_code;
98 extern u_long mp_tramp_code_len;
99 extern u_long mp_tramp_tte_slots;
100 extern u_long mp_tramp_tsb_desc_ra;
101 extern u_long mp_tramp_func;
103 extern void mp_startup(void);
105 extern char tl_ipi_level[];
106 extern char tl_invltlb[];
107 extern char tl_invlctx[];
108 extern char tl_invlpg[];
109 extern char tl_invlrng[];
110 extern char tl_tsbupdate[];
111 extern char tl_ttehashupdate[];
115 #if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)
/*
 * Post a D$ page-invalidation IPI to the other CPUs, running 'func'
 * with the shared ipi_cache_args as its argument.  Returns a cookie
 * (pointer to the ack mask) to hand to ipi_wait(); ipi_mtx is acquired
 * here and released only by ipi_wait().
 *
 * NOTE(review): garbled listing — leading integers are fused line
 * numbers; the opening brace, any uniprocessor early-return, the store
 * of 'pa' into the args struct, and the closing brace are among the
 * lines missing from this view.  Code preserved byte-for-byte.
 */
117 static __inline void *
118 ipi_dcache_page_inval(void *func, vm_paddr_t pa)
120 struct ipi_cache_args *ica;
124 ica = &ipi_cache_args;
/* Spin lock serializes use of the single global args structure. */
125 mtx_lock_spin(&ipi_mtx);
126 ica->ica_mask = all_cpus;
/* NOTE(review): 'pa' is not used on any visible line — presumably
 * stored into the args struct on a missing line; confirm. */
128 cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
129 return (&ica->ica_mask);
/*
 * Post an I$ page-invalidation IPI to the other CPUs, running 'func'
 * with the shared ipi_cache_args as its argument.  Returns the ack-mask
 * cookie for ipi_wait(); ipi_mtx is acquired here and released only in
 * ipi_wait().
 *
 * NOTE(review): garbled listing — leading integers are fused line
 * numbers; the opening brace, the store of 'pa' into the args struct,
 * and the closing brace are among the missing lines.  Code preserved
 * byte-for-byte.
 */
132 static __inline void *
133 ipi_icache_page_inval(void *func, vm_paddr_t pa)
135 struct ipi_cache_args *ica;
139 ica = &ipi_cache_args;
/* Spin lock serializes use of the single global args structure. */
140 mtx_lock_spin(&ipi_mtx);
141 ica->ica_mask = all_cpus;
143 cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
144 return (&ica->ica_mask);
/*
 * IPI every other CPU that has 'pm' active to demap its TLB context.
 * Returns NULL (no IPI sent, nothing for ipi_wait() to do) when no
 * other CPU runs the pmap; otherwise returns the ack-mask cookie with
 * ipi_mtx held.
 *
 * NOTE(review): garbled listing — leading integers are fused line
 * numbers; missing from view are the declaration of 'cpus', the
 * 'ita = &ipi_tlb_args' initialization, the early-return statement, the
 * store of 'pm' into the args, and the tail of the cpu_ipi_selected()
 * call.  Code preserved byte-for-byte.
 */
147 static __inline void *
148 ipi_tlb_context_demap(struct pmap *pm)
150 struct ipi_tlb_args *ita;
/* Only target CPUs that both run this pmap and are not ourselves. */
155 if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
158 mtx_lock_spin(&ipi_mtx);
/* Include our own bit so ipi_wait() has something to clear. */
159 ita->ita_mask = cpus | PCPU_GET(cpumask);
161 cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
163 return (&ita->ita_mask);
/*
 * IPI every other CPU that has 'pm' active to demap the TLB entry for
 * virtual address 'va'.  Returns NULL when no other CPU runs the pmap;
 * otherwise returns the ack-mask cookie with ipi_mtx held.
 *
 * NOTE(review): garbled listing — leading integers are fused line
 * numbers; missing from view are the declaration of 'cpus', the
 * 'ita = &ipi_tlb_args' initialization, the early-return statement, and
 * the stores of 'pm' and 'va' into the args struct.  Code preserved
 * byte-for-byte.
 */
166 static __inline void *
167 ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
169 struct ipi_tlb_args *ita;
/* Only target CPUs that both run this pmap and are not ourselves. */
174 if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
177 mtx_lock_spin(&ipi_mtx);
/* Include our own bit so ipi_wait() has something to clear. */
178 ita->ita_mask = cpus | PCPU_GET(cpumask);
181 cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
182 return (&ita->ita_mask);
/*
 * IPI every other CPU that has 'pm' active to demap the TLB entries for
 * the virtual address range [start, end).  Returns NULL when no other
 * CPU runs the pmap; otherwise returns the ack-mask cookie with ipi_mtx
 * held.
 *
 * NOTE(review): garbled listing — leading integers are fused line
 * numbers; missing from view are the declaration of 'cpus', the
 * 'ita = &ipi_tlb_args' initialization, the early-return statement, and
 * the stores of 'pm' and 'end' into the args struct.  Code preserved
 * byte-for-byte.
 */
185 static __inline void *
186 ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
188 struct ipi_tlb_args *ita;
/* Only target CPUs that both run this pmap and are not ourselves. */
193 if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
196 mtx_lock_spin(&ipi_mtx);
/* Include our own bit so ipi_wait() has something to clear. */
197 ita->ita_mask = cpus | PCPU_GET(cpumask);
199 ita->ita_start = start;
201 cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_range_demap, (u_long)ita);
202 return (&ita->ita_mask);
/*
 * Complete an IPI transaction started by one of the ipi_*() senders
 * above.  A NULL cookie means no IPI was posted and there is nothing to
 * do.  Otherwise clear this CPU's own bit from the ack mask (the
 * senders include it via all_cpus / PCPU_GET(cpumask)) and release
 * ipi_mtx, which the sender acquired.
 *
 * NOTE(review): garbled listing — leading integers are fused line
 * numbers; the return-type line, the loop that presumably spins until
 * the target CPUs clear their mask bits, and the closing braces are
 * missing from this view.  Code preserved byte-for-byte.
 */
206 ipi_wait(void *cookie)
208 volatile cpumask_t *mask;
210 if ((mask = cookie) != NULL) {
211 atomic_clear_int(mask, PCPU_GET(cpumask));
214 mtx_unlock_spin(&ipi_mtx);
218 #endif /* _MACHINE_PMAP_H_ && _SYS_MUTEX_H_ */
/*
 * Stubs with the same signatures for the build branch where the SMP
 * prerequisites above are not met, so callers need not be
 * conditionalized.
 *
 * NOTE(review): garbled listing — leading integers are fused line
 * numbers; the #else that introduces this branch and all of the stub
 * bodies (presumably trivial, e.g. returning NULL / doing nothing) are
 * missing from this view.  Code preserved byte-for-byte.
 */
222 static __inline void *
223 ipi_dcache_page_inval(void *func, vm_paddr_t pa)
228 static __inline void *
229 ipi_icache_page_inval(void *func, vm_paddr_t pa)
234 static __inline void *
235 ipi_tlb_context_demap(struct pmap *pm)
240 static __inline void *
241 ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
246 static __inline void *
247 ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
253 ipi_wait(void *cookie)
261 #endif /* !_MACHINE_SMP_H_ */