/*-
 * Copyright (c) 2001 Jake Burkholder.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef	_MACHINE_SMP_H_
#define	_MACHINE_SMP_H_

/* States used in the handshake while starting a secondary CPU. */
#define	CPU_CLKSYNC	1
#define	CPU_INIT	2
#define	CPU_BOOTSTRAP	3

#ifndef	LOCORE

#include <machine/intr_machdep.h>
#include <machine/tte.h>

/* Interrupt dispatch status bits: dispatch in progress / rejected by target. */
#define	IDR_BUSY	(1<<0)
#define	IDR_NACK	(1<<1)

/* IPIs are identified by the PIL at which they are delivered. */
#define	IPI_AST		PIL_AST
#define	IPI_RENDEZVOUS	PIL_RENDEZVOUS
#define	IPI_STOP	PIL_STOP
#define	IPI_STOP_HARD	PIL_STOP
#define	IPI_PREEMPT	PIL_PREEMPT

#define	IPI_RETRIES	5000
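
/*
 * A sketch of how IPI_RETRIES bounds delivery (inferred from the sparc64
 * sender, not quoted from it): a dispatch that comes back IDR_NACK is
 * re-issued, spinning while IDR_BUSY is set, and the sender gives up
 * (panics) once IPI_RETRIES attempts have failed.
 */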

struct cpu_start_args {
	u_int	csa_count;
	u_int	csa_state;
	vm_offset_t csa_pcpu;
	u_int	csa_cpuid;
};

struct ipi_cache_args {
	u_int	ica_mask;
	vm_paddr_t ica_pa;
};

struct ipi_tlb_args {
	u_int	ita_mask;
	struct	pmap *ita_pmap;
	u_long	ita_start;
	u_long	ita_end;
};
/* The single-page demap IPI passes its VA in ita_start. */
#define	ita_va	ita_start

struct	pcpu;

void	cpu_mp_bootstrap(struct pcpu *pc);
void	cpu_mp_shutdown(void);

void	cpu_ipi_selected(int cpus, uint16_t *cpulist, u_long d0, u_long d1,
	    u_long d2, uint64_t *ackmask);
void	cpu_ipi_send(u_int mid, u_long d0, u_long d1, u_long d2);

void	cpu_ipi_ast(struct trapframe *tf);
void	cpu_ipi_stop(struct trapframe *tf);
void	cpu_ipi_preempt(struct trapframe *tf);

void	ipi_selected(u_int cpus, u_int ipi);
void	ipi_all_but_self(u_int ipi);
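
/*
 * Example (a sketch, not a quote of any caller): the high-level senders
 * take one of the IPI_* numbers above, e.g.
 *
 *	ipi_all_but_self(IPI_PREEMPT);	ask every other CPU to reschedule
 *	ipi_selected(map, IPI_AST);	"map" is a hypothetical CPU mask
 */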

vm_offset_t mp_tramp_alloc(void);
void	mp_set_tsb_desc_ra(vm_paddr_t);
void	mp_add_nucleus_mapping(vm_offset_t, uint64_t);
extern	struct mtx ipi_mtx;
extern	struct ipi_cache_args ipi_cache_args;
extern	struct ipi_tlb_args ipi_tlb_args;

extern	vm_offset_t mp_tramp;
extern	char *mp_tramp_code;
extern	u_long mp_tramp_code_len;
extern	u_long mp_tramp_tte_slots;
extern	u_long mp_tramp_tsb_desc_ra;
extern	u_long mp_tramp_func;

extern	void mp_startup(void);
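
/*
 * How the trampoline pieces fit together (inferred from the names; the
 * authoritative flow is in the MP startup code): mp_tramp_code, of
 * mp_tramp_code_len bytes, is copied into the page returned by
 * mp_tramp_alloc() at mp_tramp, its TTE slots and TSB descriptor real
 * address are patched in, and the started CPU finally jumps to
 * mp_tramp_func, normally mp_startup().
 */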

/* Trap-level handler entry points (assembler labels). */
extern	char tl_ipi_level[];
extern	char tl_invltlb[];
extern	char tl_invlctx[];
extern	char tl_invlpg[];
extern	char tl_invlrng[];
extern	char tl_tsbupdate[];
extern	char tl_ttehashupdate[];

/* Demap handlers referenced by the inline IPI wrappers below. */
extern	char tl_ipi_tlb_context_demap[];
extern	char tl_ipi_tlb_page_demap[];
extern	char tl_ipi_tlb_range_demap[];

#ifdef SMP

#if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)

static __inline void *
ipi_dcache_page_inval(void *func, vm_paddr_t pa)
{
	struct ipi_cache_args *ica;

	if (smp_cpus == 1)
		return (NULL);
	ica = &ipi_cache_args;
	mtx_lock_spin(&ipi_mtx);
	ica->ica_mask = all_cpus;
	ica->ica_pa = pa;
	cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
	return (&ica->ica_mask);
}

static __inline void *
ipi_icache_page_inval(void *func, vm_paddr_t pa)
{
	struct ipi_cache_args *ica;

	if (smp_cpus == 1)
		return (NULL);
	ica = &ipi_cache_args;
	mtx_lock_spin(&ipi_mtx);
	ica->ica_mask = all_cpus;
	ica->ica_pa = pa;
	cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
	return (&ica->ica_mask);
}
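
/*
 * All of the inline senders here follow the same protocol: take ipi_mtx,
 * publish the argument block with a mask of the CPUs that must acknowledge,
 * fire the IPI, and return a pointer to that mask as an opaque cookie for
 * ipi_wait() below.
 */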

static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{
	struct ipi_tlb_args *ita;
	u_int cpus;

	if (smp_cpus == 1)
		return (NULL);
	if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
		return (NULL);
	ita = &ipi_tlb_args;
	mtx_lock_spin(&ipi_mtx);
	ita->ita_mask = cpus | PCPU_GET(cpumask);
	ita->ita_pmap = pm;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
	    (u_long)ita);
	return (&ita->ita_mask);
}

static __inline void *
ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
	struct ipi_tlb_args *ita;
	u_int cpus;

	if (smp_cpus == 1)
		return (NULL);
	if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
		return (NULL);
	ita = &ipi_tlb_args;
	mtx_lock_spin(&ipi_mtx);
	ita->ita_mask = cpus | PCPU_GET(cpumask);
	ita->ita_pmap = pm;
	ita->ita_va = va;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
	return (&ita->ita_mask);
}

static __inline void *
ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{
	struct ipi_tlb_args *ita;
	u_int cpus;

	if (smp_cpus == 1)
		return (NULL);
	if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
		return (NULL);
	ita = &ipi_tlb_args;
	mtx_lock_spin(&ipi_mtx);
	ita->ita_mask = cpus | PCPU_GET(cpumask);
	ita->ita_pmap = pm;
	ita->ita_start = start;
	ita->ita_end = end;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_range_demap, (u_long)ita);
	return (&ita->ita_mask);
}

static __inline void
ipi_wait(void *cookie)
{
	volatile u_int *mask;

	if ((mask = cookie) != NULL) {
		atomic_clear_int(mask, PCPU_GET(cpumask));
		while (*mask != 0)
			;
		mtx_unlock_spin(&ipi_mtx);
	}
}
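
/*
 * Typical pairing of the helpers above (a sketch; the real call sites are
 * the TLB/cache demap primitives): request the remote demap, perform the
 * local one while the IPI is in flight, then wait for the acknowledgements.
 *
 *	void *cookie;
 *
 *	cookie = ipi_tlb_page_demap(pm, va);
 *	(demap the page in the local TLB)
 *	ipi_wait(cookie);
 */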

#endif /* _MACHINE_PMAP_H_ && _SYS_MUTEX_H_ */

#else

static __inline void *
ipi_dcache_page_inval(void *func, vm_paddr_t pa)
{

	return (NULL);
}

static __inline void *
ipi_icache_page_inval(void *func, vm_paddr_t pa)
{

	return (NULL);
}

static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{

	return (NULL);
}

static __inline void *
ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
{

	return (NULL);
}

static __inline void *
ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{

	return (NULL);
}

static __inline void
ipi_wait(void *cookie)
{

}

#endif /* SMP */

#endif /* !LOCORE */

#endif /* !_MACHINE_SMP_H_ */