/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001 Wind River Systems, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _SYS_PCPU_H_
#define _SYS_PCPU_H_

#ifdef LOCORE
#error "no assembler-serviceable parts inside"
#endif

#include <sys/param.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <sys/_sx.h>
#include <sys/queue.h>
#include <sys/_rmlock.h>
#include <sys/resource.h>
#include <machine/pcpu.h>

#define DPCPU_SETNAME           "set_pcpu"
#define DPCPU_SYMPREFIX         "pcpu_entry_"

#ifdef _KERNEL

/*
 * Define a set for pcpu data.
 */
extern uintptr_t *__start_set_pcpu;
__GLOBL(__start_set_pcpu);
extern uintptr_t *__stop_set_pcpu;
__GLOBL(__stop_set_pcpu);

/*
 * Array of dynamic pcpu base offsets.  Indexed by id.
 */
extern uintptr_t dpcpu_off[];

/*
 * Convenience defines.
 */
#define DPCPU_START             ((uintptr_t)&__start_set_pcpu)
#define DPCPU_STOP              ((uintptr_t)&__stop_set_pcpu)
#define DPCPU_BYTES             (DPCPU_STOP - DPCPU_START)
#define DPCPU_MODMIN            2048
#define DPCPU_SIZE              roundup2(DPCPU_BYTES, PAGE_SIZE)
#define DPCPU_MODSIZE           (DPCPU_SIZE - (DPCPU_BYTES - DPCPU_MODMIN))

/*
 * Declaration and definition.
 */
#define DPCPU_NAME(n)           pcpu_entry_##n
#define DPCPU_DECLARE(t, n)     extern t DPCPU_NAME(n)
/* struct _hack is to stop this from being used with the static keyword. */
#define DPCPU_DEFINE(t, n)      \
    struct _hack; t DPCPU_NAME(n) __section(DPCPU_SETNAME) __used
#if defined(KLD_MODULE) && (defined(__aarch64__) || defined(__riscv) \
    || defined(__powerpc64__))
/*
 * On some architectures the compiler will use a PC-relative load to
 * find the address of DPCPU data declared with the static keyword,
 * and we then use that address to find the offset of the data within
 * the per-CPU region.  This works in the kernel because the space can
 * be allocated ahead of time; modules, however, need to allocate a
 * separate space and then use relocations to fix up the address of
 * the data.  As PC-relative data carries no relocation, there is
 * nothing for the kernel module linker to fix, so the data would be
 * accessed from the wrong location.
 *
 * This is a workaround until a better solution can be found.
 *
 * VNET_DEFINE_STATIC has the same workaround.
 */
#define DPCPU_DEFINE_STATIC(t, n)       \
    t DPCPU_NAME(n) __section(DPCPU_SETNAME) __used
#else
#define DPCPU_DEFINE_STATIC(t, n)       \
    static t DPCPU_NAME(n) __section(DPCPU_SETNAME) __used
#endif
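
/*
 * Usage sketch (illustrative only; "foo_count" is a hypothetical
 * name): a per-CPU variable private to one file uses the static
 * variant, while a variable shared across files pairs DPCPU_DEFINE
 * in one .c file with DPCPU_DECLARE in a shared header:
 *
 *      DPCPU_DEFINE_STATIC(u_long, foo_count);
 *
 *      DPCPU_DEFINE(u_long, foo_count);
 *      DPCPU_DECLARE(u_long, foo_count);
 */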

/*
 * Accessors with a given base.
 */
#define _DPCPU_PTR(b, n)                                                \
    (__typeof(DPCPU_NAME(n))*)((b) + (uintptr_t)&DPCPU_NAME(n))
#define _DPCPU_GET(b, n)        (*_DPCPU_PTR(b, n))
#define _DPCPU_SET(b, n, v)     (*_DPCPU_PTR(b, n) = v)

/*
 * Accessors for the current cpu.
 */
#define DPCPU_PTR(n)            _DPCPU_PTR(PCPU_GET(dynamic), n)
#define DPCPU_GET(n)            (*DPCPU_PTR(n))
#define DPCPU_SET(n, v)         (*DPCPU_PTR(n) = v)

/*
 * Accessors for remote cpus.
 */
#define DPCPU_ID_PTR(i, n)      _DPCPU_PTR(dpcpu_off[(i)], n)
#define DPCPU_ID_GET(i, n)      (*DPCPU_ID_PTR(i, n))
#define DPCPU_ID_SET(i, n, v)   (*DPCPU_ID_PTR(i, n) = v)
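
/*
 * Access sketch (illustrative; assumes the hypothetical foo_count
 * above).  Current-CPU accesses should be done with the thread pinned
 * or inside a critical section so it cannot migrate mid-access:
 *
 *      critical_enter();
 *      DPCPU_SET(foo_count, DPCPU_GET(foo_count) + 1);
 *      critical_exit();
 *
 * A remote CPU's copy is addressed by id, e.g.
 * DPCPU_ID_GET(cpu, foo_count).
 */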

/*
 * Utility macros.
 */
#define DPCPU_SUM(n) __extension__                                      \
({                                                                      \
        u_int _i;                                                       \
        __typeof(*DPCPU_PTR(n)) sum;                                    \
                                                                        \
        sum = 0;                                                        \
        CPU_FOREACH(_i) {                                               \
                sum += *DPCPU_ID_PTR(_i, n);                            \
        }                                                               \
        sum;                                                            \
})

#define DPCPU_VARSUM(n, var) __extension__                              \
({                                                                      \
        u_int _i;                                                       \
        __typeof((DPCPU_PTR(n))->var) sum;                              \
                                                                        \
        sum = 0;                                                        \
        CPU_FOREACH(_i) {                                               \
                sum += (DPCPU_ID_PTR(_i, n))->var;                      \
        }                                                               \
        sum;                                                            \
})

#define DPCPU_ZERO(n) do {                                              \
        u_int _i;                                                       \
                                                                        \
        CPU_FOREACH(_i) {                                               \
                bzero(DPCPU_ID_PTR(_i, n), sizeof(*DPCPU_PTR(n)));      \
        }                                                               \
} while (0)
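
/*
 * Summation sketch (illustrative; hypothetical foo_count as above):
 *
 *      u_long total;
 *
 *      total = DPCPU_SUM(foo_count);
 *
 * The result is not a consistent snapshot: other CPUs may update
 * their copies while the loop runs.
 */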

#endif /* _KERNEL */

/*
 * This structure maps out the global data that needs to be kept on a
 * per-cpu basis.  The members are accessed via the PCPU_GET/SET/PTR
 * macros defined in <machine/pcpu.h>.  Machine dependent fields are
 * defined in the PCPU_MD_FIELDS macro defined in <machine/pcpu.h>.
 */
struct pcpu {
        struct thread   *pc_curthread;          /* Current thread */
        struct thread   *pc_idlethread;         /* Idle thread */
        struct thread   *pc_fpcurthread;        /* Fp state owner */
        struct thread   *pc_deadthread;         /* Zombie thread or NULL */
        struct pcb      *pc_curpcb;             /* Current pcb */
        void            *pc_sched;              /* Scheduler state */
        uint64_t        pc_switchtime;          /* cpu_ticks() at last csw */
        int             pc_switchticks;         /* `ticks' at last csw */
        u_int           pc_cpuid;               /* This cpu number */
        STAILQ_ENTRY(pcpu) pc_allcpu;
        struct lock_list_entry *pc_spinlocks;
        long            pc_cp_time[CPUSTATES];  /* statclock ticks */
        struct device   *pc_device;
        void            *pc_netisr;             /* netisr SWI cookie */
        int             pc_unused1;             /* unused field */
        int             pc_domain;              /* Memory domain. */
        struct rm_queue pc_rm_queue;            /* rmlock list of trackers */
        uintptr_t       pc_dynamic;             /* Dynamic per-cpu data area */
        uint64_t        pc_early_dummy_counter; /* Startup time counter(9) */
        uintptr_t       pc_zpcpu_offset;        /* Offset into zpcpu allocs */

        /*
         * Keep MD fields last, so that CPU-specific variations on a
         * single architecture don't result in offset variations of
         * the machine-independent fields of the pcpu.  Even though
         * the pcpu structure is private to the kernel, some ports
         * (e.g., lsof, part of gtop) define _KERNEL and include this
         * header.  While strictly speaking this is wrong, there's no
         * reason not to keep the offsets of the MI fields constant
         * if only to make kernel debugging easier.
         */
        PCPU_MD_FIELDS;
} __aligned(CACHE_LINE_SIZE);
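
/*
 * Access sketch (illustrative): the fields above are read through the
 * PCPU_GET()/PCPU_PTR()/PCPU_SET() accessors from <machine/pcpu.h>,
 * named without the pc_ prefix, e.g.:
 *
 *      u_int cpu = PCPU_GET(cpuid);
 *      struct thread *td = PCPU_GET(curthread);
 */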

#ifdef _KERNEL

STAILQ_HEAD(cpuhead, pcpu);

extern struct cpuhead cpuhead;
extern struct pcpu *cpuid_to_pcpu[];

#define curcpu          PCPU_GET(cpuid)
#define curvidata       PCPU_GET(vidata)

#define UMA_PCPU_ALLOC_SIZE             PAGE_SIZE

#include <machine/pcpu_aux.h>

#ifndef curthread
#define curthread       PCPU_GET(curthread)
#endif
#define curproc         (curthread->td_proc)

#ifndef ZPCPU_ASSERT_PROTECTED
#define ZPCPU_ASSERT_PROTECTED() MPASS(curthread->td_critnest > 0)
#endif

#ifndef zpcpu_offset_cpu
#define zpcpu_offset_cpu(cpu)   (UMA_PCPU_ALLOC_SIZE * (cpu))
#endif
#ifndef zpcpu_offset
#define zpcpu_offset()          (PCPU_GET(zpcpu_offset))
#endif

#ifndef zpcpu_base_to_offset
#define zpcpu_base_to_offset(base) (base)
#endif
#ifndef zpcpu_offset_to_base
#define zpcpu_offset_to_base(base) (base)
#endif

/* Accessor to elements allocated via UMA_ZONE_PCPU zone. */
#define zpcpu_get(base) ({                                                              \
        __typeof(base) _ptr = (void *)((char *)(base) + zpcpu_offset());                \
        _ptr;                                                                           \
})

#define zpcpu_get_cpu(base, cpu) ({                                                     \
        __typeof(base) _ptr = (void *)((char *)(base) + zpcpu_offset_cpu(cpu));         \
        _ptr;                                                                           \
})
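
/*
 * Allocation sketch (illustrative; "foo_zone" and "foo_pcpu" are
 * hypothetical, and the zone parameters are assumptions rather than
 * requirements of this header).  A UMA_ZONE_PCPU zone returns a base
 * address with one UMA_PCPU_ALLOC_SIZE stripe per CPU, which
 * zpcpu_get() converts into the current CPU's copy:
 *
 *      uma_zone_t foo_zone = uma_zcreate("foo", sizeof(u_long),
 *          NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
 *      u_long *foo_pcpu = uma_zalloc_pcpu(foo_zone, M_WAITOK | M_ZERO);
 *      u_long *p = zpcpu_get(foo_pcpu);
 */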

/*
 * This operation is NOT atomic and does not post any barriers.
 * The caller must ensure that the target CPU is not modifying the
 * variable concurrently; if atomicity is needed, use xchg instead.
 */
#define zpcpu_replace(base, val) ({                                     \
        __typeof(val) *_ptr = zpcpu_get(base);                          \
        __typeof(val) _old;                                             \
                                                                        \
        _old = *_ptr;                                                   \
        *_ptr = (val);                                                  \
        _old;                                                           \
})

#define zpcpu_replace_cpu(base, val, cpu) ({                            \
        __typeof(val) *_ptr = zpcpu_get_cpu(base, cpu);                 \
        __typeof(val) _old;                                             \
                                                                        \
        _old = *_ptr;                                                   \
        *_ptr = (val);                                                  \
        _old;                                                           \
})
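
/*
 * Replacement sketch (illustrative; foo_pcpu as above).  Since the
 * read-modify-write is unsynchronized, this is only safe when the
 * target CPU is known not to be touching the variable, e.g. during
 * teardown after its consumers have been stopped:
 *
 *      u_long old = zpcpu_replace_cpu(foo_pcpu, 0, cpu);
 */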

#ifndef zpcpu_set_protected
#define zpcpu_set_protected(base, val) ({                               \
        ZPCPU_ASSERT_PROTECTED();                                       \
        __typeof(val) *_ptr = zpcpu_get(base);                          \
                                                                        \
        *_ptr = (val);                                                  \
})
#endif

#ifndef zpcpu_add_protected
#define zpcpu_add_protected(base, val) ({                               \
        ZPCPU_ASSERT_PROTECTED();                                       \
        __typeof(val) *_ptr = zpcpu_get(base);                          \
                                                                        \
        *_ptr += (val);                                                 \
})
#endif

#ifndef zpcpu_sub_protected
#define zpcpu_sub_protected(base, val) ({                               \
        ZPCPU_ASSERT_PROTECTED();                                       \
        __typeof(val) *_ptr = zpcpu_get(base);                          \
                                                                        \
        *_ptr -= (val);                                                 \
})
#endif
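
/*
 * Protected-accessor sketch (illustrative; foo_pcpu as above).  The
 * caller supplies the protection, typically a critical section, which
 * ZPCPU_ASSERT_PROTECTED() checks for:
 *
 *      critical_enter();
 *      zpcpu_add_protected(foo_pcpu, 1);
 *      critical_exit();
 */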

/*
 * Machine dependent callouts.  cpu_pcpu_init() is responsible for
 * initializing machine dependent fields of struct pcpu, and
 * db_show_mdpcpu() is responsible for handling machine dependent
 * fields for the DDB 'show pcpu' command.
 */
void    cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size);
void    db_show_mdpcpu(struct pcpu *pcpu);

void    *dpcpu_alloc(int size);
void    dpcpu_copy(void *s, int size);
void    dpcpu_free(void *s, int size);
void    dpcpu_init(void *dpcpu, int cpuid);
void    pcpu_destroy(struct pcpu *pcpu);
struct  pcpu *pcpu_find(u_int cpuid);
void    pcpu_init(struct pcpu *pcpu, int cpuid, size_t size);

#endif /* _KERNEL */

#endif /* !_SYS_PCPU_H_ */