sys/kern/kern_sharedpage.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010, 2012 Konstantin Belousov <kib@FreeBSD.org>
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/stddef.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vdso.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

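/*
 * The shared page is a single page of kernel-maintained memory that is
 * mapped read-only into the address space of every process whose ABI sets
 * SV_SHP, at the per-ABI address sv_shared_page_base.  The kernel writes
 * to it through the permanent KVA window shared_page_mapping.  Its typical
 * contents are the signal trampoline (or, with SV_DSO_SIG, a small DSO
 * containing it), the vdso_timekeep data consumed by userspace
 * gettimeofday(2) acceleration, and, with RANDOM_FENESTRASX, the RNG seed
 * generation counter.
 */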
static struct sx shared_page_alloc_sx;
static vm_object_t shared_page_obj;
static int shared_page_free;
char *shared_page_mapping;

#ifdef RANDOM_FENESTRASX
static struct vdso_fxrng_generation *fxrng_shpage_mapping;

static bool fxrng_enabled = true;
SYSCTL_BOOL(_debug, OID_AUTO, fxrng_vdso_enable, CTLFLAG_RWTUN, &fxrng_enabled,
    0, "Enable FXRNG VDSO");
#endif

void
shared_page_write(int base, int size, const void *data)
{

        bcopy(data, shared_page_mapping + base, size);
}

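/*
 * Carve space out of the shared page.  This is a simple bump allocator:
 * offsets only grow and are never returned, which is acceptable because
 * allocations happen once, when an ABI (sysentvec) is registered.  The
 * return value is an offset into the page, or -1 if the page is full.
 */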
static int
shared_page_alloc_locked(int size, int align)
{
        int res;

        res = roundup(shared_page_free, align);
        if (res + size >= IDX_TO_OFF(shared_page_obj->size))
                res = -1;
        else
                shared_page_free = res + size;
        return (res);
}

int
shared_page_alloc(int size, int align)
{
        int res;

        sx_xlock(&shared_page_alloc_sx);
        res = shared_page_alloc_locked(size, align);
        sx_xunlock(&shared_page_alloc_sx);
        return (res);
}

int
shared_page_fill(int size, int align, const void *data)
{
        int res;

        sx_xlock(&shared_page_alloc_sx);
        res = shared_page_alloc_locked(size, align);
        if (res != -1)
                shared_page_write(res, size, data);
        sx_xunlock(&shared_page_alloc_sx);
        return (res);
}
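
/*
 * A typical consumer converts the returned page offset into a user virtual
 * address; e.g. exec_sysvec_init() below installs the signal trampoline
 * roughly as:
 *
 *      off = shared_page_fill(*(sv->sv_szsigcode), 16, sv->sv_sigcode);
 *      sv->sv_sigcode_base = sv->sv_shared_page_base + off;
 *
 * where "off" is only an illustrative local name.
 */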

static void
shared_page_init(void *dummy __unused)
{
        vm_page_t m;
        vm_offset_t addr;

        sx_init(&shared_page_alloc_sx, "shpsx");
        shared_page_obj = vm_pager_allocate(OBJT_PHYS, 0, PAGE_SIZE,
            VM_PROT_DEFAULT, 0, NULL);
        VM_OBJECT_WLOCK(shared_page_obj);
        m = vm_page_grab(shared_page_obj, 0, VM_ALLOC_ZERO);
        VM_OBJECT_WUNLOCK(shared_page_obj);
        vm_page_valid(m);
        vm_page_xunbusy(m);
        addr = kva_alloc(PAGE_SIZE);
        pmap_qenter(addr, &m, 1);
        shared_page_mapping = (char *)addr;
}

SYSINIT(shp, SI_SUB_EXEC, SI_ORDER_FIRST, (sysinit_cfunc_t)shared_page_init,
    NULL);

/*
 * Push the timehands update to the shared page.
 *
 * The lockless update scheme is similar to the one used to update the
 * in-kernel timehands, see sys/kern/kern_tc.c:tc_windup() (which
 * calls us after the timehands are updated).
 */
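/*
 * For reference, a userspace consumer of the exported timehands is expected
 * to use the usual generation-count (seqlock-style) protocol; roughly, and
 * ignoring the actual time computation:
 *
 *      do {
 *              idx = tk->tk_current;
 *              gen = atomic_load_acq_32(&tk->tk_th[idx].th_gen);
 *              <copy the tk_th[idx] fields that are needed>
 *      } while (gen == 0 || gen != atomic_load_acq_32(&tk->tk_th[idx].th_gen));
 *
 * A zero or changed generation means the snapshot raced an update and must
 * be retried.  The __vdso_gettimeofday() glue in libc implements a loop of
 * this shape.
 */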
static void
timehands_update(struct vdso_sv_tk *svtk)
{
        struct vdso_timehands th;
        struct vdso_timekeep *tk;
        uint32_t enabled, idx;

        enabled = tc_fill_vdso_timehands(&th);
        th.th_gen = 0;
        idx = svtk->sv_timekeep_curr;
        if (++idx >= VDSO_TH_NUM)
                idx = 0;
        svtk->sv_timekeep_curr = idx;
        if (++svtk->sv_timekeep_gen == 0)
                svtk->sv_timekeep_gen = 1;

        tk = (struct vdso_timekeep *)(shared_page_mapping +
            svtk->sv_timekeep_off);
        tk->tk_th[idx].th_gen = 0;
        atomic_thread_fence_rel();
        if (enabled)
                tk->tk_th[idx] = th;
        atomic_store_rel_32(&tk->tk_th[idx].th_gen, svtk->sv_timekeep_gen);
        atomic_store_rel_32(&tk->tk_current, idx);

        /*
         * The ordering of the assignment to tk_enabled relative to
         * the update of the vdso_timehands is not important.
         */
        tk->tk_enabled = enabled;
}

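/*
 * timehands_update32() is the same algorithm as timehands_update(), but it
 * operates on the 32-bit (COMPAT_FREEBSD32) layout of the exported
 * structures, which lives at its own offset in the shared page.
 */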
#ifdef COMPAT_FREEBSD32
static void
timehands_update32(struct vdso_sv_tk *svtk)
{
        struct vdso_timehands32 th;
        struct vdso_timekeep32 *tk;
        uint32_t enabled, idx;

        enabled = tc_fill_vdso_timehands32(&th);
        th.th_gen = 0;
        idx = svtk->sv_timekeep_curr;
        if (++idx >= VDSO_TH_NUM)
                idx = 0;
        svtk->sv_timekeep_curr = idx;
        if (++svtk->sv_timekeep_gen == 0)
                svtk->sv_timekeep_gen = 1;

        tk = (struct vdso_timekeep32 *)(shared_page_mapping +
            svtk->sv_timekeep_off);
        tk->tk_th[idx].th_gen = 0;
        atomic_thread_fence_rel();
        if (enabled)
                tk->tk_th[idx] = th;
        atomic_store_rel_32(&tk->tk_th[idx].th_gen, svtk->sv_timekeep_gen);
        atomic_store_rel_32(&tk->tk_current, idx);
        tk->tk_enabled = enabled;
}
#endif

/*
 * This is hackish, but it is the easiest way to avoid creating list
 * structures that need to be iterated over from the hardclock interrupt
 * context.
 */
static struct vdso_sv_tk *host_svtk;
#ifdef COMPAT_FREEBSD32
static struct vdso_sv_tk *compat32_svtk;
#endif

void
timekeep_push_vdso(void)
{

        if (host_svtk != NULL)
                timehands_update(host_svtk);
#ifdef COMPAT_FREEBSD32
        if (compat32_svtk != NULL)
                timehands_update32(compat32_svtk);
#endif
}

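/*
 * Reserve and initialize the shared page region backing the native ABI's
 * vdso_timekeep: a header followed by VDSO_TH_NUM vdso_timehands slots.
 * The result is remembered in host_svtk (or compat32_svtk for the 32-bit
 * variant below) so that timekeep_push_vdso() can find it on every
 * timehands update.
 */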
struct vdso_sv_tk *
alloc_sv_tk(void)
{
        struct vdso_sv_tk *svtk;
        int tk_base;
        uint32_t tk_ver;

        tk_ver = VDSO_TK_VER_CURR;
        svtk = malloc(sizeof(struct vdso_sv_tk), M_TEMP, M_WAITOK | M_ZERO);
        tk_base = shared_page_alloc(sizeof(struct vdso_timekeep) +
            sizeof(struct vdso_timehands) * VDSO_TH_NUM, 16);
        KASSERT(tk_base != -1, ("tk_base -1 for native"));
        shared_page_write(tk_base + offsetof(struct vdso_timekeep, tk_ver),
            sizeof(uint32_t), &tk_ver);
        svtk->sv_timekeep_off = tk_base;
        timekeep_push_vdso();
        return (svtk);
}

#ifdef COMPAT_FREEBSD32
struct vdso_sv_tk *
alloc_sv_tk_compat32(void)
{
        struct vdso_sv_tk *svtk;
        int tk_base;
        uint32_t tk_ver;

        svtk = malloc(sizeof(struct vdso_sv_tk), M_TEMP, M_WAITOK | M_ZERO);
        tk_ver = VDSO_TK_VER_CURR;
        tk_base = shared_page_alloc(sizeof(struct vdso_timekeep32) +
            sizeof(struct vdso_timehands32) * VDSO_TH_NUM, 16);
        KASSERT(tk_base != -1, ("tk_base -1 for 32bit"));
        shared_page_write(tk_base + offsetof(struct vdso_timekeep32,
            tk_ver), sizeof(uint32_t), &tk_ver);
        svtk->sv_timekeep_off = tk_base;
        timekeep_push_vdso();
        return (svtk);
}
#endif

#ifdef RANDOM_FENESTRASX
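/*
 * Publish the root RNG seed generation counter to userspace.  Consumers
 * (for instance the arc4random(3) implementation in libc) are expected to
 * compare the published generation with a cached copy to detect a reseed
 * of the kernel random device and discard their buffered state.
 */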
void
fxrng_push_seed_generation(uint64_t gen)
{
        if (fxrng_shpage_mapping == NULL || !fxrng_enabled)
                return;
        KASSERT(gen < INT32_MAX,
            ("fxrng seed version shouldn't roll over a 32-bit counter "
             "for approximately 456,000 years"));
        atomic_store_rel_32(&fxrng_shpage_mapping->fx_generation32,
            (uint32_t)gen);
}

static void
alloc_sv_fxrng_generation(void)
{
        int base;

        /*
         * Allocate a full cache line for the fxrng root generation (64-bit
         * counter, or truncated 32-bit counter on ILP32 userspace).  It is
         * important that the line is not shared with frequently dirtied data,
         * and the shared page allocator lacks a __read_mostly mechanism.
         * However, PAGE_SIZE is typically large relative to the amount of
         * stuff we've got in it so far, so maybe the possible waste isn't an
         * issue.
         */
        base = shared_page_alloc(CACHE_LINE_SIZE, CACHE_LINE_SIZE);
        KASSERT(base != -1, ("%s: base allocation failed", __func__));
        fxrng_shpage_mapping = (void *)(shared_page_mapping + base);
        *fxrng_shpage_mapping = (struct vdso_fxrng_generation) {
                .fx_vdso_version = VDSO_FXRNG_VER_CURR,
        };
}
#endif /* RANDOM_FENESTRASX */

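/*
 * Per-sysentvec initialization of the shared page contents, run when an ABI
 * is registered.  For ABIs using the shared page, copy the signal
 * trampoline (or, with SV_DSO_SIG, the signal DSO) into the page and record
 * the resulting user addresses in the sysentvec.  The timekeep area is
 * allocated once per native/compat32 flavor by the base FreeBSD ABI and
 * reused by ABIs registered later; the RNG generation slot is shared by all
 * ABIs.
 */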
void
exec_sysvec_init(void *param)
{
        struct sysentvec *sv;
        vm_offset_t sb;
#ifdef RANDOM_FENESTRASX
        ptrdiff_t base;
#endif
        u_int flags;
        int res;

        sv = param;
        flags = sv->sv_flags;
        if ((flags & SV_SHP) == 0)
                return;
        MPASS(sv->sv_shared_page_obj == NULL);
        MPASS(sv->sv_shared_page_base != 0);

        sv->sv_shared_page_obj = shared_page_obj;
        if ((flags & SV_ABI_MASK) == SV_ABI_FREEBSD) {
                if ((flags & SV_DSO_SIG) != 0) {
                        sb = sv->sv_shared_page_base;
                        res = shared_page_fill((uintptr_t)sv->sv_szsigcode,
                            16, sv->sv_sigcode);
                        if (res == -1)
                                panic("copying sigtramp to shared page");
                        sb += res;
                        sv->sv_vdso_base = sb;
                        sb += sv->sv_sigcodeoff;
                        sv->sv_sigcode_base = sb;
                } else {
                        sv->sv_sigcode_base = sv->sv_shared_page_base +
                            shared_page_fill(*(sv->sv_szsigcode), 16,
                            sv->sv_sigcode);
                }
        }
        if ((flags & SV_TIMEKEEP) != 0) {
#ifdef COMPAT_FREEBSD32
                if ((flags & SV_ILP32) != 0) {
                        if ((flags & SV_ABI_MASK) == SV_ABI_FREEBSD) {
                                KASSERT(compat32_svtk == NULL,
                                    ("Compat32 already registered"));
                                compat32_svtk = alloc_sv_tk_compat32();
                        } else {
                                KASSERT(compat32_svtk != NULL,
                                    ("Compat32 not registered"));
                        }
                        sv->sv_timekeep_base = sv->sv_shared_page_base +
                            compat32_svtk->sv_timekeep_off;
                } else {
#endif
                        if ((flags & SV_ABI_MASK) == SV_ABI_FREEBSD) {
                                KASSERT(host_svtk == NULL,
                                    ("Host already registered"));
                                host_svtk = alloc_sv_tk();
                        } else {
                                KASSERT(host_svtk != NULL,
                                    ("Host not registered"));
                        }
                        sv->sv_timekeep_base = sv->sv_shared_page_base +
                            host_svtk->sv_timekeep_off;
#ifdef COMPAT_FREEBSD32
                }
#endif
        }
#ifdef RANDOM_FENESTRASX
        if ((flags & (SV_ABI_MASK | SV_RNG_SEED_VER)) ==
            (SV_ABI_FREEBSD | SV_RNG_SEED_VER)) {
                /*
                 * Only allocate a single VDSO entry for multiple sysentvecs,
                 * i.e., native and COMPAT32.
                 */
                if (fxrng_shpage_mapping == NULL)
                        alloc_sv_fxrng_generation();
                base = (char *)fxrng_shpage_mapping - shared_page_mapping;
                sv->sv_fxrng_gen_base = sv->sv_shared_page_base + base;
        }
#endif
}

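/*
 * Copy the shared page layout computed for a primary sysentvec (sv) into a
 * secondary sysentvec (sv2) of the same ABI flavor.  Offsets within the
 * page are identical; only the per-ABI base address may differ, so every
 * address is rebased onto sv2->sv_shared_page_base.
 */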
void
exec_sysvec_init_secondary(struct sysentvec *sv, struct sysentvec *sv2)
{
        MPASS((sv2->sv_flags & SV_ABI_MASK) == (sv->sv_flags & SV_ABI_MASK));
        MPASS((sv2->sv_flags & SV_TIMEKEEP) == (sv->sv_flags & SV_TIMEKEEP));
        MPASS((sv2->sv_flags & SV_SHP) != 0 && (sv->sv_flags & SV_SHP) != 0);
        MPASS((sv2->sv_flags & SV_DSO_SIG) == (sv->sv_flags & SV_DSO_SIG));
        MPASS((sv2->sv_flags & SV_RNG_SEED_VER) ==
            (sv->sv_flags & SV_RNG_SEED_VER));

        sv2->sv_shared_page_obj = sv->sv_shared_page_obj;
        sv2->sv_sigcode_base = sv2->sv_shared_page_base +
            (sv->sv_sigcode_base - sv->sv_shared_page_base);
        if ((sv2->sv_flags & SV_DSO_SIG) != 0) {
                sv2->sv_vdso_base = sv2->sv_shared_page_base +
                    (sv->sv_vdso_base - sv->sv_shared_page_base);
        }
        if ((sv2->sv_flags & SV_ABI_MASK) != SV_ABI_FREEBSD)
                return;
        if ((sv2->sv_flags & SV_TIMEKEEP) != 0) {
                sv2->sv_timekeep_base = sv2->sv_shared_page_base +
                    (sv->sv_timekeep_base - sv->sv_shared_page_base);
        }
        if ((sv2->sv_flags & SV_RNG_SEED_VER) != 0) {
                sv2->sv_fxrng_gen_base = sv2->sv_shared_page_base +
                    (sv->sv_fxrng_gen_base - sv->sv_shared_page_base);
        }
}