2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2000,2003 Doug Rabson
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/mutex.h>
38 #include <sys/sysctl.h>
40 #include <sys/systm.h>
47 static MALLOC_DEFINE(M_KOBJ, "kobj", "Kernel object structures");

/* Method-dispatch cache statistics, exported read-only via sysctl below. */
51 u_int kobj_lookup_hits;
52 u_int kobj_lookup_misses;

54 SYSCTL_UINT(_kern, OID_AUTO, kobj_hits, CTLFLAG_RD,
55 &kobj_lookup_hits, 0, "");
56 SYSCTL_UINT(_kern, OID_AUTO, kobj_misses, CTLFLAG_RD,
57 &kobj_lookup_misses, 0, "");

/*
 * Global lock protecting kobj bookkeeping.  kobj_mutex_inited gates
 * one-time mtx_init() (see kobj_init_mutex below) and is also used by
 * the *_static variants to assert they only run during early boot.
 * kobj_next_id starts at 1 so that a descriptor id of 0 means
 * "not yet registered" (see kobj_class_compile_common).
 */
61 static struct mtx kobj_mtx;
62 static int kobj_mutex_inited;
63 static int kobj_next_id = 1;

65 #define KOBJ_LOCK() mtx_lock(&kobj_mtx)
66 #define KOBJ_UNLOCK() mtx_unlock(&kobj_mtx)
/*
 * Fix: dropped the stray trailing semicolon from the expansion.  With
 * it, every use such as "KOBJ_ASSERT(MA_NOTOWNED);" expanded to two
 * statements (mtx_assert(...);;), which breaks unbraced if/else bodies
 * and is inconsistent with KOBJ_LOCK()/KOBJ_UNLOCK() above.
 */
67 #define KOBJ_ASSERT(what) mtx_assert(&kobj_mtx, what)

69 SYSCTL_INT(_kern, OID_AUTO, kobj_methodcount, CTLFLAG_RD,
70 &kobj_next_id, 0, "");
/*
 * SYSINIT hook: initialise the global kobj mutex exactly once.
 * kobj_mutex_inited doubles as the "early boot is over" flag checked
 * by kobj_class_compile_static() and kobj_init_static().
 * (arg is unused; body partially elided in this fragment.)
 */
73 kobj_init_mutex(void *arg)
75 if (!kobj_mutex_inited) {
76 mtx_init(&kobj_mtx, "kobj", NULL, MTX_DEF);
77 kobj_mutex_inited = 1;
81 SYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL);
84 * This method structure is used to initialise new caches. Since the
85 * desc pointer is NULL, it is guaranteed never to match any read
88 static const struct kobj_method null_method = {
/*
 * NOTE(review): initializer of null_method and the body of
 * kobj_error_method are elided in this fragment.  Presumably
 * kobj_error_method is the function null_method points at and returns
 * an error code for unimplemented methods -- confirm against the full
 * file before relying on that.
 */
93 kobj_error_method(void)
/*
 * Shared worker for compiling a class: assign a fresh id (from
 * kobj_next_id) to every method descriptor not yet registered
 * (id == 0), then reset every slot of the dispatch cache to
 * &null_method so no stale entry can match a real descriptor.
 * (Parts of the body are elided in this fragment.)
 */
100 kobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops)
106 * Don't do anything if we are already compiled.
112 * First register any methods which need it.
114 for (i = 0, m = cls->methods; m->desc; i++, m++) {
115 if (m->desc->id == 0)
116 m->desc->id = kobj_next_id++;
120 * Then initialise the ops table.
122 for (i = 0; i < KOBJ_CACHE_SIZE; i++)
123 ops->cache[i] = &null_method;
/*
 * Allocate an ops table for the class (honouring the caller's malloc
 * flags) and compile it via kobj_class_compile_common().  Must be
 * entered without kobj_mtx held because malloc() may sleep.
 * NOTE(review): the race-loss handling and the return value (likely 0
 * or ENOMEM) are elided in this fragment -- confirm in the full file.
 */
129 kobj_class_compile1(kobj_class_t cls, int mflags)
133 KOBJ_ASSERT(MA_NOTOWNED);
135 ops = malloc(sizeof(struct kobj_ops), M_KOBJ, mflags);
140 * We may have lost a race for kobj_class_compile here - check
141 * to make sure someone else hasn't already compiled this
150 kobj_class_compile_common(cls, ops);
/*
 * Sleeping variant: compile with M_WAITOK, so the allocation cannot
 * fail and any non-zero result from kobj_class_compile1() is a bug
 * (hence the KASSERT rather than error propagation).
 */
156 kobj_class_compile(kobj_class_t cls)
160 error = kobj_class_compile1(cls, M_WAITOK);
161 KASSERT(error == 0, ("kobj_class_compile1 returned %d", error));
/*
 * Early-boot variant: compile into a caller-provided, statically
 * allocated ops table instead of malloc'ing one.  Only legal before
 * kobj_init_mutex() has run (kobj_mutex_inited == 0), so no locking
 * is needed here.
 */
165 kobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops)
168 KASSERT(kobj_mutex_inited == 0,
169 ("%s: only supported during early cycles", __func__));
172 * Increment refs to make sure that the ops table is not freed.
175 kobj_class_compile_common(cls, ops);
/*
 * Linear scan of a single class's method table for an entry whose
 * descriptor pointer matches 'desc'.  The table is terminated by an
 * entry with a NULL desc.  Returns the matching entry, or (per the
 * elided tail of this fragment) presumably NULL when absent.
 */
178 static kobj_method_t*
179 kobj_lookup_method_class(kobj_class_t cls, kobjop_desc_t desc)
181 kobj_method_t *methods = cls->methods;
184 for (ce = methods; ce && ce->desc; ce++) {
185 if (ce->desc == desc) {
/*
 * Multiple-inheritance lookup: try the class's own method table first,
 * then recurse depth-first through the NULL-terminated baseclasses
 * array.  (Early-return plumbing is elided in this fragment.)
 */
193 static kobj_method_t*
194 kobj_lookup_method_mi(kobj_class_t cls,
200 ce = kobj_lookup_method_class(cls, desc);
204 basep = cls->baseclasses;
206 for (; *basep; basep++) {
207 ce = kobj_lookup_method_mi(*basep, desc);
/*
 * Public lookup entry point: delegates to the multiple-inheritance
 * walker.  NOTE(review): the fallback to null_method/error method and
 * any cache update are elided in this fragment -- confirm in the full
 * file.
 */
217 kobj_lookup_method(kobj_class_t cls,
223 ce = kobj_lookup_method_mi(cls, desc);
/*
 * Drop bookkeeping for a class and, when no instances remain
 * (cls->refs == 0), free its compiled ops table.  Entered without
 * kobj_mtx held; the elided body takes the lock to close the race
 * with kobj_create mentioned below.
 */
232 kobj_class_free(kobj_class_t cls)
236 KOBJ_ASSERT(MA_NOTOWNED);
240 * Protect against a race between kobj_create and
243 if (cls->refs == 0) {
245 * For now we don't do anything to unregister any methods
246 * which are no longer used.
250 * Free memory and clean up.
/*
 * Shared tail of object initialisation used by kobj_init1() and
 * kobj_init_static().  NOTE(review): body entirely elided in this
 * fragment; presumably points obj at the class's compiled ops and
 * bumps cls->refs -- confirm against the full file.
 */
263 kobj_init_common(kobj_t obj, kobj_class_t cls)
/*
 * Initialise an object against a class, compiling the class on demand.
 * The while loop re-checks cls->ops because the lock is dropped around
 * kobj_class_compile1() (malloc may sleep), so another thread may win
 * or lose the compile race.  Error propagation is elided in this
 * fragment.
 */
271 kobj_init1(kobj_t obj, kobj_class_t cls, int mflags)
276 while (cls->ops == NULL) {
278 * kobj_class_compile doesn't want the lock held
279 * because of the call to malloc - we drop the lock
283 error = kobj_class_compile1(cls, mflags);
288 kobj_init_common(obj, cls);
/*
 * Allocate (zeroed, cls->size bytes from the caller's malloc type) and
 * initialise a new object in one step.  The elided cleanup path frees
 * the object when kobj_init1() fails; NULL-return handling for the
 * malloc itself is also elided in this fragment.
 */
294 kobj_create(kobj_class_t cls, struct malloc_type *mtype, int mflags)
298 obj = malloc(cls->size, mtype, mflags | M_ZERO);
301 if (kobj_init1(obj, cls, mflags) != 0) {
/*
 * Legacy void wrapper: initialise with M_NOWAIT and treat failure as
 * fatal.  Callers that can tolerate failure should use kobj_init1()
 * directly instead.
 */
309 kobj_init(kobj_t obj, kobj_class_t cls)
313 error = kobj_init1(obj, cls, M_NOWAIT);
315 panic("kobj_init1 failed: error %d", error);
/*
 * Early-boot variant of kobj_init(): no locking, no on-demand compile.
 * Only legal before kobj_init_mutex() has run (kobj_mutex_inited == 0);
 * the class must already have been compiled, presumably via
 * kobj_class_compile_static().
 */
319 kobj_init_static(kobj_t obj, kobj_class_t cls)
322 KASSERT(kobj_mutex_inited == 0,
323 ("%s: only supported during early cycles", __func__));
325 kobj_init_common(obj, cls);
/*
 * Destroy an object: capture its class before the (elided) free of the
 * object itself, then drop the class reference via kobj_class_free(),
 * which releases the compiled ops table once the last instance is
 * gone.  Entered without kobj_mtx held.
 */
329 kobj_delete(kobj_t obj, struct malloc_type *mtype)
331 kobj_class_t cls = obj->ops->cls;
335 * Consider freeing the compiled method table for the class
336 * after its last instance is deleted. As an optimisation, we
337 * should defer this for a short while to avoid thrashing.
339 KOBJ_ASSERT(MA_NOTOWNED);
346 kobj_class_free(cls);