/*-
 * Copyright (c) 2000,2003 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

static MALLOC_DEFINE(M_KOBJ, "kobj", "Kernel object structures");
u_int kobj_lookup_hits;
u_int kobj_lookup_misses;

SYSCTL_UINT(_kern, OID_AUTO, kobj_hits, CTLFLAG_RD,
    &kobj_lookup_hits, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, kobj_misses, CTLFLAG_RD,
    &kobj_lookup_misses, 0, "");
static struct mtx kobj_mtx;
static int kobj_mutex_inited;
static int kobj_next_id = 1;

#define	KOBJ_LOCK()	mtx_lock(&kobj_mtx)
#define	KOBJ_UNLOCK()	mtx_unlock(&kobj_mtx)
#define	KOBJ_ASSERT(what)	mtx_assert(&kobj_mtx, what);

SYSCTL_INT(_kern, OID_AUTO, kobj_methodcount, CTLFLAG_RD,
    &kobj_next_id, 0, "");
static void
kobj_init_mutex(void *arg)
{

	if (!kobj_mutex_inited) {
		mtx_init(&kobj_mtx, "kobj", NULL, MTX_DEF);
		kobj_mutex_inited = 1;
	}
}

SYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL);
/*
 * This method structure is used to initialise new caches. Since the
 * desc pointer is NULL, it is guaranteed never to match any read
 * request.
 */
static const struct kobj_method null_method = {
	0, 0,
};

int
kobj_error_method(void)
{

	return (ENXIO);
}
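/*
 * Illustrative sketch, not compiled into this file: how a cache slot
 * initialised to &null_method behaves on dispatch.  This paraphrases the
 * slot selection done by the KOBJOPLOOKUP stubs generated via
 * <sys/kobj.h>; the helper name below is hypothetical.
 */
#if 0
static kobj_method_t *
example_cache_probe(kobj_ops_t ops, kobjop_desc_t desc)
{
	kobj_method_t *ce;

	/* Hash the registered method id into one of KOBJ_CACHE_SIZE slots. */
	ce = ops->cache[desc->id & (KOBJ_CACHE_SIZE - 1)];

	/*
	 * A slot still holding &null_method has a NULL desc, which can
	 * never equal a real desc pointer, so the first dispatch of any
	 * method always falls through to kobj_lookup_method().
	 */
	if (ce->desc != desc)
		ce = kobj_lookup_method(ops->cls,
		    &ops->cache[desc->id & (KOBJ_CACHE_SIZE - 1)], desc);
	return (ce);
}
#endif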
static void
kobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops)
{
	kobj_method_t *m;
	int i;

	/*
	 * Don't do anything if we are already compiled.
	 */
	if (cls->ops)
		return;

	/*
	 * First register any methods which need it.
	 */
	for (i = 0, m = cls->methods; m->desc; i++, m++) {
		if (m->desc->id == 0)
			m->desc->id = kobj_next_id++;
	}

	/*
	 * Then initialise the ops table.
	 */
	for (i = 0; i < KOBJ_CACHE_SIZE; i++)
		ops->cache[i] = &null_method;
	ops->cls = cls;
	cls->ops = ops;
}
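/*
 * Illustrative sketch, not compiled into this file: the methods table
 * walked above is normally built with the KOBJMETHOD()/KOBJMETHOD_END
 * macros from <sys/kobj.h>.  The "foodev" class, its functions, and the
 * foo_start/foo_stop methods (which would come from a foo_if.m interface
 * description) are hypothetical.
 */
#if 0
static int	foodev_start(kobj_t obj);
static int	foodev_stop(kobj_t obj);

static kobj_method_t foodev_methods[] = {
	KOBJMETHOD(foo_start,	foodev_start),
	KOBJMETHOD(foo_stop,	foodev_stop),
	KOBJMETHOD_END		/* NULL desc terminates the registration loop */
};

DEFINE_CLASS_0(foodev, foodev_class, foodev_methods, sizeof(struct foodev));
#endif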
void
kobj_class_compile(kobj_class_t cls)
{
	kobj_ops_t ops;

	KOBJ_ASSERT(MA_NOTOWNED);

	/*
	 * Allocate space for the compiled ops table.
	 */
	ops = malloc(sizeof(struct kobj_ops), M_KOBJ, M_NOWAIT);
	if (!ops)
		panic("%s: out of memory", __func__);

	KOBJ_LOCK();

	/*
	 * We may have lost a race for kobj_class_compile here - check
	 * to make sure someone else hasn't already compiled this
	 * class.
	 */
	if (cls->ops) {
		KOBJ_UNLOCK();
		free(ops, M_KOBJ);
		return;
	}

	kobj_class_compile_common(cls, ops);
	KOBJ_UNLOCK();
}
void
kobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops)
{

	KASSERT(kobj_mutex_inited == 0,
	    ("%s: only supported during early cycles", __func__));

	/*
	 * Increment refs to make sure that the ops table is not freed.
	 */
	cls->refs++;
	kobj_class_compile_common(cls, ops);
}
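/*
 * Illustrative sketch, not compiled into this file: a very early consumer,
 * running before the SI_SUB_LOCK SYSINIT above has set up kobj_mtx, must
 * supply its own statically allocated ops table rather than letting
 * kobj_class_compile() call malloc().  The "foodev" names continue the
 * hypothetical example above.
 */
#if 0
static struct kobj_ops foodev_kobj_ops;	/* static storage, never freed */
static struct foodev foodev0;

static void
foodev_early_init(void)
{
	kobj_class_compile_static(&foodev_class, &foodev_kobj_ops);
	kobj_init_static((kobj_t)&foodev0, &foodev_class);
}
#endif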
static kobj_method_t*
kobj_lookup_method_class(kobj_class_t cls, kobjop_desc_t desc)
{
	kobj_method_t *methods = cls->methods;
	kobj_method_t *ce;

	for (ce = methods; ce && ce->desc; ce++) {
		if (ce->desc == desc) {
			return (ce);
		}
	}

	return (NULL);
}
static kobj_method_t*
kobj_lookup_method_mi(kobj_class_t cls,
		      kobjop_desc_t desc)
{
	kobj_method_t *ce;
	kobj_class_t *basep;

	ce = kobj_lookup_method_class(cls, desc);
	if (ce)
		return (ce);

	basep = cls->baseclasses;
	if (basep) {
		for (; *basep; basep++) {
			ce = kobj_lookup_method_mi(*basep, desc);
			if (ce)
				return (ce);
		}
	}

	return (NULL);
}
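/*
 * Illustrative note on resolution order: for a hypothetical class declared
 * with DEFINE_CLASS_2(foo, foo_class, foo_methods, size, bar_class,
 * baz_class), the search above tries foo_class itself, then bar_class and
 * (recursively) bar's base classes, then baz_class and its base classes,
 * returning the first match, so a subclass entry always overrides the
 * same method in a base class.
 */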
kobj_method_t*
kobj_lookup_method(kobj_class_t cls,
		   kobj_method_t **cep,
		   kobjop_desc_t desc)
{
	kobj_method_t *ce;

	/*
	 * Correct for the 'hit' assumption in KOBJOPLOOKUP and record
	 * a 'miss'.
	 */
	kobj_lookup_hits--;
	kobj_lookup_misses++;

	ce = kobj_lookup_method_mi(cls, desc);
	if (!ce)
		ce = &desc->deflt;
	if (cep)
		*cep = ce;
	return (ce);
}
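/*
 * Illustrative sketch, not compiled into this file: the accounting the
 * comment above corrects, paraphrased from the KOBJOPLOOKUP dispatch stubs
 * generated via <sys/kobj.h>.  Every dispatch optimistically counts a
 * cache hit; only when the cached entry does not match does the stub call
 * kobj_lookup_method(), which converts that hit into a miss.
 */
#if 0
	kobj_lookup_hits++;		/* stub assumes a cache hit */
	if (_ce->desc != _desc)		/* stale slot or &null_method */
		_ce = kobj_lookup_method(ops->cls, _cep, _desc);
					/* slow path: hits--, misses++ */
#endif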
void
kobj_class_free(kobj_class_t cls)
{
	void *ops = NULL;

	KOBJ_ASSERT(MA_NOTOWNED);
	KOBJ_LOCK();

	/*
	 * Protect against a race between kobj_create and
	 * kobj_delete.
	 */
	if (cls->refs == 0) {
		/*
		 * For now we don't do anything to unregister any methods
		 * which are no longer used.
		 */

		/*
		 * Free memory and clean up.
		 */
		ops = cls->ops;
		cls->ops = NULL;
	}

	KOBJ_UNLOCK();

	if (ops)
		free(ops, M_KOBJ);
}
kobj_t
kobj_create(kobj_class_t cls,
	    struct malloc_type *mtype,
	    int mflags)
{
	kobj_t obj;

	/*
	 * Allocate and initialise the new object.
	 */
	obj = malloc(cls->size, mtype, mflags | M_ZERO);
	if (!obj)
		return (NULL);
	kobj_init(obj, cls);

	return (obj);
}
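/*
 * Illustrative sketch, not compiled into this file: a typical object
 * lifecycle for the hypothetical "foodev" class from the earlier sketches.
 * FOO_START() stands in for the dispatch wrapper a foo_if.m interface
 * description would generate.
 */
#if 0
static int
foodev_example(void)
{
	kobj_t obj;
	int error;

	obj = kobj_create(&foodev_class, M_TEMP, M_WAITOK);	/* cls->refs++ */
	error = FOO_START(obj);		/* dispatched through obj->ops cache */
	kobj_delete(obj, M_TEMP);	/* cls->refs--, may free the ops table */
	return (error);
}
#endif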
static void
kobj_init_common(kobj_t obj, kobj_class_t cls)
{

	obj->ops = cls->ops;
	cls->refs++;
}
void
kobj_init(kobj_t obj, kobj_class_t cls)
{
	KOBJ_ASSERT(MA_NOTOWNED);
  retry:
	KOBJ_LOCK();

	/*
	 * Consider compiling the class' method table.
	 */
	if (!cls->ops) {
		/*
		 * kobj_class_compile doesn't want the lock held
		 * because of the call to malloc - we drop the lock
		 * and re-try.
		 */
		KOBJ_UNLOCK();
		kobj_class_compile(cls);
		goto retry;
	}

	kobj_init_common(obj, cls);

	KOBJ_UNLOCK();
}
void
kobj_init_static(kobj_t obj, kobj_class_t cls)
{

	KASSERT(kobj_mutex_inited == 0,
	    ("%s: only supported during early cycles", __func__));

	kobj_init_common(obj, cls);
}
void
kobj_delete(kobj_t obj, struct malloc_type *mtype)
{
	kobj_class_t cls = obj->ops->cls;
	int refs;

	/*
	 * Consider freeing the compiled method table for the class
	 * after its last instance is deleted. As an optimisation, we
	 * should defer this for a short while to avoid thrashing.
	 */
	KOBJ_ASSERT(MA_NOTOWNED);
	KOBJ_LOCK();
	cls->refs--;
	refs = cls->refs;
	KOBJ_UNLOCK();

	if (!refs)
		kobj_class_free(cls);

	obj->ops = NULL;
	if (mtype)
		free(obj, mtype);
}