2 * Copyright (c) 2010,2013 Lawrence Stewart <lstewart@freebsd.org>
3 * Copyright (c) 2010 The FreeBSD Foundation
6 * This software was developed by Lawrence Stewart while studying at the Centre
7 * for Advanced Internet Architectures, Swinburne University of Technology,
8 * made possible in part by grants from the FreeBSD Foundation and Cisco
9 * University Research Program Fund at Community Foundation Silicon Valley.
11 * Portions of this software were developed at the Centre for Advanced
12 * Internet Architectures, Swinburne University of Technology, Melbourne,
13 * Australia by Lawrence Stewart under sponsorship from the FreeBSD Foundation.
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
40 #include <sys/param.h>
41 #include <sys/kernel.h>
42 #include <sys/hhook.h>
43 #include <sys/khelp.h>
44 #include <sys/malloc.h>
45 #include <sys/module.h>
46 #include <sys/module_khelp.h>
48 #include <sys/queue.h>
49 #include <sys/refcount.h>
50 #include <sys/systm.h>
/*
 * NOTE(review): members of the per-registration hook record; the enclosing
 * "struct hhook {" opening line and some members are outside this view.
 */
/* Hook function invoked by hhook_run_hooks(). */
55 hhook_func_t hhk_func;
/* Helper that owns this registration (consulted for h_flags/h_id). */
56 struct helper *hhk_helper;
/* Linkage on the owning hhook_head's hhh_hooks STAILQ. */
58 STAILQ_ENTRY(hhook) hhk_next;
61 static MALLOC_DEFINE(M_HHOOK, "hhook", "Helper hooks are linked off hhook_head lists");
/* List type and global list of every registered hhook_head. */
63 LIST_HEAD(hhookheadhead, hhook_head);
64 struct hhookheadhead hhook_head_list;
/*
 * Per-vnet list of virtualised hhook_heads; heads registered with
 * HHOOK_HEADISINVNET are also linked here via hhh_vnext.
 */
65 VNET_DEFINE(struct hhookheadhead, hhook_vhead_list);
66 #define V_hhook_vhead_list VNET(hhook_vhead_list)
/*
 * Mutex protecting hhook_head_list (and, per the comment below,
 * n_hhookheads); wrapped by the HHHLIST_* macros.
 */
68 static struct mtx hhook_head_list_lock;
69 MTX_SYSINIT(hhookheadlistlock, &hhook_head_list_lock, "hhook_head list lock",
72 /* Protected by hhook_head_list_lock. */
73 static uint32_t n_hhookheads;
75 /* Private function prototypes. */
76 static void hhook_head_destroy(struct hhook_head *hhh);
/*
 * Non-static prototype: presumably defined in the khelp subsystem and
 * called from hhook_head_register() below — TODO confirm against khelp.
 */
77 void khelp_new_hhook_registered(struct hhook_head *hhh, uint32_t flags);
/* Wrappers for the global hhook_head list mutex. */
79 #define HHHLIST_LOCK() mtx_lock(&hhook_head_list_lock)
80 #define HHHLIST_UNLOCK() mtx_unlock(&hhook_head_list_lock)
81 #define HHHLIST_LOCK_ASSERT() mtx_assert(&hhook_head_list_lock, MA_OWNED)
/*
 * Per-head rm (read-mostly) lock wrappers: the read side is taken on the
 * hot hhook_run_hooks() path, the write side for hook list mutation.
 */
83 #define HHH_LOCK_INIT(hhh) rm_init(&(hhh)->hhh_lock, "hhook_head rm lock")
84 #define HHH_LOCK_DESTROY(hhh) rm_destroy(&(hhh)->hhh_lock)
85 #define HHH_WLOCK(hhh) rm_wlock(&(hhh)->hhh_lock)
86 #define HHH_WUNLOCK(hhh) rm_wunlock(&(hhh)->hhh_lock)
87 #define HHH_RLOCK(hhh, rmpt) rm_rlock(&(hhh)->hhh_lock, (rmpt))
88 #define HHH_RUNLOCK(hhh, rmpt) rm_runlock(&(hhh)->hhh_lock, (rmpt))
91 * Run all helper hook functions for a given hook point.
/*
 * NOTE(review): several interior lines of this function (return type,
 * declarations, braces) are missing from this chunk of the file.
 */
94 hhook_run_hooks(struct hhook_head *hhh, void *ctx_data, struct osd *hosd)
98 struct rm_priotracker rmpt;
/* Caller must already hold a reference on the head. */
100 KASSERT(hhh->hhh_refcount > 0, ("hhook_head %p refcount is 0", hhh));
/* Read-lock the head so the hook list is stable while we walk it. */
102 HHH_RLOCK(hhh, &rmpt);
103 STAILQ_FOREACH(hhk, &hhh->hhh_hooks, hhk_next) {
/* Look up the helper's per-object data only if the helper needs it. */
104 if (hhk->hhk_helper->h_flags & HELPER_NEEDS_OSD) {
105 hdata = osd_get(OSD_KHELP, hosd, hhk->hhk_helper->h_id);
112 * XXXLAS: We currently ignore the int returned by the hook,
113 * but will likely want to handle it in future to allow hhook to
114 * be used like pfil and effect changes at the hhook calling
115 * site e.g. we could define a new hook type of HHOOK_TYPE_PFIL
116 * and standardise what particular return values mean and set
117 * the context data to pass exactly the same information as pfil
118 * hooks currently receive, thus replicating pfil with hhook.
120 hhk->hhk_func(hhh->hhh_type, hhh->hhh_id, hhk->hhk_udata,
121 ctx_data, hdata, hosd);
123 HHH_RUNLOCK(hhh, &rmpt);
127 * Register a new helper hook function with a helper hook point.
/*
 * NOTE(review): interior lines (return type, error checks, locking,
 * closing braces) are missing from this chunk.
 */
130 hhook_add_hook(struct hhook_head *hhh, struct hookinfo *hki, uint32_t flags)
132 struct hhook *hhk, *tmp;
/* May sleep only when the caller passed HHOOK_WAITOK. */
140 hhk = malloc(sizeof(struct hhook), M_HHOOK,
141 M_ZERO | ((flags & HHOOK_WAITOK) ? M_WAITOK : M_NOWAIT));
/* Populate the new hook record from the caller-supplied hookinfo. */
146 hhk->hhk_helper = hki->hook_helper;
147 hhk->hhk_func = hki->hook_func;
148 hhk->hhk_udata = hki->hook_udata;
/* Reject duplicates: same function with same udata on this head. */
151 STAILQ_FOREACH(tmp, &hhh->hhh_hooks, hhk_next) {
152 if (tmp->hhk_func == hki->hook_func &&
153 tmp->hhk_udata == hki->hook_udata) {
154 /* The helper hook function is already registered. */
/* No duplicate found; append to the head's hook list. */
161 STAILQ_INSERT_TAIL(&hhh->hhh_hooks, hhk, hhk_next);
172 * Register a helper hook function with a helper hook point (including all
173 * virtual instances of the hook point if it is virtualised).
175 * The logic is unfortunately far more complex than for
176 * hhook_remove_hook_lookup() because hhook_add_hook() can call malloc() with
177 * M_WAITOK and thus we cannot call hhook_add_hook() with the
178 * hhook_head_list_lock held.
180 * The logic assembles an array of hhook_head structs that correspond to the
181 * helper hook point being hooked and bumps the refcount on each (all done with
182 * the hhook_head_list_lock held). The hhook_head_list_lock is then dropped, and
183 * hhook_add_hook() is called and the refcount dropped for each hhook_head
184 * struct in the array.
/*
 * NOTE(review): interior lines (return type, locking calls, goto targets,
 * closing braces) are missing from this chunk.
 */
187 hhook_add_hook_lookup(struct hookinfo *hki, uint32_t flags)
189 struct hhook_head **heads_to_hook, *hhh;
190 int error, i, n_heads_to_hook;
195 * Accessing n_hhookheads without hhook_head_list_lock held opens up a
196 * race with hhook_head_register() which we are unlikely to lose, but
197 * nonetheless have to cope with - hence the complex goto logic.
199 n_heads_to_hook = n_hhookheads;
200 heads_to_hook = malloc(n_heads_to_hook * sizeof(struct hhook_head *),
201 M_HHOOK, flags & HHOOK_WAITOK ? M_WAITOK : M_NOWAIT);
202 if (heads_to_hook == NULL)
/* Collect every head matching the hookinfo's type/id, taking a ref on each. */
206 LIST_FOREACH(hhh, &hhook_head_list, hhh_next) {
207 if (hhh->hhh_type == hki->hook_type &&
208 hhh->hhh_id == hki->hook_id) {
209 if (i < n_heads_to_hook) {
210 heads_to_hook[i] = hhh;
211 refcount_acquire(&heads_to_hook[i]->hhh_refcount);
215 * We raced with hhook_head_register() which
216 * inserted a hhook_head that we need to hook
217 * but did not malloc space for. Abort this run
/* Undo the refs taken so far before retrying/aborting. */
220 for (i--; i >= 0; i--)
221 refcount_release(&heads_to_hook[i]->hhh_refcount);
222 free(heads_to_hook, M_HHOOK);
/* Hook each collected head without the list lock held; drop each ref. */
230 for (i--; i >= 0; i--) {
232 error = hhook_add_hook(heads_to_hook[i], hki, flags);
233 refcount_release(&heads_to_hook[i]->hhh_refcount);
236 free(heads_to_hook, M_HHOOK);
242 * Remove a helper hook function from a helper hook point.
/*
 * NOTE(review): interior lines (return type, locking, free of the removed
 * entry, closing braces) are missing from this chunk.
 */
245 hhook_remove_hook(struct hhook_head *hhh, struct hookinfo *hki)
/* Find the registration matching both function and udata, then unlink it. */
253 STAILQ_FOREACH(tmp, &hhh->hhh_hooks, hhk_next) {
254 if (tmp->hhk_func == hki->hook_func &&
255 tmp->hhk_udata == hki->hook_udata) {
256 STAILQ_REMOVE(&hhh->hhh_hooks, tmp, hhook, hhk_next);
268 * Remove a helper hook function from a helper hook point (including all
269 * virtual instances of the hook point if it is virtualised).
/*
 * NOTE(review): interior lines (return type, list locking, return) are
 * missing from this chunk.
 */
272 hhook_remove_hook_lookup(struct hookinfo *hki)
274 struct hhook_head *hhh;
/* Remove the hook from every head matching the hookinfo's type/id. */
277 LIST_FOREACH(hhh, &hhook_head_list, hhh_next) {
278 if (hhh->hhh_type == hki->hook_type &&
279 hhh->hhh_id == hki->hook_id)
280 hhook_remove_hook(hhh, hki);
288 * Register a new helper hook point.
/*
 * NOTE(review): interior lines (return type, error returns, list locking,
 * n_hhookheads update, closing braces) are missing from this chunk.
 */
291 hhook_head_register(int32_t hhook_type, int32_t hhook_id, struct hhook_head **hhh,
294 struct hhook_head *tmphhh;
/* A head for this type/id must not already exist. */
296 tmphhh = hhook_head_get(hhook_type, hhook_id);
298 if (tmphhh != NULL) {
299 /* Hook point previously registered. */
300 hhook_head_release(tmphhh);
304 tmphhh = malloc(sizeof(struct hhook_head), M_HHOOK,
305 M_ZERO | ((flags & HHOOK_WAITOK) ? M_WAITOK : M_NOWAIT));
/* Initialise the new head; refcount starts at 1 for the registration. */
310 tmphhh->hhh_type = hhook_type;
311 tmphhh->hhh_id = hhook_id;
312 tmphhh->hhh_nhooks = 0;
313 STAILQ_INIT(&tmphhh->hhh_hooks);
314 HHH_LOCK_INIT(tmphhh);
315 refcount_init(&tmphhh->hhh_refcount, 1);
/* Virtualised heads are tagged with the current vnet and linked per-vnet. */
318 if (flags & HHOOK_HEADISINVNET) {
319 tmphhh->hhh_flags |= HHH_ISINVNET;
321 KASSERT(curvnet != NULL, ("curvnet is NULL"));
322 tmphhh->hhh_vid = (uintptr_t)curvnet;
323 LIST_INSERT_HEAD(&V_hhook_vhead_list, tmphhh, hhh_vnext);
326 LIST_INSERT_HEAD(&hhook_head_list, tmphhh, hhh_next);
/* Let khelp attach any helpers interested in this new hook point. */
330 khelp_new_hhook_registered(tmphhh, flags);
335 refcount_release(&tmphhh->hhh_refcount);
/*
 * Tear down a hhook_head: unlink it from the global (and, if virtualised,
 * per-vnet) list and destroy its lock. Caller must hold the list lock.
 * NOTE(review): interior lines (loop body freeing each hook, free of the
 * head, n_hhookheads decrement) are missing from this chunk.
 */
341 hhook_head_destroy(struct hhook_head *hhh)
343 struct hhook *tmp, *tmp2;
345 HHHLIST_LOCK_ASSERT();
346 KASSERT(n_hhookheads > 0, ("n_hhookheads should be > 0"));
348 LIST_REMOVE(hhh, hhh_next);
350 if (hhook_head_is_virtualised(hhh) == HHOOK_HEADISINVNET)
351 LIST_REMOVE(hhh, hhh_vnext);
/* SAFE variant: entries are presumably freed inside the loop body. */
354 STAILQ_FOREACH_SAFE(tmp, &hhh->hhh_hooks, hhk_next, tmp2)
357 HHH_LOCK_DESTROY(hhh);
363 * Remove a helper hook point.
/*
 * NOTE(review): interior lines (NULL check branch, locking, return value)
 * are missing from this chunk; the visible else-if suggests deregistration
 * is refused while other references are outstanding — confirm.
 */
366 hhook_head_deregister(struct hhook_head *hhh)
375 else if (hhh->hhh_refcount > 1)
378 hhook_head_destroy(hhh);
385 * Remove a helper hook point via a hhook_head lookup.
/*
 * NOTE(review): interior lines (return type, error-path handling) are
 * missing from this chunk.
 */
388 hhook_head_deregister_lookup(int32_t hhook_type, int32_t hhook_id)
390 struct hhook_head *hhh;
/* Lookup takes a reference; release it after deregistering. */
393 hhh = hhook_head_get(hhook_type, hhook_id);
394 error = hhook_head_deregister(hhh);
397 hhook_head_release(hhh);
403 * Lookup and return the hhook_head struct associated with the specified type
404 * and id, or NULL if not found. If found, the hhook_head's refcount is bumped.
/*
 * NOTE(review): interior lines (return type, list locking, the "skip this
 * head" branch, return statements) are missing from this chunk.
 */
407 hhook_head_get(int32_t hhook_type, int32_t hhook_id)
409 struct hhook_head *hhh;
412 LIST_FOREACH(hhh, &hhook_head_list, hhh_next) {
413 if (hhh->hhh_type == hhook_type && hhh->hhh_id == hhook_id) {
/* A virtualised head only matches when owned by the current vnet. */
415 if (hhook_head_is_virtualised(hhh) ==
416 HHOOK_HEADISINVNET) {
417 KASSERT(curvnet != NULL, ("curvnet is NULL"));
418 if (hhh->hhh_vid != (uintptr_t)curvnet)
/* Bump the refcount before returning the head to the caller. */
422 refcount_acquire(&hhh->hhh_refcount);
/* Drop a reference previously obtained via hhook_head_get(). */
432 hhook_head_release(struct hhook_head *hhh)
435 refcount_release(&hhh->hhh_refcount);
439 * Check the hhook_head private flags and return the appropriate public
440 * representation of the flag to the caller. The function is implemented in a
441 * way that allows us to cope with other subsystems becoming virtualised in the
/*
 * NOTE(review): interior lines (return type, ret initialisation, return)
 * are missing from this chunk.
 */
445 hhook_head_is_virtualised(struct hhook_head *hhh)
/* Map the private HHH_ISINVNET flag to the public HHOOK_HEADISINVNET. */
452 if (hhh->hhh_flags & HHH_ISINVNET)
453 ret = HHOOK_HEADISINVNET;
/*
 * Lookup variant of hhook_head_is_virtualised(): resolves type/id to a head,
 * queries its virtualisation flag, and drops the lookup reference.
 * NOTE(review): interior lines (return type, NULL-check, return) are
 * missing from this chunk.
 */
460 hhook_head_is_virtualised_lookup(int32_t hook_type, int32_t hook_id)
462 struct hhook_head *hhh;
465 hhh = hhook_head_get(hook_type, hook_id);
470 ret = hhook_head_is_virtualised(hhh);
471 hhook_head_release(hhh);
477 * Vnet created and being initialised.
/* Initialise this vnet's (empty) list of virtualised hhook_heads. */
480 hhook_vnet_init(const void *unused __unused)
483 LIST_INIT(&V_hhook_vhead_list);
487 * Vnet being torn down and destroyed.
/*
 * NOTE(review): interior lines (return type, list locking around the loop)
 * are missing from this chunk.
 */
490 hhook_vnet_uninit(const void *unused __unused)
492 struct hhook_head *hhh, *tmphhh;
495 * If subsystems which export helper hook points use the hhook KPI
496 * correctly, the loop below should have no work to do because the
497 * subsystem should have already called hhook_head_deregister().
/* Warn about, then destroy, any heads the owning subsystem leaked. */
500 LIST_FOREACH_SAFE(hhh, &V_hhook_vhead_list, hhh_vnext, tmphhh) {
501 printf("%s: hhook_head type=%d, id=%d cleanup required\n",
502 __func__, hhh->hhh_type, hhh->hhh_id);
503 hhook_head_destroy(hhh);
510 * When a vnet is created and being initialised, init the V_hhook_vhead_list.
/* Runs early (SI_SUB_MBUF/SI_ORDER_FIRST) so hook points can register. */
512 VNET_SYSINIT(hhook_vnet_init, SI_SUB_MBUF, SI_ORDER_FIRST,
513 hhook_vnet_init, NULL);
516 * The hhook KPI provides a mechanism for subsystems which export helper hook
517 * points to clean up on vnet tear down, but in case the KPI is misused,
518 * provide a function to clean up and free memory for a vnet being destroyed.
/* Runs last in the subsystem (SI_ORDER_ANY) so exporters tear down first. */
520 VNET_SYSUNINIT(hhook_vnet_uninit, SI_SUB_MBUF, SI_ORDER_ANY,
521 hhook_vnet_uninit, NULL);