/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2020 Alexander V. Chernikov
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_route.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/route/route_ctl.h>
#include <net/route/route_var.h>
#include <net/route/nhop_utils.h>
#include <net/route/nhop.h>
#include <net/route/nhop_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_fib.h>
#include <netinet6/in6_fib.h>
67 * RIB helper functions.
71 rib_walk_ext_locked(struct rib_head *rnh, rib_walktree_f_t *wa_f,
72 rib_walk_hook_f_t *hook_f, void *arg)
75 hook_f(rnh, RIB_WALK_HOOK_PRE, arg);
76 rnh->rnh_walktree(&rnh->head, (walktree_f_t *)wa_f, arg);
78 hook_f(rnh, RIB_WALK_HOOK_POST, arg);
82 * Calls @wa_f with @arg for each entry in the table specified by
85 * @ss_t callback is called before and after the tree traversal
86 * while holding table lock.
88 * Table is traversed under read lock unless @wlock is set.
91 rib_walk_ext_internal(struct rib_head *rnh, bool wlock, rib_walktree_f_t *wa_f,
92 rib_walk_hook_f_t *hook_f, void *arg)
100 rib_walk_ext_locked(rnh, wa_f, hook_f, arg);
108 rib_walk_ext(uint32_t fibnum, int family, bool wlock, rib_walktree_f_t *wa_f,
109 rib_walk_hook_f_t *hook_f, void *arg)
111 struct rib_head *rnh;
113 if ((rnh = rt_tables_get_rnh(fibnum, family)) != NULL)
114 rib_walk_ext_internal(rnh, wlock, wa_f, hook_f, arg);
118 * Calls @wa_f with @arg for each entry in the table specified by
121 * Table is traversed under read lock unless @wlock is set.
124 rib_walk(uint32_t fibnum, int family, bool wlock, rib_walktree_f_t *wa_f,
128 rib_walk_ext(fibnum, family, wlock, wa_f, NULL, arg);
132 * Calls @wa_f with @arg for each entry in the table matching @prefix/@mask.
134 * The following flags are supported:
135 * RIB_FLAG_WLOCK: acquire exclusive lock
136 * RIB_FLAG_LOCKED: Assumes the table is already locked & skip locking
138 * By default, table is traversed under read lock.
141 rib_walk_from(uint32_t fibnum, int family, uint32_t flags, struct sockaddr *prefix,
142 struct sockaddr *mask, rib_walktree_f_t *wa_f, void *arg)
145 struct rib_head *rnh = rt_tables_get_rnh(fibnum, family);
150 if (flags & RIB_FLAG_WLOCK)
152 else if (!(flags & RIB_FLAG_LOCKED))
155 rnh->rnh_walktree_from(&rnh->head, prefix, mask, (walktree_f_t *)wa_f, arg);
157 if (flags & RIB_FLAG_WLOCK)
159 else if (!(flags & RIB_FLAG_LOCKED))
164 * Iterates over all existing fibs in system calling
165 * @hook_f function before/after traversing each fib.
166 * Calls @wa_f function for each element in current fib.
167 * If af is not AF_UNSPEC, iterates over fibs in particular
171 rib_foreach_table_walk(int family, bool wlock, rib_walktree_f_t *wa_f,
172 rib_walk_hook_f_t *hook_f, void *arg)
175 for (uint32_t fibnum = 0; fibnum < rt_numfibs; fibnum++) {
176 /* Do we want some specific family? */
177 if (family != AF_UNSPEC) {
178 rib_walk_ext(fibnum, family, wlock, wa_f, hook_f, arg);
182 for (int i = 1; i <= AF_MAX; i++)
183 rib_walk_ext(fibnum, i, wlock, wa_f, hook_f, arg);
188 * Iterates over all existing fibs in system and deletes each element
189 * for which @filter_f function returns non-zero value.
190 * If @family is not AF_UNSPEC, iterates over fibs in particular
194 rib_foreach_table_walk_del(int family, rib_filter_f_t *filter_f, void *arg)
197 for (uint32_t fibnum = 0; fibnum < rt_numfibs; fibnum++) {
198 /* Do we want some specific family? */
199 if (family != AF_UNSPEC) {
200 rib_walk_del(fibnum, family, filter_f, arg, 0);
204 for (int i = 1; i <= AF_MAX; i++)
205 rib_walk_del(fibnum, i, filter_f, arg, 0);
211 * Wrapper for the control plane functions for performing af-agnostic
213 * @fibnum: fib to perform the lookup.
214 * @dst: sockaddr with family and addr filled in. IPv6 addresses needs to be in
216 * @flags: fib(9) flags.
217 * @flowid: flow id for path selection in multipath use case.
219 * Returns nhop_object or NULL.
221 * Requires NET_EPOCH.
225 rib_lookup(uint32_t fibnum, const struct sockaddr *dst, uint32_t flags,
228 struct nhop_object *nh;
232 switch (dst->sa_family) {
236 const struct sockaddr_in *a = (const struct sockaddr_in *)dst;
237 nh = fib4_lookup(fibnum, a->sin_addr, 0, flags, flowid);
244 const struct sockaddr_in6 *a = (const struct sockaddr_in6*)dst;
245 nh = fib6_lookup(fibnum, &a->sin6_addr, a->sin6_scope_id,
257 decompose_change_notification(struct rib_cmd_info *rc, route_notification_t *cb,
260 uint32_t num_old, num_new;
261 uint32_t nh_idx_old, nh_idx_new;
262 struct weightened_nhop *wn_old, *wn_new;
263 struct weightened_nhop tmp = { NULL, 0 };
264 uint32_t idx_old = 0, idx_new = 0;
266 struct rib_cmd_info rc_del = { .rc_cmd = RTM_DELETE, .rc_rt = rc->rc_rt };
267 struct rib_cmd_info rc_add = { .rc_cmd = RTM_ADD, .rc_rt = rc->rc_rt };
269 if (NH_IS_NHGRP(rc->rc_nh_old)) {
270 wn_old = nhgrp_get_nhops((struct nhgrp_object *)rc->rc_nh_old, &num_old);
272 tmp.nh = rc->rc_nh_old;
273 tmp.weight = rc->rc_nh_weight;
277 if (NH_IS_NHGRP(rc->rc_nh_new)) {
278 wn_new = nhgrp_get_nhops((struct nhgrp_object *)rc->rc_nh_new, &num_new);
280 tmp.nh = rc->rc_nh_new;
281 tmp.weight = rc->rc_nh_weight;
286 /* Use the fact that each @wn array is sorted */
288 * Want to convert into set of add and delete operations
289 * [1] -> [1, 2] = A{2}
290 * [2] -> [1, 2] = A{1}
291 * [1, 2, 4]->[1, 3, 4] = A{2}, D{3}
292 * [1, 2, 4]->[1, 4] = D{2}
293 * [1, 2, 4] -> [3, 4] = D{1}, C{2,3} OR C{1,3}, D{2} OR D{1},D{2},A{3}
298 while ((idx_old < num_old) && (idx_new < num_new)) {
299 nh_idx_old = wn_old[idx_old].nh->nh_priv->nh_idx;
300 nh_idx_new = wn_new[idx_new].nh->nh_priv->nh_idx;
302 if (nh_idx_old == nh_idx_new) {
303 if (wn_old[idx_old].weight != wn_new[idx_new].weight) {
304 /* Update weight by providing del/add notifications */
305 rc_del.rc_nh_old = wn_old[idx_old].nh;
306 rc_del.rc_nh_weight = wn_old[idx_old].weight;
309 rc_add.rc_nh_new = wn_new[idx_new].nh;
310 rc_add.rc_nh_weight = wn_new[idx_new].weight;
315 } else if (nh_idx_old < nh_idx_new) {
317 * [1, ~2~, 4], [1, ~3~, 4]
318 * [1, ~2~, 5], [1, ~3~, 4]
319 * [1, ~2~], [1, ~3~, 4]
321 if ((idx_old + 1 >= num_old) ||
322 (wn_old[idx_old + 1].nh->nh_priv->nh_idx > nh_idx_new)) {
323 /* Add new unless the next old item is still <= new */
324 rc_add.rc_nh_new = wn_new[idx_new].nh;
325 rc_add.rc_nh_weight = wn_new[idx_new].weight;
329 /* In any case, delete current old */
330 rc_del.rc_nh_old = wn_old[idx_old].nh;
331 rc_del.rc_nh_weight = wn_old[idx_old].weight;
336 * nh_idx_old > nh_idx_new
338 * [1, ~3~, 4], [1, ~2~, 4]
339 * [1, ~3~, 5], [1, ~2~, 4]
340 * [1, ~3~, 4], [1, ~2~]
342 if ((idx_new + 1 >= num_new) ||
343 (wn_new[idx_new + 1].nh->nh_priv->nh_idx > nh_idx_old)) {
344 /* No next item or next item is > current one */
345 rc_add.rc_nh_new = wn_new[idx_new].nh;
346 rc_add.rc_nh_weight = wn_new[idx_new].weight;
350 /* In any case, delete current old */
351 rc_del.rc_nh_old = wn_old[idx_old].nh;
352 rc_del.rc_nh_weight = wn_old[idx_old].weight;
358 while (idx_old < num_old) {
359 rc_del.rc_nh_old = wn_old[idx_old].nh;
360 rc_del.rc_nh_weight = wn_old[idx_old].weight;
365 while (idx_new < num_new) {
366 rc_add.rc_nh_new = wn_new[idx_new].nh;
367 rc_add.rc_nh_weight = wn_new[idx_new].weight;
374 * Decompose multipath cmd info @rc into a list of add/del/change
375 * single-path operations, calling @cb callback for each operation.
376 * Assumes at least one of the nexthops in @rc is multipath.
379 rib_decompose_notification(struct rib_cmd_info *rc, route_notification_t *cb,
382 struct weightened_nhop *wn;
384 struct rib_cmd_info rc_new;
387 DPRINTF("cb=%p cmd=%d nh_old=%p nh_new=%p",
388 cb, rc->cmd, rc->nh_old, rc->nh_new);
389 switch (rc->rc_cmd) {
391 if (!NH_IS_NHGRP(rc->rc_nh_new))
393 wn = nhgrp_get_nhops((struct nhgrp_object *)rc->rc_nh_new, &num_nhops);
394 for (uint32_t i = 0; i < num_nhops; i++) {
395 rc_new.rc_nh_new = wn[i].nh;
396 rc_new.rc_nh_weight = wn[i].weight;
401 if (!NH_IS_NHGRP(rc->rc_nh_old))
403 wn = nhgrp_get_nhops((struct nhgrp_object *)rc->rc_nh_old, &num_nhops);
404 for (uint32_t i = 0; i < num_nhops; i++) {
405 rc_new.rc_nh_old = wn[i].nh;
406 rc_new.rc_nh_weight = wn[i].weight;
411 if (!NH_IS_NHGRP(rc->rc_nh_old) && !NH_IS_NHGRP(rc->rc_nh_new))
413 decompose_change_notification(rc, cb, cbdata);