2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2021 Ng Peng Nam Sean
5 * Copyright (c) 2022 Alexander V. Chernikov <melifaro@FreeBSD.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 #include <sys/param.h>
33 #include <sys/malloc.h>
35 #include <sys/mutex.h>
36 #include <sys/socket.h>
37 #include <sys/socketvar.h>
38 #include <sys/syslog.h>
40 #include <netlink/netlink.h>
41 #include <netlink/netlink_ctl.h>
42 #include <netlink/netlink_linux.h>
43 #include <netlink/netlink_var.h>
45 #define DEBUG_MOD_NAME nl_io
46 #define DEBUG_MAX_LEVEL LOG_DEBUG3
47 #include <netlink/netlink_debug.h>
48 _DECLARE_DEBUG(LOG_INFO);
51 * The logic below provides a p2p interface for receiving and
52 * sending netlink data between the kernel and userland.
/*
 * Source address stamped on every kernel-originated netlink datagram:
 * AF_NETLINK with nl_pid 0 (pid 0 identifies the kernel as the sender).
 * NOTE(review): this extract is missing interspersed lines of the
 * original file; the initializer's closing brace is not visible here.
 */
55 static const struct sockaddr_nl _nl_empty_src = {
56 .nl_len = sizeof(struct sockaddr_nl),
57 .nl_family = PF_NETLINK,
58 .nl_pid = 0 /* comes from the kernel */
/* Generic sockaddr alias used with the sbappendaddr*() calls below. */
60 static const struct sockaddr *nl_empty_src = (const struct sockaddr *)&_nl_empty_src;
62 static struct mbuf *nl_process_mbuf(struct mbuf *m, struct nlpcb *nlp);
/*
 * queue_push() - append packet chain @mq to I/O queue @q, adding each
 * packet's byte length to q->length.
 * NOTE(review): the loop header and braces are missing from this
 * extract; @m presumably iterates over the packets of @mq — confirm
 * against the full source.
 */
66 queue_push(struct nl_io_queue *q, struct mbuf *mq)
73 q->length += m_length(m, NULL);
74 STAILQ_INSERT_TAIL(&q->head, m, m_stailqpkt);
/*
 * queue_push_head() - put a single packet @m (asserted to have no
 * m_nextpkt linkage) back at the head of @q; used by the RX path to
 * re-queue a packet that could not be fully processed.
 */
79 queue_push_head(struct nl_io_queue *q, struct mbuf *m)
81 MPASS(m->m_nextpkt == NULL);
83 q->length += m_length(m, NULL);
84 STAILQ_INSERT_HEAD(&q->head, m, m_stailqpkt);
/*
 * queue_pop() - detach and return the first packet of @q, updating the
 * queue byte count.  The return statements are outside this extract.
 */
88 queue_pop(struct nl_io_queue *q)
90 if (!STAILQ_EMPTY(&q->head)) {
91 struct mbuf *m = STAILQ_FIRST(&q->head);
92 STAILQ_REMOVE_HEAD(&q->head, m_stailqpkt);
94 q->length -= m_length(m, NULL);
/* queue_head() - peek at the first packet of @q without removing it. */
102 queue_head(const struct nl_io_queue *q)
104 return (STAILQ_FIRST(&q->head));
/* queue_empty() - true when no queued bytes are accounted in @q. */
108 queue_empty(const struct nl_io_queue *q)
110 return (q->length == 0);
/*
 * queue_free() - drain all packets from @q.  The per-packet free and
 * any q->length reset are outside this extract.
 */
114 queue_free(struct nl_io_queue *q)
116 while (!STAILQ_EMPTY(&q->head)) {
117 struct mbuf *m = STAILQ_FIRST(&q->head);
118 STAILQ_REMOVE_HEAD(&q->head, m_stailqpkt);
/*
 * nl_add_msg_info() - append a NETLINK_MSG_INFO control mbuf to the
 * chain @m, carrying metadata (process id, netlink port id, ...) of
 * the nlpcb recovered from the current thread.  Early-return paths and
 * the full attribute container layout are outside this extract.
 */
126 nl_add_msg_info(struct mbuf *m)
128 struct nlpcb *nlp = nl_get_thread_nlp(curthread);
129 NL_LOG(LOG_DEBUG2, "Trying to recover nlp from thread %p: %p",
135 /* Prepare what we want to encode - PID, socket PID & msg seq */
/* Attribute: process id of the sender. */
141 .nla.nla_len = sizeof(struct nlattr) + sizeof(uint32_t),
142 .nla.nla_type = NLMSGINFO_ATTR_PROCESS_ID,
143 .val = nlp->nl_process_id,
/* Attribute: netlink port id; its value line is not visible here. */
146 .nla.nla_len = sizeof(struct nlattr) + sizeof(uint32_t),
147 .nla.nla_type = NLMSGINFO_ATTR_PORT_ID,
/* Walk to the last mbuf of the chain and hang the control data there. */
153 while (m->m_next != NULL)
155 m->m_next = sbcreatecontrol(data, sizeof(data),
156 NETLINK_MSG_INFO, SOL_NETLINK, M_NOWAIT);
158 NL_LOG(LOG_DEBUG2, "Storing %u bytes of data, ctl: %p",
159 (unsigned)sizeof(data), m->m_next);
/*
 * extract_msg_info() - scan the m_next chain of @m for an MT_CONTROL
 * mbuf (the NETLINK_MSG_INFO data added by nl_add_msg_info()) and,
 * presumably, unlink and return it — the unlink/return logic is
 * outside this extract; confirm against the full source.
 */
162 static __noinline struct mbuf *
163 extract_msg_info(struct mbuf *m)
165 while (m->m_next != NULL) {
166 if (m->m_next->m_type == MT_CONTROL) {
167 struct mbuf *ctl = m->m_next;
/*
 * nl_schedule_taskqueue() - enqueue the per-socket processing task at
 * most once; nl_task_pending guards against double-enqueueing until the
 * handler clears it.
 */
177 nl_schedule_taskqueue(struct nlpcb *nlp)
179 if (!nlp->nl_task_pending) {
180 nlp->nl_task_pending = true;
181 taskqueue_enqueue(nlp->nl_taskqueue, &nlp->nl_task);
182 NL_LOG(LOG_DEBUG3, "taskqueue scheduled");
184 NL_LOG(LOG_DEBUG3, "taskqueue schedule skipped");
/*
 * nl_receive_async() - entry point for data arriving on socket @so:
 * append @m to the socket send buffer and kick the taskqueue when the
 * socket is active; otherwise the data is ignored (the drop/free path
 * is outside this extract).
 */
189 nl_receive_async(struct mbuf *m, struct socket *so)
191 struct nlpcb *nlp = sotonlpcb(so);
198 if ((__predict_true(nlp->nl_active))) {
199 sbappend(&so->so_snd, m, 0);
200 NL_LOG(LOG_DEBUG3, "enqueue %u bytes", m_length(m, NULL));
201 nl_schedule_taskqueue(nlp);
203 NL_LOG(LOG_DEBUG, "ignoring %u bytes on non-active socket",
/*
 * tx_check_locked() - try to move packets from the internal TX overflow
 * queue onto the socket receive buffer, waking the reader if anything
 * was appended.  Returns true when the TX queue is (or becomes) empty.
 * Loop structure and some returns are outside this extract.
 */
215 tx_check_locked(struct nlpcb *nlp)
217 if (queue_empty(&nlp->tx_queue))
221 * Check if something can be moved from the internal TX queue
222 * to the socket queue.
225 bool appended = false;
226 struct sockbuf *sb = &nlp->nl_socket->so_rcv;
230 struct mbuf *m = queue_head(&nlp->tx_queue);
/* Split off the optional MT_CONTROL (msg info) chain before append. */
232 struct mbuf *ctl = NULL;
233 if (__predict_false(m->m_next != NULL))
234 ctl = extract_msg_info(m);
235 if (sbappendaddr_locked(sb, nl_empty_src, m, ctl) != 0) {
236 /* appended successfully */
237 queue_pop(&nlp->tx_queue);
248 sorwakeup(nlp->nl_socket);
250 return (queue_empty(&nlp->tx_queue));
/*
 * nl_process_received_one() - single pass of the receive worker: first
 * flush the TX overflow queue; if that drained, pull all pending bytes
 * from the socket send buffer into the private rx_queue (so parsing can
 * run without the socket lock) and process it packet by packet.  A
 * packet that cannot be finished is pushed back to the head of
 * rx_queue.  Locking and the return statements are outside this extract.
 */
254 nl_process_received_one(struct nlpcb *nlp)
256 bool reschedule = false;
259 nlp->nl_task_pending = false;
261 if (!tx_check_locked(nlp)) {
262 /* TX overflow queue still not empty, ignore RX */
267 if (queue_empty(&nlp->rx_queue)) {
269 * Grab all data we have from the socket TX queue
270 * and store it the internal queue, so it can be worked on
271 * w/o holding socket lock.
273 struct sockbuf *sb = &nlp->nl_socket->so_snd;
276 unsigned int avail = sbavail(sb);
278 NL_LOG(LOG_DEBUG3, "grabbed %u bytes", avail);
279 queue_push(&nlp->rx_queue, sbcut_locked(sb, avail));
283 /* Schedule another pass to read from the socket queue */
/* Track the TX overflow high-water mark across this pass. */
287 int prev_hiwat = nlp->tx_queue.hiwat;
290 while (!queue_empty(&nlp->rx_queue)) {
291 struct mbuf *m = queue_pop(&nlp->rx_queue);
293 m = nl_process_mbuf(m, nlp);
/* Non-NULL return: packet not fully consumed, retry it later. */
295 queue_push_head(&nlp->rx_queue, m);
300 if (nlp->tx_queue.hiwat > prev_hiwat) {
301 NLP_LOG(LOG_DEBUG, nlp, "TX override peaked to %d", nlp->tx_queue.hiwat);
/*
 * nl_process_received() - top-level receive loop: on first invocation
 * bind this nlpcb to the worker thread, then run single passes until
 * nl_process_received_one() reports no more work.
 */
309 nl_process_received(struct nlpcb *nlp)
311 NL_LOG(LOG_DEBUG3, "taskqueue called");
313 if (__predict_false(nlp->nl_need_thread_setup)) {
314 nl_set_thread_nlp(curthread, nlp);
316 nlp->nl_need_thread_setup = false;
320 while (nl_process_received_one(nlp))
/* nl_init_io() - initialize the per-socket RX/TX packet queues. */
325 nl_init_io(struct nlpcb *nlp)
327 STAILQ_INIT(&nlp->rx_queue.head);
328 STAILQ_INIT(&nlp->tx_queue.head);
/* nl_free_io() - release any packets still queued on the socket. */
332 nl_free_io(struct nlpcb *nlp)
334 queue_free(&nlp->rx_queue);
335 queue_free(&nlp->tx_queue);
339 * Called after some data have been read from the socket.
/*
 * Reports (and resets) the RX drop counters once userland has consumed
 * data, then reschedules the taskqueue so that blocked TX can make
 * progress.
 */
342 nl_on_transmit(struct nlpcb *nlp)
346 struct socket *so = nlp->nl_socket;
347 if (__predict_false(nlp->nl_dropped_bytes > 0 && so != NULL)) {
/* Snapshot and clear the counters before logging them. */
348 unsigned long dropped_bytes = nlp->nl_dropped_bytes;
349 unsigned long dropped_messages = nlp->nl_dropped_messages;
350 nlp->nl_dropped_bytes = 0;
351 nlp->nl_dropped_messages = 0;
353 struct sockbuf *sb = &so->so_rcv;
354 NLP_LOG(LOG_DEBUG, nlp,
355 "socket RX overflowed, %lu messages (%lu bytes) dropped. "
356 "bytes: [%u/%u] mbufs: [%u/%u]", dropped_messages, dropped_bytes,
357 sb->sb_ccc, sb->sb_hiwat, sb->sb_mbcnt, sb->sb_mbmax);
358 /* TODO: send netlink message */
361 nl_schedule_taskqueue(nlp);
/*
 * nl_taskqueue_handler() - taskqueue callback: enter the socket's vnet
 * context and run the receive processing loop.  (The matching
 * CURVNET_RESTORE is outside this extract.)
 */
366 nl_taskqueue_handler(void *_arg, int pending)
368 struct nlpcb *nlp = (struct nlpcb *)_arg;
370 CURVNET_SET(nlp->nl_socket->so_vnet);
371 nl_process_received(nlp);
/*
 * queue_push_tx() - stash @m on the TX overflow queue, mark the socket
 * TX-blocked and record a new queue-length high-water mark.
 */
375 static __noinline void
376 queue_push_tx(struct nlpcb *nlp, struct mbuf *m)
378 queue_push(&nlp->tx_queue, m);
379 nlp->nl_tx_blocked = true;
381 if (nlp->tx_queue.length > nlp->tx_queue.hiwat)
382 nlp->tx_queue.hiwat = nlp->tx_queue.length;
386 * Tries to send @m to the socket @nlp.
388 * @m: mbuf(s) to send. Consumed in any case.
389 * @nlp: socket to send to
390 * @num_messages: number of messages in @m
391 * @io_flags: combination of NL_IOF_* flags
393 * Returns true on success.
394 * If no queue overruns happened, wakes up socket owner.
397 nl_send_one(struct mbuf *m, struct nlpcb *nlp, int num_messages, int io_flags)
399 bool untranslated = io_flags & NL_IOF_UNTRANSLATED;
400 bool ignore_limits = io_flags & NL_IOF_IGNORE_LIMIT;
403 IF_DEBUG_LEVEL(LOG_DEBUG2) {
404 struct nlmsghdr *hdr = mtod(m, struct nlmsghdr *);
405 NLP_LOG(LOG_DEBUG2, nlp,
406 "TX mbuf len %u msgs %u msg type %d first hdrlen %u io_flags %X",
407 m_length(m, NULL), num_messages, hdr->nlmsg_type, hdr->nlmsg_len,
/* Optionally translate the messages to Linux netlink format first. */
411 if (__predict_false(nlp->nl_linux && linux_netlink_p != NULL && untranslated)) {
412 m = linux_netlink_p->mbufs_to_linux(nlp->nl_proto, m, nlp);
419 if (__predict_false(nlp->nl_socket == NULL)) {
/* Preserve ordering: if the overflow queue is non-empty, append there. */
425 if (!queue_empty(&nlp->tx_queue)) {
427 queue_push_tx(nlp, m);
436 struct socket *so = nlp->nl_socket;
/* Detach the optional control (msg info) chain before appending. */
437 struct mbuf *ctl = NULL;
438 if (__predict_false(m->m_next != NULL))
439 ctl = extract_msg_info(m);
440 if (sbappendaddr(&so->so_rcv, nl_empty_src, m, ctl) != 0) {
442 NLP_LOG(LOG_DEBUG3, nlp, "appended data & woken up");
/* Receive buffer full: overflow-queue the data or account the drop. */
445 queue_push_tx(nlp, m)
448 * Store dropped data so it can be reported
451 nlp->nl_dropped_bytes += m_length(m, NULL);
452 nlp->nl_dropped_messages += num_messages;
453 NLP_LOG(LOG_DEBUG2, nlp, "RX oveflow: %lu m (+%d), %lu b (+%d)",
454 (unsigned long)nlp->nl_dropped_messages, num_messages,
455 (unsigned long)nlp->nl_dropped_bytes, m_length(m, NULL));
/*
 * nl_receive_message() - validate and dispatch one netlink message
 * @hdr (with @remaining_length bytes left in the buffer) to the
 * per-protocol handler, translating Linux-format messages when needed,
 * and generate an ack/error reply when requested or on failure.
 * Declarations and return statements are partly outside this extract.
 */
467 nl_receive_message(struct nlmsghdr *hdr, int remaining_length,
468 struct nlpcb *nlp, struct nl_pstate *npt)
470 nl_handler_f handler = nl_handlers[nlp->nl_proto].cb;
473 NLP_LOG(LOG_DEBUG2, nlp, "msg len: %u type: %d: flags: 0x%X seq: %u pid: %u",
474 hdr->nlmsg_len, hdr->nlmsg_type, hdr->nlmsg_flags, hdr->nlmsg_seq,
/* Reject truncated or impossibly short messages up front. */
477 if (__predict_false(hdr->nlmsg_len > remaining_length)) {
478 NLP_LOG(LOG_DEBUG, nlp, "message is not entirely present: want %d got %d",
479 hdr->nlmsg_len, remaining_length);
481 } else if (__predict_false(hdr->nlmsg_len < sizeof(*hdr))) {
482 NL_LOG(LOG_DEBUG, "message too short: %d", hdr->nlmsg_len);
485 /* Stamp each message with sender pid */
486 hdr->nlmsg_pid = nlp->nl_port;
/* Only NLM_F_REQUEST messages of non-reserved type reach the handler. */
490 if (hdr->nlmsg_flags & NLM_F_REQUEST && hdr->nlmsg_type >= NLMSG_MIN_TYPE) {
491 NL_LOG(LOG_DEBUG2, "handling message with msg type: %d",
/* Linux-origin sockets: convert the message to kernel format first. */
494 if (nlp->nl_linux && linux_netlink_p != NULL) {
495 struct nlmsghdr *hdr_orig = hdr;
496 hdr = linux_netlink_p->msg_from_linux(nlp->nl_proto, hdr, npt);
498 /* Failed to translate to kernel format. Report an error back */
501 if (hdr->nlmsg_flags & NLM_F_ACK)
502 nlmsg_ack(nlp, EOPNOTSUPP, hdr, npt);
506 error = handler(hdr, npt);
507 NL_LOG(LOG_DEBUG2, "retcode: %d", error);
/* Ack on explicit request or on any error except EINTR. */
509 if ((hdr->nlmsg_flags & NLM_F_ACK) || (error != 0 && error != EINTR)) {
510 if (!npt->nw->suppress_ack) {
511 NL_LOG(LOG_DEBUG3, "ack");
512 nlmsg_ack(nlp, error, hdr, npt);
/*
 * npt_clear() - reset per-message parser state between messages; only
 * the suppress_ack reset is visible in this extract.
 */
520 npt_clear(struct nl_pstate *npt)
527 npt->nw->suppress_ack = false;
530 * Processes an incoming packet, which can contain multiple netlink messages
/*
 * nl_process_mbuf() - linearize packet @m into a malloc'ed buffer (with
 * trailing scratch space used as the parser's linear allocator) and run
 * each contained netlink message through nl_receive_message(); stops
 * early on error or when the socket becomes TX-blocked.
 * NOTE(review): the function tail (return value / re-queue handling)
 * lies past the end of this extract.
 */
534 nl_process_mbuf(struct mbuf *m, struct nlpcb *nlp)
536 int offset, buffer_length;
537 struct nlmsghdr *hdr;
541 NL_LOG(LOG_DEBUG3, "RX netlink mbuf %p on %p", m, nlp->nl_socket);
543 struct nl_writer nw = {};
544 if (!nlmsg_get_unicast_writer(&nw, NLMSG_SMALL, nlp)) {
546 NL_LOG(LOG_DEBUG, "error allocating socket writer");
/* Replies must not be dropped due to socket buffer limits. */
550 nlmsg_ignore_limit(&nw);
551 /* TODO: alloc this buf once for nlp */
552 int data_length = m_length(m, NULL);
553 buffer_length = roundup2(data_length, 8) + SCRATCH_BUFFER_SIZE;
555 buffer_length += roundup2(data_length, 8);
556 buffer = malloc(buffer_length, M_NETLINK, M_NOWAIT | M_ZERO);
557 if (buffer == NULL) {
560 NL_LOG(LOG_DEBUG, "Unable to allocate %d bytes of memory",
/* Copy the mbuf chain out so messages can be parsed in place. */
564 m_copydata(m, 0, data_length, buffer);
566 struct nl_pstate npt = {
568 .lb.base = &buffer[roundup2(data_length, 8)],
569 .lb.size = buffer_length - roundup2(data_length, 8),
571 .strict = nlp->nl_flags & NLF_STRICT,
/* Walk the buffer one aligned netlink message at a time. */
574 for (offset = 0; offset + sizeof(struct nlmsghdr) <= data_length;) {
575 hdr = (struct nlmsghdr *)&buffer[offset];
576 /* Save length prior to calling handler */
577 int msglen = NLMSG_ALIGN(hdr->nlmsg_len);
578 NL_LOG(LOG_DEBUG3, "parsing offset %d/%d", offset, data_length);
580 error = nl_receive_message(hdr, data_length - offset, nlp, &npt);
582 if (__predict_false(error != 0 || nlp->nl_tx_blocked))
585 NL_LOG(LOG_DEBUG3, "packet parsing done");
586 free(buffer, M_NETLINK);
589 if (nlp->nl_tx_blocked) {
591 nlp->nl_tx_blocked = false;