2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
3 * The Regents of the University of California.
4 * Copyright (c) 2008 Robert N. M. Watson
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 4. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * @(#)udp_usrreq.c 8.6 (Berkeley) 5/23/95
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
38 #include "opt_inet6.h"
39 #include "opt_ipsec.h"
41 #include <sys/param.h>
42 #include <sys/domain.h>
43 #include <sys/eventhandler.h>
45 #include <sys/kernel.h>
47 #include <sys/malloc.h>
51 #include <sys/protosw.h>
52 #include <sys/signalvar.h>
53 #include <sys/socket.h>
54 #include <sys/socketvar.h>
56 #include <sys/sysctl.h>
57 #include <sys/syslog.h>
58 #include <sys/systm.h>
63 #include <net/route.h>
65 #include <netinet/in.h>
66 #include <netinet/in_pcb.h>
67 #include <netinet/in_systm.h>
68 #include <netinet/in_var.h>
69 #include <netinet/ip.h>
71 #include <netinet/ip6.h>
73 #include <netinet/ip_icmp.h>
74 #include <netinet/icmp_var.h>
75 #include <netinet/ip_var.h>
76 #include <netinet/ip_options.h>
78 #include <netinet6/ip6_var.h>
80 #include <netinet/udp.h>
81 #include <netinet/udp_var.h>
84 #include <netipsec/ipsec.h>
85 #include <netipsec/esp.h>
88 #include <machine/in_cksum.h>
90 #include <security/mac/mac_framework.h>
/*
 * NOTE(review): excerpt — each line carries its original file line number and
 * many interior lines (including comment delimiters) are elided. Code below
 * is preserved verbatim.
 *
 * File-scope tunables, statistics, and prototypes for the UDP layer.
 */
93 * UDP protocol implementation.
94 * Per RFC 768, August, 1980.
/* udp_cksum: enable/disable transmit checksums, per-VNET, sysctl-tunable. */
98 * BSD 4.2 defaulted the udp checksum to be off. Turning off udp checksums
99 * removes the only data integrity mechanism for packets and malformed
100 * packets that would otherwise be discarded due to bad checksums, and may
101 * cause problems (especially for NFS data blocks).
103 VNET_DEFINE(int, udp_cksum) = 1;
104 SYSCTL_VNET_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW,
105 &VNET_NAME(udp_cksum), 0, "compute udp checksum");
/* Log datagrams arriving for ports with no listener (global, not per-VNET). */
107 int udp_log_in_vain = 0;
108 SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
109 &udp_log_in_vain, 0, "Log all incoming UDP packets");
/* When set, suppress ICMP port-unreachable responses (per-VNET). */
111 VNET_DEFINE(int, udp_blackhole) = 0;
112 SYSCTL_VNET_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW,
113 &VNET_NAME(udp_blackhole), 0,
114 "Do not send port unreachables for refused connects");
/* Default socket-buffer reservations used by udp_attach()/soreserve(). */
116 u_long udp_sendspace = 9216; /* really max datagram size */
117 /* 40 1K datagrams */
118 SYSCTL_ULONG(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLFLAG_RW,
119 &udp_sendspace, 0, "Maximum outgoing UDP datagram size");
/* NOTE(review): lines elided inside this initializer (conditional on INET6). */
121 u_long udp_recvspace = 40 * (1024 +
123 sizeof(struct sockaddr_in6)
125 sizeof(struct sockaddr_in)
129 SYSCTL_ULONG(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
130 &udp_recvspace, 0, "Maximum space for incoming UDP datagrams");
/* Per-VNET PCB list/info and the UMA zone backing struct udpcb. */
132 VNET_DEFINE(struct inpcbhead, udb); /* from udp_var.h */
133 VNET_DEFINE(struct inpcbinfo, udbinfo);
134 static VNET_DEFINE(uma_zone_t, udpcb_zone);
135 #define V_udpcb_zone VNET(udpcb_zone)
/* Size of the inpcb hash tables created in udp_init(). */
138 #define UDBHASHSIZE 128
141 VNET_DEFINE(struct udpstat, udpstat); /* from udp_var.h */
142 SYSCTL_VNET_STRUCT(_net_inet_udp, UDPCTL_STATS, stats, CTLFLAG_RW,
143 &VNET_NAME(udpstat), udpstat,
144 "UDP statistics (struct udpstat, netinet/udp_var.h)");
/* Forward declarations for static functions defined later in this file. */
146 static void udp_detach(struct socket *so);
147 static int udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
148 struct mbuf *, struct thread *);
/* IPsec NAT-T: mask of both ESP-in-UDP encapsulation flag bits. */
151 #define UF_ESPINUDP_ALL (UF_ESPINUDP_NON_IKE|UF_ESPINUDP)
153 static struct mbuf *udp4_espdecap(struct inpcb *, struct mbuf *, int);
155 #endif /* IPSEC_NAT_T */
/*
 * Event handler invoked on maxsockets changes: resize the caps of the two
 * UMA zones (inpcb and udpcb) to match. (Excerpt; interior lines elided.)
 */
159 udp_zone_change(void *tag)
162 uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets);
163 uma_zone_set_max(V_udpcb_zone, maxsockets);
/*
 * UMA zone init callback for UDP inpcbs: initialize the per-inpcb lock.
 * (Excerpt; interior lines elided.)
 */
167 udp_inpcb_init(void *mem, int size, int flags)
172 INP_LOCK_INIT(inp, "inp", "udpinp");
/*
 * NOTE(review): function header is elided from this excerpt — presumably the
 * body of udp_init(); confirm against the full source.
 *
 * Sets up the per-VNET UDP PCB infrastructure: the pcbinfo lock, the PCB and
 * port hash tables, the inpcb and udpcb UMA zones (capped at maxsockets),
 * and registers the maxsockets_change event handler.
 */
181 INP_INFO_LOCK_INIT(&V_udbinfo, "udp");
184 V_udbinfo.ipi_vnet = curvnet;
186 V_udbinfo.ipi_listhead = &V_udb;
187 V_udbinfo.ipi_hashbase = hashinit(UDBHASHSIZE, M_PCB,
188 &V_udbinfo.ipi_hashmask);
189 V_udbinfo.ipi_porthashbase = hashinit(UDBHASHSIZE, M_PCB,
190 &V_udbinfo.ipi_porthashmask);
191 V_udbinfo.ipi_zone = uma_zcreate("udp_inpcb", sizeof(struct inpcb),
192 NULL, NULL, udp_inpcb_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
193 uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets);
195 V_udpcb_zone = uma_zcreate("udpcb", sizeof(struct udpcb),
196 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
197 uma_zone_set_max(V_udpcb_zone, maxsockets);
199 EVENTHANDLER_REGISTER(maxsockets_change, udp_zone_change, NULL,
200 EVENTHANDLER_PRI_ANY);
204 * Kernel module interface for updating udpstat. The argument is an index
205 * into udpstat treated as an array of u_long. While this encodes the
206 * general layout of udpstat into the caller, it doesn't encode its location,
207 * so that future changes to add, for example, per-CPU stats support won't
208 * cause binary compatibility problems for kernel modules.
/* (Excerpt; interior lines elided.) */
211 kmod_udpstat_inc(int statnum)
/* Bump the statnum'th u_long counter inside the per-VNET udpstat struct. */
214 (*((u_long *)&V_udpstat + statnum))++;
/*
 * Allocate a struct udpcb for inp from the udpcb zone (non-blocking,
 * zeroed). (Excerpt; the attach of up to inp and the return are elided.)
 */
218 udp_newudpcb(struct inpcb *inp)
222 up = uma_zalloc(V_udpcb_zone, M_NOWAIT | M_ZERO);
/* Release a udpcb back to its UMA zone. (Excerpt; interior lines elided.) */
230 udp_discardcb(struct udpcb *up)
233 uma_zfree(V_udpcb_zone, up);
/*
 * NOTE(review): function header elided — presumably the VNET teardown path
 * (udp_destroy); confirm against the full source. Frees the hash tables and
 * zones created in udp_init() and destroys the pcbinfo lock.
 */
241 hashdestroy(V_udbinfo.ipi_hashbase, M_PCB,
242 V_udbinfo.ipi_hashmask);
243 hashdestroy(V_udbinfo.ipi_porthashbase, M_PCB,
244 V_udbinfo.ipi_porthashmask);
246 uma_zdestroy(V_udpcb_zone);
247 uma_zdestroy(V_udbinfo.ipi_zone);
248 INP_INFO_LOCK_DESTROY(&V_udbinfo);
253 * Subroutine of udp_input(), which appends the provided mbuf chain to the
254 * passed pcb/socket. The caller must provide a sockaddr_in via udp_in that
255 * contains the source address. If the socket ends up being an IPv6 socket,
256 * udp_append() will convert to a sockaddr_in6 before passing the address
257 * into the socket code.
/* (Excerpt; interior lines, #ifdef pairs and error paths are elided.) */
260 udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
261 struct sockaddr_in *udp_in)
263 struct sockaddr *append_sa;
265 struct mbuf *opts = 0;
267 struct sockaddr_in6 udp_in6;
/* Caller holds at least a read lock on the inpcb. */
277 INP_RLOCK_ASSERT(inp);
280 /* Check AH/ESP integrity. */
281 if (ipsec4_in_reject(n, inp)) {
283 V_ipsec4stat.in_polvio++;
/* IPsec NAT-T: decapsulate ESP-in-UDP if the socket asked for it. */
289 KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
290 if (up->u_flags & UF_ESPINUDP_ALL) { /* IPSec UDP encaps. */
291 n = udp4_espdecap(inp, n, off);
292 if (n == NULL) /* Consumed. */
296 #endif /* IPSEC_NAT_T */
/* MAC framework may veto delivery to this socket. */
299 if (mac_inpcb_check_deliver(inp, n) != 0) {
/* Build ancillary data (control mbufs) if the socket requested any. */
304 if (inp->inp_flags & INP_CONTROLOPTS ||
305 inp->inp_socket->so_options & (SO_TIMESTAMP | SO_BINTIME)) {
307 if (inp->inp_vflag & INP_IPV6)
308 (void)ip6_savecontrol_v4(inp, n, &opts, NULL);
311 ip_savecontrol(inp, &opts, ip, n);
/* IPv6 socket: present the v4 source as a v4-mapped sockaddr_in6. */
314 if (inp->inp_vflag & INP_IPV6) {
315 bzero(&udp_in6, sizeof(udp_in6));
316 udp_in6.sin6_len = sizeof(udp_in6);
317 udp_in6.sin6_family = AF_INET6;
318 in6_sin_2_v4mapsin6(udp_in, &udp_in6);
319 append_sa = (struct sockaddr *)&udp_in6;
322 append_sa = (struct sockaddr *)udp_in;
/* Queue datagram + address (+opts) on the receive buffer and wake reader. */
325 so = inp->inp_socket;
326 SOCKBUF_LOCK(&so->so_rcv);
327 if (sbappendaddr_locked(&so->so_rcv, append_sa, n, opts) == 0) {
328 SOCKBUF_UNLOCK(&so->so_rcv);
332 UDPSTAT_INC(udps_fullsock);
334 sorwakeup_locked(so);
/*
 * IPv4 UDP input path, called from the IP layer with the packet in m and
 * the IP header length implied by off. Validates headers and checksum,
 * then delivers either to every matching PCB (broadcast/multicast) or to
 * the single best-match PCB (unicast), generating ICMP port-unreachable
 * for unmatched unicast datagrams unless blackholing is enabled.
 * (Excerpt; many interior lines — declarations, braces, goto labels and
 * some conditionals — are elided. Code preserved verbatim.)
 */
338 udp_input(struct mbuf *m, int off)
348 struct sockaddr_in udp_in;
349 #ifdef IPFIREWALL_FORWARD
350 struct m_tag *fwd_tag;
353 ifp = m->m_pkthdr.rcvif;
354 UDPSTAT_INC(udps_ipackets);
357 * Strip IP options, if any; should skip this, make available to
358 * user, and use on returned packets, but we don't yet have a way to
359 * check the checksum with options still present.
361 if (iphlen > sizeof (struct ip)) {
362 ip_stripoptions(m, (struct mbuf *)0);
363 iphlen = sizeof(struct ip);
367 * Get IP and UDP header together in first mbuf.
369 ip = mtod(m, struct ip *);
370 if (m->m_len < iphlen + sizeof(struct udphdr)) {
371 if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == 0) {
372 UDPSTAT_INC(udps_hdrops);
375 ip = mtod(m, struct ip *);
377 uh = (struct udphdr *)((caddr_t)ip + iphlen);
380 * Destination port of 0 is illegal, based on RFC768.
382 if (uh->uh_dport == 0)
386 * Construct sockaddr format source address. Stuff source address
387 * and datagram in user buffer.
389 bzero(&udp_in, sizeof(udp_in));
390 udp_in.sin_len = sizeof(udp_in);
391 udp_in.sin_family = AF_INET;
392 udp_in.sin_port = uh->uh_sport;
393 udp_in.sin_addr = ip->ip_src;
396 * Make mbuf data length reflect UDP length. If not enough data to
397 * reflect UDP length, drop.
399 len = ntohs((u_short)uh->uh_ulen);
400 if (ip->ip_len != len) {
401 if (len > ip->ip_len || len < sizeof(struct udphdr)) {
402 UDPSTAT_INC(udps_badlen);
405 m_adj(m, len - ip->ip_len);
406 /* ip->ip_len = len; */
410 * Save a copy of the IP header in case we want restore it for
411 * sending an ICMP error message in response.
413 if (!V_udp_blackhole)
416 memset(&save_ip, 0, sizeof(save_ip));
419 * Checksum extended UDP header and data.
/* Prefer hardware-validated checksums; fall back to software in_cksum(). */
424 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
425 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
426 uh_sum = m->m_pkthdr.csum_data;
428 uh_sum = in_pseudo(ip->ip_src.s_addr,
429 ip->ip_dst.s_addr, htonl((u_short)len +
430 m->m_pkthdr.csum_data + IPPROTO_UDP));
/* Software checksum: overlay pseudo-header on the IP header, then restore. */
435 bcopy(((struct ipovly *)ip)->ih_x1, b, 9);
436 bzero(((struct ipovly *)ip)->ih_x1, 9);
437 ((struct ipovly *)ip)->ih_len = uh->uh_ulen;
438 uh_sum = in_cksum(m, len + sizeof (struct ip));
439 bcopy(b, ((struct ipovly *)ip)->ih_x1, 9);
442 UDPSTAT_INC(udps_badsum);
447 UDPSTAT_INC(udps_nosum);
449 #ifdef IPFIREWALL_FORWARD
451 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
453 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
454 if (fwd_tag != NULL) {
455 struct sockaddr_in *next_hop;
460 next_hop = (struct sockaddr_in *)(fwd_tag + 1);
461 ip->ip_dst = next_hop->sin_addr;
462 uh->uh_dport = ntohs(next_hop->sin_port);
465 * Remove the tag from the packet. We don't need it anymore.
467 m_tag_delete(m, fwd_tag);
/* Broadcast/multicast: walk the whole PCB list and deliver a copy to each
 * matching socket, honoring RFC 3678 source filters. */
471 INP_INFO_RLOCK(&V_udbinfo);
472 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
473 in_broadcast(ip->ip_dst, ifp)) {
475 struct ip_moptions *imo;
478 LIST_FOREACH(inp, &V_udb, inp_list) {
479 if (inp->inp_lport != uh->uh_dport)
482 if ((inp->inp_vflag & INP_IPV4) == 0)
485 if (inp->inp_laddr.s_addr != INADDR_ANY &&
486 inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
488 if (inp->inp_faddr.s_addr != INADDR_ANY &&
489 inp->inp_faddr.s_addr != ip->ip_src.s_addr)
491 if (inp->inp_fport != 0 &&
492 inp->inp_fport != uh->uh_sport)
498 * Detached PCBs can linger in the list if someone
499 * holds a reference. (e.g. udp_pcblist)
501 if (inp->inp_socket == NULL) {
507 * Handle socket delivery policy for any-source
508 * and source-specific multicast. [RFC3678]
510 imo = inp->inp_moptions;
511 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
513 struct sockaddr_in group;
516 bzero(&group, sizeof(struct sockaddr_in));
517 group.sin_len = sizeof(struct sockaddr_in);
518 group.sin_family = AF_INET;
519 group.sin_addr = ip->ip_dst;
521 blocked = imo_multi_filter(imo, ifp,
522 (struct sockaddr *)&group,
523 (struct sockaddr *)&udp_in);
524 if (blocked != MCAST_PASS) {
525 if (blocked == MCAST_NOTGMEMBER)
526 IPSTAT_INC(ips_notmember);
527 if (blocked == MCAST_NOTSMEMBER ||
528 blocked == MCAST_MUTED)
529 UDPSTAT_INC(udps_filtermcast);
/* Multiple matches: give the previous match ('last') a copy and keep m
 * for the final match, so the original mbuf is consumed exactly once. */
537 n = m_copy(m, 0, M_COPYALL);
538 up = intoudpcb(last);
539 if (up->u_tun_func == NULL) {
544 sizeof(struct udphdr),
548 * Engage the tunneling protocol we
549 * will have to leave the info_lock
550 * up, since we are hunting through
554 (*up->u_tun_func)(n, iphlen, last);
560 * Don't look for additional matches if this one does
561 * not have either the SO_REUSEPORT or SO_REUSEADDR
562 * socket options set. This heuristic avoids
563 * searching through all pcbs in the common case of a
564 * non-shared port. It assumes that an application
565 * will never clear these options after setting them.
567 if ((last->inp_socket->so_options &
568 (SO_REUSEPORT|SO_REUSEADDR)) == 0)
574 * No matching pcb found; discard datagram. (No need
575 * to send an ICMP Port Unreachable for a broadcast
576 * or multicast datgram.)
578 UDPSTAT_INC(udps_noportbcast);
/* Deliver the original mbuf to the last matching PCB. */
581 up = intoudpcb(last);
582 if (up->u_tun_func == NULL) {
583 udp_append(last, ip, m, iphlen + sizeof(struct udphdr),
587 * Engage the tunneling protocol.
589 (*up->u_tun_func)(m, iphlen, last);
592 INP_INFO_RUNLOCK(&V_udbinfo);
597 * Locate pcb for datagram.
599 inp = in_pcblookup_hash(&V_udbinfo, ip->ip_src, uh->uh_sport,
600 ip->ip_dst, uh->uh_dport, 1, ifp);
/* No listener: optionally log, count, and (rate-limited) send ICMP
 * port-unreachable unless the packet was broadcast/multicast. */
602 if (udp_log_in_vain) {
603 char buf[4*sizeof "123"];
605 strcpy(buf, inet_ntoa(ip->ip_dst));
607 "Connection attempt to UDP %s:%d from %s:%d\n",
608 buf, ntohs(uh->uh_dport), inet_ntoa(ip->ip_src),
609 ntohs(uh->uh_sport));
611 UDPSTAT_INC(udps_noport);
612 if (m->m_flags & (M_BCAST | M_MCAST)) {
613 UDPSTAT_INC(udps_noportbcast);
618 if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
621 ip->ip_len += iphlen;
622 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
623 INP_INFO_RUNLOCK(&V_udbinfo);
628 * Check the minimum TTL for socket.
631 INP_INFO_RUNLOCK(&V_udbinfo);
634 * Detached PCBs can linger in the hash table if someone holds a
635 * reference. (e.g. udp_pcblist)
637 if (inp->inp_socket == NULL) {
641 if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl) {
/* Unicast delivery: either normal append or the in-kernel tunnel hook. */
646 if (up->u_tun_func == NULL) {
647 udp_append(inp, ip, m, iphlen + sizeof(struct udphdr), &udp_in);
650 * Engage the tunneling protocol.
653 (*up->u_tun_func)(m, iphlen, inp);
661 INP_INFO_RUNLOCK(&V_udbinfo);
667 * Notify a udp user of an asynchronous error; just wake up so that they can
668 * collect error status.
/* (Excerpt; interior lines elided.) */
671 udp_notify(struct inpcb *inp, int errno)
675 * While udp_ctlinput() always calls udp_notify() with a read lock
676 * when invoking it directly, in_pcbnotifyall() currently uses write
677 * locks due to sharing code with TCP. For now, accept either a read
678 * or a write lock, but a read lock is sufficient.
680 INP_LOCK_ASSERT(inp);
/* Record the error and wake both sleepers so they observe so_error. */
682 inp->inp_socket->so_error = errno;
683 sorwakeup(inp->inp_socket);
684 sowwakeup(inp->inp_socket);
/*
 * Control-input handler: maps ICMP-derived commands onto affected PCBs.
 * For commands carrying an inner IP header, notify only the exact matching
 * connection; for PRC_HOSTDEAD, notify every PCB talking to faddr.
 * (Excerpt; interior lines elided.)
 */
689 udp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
693 struct in_addr faddr;
696 faddr = ((struct sockaddr_in *)sa)->sin_addr;
697 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
701 * Redirects don't need to be handled up here.
703 if (PRC_IS_REDIRECT(cmd))
707 * Hostdead is ugly because it goes linearly through all PCBs.
709 * XXX: We never get this from ICMP, otherwise it makes an excellent
710 * DoS attack on machines with many connections.
712 if (cmd == PRC_HOSTDEAD)
714 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
/* vip carries the offending IP header; find the UDP header behind it. */
717 uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
718 INP_INFO_RLOCK(&V_udbinfo);
719 inp = in_pcblookup_hash(&V_udbinfo, faddr, uh->uh_dport,
720 ip->ip_src, uh->uh_sport, 0, NULL);
723 if (inp->inp_socket != NULL) {
724 udp_notify(inp, inetctlerrmap[cmd]);
728 INP_INFO_RUNLOCK(&V_udbinfo);
730 in_pcbnotifyall(&V_udbinfo, faddr, inetctlerrmap[cmd],
/*
 * sysctl handler exporting the list of UDP PCBs as xinpcb records bracketed
 * by two xinpgen generation stamps, so userland (netstat) can detect
 * concurrent modification and retry. (Excerpt; interior lines elided.)
 */
735 udp_pcblist(SYSCTL_HANDLER_ARGS)
738 struct inpcb *inp, **inp_list;
743 * The process of preparing the PCB list is too time-consuming and
744 * resource-intensive to repeat twice on every request.
/* Size-probe only: estimate with slack rather than building the list. */
746 if (req->oldptr == 0) {
747 n = V_udbinfo.ipi_count;
748 n += imax(n / 8, 10);
749 req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
753 if (req->newptr != 0)
757 * OK, now we're committed to doing something.
759 INP_INFO_RLOCK(&V_udbinfo);
760 gencnt = V_udbinfo.ipi_gencnt;
761 n = V_udbinfo.ipi_count;
762 INP_INFO_RUNLOCK(&V_udbinfo);
764 error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
765 + n * sizeof(struct xinpcb));
/* Leading generation stamp. */
769 xig.xig_len = sizeof xig;
771 xig.xig_gen = gencnt;
772 xig.xig_sogen = so_gencnt;
773 error = SYSCTL_OUT(req, &xig, sizeof xig);
777 inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
/* Snapshot up to n referenced PCBs visible to the caller's credentials. */
781 INP_INFO_RLOCK(&V_udbinfo);
782 for (inp = LIST_FIRST(V_udbinfo.ipi_listhead), i = 0; inp && i < n;
783 inp = LIST_NEXT(inp, inp_list)) {
785 if (inp->inp_gencnt <= gencnt &&
786 cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
792 INP_INFO_RUNLOCK(&V_udbinfo);
/* Copy each snapshotted PCB out as an xinpcb. */
796 for (i = 0; i < n; i++) {
799 if (inp->inp_gencnt <= gencnt) {
802 bzero(&xi, sizeof(xi));
803 xi.xi_len = sizeof xi;
804 /* XXX should avoid extra copy */
805 bcopy(inp, &xi.xi_inp, sizeof *inp);
807 sotoxsocket(inp->inp_socket, &xi.xi_socket);
808 xi.xi_inp.inp_gencnt = inp->inp_gencnt;
810 error = SYSCTL_OUT(req, &xi, sizeof xi);
/* Drop the references taken during the snapshot pass. */
814 INP_INFO_WLOCK(&V_udbinfo);
815 for (i = 0; i < n; i++) {
818 if (!in_pcbrele(inp))
821 INP_INFO_WUNLOCK(&V_udbinfo);
825 * Give the user an updated idea of our state. If the
826 * generation differs from what we told her before, she knows
827 * that something happened while we were processing this
828 * request, and it might be necessary to retry.
830 INP_INFO_RLOCK(&V_udbinfo);
831 xig.xig_gen = V_udbinfo.ipi_gencnt;
832 xig.xig_sogen = so_gencnt;
833 xig.xig_count = V_udbinfo.ipi_count;
834 INP_INFO_RUNLOCK(&V_udbinfo);
835 error = SYSCTL_OUT(req, &xig, sizeof xig);
837 free(inp_list, M_TEMP);
/* sysctl registration for net.inet.udp.pcblist. */
841 SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist,
842 CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
843 udp_pcblist, "S,xinpcb", "List of active UDP sockets");
/*
 * sysctl handler: given a {foreign, local} sockaddr_in pair, return the
 * xucred of the matching UDP connection. Requires PRIV_NETINET_GETCRED and
 * respects inpcb visibility checks. (Excerpt; interior lines elided.)
 */
846 udp_getcred(SYSCTL_HANDLER_ARGS)
849 struct sockaddr_in addrs[2];
853 error = priv_check(req->td, PRIV_NETINET_GETCRED);
856 error = SYSCTL_IN(req, addrs, sizeof(addrs));
/* addrs[0] = foreign endpoint, addrs[1] = local endpoint (netstat order). */
859 INP_INFO_RLOCK(&V_udbinfo);
860 inp = in_pcblookup_hash(&V_udbinfo, addrs[1].sin_addr, addrs[1].sin_port,
861 addrs[0].sin_addr, addrs[0].sin_port, 1, NULL);
864 INP_INFO_RUNLOCK(&V_udbinfo);
865 if (inp->inp_socket == NULL)
868 error = cr_canseeinpcb(req->td->td_ucred, inp);
870 cru2x(inp->inp_cred, &xuc);
873 INP_INFO_RUNLOCK(&V_udbinfo);
877 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
/* sysctl registration for net.inet.udp.getcred. */
881 SYSCTL_PROC(_net_inet_udp, OID_AUTO, getcred,
882 CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
883 udp_getcred, "S,xucred", "Get the xucred of a UDP connection");
/*
 * Socket-option handler for UDP. Non-IPPROTO_UDP levels are forwarded to
 * the IP/IPv6 layer; at the UDP level only the IPsec NAT-T UDP_ENCAP
 * option is handled here. (Excerpt; interior lines and #ifdef pairs elided.)
 */
886 udp_ctloutput(struct socket *so, struct sockopt *sopt)
888 int error = 0, optval;
895 KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
/* Punt non-UDP-level options down to the appropriate IP layer. */
897 if (sopt->sopt_level != IPPROTO_UDP) {
899 if (INP_CHECK_SOCKAF(so, AF_INET6)) {
901 error = ip6_ctloutput(so, sopt);
905 error = ip_ctloutput(so, sopt);
912 switch (sopt->sopt_dir) {
914 switch (sopt->sopt_name) {
/* SOPT_SET / UDP_ENCAP: copy in the encapsulation mode. */
917 error = sooptcopyin(sopt, &optval, sizeof optval,
922 KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
926 KASSERT(up != NULL, ("%s: up == NULL", __func__));
930 /* Clear all UDP encap. */
932 up->u_flags &= ~UF_ESPINUDP_ALL;
936 case UDP_ENCAP_ESPINUDP:
937 case UDP_ENCAP_ESPINUDP_NON_IKE:
938 up->u_flags &= ~UF_ESPINUDP_ALL;
939 if (optval == UDP_ENCAP_ESPINUDP)
940 up->u_flags |= UF_ESPINUDP;
941 else if (optval == UDP_ENCAP_ESPINUDP_NON_IKE)
942 up->u_flags |= UF_ESPINUDP_NON_IKE;
/* SOPT_GET / UDP_ENCAP: report the current encapsulation flags. */
958 switch (sopt->sopt_name) {
962 KASSERT(up != NULL, ("%s: up == NULL", __func__));
963 optval = up->u_flags & UF_ESPINUDP_ALL;
965 error = sooptcopyout(sopt, &optval, sizeof optval);
/*
 * Transmit one UDP datagram on inp. Parses optional control messages
 * (IP_SENDSRCADDR, IP_TOS), chooses the minimal locking strategy (write
 * lock only when an implicit bind is needed, read lock for temporary
 * connects, inpcb lock only for connected fast path), resolves local and
 * foreign endpoints, prepends the udpiphdr, fills in the checksum and
 * hands the packet to ip_output().
 * (Excerpt; many interior lines — declarations, braces, error paths,
 * 'release:' label and returns — are elided. Code preserved verbatim.)
 */
979 udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
980 struct mbuf *control, struct thread *td)
983 int len = m->m_pkthdr.len;
984 struct in_addr faddr, laddr;
986 struct sockaddr_in *sin, src;
989 u_short fport, lport;
994 * udp_output() may need to temporarily bind or connect the current
995 * inpcb. As such, we don't know up front whether we will need the
996 * pcbinfo lock or not. Do any work to decide what is needed up
997 * front before acquiring any locks.
/* Refuse datagrams that cannot fit in a single IP packet. */
999 if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
1007 tos = inp->inp_ip_tos;
1008 if (control != NULL) {
1010 * XXX: Currently, we assume all the optional information is
1011 * stored in a single mbuf.
1013 if (control->m_next) {
/* Walk the cmsghdr chain inside the single control mbuf. */
1018 for (; control->m_len > 0;
1019 control->m_data += CMSG_ALIGN(cm->cmsg_len),
1020 control->m_len -= CMSG_ALIGN(cm->cmsg_len)) {
1021 cm = mtod(control, struct cmsghdr *);
1022 if (control->m_len < sizeof(*cm) || cm->cmsg_len == 0
1023 || cm->cmsg_len > control->m_len) {
1027 if (cm->cmsg_level != IPPROTO_IP)
1030 switch (cm->cmsg_type) {
1031 case IP_SENDSRCADDR:
1033 CMSG_LEN(sizeof(struct in_addr))) {
/* Build a temporary source sockaddr from the cmsg payload. */
1037 bzero(&src, sizeof(src));
1038 src.sin_family = AF_INET;
1039 src.sin_len = sizeof(src);
1040 src.sin_port = inp->inp_lport;
1042 *(struct in_addr *)CMSG_DATA(cm);
1046 if (cm->cmsg_len != CMSG_LEN(sizeof(u_char))) {
1050 tos = *(u_char *)CMSG_DATA(cm);
1054 error = ENOPROTOOPT;
1068 * Depending on whether or not the application has bound or connected
1069 * the socket, we may have to do varying levels of work. The optimal
1070 * case is for a connected UDP socket, as a global lock isn't
1073 * In order to decide which we need, we require stability of the
1074 * inpcb binding, which we ensure by acquiring a read lock on the
1075 * inpcb. This doesn't strictly follow the lock order, so we play
1076 * the trylock and retry game; note that we may end up with more
1077 * conservative locks than required the second time around, so later
1078 * assertions have to accept that. Further analysis of the number of
1079 * misses under contention is required.
1081 sin = (struct sockaddr_in *)addr;
/* Unbound socket: implicit bind requires the pcbinfo write lock. */
1084 (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
1086 INP_INFO_WLOCK(&V_udbinfo);
1089 } else if ((sin != NULL && (
1090 (sin->sin_addr.s_addr == INADDR_ANY) ||
1091 (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
1092 (inp->inp_laddr.s_addr == INADDR_ANY) ||
1093 (inp->inp_lport == 0))) ||
1094 (src.sin_family == AF_INET)) {
1095 if (!INP_INFO_TRY_RLOCK(&V_udbinfo)) {
1097 INP_INFO_RLOCK(&V_udbinfo);
1105 * If the IP_SENDSRCADDR control message was specified, override the
1106 * source address for this datagram. Its use is invalidated if the
1107 * address thus specified is incomplete or clobbers other inpcbs.
1109 laddr = inp->inp_laddr;
1110 lport = inp->inp_lport;
1111 if (src.sin_family == AF_INET) {
1112 INP_INFO_LOCK_ASSERT(&V_udbinfo);
1114 (laddr.s_addr == INADDR_ANY &&
1115 src.sin_addr.s_addr == INADDR_ANY)) {
1119 error = in_pcbbind_setup(inp, (struct sockaddr *)&src,
1120 &laddr.s_addr, &lport, td->td_ucred);
1126 * If a UDP socket has been connected, then a local address/port will
1127 * have been selected and bound.
1129 * If a UDP socket has not been connected to, then an explicit
1130 * destination address must be used, in which case a local
1131 * address/port may not have been selected and bound.
1134 INP_LOCK_ASSERT(inp);
1135 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1141 * Jail may rewrite the destination address, so let it do
1142 * that before we use it.
1144 error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1149 * If a local address or port hasn't yet been selected, or if
1150 * the destination address needs to be rewritten due to using
1151 * a special INADDR_ constant, invoke in_pcbconnect_setup()
1152 * to do the heavy lifting. Once a port is selected, we
1153 * commit the binding back to the socket; we also commit the
1154 * binding of the address if in jail.
1156 * If we already have a valid binding and we're not
1157 * requesting a destination address rewrite, use a fast path.
1159 if (inp->inp_laddr.s_addr == INADDR_ANY ||
1160 inp->inp_lport == 0 ||
1161 sin->sin_addr.s_addr == INADDR_ANY ||
1162 sin->sin_addr.s_addr == INADDR_BROADCAST) {
1163 INP_INFO_LOCK_ASSERT(&V_udbinfo);
1164 error = in_pcbconnect_setup(inp, addr, &laddr.s_addr,
1165 &lport, &faddr.s_addr, &fport, NULL,
1171 * XXXRW: Why not commit the port if the address is
1174 /* Commit the local port if newly assigned. */
1175 if (inp->inp_laddr.s_addr == INADDR_ANY &&
1176 inp->inp_lport == 0) {
1177 INP_INFO_WLOCK_ASSERT(&V_udbinfo);
1178 INP_WLOCK_ASSERT(inp);
1180 * Remember addr if jailed, to prevent
1183 if (prison_flag(td->td_ucred, PR_IP4))
1184 inp->inp_laddr = laddr;
1185 inp->inp_lport = lport;
1186 if (in_pcbinshash(inp) != 0) {
1191 inp->inp_flags |= INP_ANONPORT;
/* Fast path: valid binding, plain destination — no connect needed. */
1194 faddr = sin->sin_addr;
1195 fport = sin->sin_port;
/* Connected socket: endpoints come straight from the inpcb. */
1198 INP_LOCK_ASSERT(inp);
1199 faddr = inp->inp_faddr;
1200 fport = inp->inp_fport;
1201 if (faddr.s_addr == INADDR_ANY) {
1208 * Calculate data length and get a mbuf for UDP, IP, and possible
1209 * link-layer headers. Immediate slide the data pointer back forward
1210 * since we won't use that space at this layer.
1212 M_PREPEND(m, sizeof(struct udpiphdr) + max_linkhdr, M_DONTWAIT);
1217 m->m_data += max_linkhdr;
1218 m->m_len -= max_linkhdr;
1219 m->m_pkthdr.len -= max_linkhdr;
1222 * Fill in mbuf with extended UDP header and addresses and length put
1223 * into network format.
1225 ui = mtod(m, struct udpiphdr *);
1226 bzero(ui->ui_x1, sizeof(ui->ui_x1)); /* XXX still needed? */
1227 ui->ui_pr = IPPROTO_UDP;
1230 ui->ui_sport = lport;
1231 ui->ui_dport = fport;
1232 ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
1235 * Set the Don't Fragment bit in the IP header.
1237 if (inp->inp_flags & INP_DONTFRAG) {
1240 ip = (struct ip *)&ui->ui_i;
1241 ip->ip_off |= IP_DF;
/* Translate socket options into ip_output() flags. */
1245 if (inp->inp_socket->so_options & SO_DONTROUTE)
1246 ipflags |= IP_ROUTETOIF;
1247 if (inp->inp_socket->so_options & SO_BROADCAST)
1248 ipflags |= IP_ALLOWBROADCAST;
1249 if (inp->inp_flags & INP_ONESBCAST)
1250 ipflags |= IP_SENDONES;
1253 mac_inpcb_create_mbuf(inp, m);
1257 * Set up checksum and output datagram.
/* Pseudo-header sum now; the rest is offloaded via CSUM_UDP. */
1260 if (inp->inp_flags & INP_ONESBCAST)
1261 faddr.s_addr = INADDR_BROADCAST;
1262 ui->ui_sum = in_pseudo(ui->ui_src.s_addr, faddr.s_addr,
1263 htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
1264 m->m_pkthdr.csum_flags = CSUM_UDP;
1265 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
1268 ((struct ip *)ui)->ip_len = sizeof (struct udpiphdr) + len;
1269 ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */
1270 ((struct ip *)ui)->ip_tos = tos; /* XXX */
1271 UDPSTAT_INC(udps_opackets);
/* Drop the global lock before the potentially long ip_output() call. */
1273 if (unlock_udbinfo == 2)
1274 INP_INFO_WUNLOCK(&V_udbinfo);
1275 else if (unlock_udbinfo == 1)
1276 INP_INFO_RUNLOCK(&V_udbinfo);
1277 error = ip_output(m, inp->inp_options, NULL, ipflags,
1278 inp->inp_moptions, inp);
1279 if (unlock_udbinfo == 2)
/* Error-path unlock (release label elided in this excerpt). */
1286 if (unlock_udbinfo == 2) {
1288 INP_INFO_WUNLOCK(&V_udbinfo);
1289 } else if (unlock_udbinfo == 1) {
1291 INP_INFO_RUNLOCK(&V_udbinfo);
1299 #if defined(IPSEC) && defined(IPSEC_NAT_T)
1302 * Potentially decap ESP in UDP frame. Check for an ESP header
1303 * and optional marker; if present, strip the UDP header and
1304 * push the result through IPSec.
1306 * Returns mbuf to be processed (potentially re-allocated) or
1307 * NULL if consumed and/or processed.
/* (Excerpt; interior lines elided. off = offset past IP+UDP headers.) */
1309 static struct mbuf *
1310 udp4_espdecap(struct inpcb *inp, struct mbuf *m, int off)
1312 size_t minlen, payload, skip, iphlen;
1316 struct udphdr *udphdr;
1319 INP_RLOCK_ASSERT(inp);
1322 * Pull up data so the longest case is contiguous:
1323 * IP/UDP hdr + non ESP marker + ESP hdr.
1325 minlen = off + sizeof(uint64_t) + sizeof(struct esp);
1326 if (minlen > m->m_pkthdr.len)
1327 minlen = m->m_pkthdr.len;
1328 if ((m = m_pullup(m, minlen)) == NULL) {
1329 V_ipsec4stat.in_inval++;
1330 return (NULL); /* Bypass caller processing. */
1332 data = mtod(m, caddr_t); /* Points to ip header. */
1333 payload = m->m_len - off; /* Size of payload. */
/* Single 0xff byte is a NAT-T keepalive; pass it through untouched. */
1335 if (payload == 1 && data[off] == '\xff')
1336 return (m); /* NB: keepalive packet, no decap. */
1338 up = intoudpcb(inp);
1339 KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
1340 KASSERT((up->u_flags & UF_ESPINUDP_ALL) != 0,
1341 ("u_flags 0x%x", up->u_flags));
1344 * Check that the payload is large enough to hold an
1345 * ESP header and compute the amount of data to remove.
1347 * NB: the caller has already done a pullup for us.
1348 * XXX can we assume alignment and eliminate bcopys?
1350 if (up->u_flags & UF_ESPINUDP_NON_IKE) {
1352 * draft-ietf-ipsec-nat-t-ike-0[01].txt and
1353 * draft-ietf-ipsec-udp-encaps-(00/)01.txt, ignoring
1354 * possible AH mode non-IKE marker+non-ESP marker
1355 * from draft-ietf-ipsec-udp-encaps-00.txt.
1359 if (payload <= sizeof(uint64_t) + sizeof(struct esp))
1360 return (m); /* NB: no decap. */
1361 bcopy(data + off, &marker, sizeof(uint64_t));
1362 if (marker != 0) /* Non-IKE marker. */
1363 return (m); /* NB: no decap. */
1364 skip = sizeof(uint64_t) + sizeof(struct udphdr);
/* Plain ESP-in-UDP: a zero SPI is the non-ESP (IKE) marker. */
1368 if (payload <= sizeof(struct esp)) {
1369 V_ipsec4stat.in_inval++;
1371 return (NULL); /* Discard. */
1373 bcopy(data + off, &spi, sizeof(uint32_t));
1374 if (spi == 0) /* Non-ESP marker. */
1375 return (m); /* NB: no decap. */
1376 skip = sizeof(struct udphdr);
1380 * Setup a PACKET_TAG_IPSEC_NAT_T_PORT tag to remember
1381 * the UDP ports. This is required if we want to select
1382 * the right SPD for multiple hosts behind same NAT.
1384 * NB: ports are maintained in network byte order everywhere
1385 * in the NAT-T code.
1387 tag = m_tag_get(PACKET_TAG_IPSEC_NAT_T_PORTS,
1388 2 * sizeof(uint16_t), M_NOWAIT);
1390 V_ipsec4stat.in_nomem++;
1392 return (NULL); /* Discard. */
1394 iphlen = off - sizeof(struct udphdr);
1395 udphdr = (struct udphdr *)(data + iphlen);
1396 ((uint16_t *)(tag + 1))[0] = udphdr->uh_sport;
1397 ((uint16_t *)(tag + 1))[1] = udphdr->uh_dport;
1398 m_tag_prepend(m, tag);
1401 * Remove the UDP header (and possibly the non ESP marker)
1402 * IP header length is iphlen
1405 * +----+------+-----+
1406 * | IP | UDP | ESP |
1407 * +----+------+-----+
/* Slide the IP header forward over the stripped UDP header/marker. */
1415 ovbcopy(data, data + skip, iphlen);
1418 ip = mtod(m, struct ip *);
1420 ip->ip_p = IPPROTO_ESP;
1423 * We cannot yet update the cksums so clear any
1424 * h/w cksum flags as they are no longer valid.
1426 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)
1427 m->m_pkthdr.csum_flags &= ~(CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
1429 (void) ipsec4_common_input(m, iphlen, ip->ip_p);
1430 return (NULL); /* NB: consumed, bypass processing. */
1433 #endif /* defined(IPSEC) && defined(IPSEC_NAT_T) */
/*
 * pru_abort: forcibly disconnect the socket, dropping any foreign binding.
 * (Excerpt; interior lines elided.)
 */
1436 udp_abort(struct socket *so)
1440 inp = sotoinpcb(so);
1441 KASSERT(inp != NULL, ("udp_abort: inp == NULL"));
1442 INP_INFO_WLOCK(&V_udbinfo);
1444 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1445 in_pcbdisconnect(inp);
1446 inp->inp_laddr.s_addr = INADDR_ANY;
1447 soisdisconnected(so);
1450 INP_INFO_WUNLOCK(&V_udbinfo);
/*
 * pru_attach: reserve socket buffers, allocate the inpcb and udpcb, and set
 * the default IPv4 flags/TTL. (Excerpt; interior lines elided — note the
 * udp_newudpcb() failure path appears to free the inpcb; confirm in full
 * source.)
 */
1454 udp_attach(struct socket *so, int proto, struct thread *td)
1459 inp = sotoinpcb(so);
1460 KASSERT(inp == NULL, ("udp_attach: inp != NULL"));
1461 error = soreserve(so, udp_sendspace, udp_recvspace);
1464 INP_INFO_WLOCK(&V_udbinfo);
1465 error = in_pcballoc(so, &V_udbinfo);
1467 INP_INFO_WUNLOCK(&V_udbinfo);
1471 inp = sotoinpcb(so);
1472 inp->inp_vflag |= INP_IPV4;
1473 inp->inp_ip_ttl = V_ip_defttl;
1475 error = udp_newudpcb(inp);
1479 INP_INFO_WUNLOCK(&V_udbinfo);
1484 INP_INFO_WUNLOCK(&V_udbinfo);
/*
 * Install an in-kernel tunneling input function on a UDP socket; packets
 * for this socket are then handed to f instead of udp_append(). Appears to
 * reject sockets that already have a tunnel function installed. (Excerpt;
 * interior lines elided.)
 */
1489 udp_set_kernel_tunneling(struct socket *so, udp_tun_func_t f)
1494 KASSERT(so->so_type == SOCK_DGRAM,
1495 ("udp_set_kernel_tunneling: !dgram"));
1496 inp = sotoinpcb(so);
1497 KASSERT(inp != NULL, ("udp_set_kernel_tunneling: inp == NULL"));
1499 up = intoudpcb(inp);
1500 if (up->u_tun_func != NULL) {
/*
 * pru_bind: bind the PCB to a local address/port via in_pcbbind() under
 * the pcbinfo write lock. (Excerpt; interior lines elided.)
 */
1510 udp_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
1515 inp = sotoinpcb(so);
1516 KASSERT(inp != NULL, ("udp_bind: inp == NULL"));
1517 INP_INFO_WLOCK(&V_udbinfo);
1519 error = in_pcbbind(inp, nam, td->td_ucred);
1521 INP_INFO_WUNLOCK(&V_udbinfo);
/*
 * pru_close: disconnect if connected; mirrors udp_abort(). (Excerpt;
 * interior lines elided.)
 */
1526 udp_close(struct socket *so)
1530 inp = sotoinpcb(so);
1531 KASSERT(inp != NULL, ("udp_close: inp == NULL"));
1532 INP_INFO_WLOCK(&V_udbinfo);
1534 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1535 in_pcbdisconnect(inp);
1536 inp->inp_laddr.s_addr = INADDR_ANY;
1537 soisdisconnected(so);
1540 INP_INFO_WUNLOCK(&V_udbinfo);
/*
 * pru_connect: reject if already connected, let the jail rewrite the
 * destination, then establish the foreign binding with in_pcbconnect().
 * (Excerpt; interior lines elided.)
 */
1544 udp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
1548 struct sockaddr_in *sin;
1550 inp = sotoinpcb(so);
1551 KASSERT(inp != NULL, ("udp_connect: inp == NULL"));
1552 INP_INFO_WLOCK(&V_udbinfo);
1554 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1556 INP_INFO_WUNLOCK(&V_udbinfo);
1559 sin = (struct sockaddr_in *)nam;
1560 error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1563 INP_INFO_WUNLOCK(&V_udbinfo);
1566 error = in_pcbconnect(inp, nam, td->td_ucred);
1570 INP_INFO_WUNLOCK(&V_udbinfo);
/*
 * pru_detach: tear down the PCB of an already-disconnected socket,
 * detaching and discarding its udpcb. (Excerpt; interior lines elided.)
 */
1575 udp_detach(struct socket *so)
1580 inp = sotoinpcb(so);
1581 KASSERT(inp != NULL, ("udp_detach: inp == NULL"));
1582 KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
1583 ("udp_detach: not disconnected"));
1584 INP_INFO_WLOCK(&V_udbinfo);
1586 up = intoudpcb(inp);
1587 KASSERT(up != NULL, ("%s: up == NULL", __func__));
/* Detach the udpcb before the inpcb is freed. */
1588 inp->inp_ppcb = NULL;
1591 INP_INFO_WUNLOCK(&V_udbinfo);
/*
 * pru_disconnect: drop the foreign binding; fails if not connected.
 * (Excerpt; interior lines elided.)
 */
1596 udp_disconnect(struct socket *so)
1600 inp = sotoinpcb(so);
1601 KASSERT(inp != NULL, ("udp_disconnect: inp == NULL"));
1602 INP_INFO_WLOCK(&V_udbinfo);
1604 if (inp->inp_faddr.s_addr == INADDR_ANY) {
1606 INP_INFO_WUNLOCK(&V_udbinfo);
1610 in_pcbdisconnect(inp);
1611 inp->inp_laddr.s_addr = INADDR_ANY;
1613 so->so_state &= ~SS_ISCONNECTED; /* XXX */
1616 INP_INFO_WUNLOCK(&V_udbinfo);
/*
 * pru_send: thin wrapper that forwards the datagram to udp_output(), which
 * handles all locking and address resolution. (Excerpt; interior lines
 * elided.)
 */
1621 udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
1622 struct mbuf *control, struct thread *td)
1626 inp = sotoinpcb(so);
1627 KASSERT(inp != NULL, ("udp_send: inp == NULL"));
1628 return (udp_output(inp, m, addr, control, td));
/*
 * pru_shutdown: mark the socket as unable to send further data. (Excerpt;
 * the socantsendmore() call and return are elided.)
 */
1632 udp_shutdown(struct socket *so)
1636 inp = sotoinpcb(so);
1637 KASSERT(inp != NULL, ("udp_shutdown: inp == NULL"));
/*
 * Protocol user-request switch wiring the UDP socket operations into the
 * generic socket layer. Datagram-optimized soreceive/sosend paths are
 * selected. (Excerpt; closing brace elided.)
 */
1644 struct pr_usrreqs udp_usrreqs = {
1645 .pru_abort = udp_abort,
1646 .pru_attach = udp_attach,
1647 .pru_bind = udp_bind,
1648 .pru_connect = udp_connect,
1649 .pru_control = in_control,
1650 .pru_detach = udp_detach,
1651 .pru_disconnect = udp_disconnect,
1652 .pru_peeraddr = in_getpeeraddr,
1653 .pru_send = udp_send,
1654 .pru_soreceive = soreceive_dgram,
1655 .pru_sosend = sosend_dgram,
1656 .pru_shutdown = udp_shutdown,
1657 .pru_sockaddr = in_getsockaddr,
1658 .pru_sosetlabel = in_pcbsosetlabel,
1659 .pru_close = udp_close,