2 * Copyright (c) 1982, 1986, 1988, 1993
3 * The Regents of the University of California.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 4. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * @(#)raw_ip.c 8.7 (Berkeley) 5/15/95
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
37 #include "opt_inet6.h"
38 #include "opt_ipsec.h"
40 #include <sys/param.h>
42 #include <sys/kernel.h>
44 #include <sys/malloc.h>
48 #include <sys/protosw.h>
49 #include <sys/rwlock.h>
50 #include <sys/signalvar.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
54 #include <sys/sysctl.h>
55 #include <sys/systm.h>
60 #include <net/route.h>
63 #include <netinet/in.h>
64 #include <netinet/in_systm.h>
65 #include <netinet/in_pcb.h>
66 #include <netinet/in_var.h>
67 #include <netinet/if_ether.h>
68 #include <netinet/ip.h>
69 #include <netinet/ip_var.h>
70 #include <netinet/ip_mroute.h>
73 #include <netipsec/ipsec.h>
76 #include <security/mac/mac_framework.h>
/*
 * Per-VNET default TTL applied to outgoing IP packets (see rip_attach,
 * which copies V_ip_defttl into each new raw inpcb).  Exposed as the
 * net.inet.ip.ttl sysctl.
 */
78 VNET_DEFINE(int, ip_defttl) = IPDEFTTL;
79 SYSCTL_VNET_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW,
80 &VNET_NAME(ip_defttl), 0,
81 "Maximum TTL on IP packets");
/* Per-VNET list and bookkeeping structure for all raw IP PCBs. */
83 VNET_DEFINE(struct inpcbhead, ripcb);
84 VNET_DEFINE(struct inpcbinfo, ripcbinfo);
/* Virtualized accessors for the above. */
86 #define V_ripcb VNET(ripcb)
87 #define V_ripcbinfo VNET(ripcbinfo)
90 * Control and data hooks for ipfw, dummynet, divert and so on.
91 * The data hooks are not used here but it is convenient
92 * to keep them all in one place.
94 VNET_DEFINE(ip_fw_chk_ptr_t, ip_fw_chk_ptr) = NULL;
95 VNET_DEFINE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr) = NULL;
/* Dummynet control/data entry points; NULL until the module loads. */
97 int (*ip_dn_ctl_ptr)(struct sockopt *);
98 int (*ip_dn_io_ptr)(struct mbuf **, int, struct ip_fw_args *);
/* divert(4) input hook. */
99 void (*ip_divert_ptr)(struct mbuf *, int);
/* ng_ipfw netgraph input hook. */
100 int (*ng_ipfw_input_p)(struct mbuf **, int,
101 struct ip_fw_args *, int);
103 /* Hook for telling pf that the destination address changed */
104 void (*m_addr_chg_pf_p)(struct mbuf *m);
108 * Hooks for multicast routing. They all default to NULL, so leave them not
109 * initialized and rely on BSS being set to 0.
113 * The socket used to communicate with the multicast routing daemon.
115 VNET_DEFINE(struct socket *, ip_mrouter);
118 * The various mrouter and rsvp functions.
120 int (*ip_mrouter_set)(struct socket *, struct sockopt *);
121 int (*ip_mrouter_get)(struct socket *, struct sockopt *);
122 int (*ip_mrouter_done)(void);
123 int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
124 struct ip_moptions *);
125 int (*mrt_ioctl)(u_long, caddr_t, int);
126 int (*legal_vif_num)(int);
127 u_long (*ip_mcast_src)(int);
/* RSVP hooks, also filled in at module load time. */
129 void (*rsvp_input_p)(struct mbuf *m, int off);
130 int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
131 void (*ip_rsvp_force_done)(struct socket *);
/*
 * Default socket-buffer reservations for raw IP sockets, tunable via
 * net.inet.raw.maxdgram and net.inet.raw.recvspace (used by rip_attach).
 */
134 u_long rip_sendspace = 9216;
135 SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
136 &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
138 u_long rip_recvspace = 9216;
139 SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
140 &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");
/*
 * Hash over (protocol, local addr, foreign addr) for fully-specified raw
 * sockets.  The "+ 1" keeps the result out of bucket 0, which rip_input()
 * scans separately for wildcard/unbound sockets (see its second
 * LIST_FOREACH over ipi_hashbase[0]).
 */
146 #define INP_PCBHASH_RAW_SIZE 256
147 #define INP_PCBHASH_RAW(proto, laddr, faddr, mask) \
148 (((proto) + (laddr) + (faddr)) % (mask) + 1)
/*
 * Insert an inpcb into the raw IP hash table.  Sockets that are fully
 * specified (non-zero protocol, bound local and foreign addresses) are
 * placed in the bucket computed by INP_PCBHASH_RAW.
 * NOTE(review): this excerpt elides lines (original numbering jumps
 * 165->168); the missing else arm presumably assigns the wildcard bucket
 * (hash = 0) -- confirm against upstream raw_ip.c.
 */
152 rip_inshash(struct inpcb *inp)
154 struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
155 struct inpcbhead *pcbhash;
/* Caller must hold the pcbinfo write lock and the inpcb lock. */
158 INP_INFO_WLOCK_ASSERT(pcbinfo);
159 INP_WLOCK_ASSERT(inp);
161 if (inp->inp_ip_p != 0 &&
162 inp->inp_laddr.s_addr != INADDR_ANY &&
163 inp->inp_faddr.s_addr != INADDR_ANY) {
164 hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
165 inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
168 pcbhash = &pcbinfo->ipi_hashbase[hash];
169 LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
/*
 * Remove an inpcb from the raw IP hash table.  Requires the same locks
 * as rip_inshash(): pcbinfo write lock plus the inpcb lock.
 */
173 rip_delhash(struct inpcb *inp)
176 INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
177 INP_WLOCK_ASSERT(inp);
179 LIST_REMOVE(inp, inp_hash);
184 * Raw interface to IP protocol.
188 * Initialize raw connection block q.
/*
 * maxsockets_change event handler: cap the raw inpcb UMA zone at the
 * new maxsockets value (registered in the init fragment below).
 */
191 rip_zone_change(void *tag)
194 uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
/*
 * UMA zone constructor for raw inpcbs: initialize the per-inpcb lock.
 * NOTE(review): the return statement is elided from this excerpt --
 * presumably returns 0; confirm against upstream.
 */
198 rip_inpcb_init(void *mem, int size, int flags)
200 struct inpcb *inp = mem;
202 INP_LOCK_INIT(inp, "inp", "rawinp");
/*
 * Initialization and teardown of the raw IP pcbinfo.
 * NOTE(review): the enclosing function headers (presumably rip_init()
 * and rip_destroy()) are elided from this excerpt -- the first fragment
 * sets up the pcbinfo with the raw hash size and registers the
 * maxsockets_change handler; the second tears the pcbinfo down.
 */
210 in_pcbinfo_init(&V_ripcbinfo, "rip", &V_ripcb, INP_PCBHASH_RAW_SIZE,
211 1, "ripcb", rip_inpcb_init, NULL, UMA_ZONE_NOFREE,
212 IPI_HASHFIELDS_NONE);
213 EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
214 EVENTHANDLER_PRI_ANY);
222 in_pcbinfo_destroy(&V_ripcbinfo);
/*
 * Deliver one received raw IP datagram to one socket: run IPSec, MAC,
 * and minimum-TTL checks, optionally attach control data, then append
 * the chain to the socket receive buffer and wake the reader.
 * NOTE(review): several drop paths and the return value handling are
 * elided from this excerpt (numbering gaps, e.g. 237->242, 259->263).
 */
228 rip_append(struct inpcb *last, struct ip *ip, struct mbuf *n,
229 struct sockaddr_in *ripsrc)
/* 'last' must be locked by the caller (see rip_input). */
233 INP_LOCK_ASSERT(last);
236 /* check AH/ESP integrity. */
237 if (ipsec4_in_reject(n, last)) {
/* MAC framework per-packet delivery check. */
242 if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
245 /* Check the minimum TTL for socket. */
246 if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
249 struct mbuf *opts = NULL;
252 so = last->inp_socket;
/* Build control mbufs (timestamps etc.) if the socket requested them. */
253 if ((last->inp_flags & INP_CONTROLOPTS) ||
254 (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
255 ip_savecontrol(last, &opts, ip, n);
256 SOCKBUF_LOCK(&so->so_rcv);
257 if (sbappendaddr_locked(&so->so_rcv,
258 (struct sockaddr *)ripsrc, n, opts) == 0) {
259 /* should notify about lost packet */
263 SOCKBUF_UNLOCK(&so->so_rcv);
265 sorwakeup_locked(so);
272 * Setup generic address and protocol structures for raw_input routine, then
273 * pass them along with mbuf chain.
/*
 * Input path for raw IP: deliver a copy of each matching datagram to
 * every interested raw socket.  Two passes over the PCB hash: first the
 * bucket for fully-specified (proto, laddr, faddr) sockets, then bucket
 * 0 for wildcard/unbound sockets.  The final matching socket receives
 * the original mbuf 'm'; earlier matches get m_copy() duplicates.
 * NOTE(review): continue/goto statements and the 'last' bookkeeping are
 * elided from this excerpt (numbering gaps throughout).
 */
276 rip_input(struct mbuf *m, int off)
279 struct ip *ip = mtod(m, struct ip *);
280 int proto = ip->ip_p;
281 struct inpcb *inp, *last;
282 struct sockaddr_in ripsrc;
/* Source address handed to recvfrom() on delivery. */
285 bzero(&ripsrc, sizeof(ripsrc));
286 ripsrc.sin_len = sizeof(ripsrc);
287 ripsrc.sin_family = AF_INET;
288 ripsrc.sin_addr = ip->ip_src;
291 ifp = m->m_pkthdr.rcvif;
293 * Add back the IP header length which was
294 * removed by ip_input(). Raw sockets do
295 * not modify the packet except for some
/* Pass 1: exact-match bucket for fully-specified raw sockets. */
300 hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
301 ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
302 INP_INFO_RLOCK(&V_ripcbinfo);
303 LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
304 if (inp->inp_ip_p != proto)
307 /* XXX inp locking */
308 if ((inp->inp_vflag & INP_IPV4) == 0)
311 if (inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
313 if (inp->inp_faddr.s_addr != ip->ip_src.s_addr)
315 if (jailed_without_vnet(inp->inp_cred)) {
317 * XXX: If faddr was bound to multicast group,
318 * jailed raw socket will drop datagram.
320 if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
/* Duplicate the packet for this (non-final) matching socket. */
326 n = m_copy(m, 0, (int)M_COPYALL);
328 (void) rip_append(last, ip, n, &ripsrc);
329 /* XXX count dropped packet */
/* Pass 2: bucket 0 holds wildcard (unbound/partially bound) sockets. */
335 LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
336 if (inp->inp_ip_p && inp->inp_ip_p != proto)
339 /* XXX inp locking */
340 if ((inp->inp_vflag & INP_IPV4) == 0)
343 if (!in_nullhost(inp->inp_laddr) &&
344 !in_hosteq(inp->inp_laddr, ip->ip_dst))
346 if (!in_nullhost(inp->inp_faddr) &&
347 !in_hosteq(inp->inp_faddr, ip->ip_src))
349 if (jailed_without_vnet(inp->inp_cred)) {
351 * Allow raw socket in jail to receive multicast;
352 * assume process had PRIV_NETINET_RAW at attach,
353 * and fall through into normal filter path if so.
355 if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
356 prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
360 * If this raw socket has multicast state, and we
361 * have received a multicast, check if this socket
362 * should receive it, as multicast filtering is now
363 * the responsibility of the transport layer.
365 if (inp->inp_moptions != NULL &&
366 IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
368 * If the incoming datagram is for IGMP, allow it
369 * through unconditionally to the raw socket.
371 * In the case of IGMPv2, we may not have explicitly
372 * joined the group, and may have set IFF_ALLMULTI
373 * on the interface. imo_multi_filter() may discard
374 * control traffic we actually need to see.
376 * Userland multicast routing daemons should continue
377 * filter the control traffic appropriately.
381 blocked = MCAST_PASS;
382 if (proto != IPPROTO_IGMP) {
383 struct sockaddr_in group;
385 bzero(&group, sizeof(struct sockaddr_in));
386 group.sin_len = sizeof(struct sockaddr_in);
387 group.sin_family = AF_INET;
388 group.sin_addr = ip->ip_dst;
390 blocked = imo_multi_filter(inp->inp_moptions,
392 (struct sockaddr *)&group,
393 (struct sockaddr *)&ripsrc);
396 if (blocked != MCAST_PASS) {
397 IPSTAT_INC(ips_notmember);
404 n = m_copy(m, 0, (int)M_COPYALL);
406 (void) rip_append(last, ip, n, &ripsrc);
407 /* XXX count dropped packet */
413 INP_INFO_RUNLOCK(&V_ripcbinfo);
/* Final match consumes the original mbuf; no copy needed. */
415 if (rip_append(last, ip, m, &ripsrc) != 0)
416 IPSTAT_INC(ips_delivered);
/* No matching socket: count as undeliverable protocol. */
420 IPSTAT_INC(ips_noproto);
421 IPSTAT_DEC(ips_delivered);
426 * Generate IP header and pass packet to ip_output. Tack on options user may
427 * have setup with control call.
/*
 * Output path for raw IP sockets.  Two cases: without INP_HDRINCL the
 * kernel prepends and fills in the IP header; with INP_HDRINCL the
 * caller supplied a complete header which is sanity-checked instead.
 * NOTE(review): error-return statements and some branches are elided
 * from this excerpt (numbering gaps, e.g. 443->447, 469->472).
 */
430 rip_output(struct mbuf *m, struct socket *so, u_long dst)
434 struct inpcb *inp = sotoinpcb(so);
435 int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
439 * If the user handed us a complete IP packet, use it. Otherwise,
440 * allocate an mbuf for a header and fill it in.
442 if ((inp->inp_flags & INP_HDRINCL) == 0) {
/* Kernel builds the header: enforce max size, then prepend it. */
443 if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
447 M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
452 ip = mtod(m, struct ip *);
453 ip->ip_tos = inp->inp_ip_tos;
454 if (inp->inp_flags & INP_DONTFRAG)
458 ip->ip_p = inp->inp_ip_p;
459 ip->ip_len = m->m_pkthdr.len;
460 ip->ip_src = inp->inp_laddr;
461 if (jailed(inp->inp_cred)) {
463 * prison_local_ip4() would be good enough but would
464 * let a source of INADDR_ANY pass, which we do not
465 * want to see from jails. We do not go through the
466 * pain of in_pcbladdr() for raw sockets.
468 if (ip->ip_src.s_addr == INADDR_ANY)
469 error = prison_get_ip4(inp->inp_cred,
472 error = prison_local_ip4(inp->inp_cred,
480 ip->ip_dst.s_addr = dst;
481 ip->ip_ttl = inp->inp_ip_ttl;
/* INP_HDRINCL: caller-supplied header; validate it instead. */
483 if (m->m_pkthdr.len > IP_MAXPACKET) {
488 ip = mtod(m, struct ip *);
/* Jail may not spoof a source address outside its prison. */
489 error = prison_check_ip4(inp->inp_cred, &ip->ip_src);
497 * Don't allow both user specified and setsockopt options,
498 * and don't allow packet length sizes that will crash.
500 if (((ip->ip_hl != (sizeof (*ip) >> 2)) && inp->inp_options)
501 || (ip->ip_len > m->m_pkthdr.len)
502 || (ip->ip_len < (ip->ip_hl << 2))) {
/* Assign an IP id when the caller left it for the kernel to fill. */
508 ip->ip_id = ip_newid();
511 * XXX prevent ip_output from overwriting header fields.
513 flags |= IP_RAWOUTPUT;
514 IPSTAT_INC(ips_rawout);
517 if (inp->inp_flags & INP_ONESBCAST)
518 flags |= IP_SENDONES;
/* Label the outgoing mbuf for the MAC framework. */
521 mac_inpcb_create_mbuf(inp, m);
524 error = ip_output(m, inp->inp_options, NULL, flags,
525 inp->inp_moptions, inp);
531 * Raw IP socket option processing.
533 * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
534 * only be created by a privileged process, and as such, socket option
535 * operations to manage system properties on any raw socket were allowed to
536 * take place without explicit additional access control checks. However,
537 * raw sockets can now also be created in jail(), and therefore explicit
538 * checks are now required. Likewise, raw sockets can be used by a process
539 * after it gives up privilege, so some caution is required. For options
540 * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
541 * performed in ip_ctloutput() and therefore no check occurs here.
542 * Unilaterally checking priv_check() here breaks normal IP socket option
543 * operations on raw sockets.
545 * When adding new socket options here, make sure to add access control
546 * checks here as necessary.
548 * XXX-BZ inp locking?
/*
 * Dispatch get/set socket options for raw IP sockets, routing ipfw,
 * dummynet, RSVP, and multicast-router options to their module hooks
 * and everything else to ip_ctloutput().
 * NOTE(review): many case labels, break statements, and returns are
 * elided from this excerpt (numbering gaps throughout).
 */
551 rip_ctloutput(struct socket *so, struct sockopt *sopt)
553 struct inpcb *inp = sotoinpcb(so);
/* Only SO_SETFIB is handled at non-IPPROTO_IP levels. */
556 if (sopt->sopt_level != IPPROTO_IP) {
557 if ((sopt->sopt_level == SOL_SOCKET) &&
558 (sopt->sopt_name == SO_SETFIB)) {
559 inp->inp_inc.inc_fibnum = so->so_fibnum;
566 switch (sopt->sopt_dir) {
/* --- getsockopt --- */
568 switch (sopt->sopt_name) {
570 optval = inp->inp_flags & INP_HDRINCL;
571 error = sooptcopyout(sopt, &optval, sizeof optval);
574 case IP_FW3: /* generic ipfw v.3 functions */
575 case IP_FW_ADD: /* ADD actually returns the body... */
577 case IP_FW_TABLE_GETSIZE:
578 case IP_FW_TABLE_LIST:
579 case IP_FW_NAT_GET_CONFIG:
580 case IP_FW_NAT_GET_LOG:
/* ipfw module hook; absent hook presumably yields an error. */
581 if (V_ip_fw_ctl_ptr != NULL)
582 error = V_ip_fw_ctl_ptr(sopt);
587 case IP_DUMMYNET3: /* generic dummynet v.3 functions */
588 case IP_DUMMYNET_GET:
589 if (ip_dn_ctl_ptr != NULL)
590 error = ip_dn_ctl_ptr(sopt);
603 case MRT_API_SUPPORT:
605 case MRT_ADD_BW_UPCALL:
606 case MRT_DEL_BW_UPCALL:
/* Multicast-routing options need explicit privilege. */
607 error = priv_check(curthread, PRIV_NETINET_MROUTE);
610 error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
/* Default: hand the option down to the IP layer. */
615 error = ip_ctloutput(so, sopt);
/* --- setsockopt --- */
621 switch (sopt->sopt_name) {
623 error = sooptcopyin(sopt, &optval, sizeof optval,
628 inp->inp_flags |= INP_HDRINCL;
630 inp->inp_flags &= ~INP_HDRINCL;
633 case IP_FW3: /* generic ipfw v.3 functions */
639 case IP_FW_TABLE_ADD:
640 case IP_FW_TABLE_DEL:
641 case IP_FW_TABLE_FLUSH:
644 if (V_ip_fw_ctl_ptr != NULL)
645 error = V_ip_fw_ctl_ptr(sopt);
650 case IP_DUMMYNET3: /* generic dummynet v.3 functions */
651 case IP_DUMMYNET_CONFIGURE:
652 case IP_DUMMYNET_DEL:
653 case IP_DUMMYNET_FLUSH:
654 if (ip_dn_ctl_ptr != NULL)
655 error = ip_dn_ctl_ptr(sopt);
657 error = ENOPROTOOPT ;
/* RSVP on/off and vif options are privileged operations. */
661 error = priv_check(curthread, PRIV_NETINET_MROUTE);
664 error = ip_rsvp_init(so);
668 error = priv_check(curthread, PRIV_NETINET_MROUTE);
671 error = ip_rsvp_done();
675 case IP_RSVP_VIF_OFF:
676 error = priv_check(curthread, PRIV_NETINET_MROUTE);
679 error = ip_rsvp_vif ?
680 ip_rsvp_vif(so, sopt) : EINVAL;
691 case MRT_API_SUPPORT:
693 case MRT_ADD_BW_UPCALL:
694 case MRT_DEL_BW_UPCALL:
695 error = priv_check(curthread, PRIV_NETINET_MROUTE);
698 error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
703 error = ip_ctloutput(so, sopt);
713 * This function exists solely to receive the PRC_IFDOWN messages which are
714 * sent by if_down(). It looks for an ifaddr whose ifa_addr is sa, and calls
715 * in_ifadown() to remove all routes corresponding to that address. It also
716 * receives the PRC_IFUP messages from if_up() and reinstalls the interface
/*
 * NOTE(review): the switch on 'cmd' and several locking/return lines
 * are elided from this excerpt; the two TAILQ_FOREACH passes below
 * correspond to the PRC_IFDOWN and PRC_IFUP cases respectively.
 */
720 rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
722 struct in_ifaddr *ia;
/* PRC_IFDOWN: scrub the interface route and tear down the rest. */
730 TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
731 if (ia->ia_ifa.ifa_addr == sa
732 && (ia->ia_flags & IFA_ROUTE)) {
733 ifa_ref(&ia->ia_ifa);
736 * in_ifscrub kills the interface route.
738 in_ifscrub(ia->ia_ifp, ia, 0);
740 * in_ifadown gets rid of all the rest of the
741 * routes. This is not quite the right thing
742 * to do, but at least if we are running a
743 * routing process they will come back.
745 in_ifadown(&ia->ia_ifa, 0);
746 ifa_free(&ia->ia_ifa);
750 if (ia == NULL) /* If ia matched, already unlocked. */
/* PRC_IFUP: reinstall routes for the matching, route-less ifaddr. */
756 TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
757 if (ia->ia_ifa.ifa_addr == sa)
760 if (ia == NULL || (ia->ia_flags & IFA_ROUTE)) {
764 ifa_ref(&ia->ia_ifa);
767 ifp = ia->ia_ifa.ifa_ifp;
/* Loopback/p2p interfaces keep only the loopback route. */
769 if ((ifp->if_flags & IFF_LOOPBACK)
770 || (ifp->if_flags & IFF_POINTOPOINT))
773 err = ifa_del_loopback_route((struct ifaddr *)ia, sa);
775 ia->ia_flags &= ~IFA_RTSELF;
777 err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
779 ia->ia_flags |= IFA_ROUTE;
781 err = ifa_add_loopback_route((struct ifaddr *)ia, sa);
783 ia->ia_flags |= IFA_RTSELF;
785 ifa_free(&ia->ia_ifa);
/*
 * pru_attach: create a raw IP socket.  Requires PRIV_NETINET_RAW, a
 * valid protocol number, and successful buffer reservation; then
 * allocates the inpcb and seeds it with the socket's protocol and the
 * per-VNET default TTL.
 * NOTE(review): error-return lines and the INP_WUNLOCK/hash insertion
 * are elided from this excerpt (numbering gaps).
 */
791 rip_attach(struct socket *so, int proto, struct thread *td)
797 KASSERT(inp == NULL, ("rip_attach: inp != NULL"));
799 error = priv_check(td, PRIV_NETINET_RAW);
802 if (proto >= IPPROTO_MAX || proto < 0)
803 return EPROTONOSUPPORT;
/* Reserve send/receive buffer space from the raw-IP tunables. */
804 error = soreserve(so, rip_sendspace, rip_recvspace);
807 INP_INFO_WLOCK(&V_ripcbinfo);
808 error = in_pcballoc(so, &V_ripcbinfo);
810 INP_INFO_WUNLOCK(&V_ripcbinfo);
813 inp = (struct inpcb *)so->so_pcb;
814 inp->inp_vflag |= INP_IPV4;
815 inp->inp_ip_p = proto;
816 inp->inp_ip_ttl = V_ip_defttl;
818 INP_INFO_WUNLOCK(&V_ripcbinfo);
/*
 * pru_detach: tear down a raw IP socket, notifying the multicast
 * router and RSVP subsystems if this socket was registered with them.
 * NOTE(review): the ip_mrouter_done()/ip_rsvp_done() calls and the
 * in_pcbdetach/in_pcbfree sequence are partially elided (gaps after
 * original lines 836 and 840).
 */
824 rip_detach(struct socket *so)
829 KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
830 KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
831 ("rip_detach: not closed"));
833 INP_INFO_WLOCK(&V_ripcbinfo);
836 if (so == V_ip_mrouter && ip_mrouter_done)
838 if (ip_rsvp_force_done)
839 ip_rsvp_force_done(so);
840 if (so == V_ip_rsvpd)
844 INP_INFO_WUNLOCK(&V_ripcbinfo);
/*
 * Shared disconnect helper for abort/close/disconnect: re-hash the
 * inpcb with the foreign address cleared and mark the socket
 * disconnected.  Takes the pcbinfo write lock (rip_delhash/rip_inshash
 * calls appear to be elided from this excerpt -- numbering gaps around
 * original lines 853-856).
 */
848 rip_dodisconnect(struct socket *so, struct inpcb *inp)
850 struct inpcbinfo *pcbinfo;
852 pcbinfo = inp->inp_pcbinfo;
853 INP_INFO_WLOCK(pcbinfo);
856 inp->inp_faddr.s_addr = INADDR_ANY;
859 so->so_state &= ~SS_ISCONNECTED;
862 INP_INFO_WUNLOCK(pcbinfo);
/* pru_abort: forcibly disconnect the raw socket. */
866 rip_abort(struct socket *so)
871 KASSERT(inp != NULL, ("rip_abort: inp == NULL"));
873 rip_dodisconnect(so, inp);
/* pru_close: identical to abort for raw sockets -- just disconnect. */
877 rip_close(struct socket *so)
882 KASSERT(inp != NULL, ("rip_close: inp == NULL"));
884 rip_dodisconnect(so, inp);
/*
 * pru_disconnect: only valid on a connected socket (ENOTCONN return
 * for the early-out appears elided from this excerpt).
 */
888 rip_disconnect(struct socket *so)
892 if ((so->so_state & SS_ISCONNECTED) == 0)
896 KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));
898 rip_dodisconnect(so, inp);
/*
 * pru_bind: bind a raw socket to a local address.  Validates the
 * sockaddr length/family, checks the jail, and (unless INP_BINDANY is
 * set) requires the address to exist on some interface; then records
 * inp_laddr and re-hashes the PCB (rip_delhash/rip_inshash calls
 * elided in this excerpt -- gaps around original lines 926-932).
 */
903 rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
905 struct sockaddr_in *addr = (struct sockaddr_in *)nam;
909 if (nam->sa_len != sizeof(*addr))
/* Jailed processes may only bind addresses within their prison. */
912 error = prison_check_ip4(td->td_ucred, &addr->sin_addr);
917 KASSERT(inp != NULL, ("rip_bind: inp == NULL"));
919 if (TAILQ_EMPTY(&V_ifnet) ||
920 (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
921 (addr->sin_addr.s_addr &&
922 (inp->inp_flags & INP_BINDANY) == 0 &&
923 ifa_ifwithaddr_check((struct sockaddr *)addr) == 0))
924 return (EADDRNOTAVAIL);
926 INP_INFO_WLOCK(&V_ripcbinfo);
929 inp->inp_laddr = addr->sin_addr;
932 INP_INFO_WUNLOCK(&V_ripcbinfo);
/*
 * pru_connect: record a default foreign address for the raw socket and
 * re-hash the PCB (rip_delhash/rip_inshash and soisconnected() calls
 * elided in this excerpt -- gaps around original lines 952-959).
 */
937 rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
939 struct sockaddr_in *addr = (struct sockaddr_in *)nam;
942 if (nam->sa_len != sizeof(*addr))
944 if (TAILQ_EMPTY(&V_ifnet))
945 return (EADDRNOTAVAIL);
946 if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
947 return (EAFNOSUPPORT);
950 KASSERT(inp != NULL, ("rip_connect: inp == NULL"));
952 INP_INFO_WLOCK(&V_ripcbinfo);
955 inp->inp_faddr = addr->sin_addr;
959 INP_INFO_WUNLOCK(&V_ripcbinfo);
/*
 * pru_shutdown: disable further sends (the socantsendmore() call is
 * elided from this excerpt -- numbering gap after original line 969).
 */
964 rip_shutdown(struct socket *so)
969 KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));
/*
 * pru_send: resolve the destination -- the connected foreign address,
 * or the sockaddr supplied in 'nam' -- and hand the mbuf to
 * rip_output().  Error paths (e.g. EISCONN/ENOTCONN) are elided from
 * this excerpt.
 */
978 rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
979 struct mbuf *control, struct thread *td)
985 KASSERT(inp != NULL, ("rip_send: inp == NULL"));
988 * Note: 'dst' reads below are unlocked.
990 if (so->so_state & SS_ISCONNECTED) {
995 dst = inp->inp_faddr.s_addr; /* Unlocked read. */
1001 dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
1003 return (rip_output(m, so, dst));
/*
 * Sysctl handler exporting the list of raw IP PCBs as struct xinpcb
 * records bracketed by two struct xinpgen generation markers.  The
 * classic three-phase pattern: size estimate, snapshot under the read
 * lock into a referenced pointer array, then copy out and release.
 * NOTE(review): several lines (error returns, xig_count setup, the
 * in_pcbref/INP_WLOCK sequence) are elided from this excerpt.
 */
1008 rip_pcblist(SYSCTL_HANDLER_ARGS)
1011 struct inpcb *inp, **inp_list;
1016 * The process of preparing the TCB list is too time-consuming and
1017 * resource-intensive to repeat twice on every request.
/* Size-probe only: report a padded estimate without building the list. */
1019 if (req->oldptr == 0) {
1020 n = V_ripcbinfo.ipi_count;
1021 n += imax(n / 8, 10);
1022 req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
/* This sysctl is read-only. */
1026 if (req->newptr != 0)
1030 * OK, now we're committed to doing something.
1032 INP_INFO_RLOCK(&V_ripcbinfo);
1033 gencnt = V_ripcbinfo.ipi_gencnt;
1034 n = V_ripcbinfo.ipi_count;
1035 INP_INFO_RUNLOCK(&V_ripcbinfo);
/* Leading generation marker. */
1037 xig.xig_len = sizeof xig;
1039 xig.xig_gen = gencnt;
1040 xig.xig_sogen = so_gencnt;
1041 error = SYSCTL_OUT(req, &xig, sizeof xig);
1045 inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
/* Snapshot visible PCBs (credential-filtered) under the read lock. */
1049 INP_INFO_RLOCK(&V_ripcbinfo);
1050 for (inp = LIST_FIRST(V_ripcbinfo.ipi_listhead), i = 0; inp && i < n;
1051 inp = LIST_NEXT(inp, inp_list)) {
1053 if (inp->inp_gencnt <= gencnt &&
1054 cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
1056 inp_list[i++] = inp;
1060 INP_INFO_RUNLOCK(&V_ripcbinfo);
/* Copy each snapshotted PCB out as an xinpcb record. */
1064 for (i = 0; i < n; i++) {
1067 if (inp->inp_gencnt <= gencnt) {
1070 bzero(&xi, sizeof(xi));
1071 xi.xi_len = sizeof xi;
1072 /* XXX should avoid extra copy */
1073 bcopy(inp, &xi.xi_inp, sizeof *inp);
1074 if (inp->inp_socket)
1075 sotoxsocket(inp->inp_socket, &xi.xi_socket);
1077 error = SYSCTL_OUT(req, &xi, sizeof xi);
/* Drop the references taken for the snapshot. */
1081 INP_INFO_WLOCK(&V_ripcbinfo);
1082 for (i = 0; i < n; i++) {
1085 if (!in_pcbrele_rlocked(inp))
1088 INP_INFO_WUNLOCK(&V_ripcbinfo);
1092 * Give the user an updated idea of our state. If the
1093 * generation differs from what we told her before, she knows
1094 * that something happened while we were processing this
1095 * request, and it might be necessary to retry.
1097 INP_INFO_RLOCK(&V_ripcbinfo);
1098 xig.xig_gen = V_ripcbinfo.ipi_gencnt;
1099 xig.xig_sogen = so_gencnt;
1100 xig.xig_count = V_ripcbinfo.ipi_count;
1101 INP_INFO_RUNLOCK(&V_ripcbinfo);
1102 error = SYSCTL_OUT(req, &xig, sizeof xig);
1104 free(inp_list, M_TEMP);
/* Register rip_pcblist as the read-only net.inet.raw.pcblist sysctl. */
1108 SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
1109 CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
1110 rip_pcblist, "S,xinpcb", "List of active raw IP sockets");
1113 struct pr_usrreqs rip_usrreqs = {
1114 .pru_abort = rip_abort,
1115 .pru_attach = rip_attach,
1116 .pru_bind = rip_bind,
1117 .pru_connect = rip_connect,
1118 .pru_control = in_control,
1119 .pru_detach = rip_detach,
1120 .pru_disconnect = rip_disconnect,
1121 .pru_peeraddr = in_getpeeraddr,
1122 .pru_send = rip_send,
1123 .pru_shutdown = rip_shutdown,
1124 .pru_sockaddr = in_getsockaddr,
1125 .pru_sosetlabel = in_pcbsosetlabel,
1126 .pru_close = rip_close,