2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2018 Vincenzo Maffione
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #if defined(__FreeBSD__)
32 #include <sys/cdefs.h> /* prerequisite */
33 #include <sys/types.h>
34 #include <sys/param.h> /* defines used in kernel.h */
35 #include <sys/filio.h> /* FIONBIO */
36 #include <sys/malloc.h>
37 #include <sys/socketvar.h> /* struct socket */
38 #include <sys/socket.h> /* sockaddrs */
39 #include <sys/sysctl.h>
41 #include <net/if_var.h>
42 #include <net/bpf.h> /* BIOCIMMEDIATE */
43 #include <machine/bus.h> /* bus_dmamap_* */
44 #include <sys/endian.h>
47 #elif defined(__APPLE__)
48 #warning OSX support is only partial
50 #elif defined (_WIN32)
57 #include <net/netmap.h>
58 #include <dev/netmap/netmap_kern.h>
59 #include <dev/netmap/netmap_bdg.h>
/*
 * Convert a legacy 'struct nmreq' (original netmap control API) into the
 * new 'struct nmreq_register', rewriting hdr->nr_name when the legacy
 * request encodes a pipe master/slave endpoint.
 *
 * NOTE(review): this extract is missing several source lines (the return
 * type line, braces, the NETMAP_SW_RING branch body, the error return for
 * an over-long pipe suffix). Comments below describe only what is visible.
 */
62 nmreq_register_from_legacy(struct nmreq *nmr, struct nmreq_header *hdr,
63 struct nmreq_register *req)
/* Straight field-for-field copies from the legacy struct. */
65 req->nr_offset = nmr->nr_offset;
66 req->nr_memsize = nmr->nr_memsize;
67 req->nr_tx_slots = nmr->nr_tx_slots;
68 req->nr_rx_slots = nmr->nr_rx_slots;
69 req->nr_tx_rings = nmr->nr_tx_rings;
70 req->nr_rx_rings = nmr->nr_rx_rings;
/* The legacy API had no way to request extra host rings. */
71 req->nr_host_tx_rings = 0;
72 req->nr_host_rx_rings = 0;
/* Legacy nr_arg2 carried the memory allocator id. */
73 req->nr_mem_id = nmr->nr_arg2;
/* Keep only the ring index bits; mode/flag bits handled below. */
74 req->nr_ringid = nmr->nr_ringid & NETMAP_RING_MASK;
75 if ((nmr->nr_flags & NR_REG_MASK) == NR_REG_DEFAULT) {
76 /* Convert the older nmr->nr_ringid (original
77 * netmap control API) to nmr->nr_flags. */
78 u_int regmode = NR_REG_DEFAULT;
/* NOTE(review): the NETMAP_SW_RING branch body is not visible here
 * (presumably regmode = NR_REG_SW) — confirm against full source. */
79 if (req->nr_ringid & NETMAP_SW_RING) {
81 } else if (req->nr_ringid & NETMAP_HW_RING) {
82 regmode = NR_REG_ONE_NIC;
/* Fallback (else branch): register all NIC rings. */
84 regmode = NR_REG_ALL_NIC;
86 req->nr_mode = regmode;
/* else: caller already used NR_REG_* semantics; keep them. */
88 req->nr_mode = nmr->nr_flags & NR_REG_MASK;
91 /* Fix nr_name, nr_mode and nr_ringid to handle pipe requests. */
92 if (req->nr_mode == NR_REG_PIPE_MASTER ||
93 req->nr_mode == NR_REG_PIPE_SLAVE) {
/* The new API encodes pipes in the port name ("{id" / "}id")
 * rather than in nr_mode, so append the suffix here. */
95 snprintf(suffix, sizeof(suffix), "%c%d",
96 (req->nr_mode == NR_REG_PIPE_MASTER ? '{' : '}'),
98 if (strlen(hdr->nr_name) + strlen(suffix)
99 >= sizeof(hdr->nr_name)) {
100 /* No space for the pipe suffix. */
103 strncat(hdr->nr_name, suffix, strlen(suffix));
/* After renaming, a pipe endpoint registers as a normal port. */
104 req->nr_mode = NR_REG_ALL_NIC;
/* Preserve the remaining legacy flag bits outside NR_REG_MASK. */
107 req->nr_flags = nmr->nr_flags & (~NR_REG_MASK);
108 if (nmr->nr_ringid & NETMAP_NO_TX_POLL) {
109 req->nr_flags |= NR_NO_TX_POLL;
111 if (nmr->nr_ringid & NETMAP_DO_RX_POLL) {
112 req->nr_flags |= NR_DO_RX_POLL;
114 /* nmr->nr_arg1 (nr_pipes) ignored */
115 req->nr_extra_bufs = nmr->nr_arg3;
120 /* Convert the legacy 'nmr' struct into one of the nmreq_xyz structs
121 * (new API). The new struct is dynamically allocated. */
/*
 * Returns the allocated nmreq_header (with hdr->nr_body pointing at the
 * per-request struct) or, per the visible oom/error paths, frees what was
 * allocated on failure. Caller owns hdr and hdr->nr_body.
 *
 * NOTE(review): several lines are missing from this extract (allocation
 * NULL-check for hdr, some case labels/breaks, the error/oom epilogue and
 * return statements); comments describe only what is visible.
 */
122 static struct nmreq_header *
123 nmreq_from_legacy(struct nmreq *nmr, u_long ioctl_cmd)
125 struct nmreq_header *hdr = nm_os_malloc(sizeof(*hdr));
131 /* Sanitize nmr->nr_name by adding the string terminator. */
132 if (ioctl_cmd == NIOCGINFO || ioctl_cmd == NIOCREGIF) {
133 nmr->nr_name[sizeof(nmr->nr_name) - 1] = '\0';
136 /* First prepare the request header. */
137 hdr->nr_version = NETMAP_API; /* new API */
/* NOTE(review): bound is sizeof(nmr->nr_name) (the SOURCE buffer), not
 * sizeof(hdr->nr_name) — safe only if the destination is at least as
 * large; confirm against the struct definitions in net/netmap.h. */
138 strlcpy(hdr->nr_name, nmr->nr_name, sizeof(nmr->nr_name));
139 hdr->nr_options = (uintptr_t)NULL;
140 hdr->nr_body = (uintptr_t)NULL;
/* Dispatch on the legacy sub-command to build the matching new-API body. */
144 switch (nmr->nr_cmd) {
146 /* Regular NIOCREGIF operation. */
147 struct nmreq_register *req = nm_os_malloc(sizeof(*req));
148 if (!req) { goto oom; }
149 hdr->nr_body = (uintptr_t)req;
150 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
151 if (nmreq_register_from_legacy(nmr, hdr, req)) {
156 case NETMAP_BDG_ATTACH: {
/* VALE attach embeds a full register request in req->reg. */
157 struct nmreq_vale_attach *req = nm_os_malloc(sizeof(*req));
158 if (!req) { goto oom; }
159 hdr->nr_body = (uintptr_t)req;
160 hdr->nr_reqtype = NETMAP_REQ_VALE_ATTACH;
161 if (nmreq_register_from_legacy(nmr, hdr, &req->reg)) {
164 /* Fix nr_mode, starting from nr_arg1. */
165 if (nmr->nr_arg1 & NETMAP_BDG_HOST) {
166 req->reg.nr_mode = NR_REG_NIC_SW;
168 req->reg.nr_mode = NR_REG_ALL_NIC;
172 case NETMAP_BDG_DETACH: {
173 hdr->nr_reqtype = NETMAP_REQ_VALE_DETACH;
/* NOTE(review): this allocation's NULL-check is not visible here. */
174 hdr->nr_body = (uintptr_t)nm_os_malloc(sizeof(struct nmreq_vale_detach));
177 case NETMAP_BDG_VNET_HDR:
178 case NETMAP_VNET_HDR_GET: {
/* Shared body for the virtio-net header set/get pair; the reqtype
 * below distinguishes the two directions. */
179 struct nmreq_port_hdr *req = nm_os_malloc(sizeof(*req));
180 if (!req) { goto oom; }
181 hdr->nr_body = (uintptr_t)req;
182 hdr->nr_reqtype = (nmr->nr_cmd == NETMAP_BDG_VNET_HDR) ?
183 NETMAP_REQ_PORT_HDR_SET : NETMAP_REQ_PORT_HDR_GET;
184 req->nr_hdr_len = nmr->nr_arg1;
187 case NETMAP_BDG_NEWIF : {
188 struct nmreq_vale_newif *req = nm_os_malloc(sizeof(*req));
189 if (!req) { goto oom; }
190 hdr->nr_body = (uintptr_t)req;
191 hdr->nr_reqtype = NETMAP_REQ_VALE_NEWIF;
192 req->nr_tx_slots = nmr->nr_tx_slots;
193 req->nr_rx_slots = nmr->nr_rx_slots;
194 req->nr_tx_rings = nmr->nr_tx_rings;
195 req->nr_rx_rings = nmr->nr_rx_rings;
196 req->nr_mem_id = nmr->nr_arg2;
199 case NETMAP_BDG_DELIF: {
/* DELIF carries no body; only the reqtype matters. */
200 hdr->nr_reqtype = NETMAP_REQ_VALE_DELIF;
203 case NETMAP_BDG_POLLING_ON:
204 case NETMAP_BDG_POLLING_OFF: {
205 struct nmreq_vale_polling *req = nm_os_malloc(sizeof(*req));
206 if (!req) { goto oom; }
207 hdr->nr_body = (uintptr_t)req;
208 hdr->nr_reqtype = (nmr->nr_cmd == NETMAP_BDG_POLLING_ON) ?
209 NETMAP_REQ_VALE_POLLING_ENABLE :
210 NETMAP_REQ_VALE_POLLING_DISABLE;
/* Map the legacy registration mode onto a polling CPU mode.
 * NOTE(review): the case labels for this inner switch are missing
 * from this extract. */
211 switch (nmr->nr_flags & NR_REG_MASK) {
213 req->nr_mode = 0; /* invalid */
216 req->nr_mode = NETMAP_POLLING_MODE_MULTI_CPU;
219 req->nr_mode = NETMAP_POLLING_MODE_SINGLE_CPU;
222 req->nr_first_cpu_id = nmr->nr_ringid & NETMAP_RING_MASK;
223 req->nr_num_polling_cpus = nmr->nr_arg1;
226 case NETMAP_PT_HOST_CREATE:
227 case NETMAP_PT_HOST_DELETE: {
228 nm_prerr("Netmap passthrough not supported yet");
/* NIOCGINFO path: nr_cmd selects bridge listing vs. port info. */
236 if (nmr->nr_cmd == NETMAP_BDG_LIST) {
237 struct nmreq_vale_list *req = nm_os_malloc(sizeof(*req));
238 if (!req) { goto oom; }
239 hdr->nr_body = (uintptr_t)req;
240 hdr->nr_reqtype = NETMAP_REQ_VALE_LIST;
241 req->nr_bridge_idx = nmr->nr_arg1;
242 req->nr_port_idx = nmr->nr_arg2;
244 /* Regular NIOCGINFO. */
245 struct nmreq_port_info_get *req = nm_os_malloc(sizeof(*req));
246 if (!req) { goto oom; }
247 hdr->nr_body = (uintptr_t)req;
248 hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET;
249 req->nr_memsize = nmr->nr_memsize;
250 req->nr_tx_slots = nmr->nr_tx_slots;
251 req->nr_rx_slots = nmr->nr_rx_slots;
252 req->nr_tx_rings = nmr->nr_tx_rings;
253 req->nr_rx_rings = nmr->nr_rx_rings;
254 req->nr_host_tx_rings = 0;
255 req->nr_host_rx_rings = 0;
256 req->nr_mem_id = nmr->nr_arg2;
/* Error epilogue: release the body before (presumably) freeing hdr. */
266 nm_os_free((void *)(uintptr_t)hdr->nr_body);
270 nm_prerr("Failed to allocate memory for nmreq_xyz struct");
/*
 * Write back the fields of a new-API 'struct nmreq_register' into the
 * legacy 'struct nmreq', reversing nmreq_register_from_legacy(): mem_id
 * returns via nr_arg2 and extra_bufs via nr_arg3.
 *
 * NOTE(review): the return type line, braces and nr_dev_info handling (if
 * any) are not visible in this extract.
 */
276 nmreq_register_to_legacy(const struct nmreq_register *req, struct nmreq *nmr)
278 nmr->nr_offset = req->nr_offset;
279 nmr->nr_memsize = req->nr_memsize;
280 nmr->nr_tx_slots = req->nr_tx_slots;
281 nmr->nr_rx_slots = req->nr_rx_slots;
282 nmr->nr_tx_rings = req->nr_tx_rings;
283 nmr->nr_rx_rings = req->nr_rx_rings;
284 nmr->nr_arg2 = req->nr_mem_id;
285 nmr->nr_arg3 = req->nr_extra_bufs;
288 /* Convert a nmreq_xyz struct (new API) to the legacy 'nmr' struct.
289 * It also frees the nmreq_xyz struct, as it was allocated by
290 * nmreq_from_legacy(). */
/*
 * NOTE(review): this extract is missing the return-type line, braces,
 * break statements and the trailing free of hdr->nr_body promised by the
 * comment above; annotations describe only what is visible.
 */
292 nmreq_to_legacy(struct nmreq_header *hdr, struct nmreq *nmr)
296 /* We only write-back the fields that the user expects to be
/* Dispatch on the request type built by nmreq_from_legacy(). */
298 switch (hdr->nr_reqtype) {
299 case NETMAP_REQ_REGISTER: {
300 struct nmreq_register *req =
301 (struct nmreq_register *)(uintptr_t)hdr->nr_body;
302 nmreq_register_to_legacy(req, nmr);
305 case NETMAP_REQ_PORT_INFO_GET: {
306 struct nmreq_port_info_get *req =
307 (struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body;
308 nmr->nr_memsize = req->nr_memsize;
309 nmr->nr_tx_slots = req->nr_tx_slots;
310 nmr->nr_rx_slots = req->nr_rx_slots;
311 nmr->nr_tx_rings = req->nr_tx_rings;
312 nmr->nr_rx_rings = req->nr_rx_rings;
313 nmr->nr_arg2 = req->nr_mem_id;
316 case NETMAP_REQ_VALE_ATTACH: {
317 struct nmreq_vale_attach *req =
318 (struct nmreq_vale_attach *)(uintptr_t)hdr->nr_body;
/* The embedded register sub-request carries the write-back data. */
319 nmreq_register_to_legacy(&req->reg, nmr);
322 case NETMAP_REQ_VALE_DETACH: {
325 case NETMAP_REQ_VALE_LIST: {
326 struct nmreq_vale_list *req =
327 (struct nmreq_vale_list *)(uintptr_t)hdr->nr_body;
/* LIST may have resolved a name; copy it back to the caller. */
328 strlcpy(nmr->nr_name, hdr->nr_name, sizeof(nmr->nr_name));
329 nmr->nr_arg1 = req->nr_bridge_idx;
330 nmr->nr_arg2 = req->nr_port_idx;
333 case NETMAP_REQ_PORT_HDR_SET:
334 case NETMAP_REQ_PORT_HDR_GET: {
335 struct nmreq_port_hdr *req =
336 (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
337 nmr->nr_arg1 = req->nr_hdr_len;
340 case NETMAP_REQ_VALE_NEWIF: {
341 struct nmreq_vale_newif *req =
342 (struct nmreq_vale_newif *)(uintptr_t)hdr->nr_body;
343 nmr->nr_tx_slots = req->nr_tx_slots;
344 nmr->nr_rx_slots = req->nr_rx_slots;
345 nmr->nr_tx_rings = req->nr_tx_rings;
346 nmr->nr_rx_rings = req->nr_rx_rings;
347 nmr->nr_arg2 = req->nr_mem_id;
/* These request types have nothing the legacy caller reads back. */
350 case NETMAP_REQ_VALE_DELIF:
351 case NETMAP_REQ_VALE_POLLING_ENABLE:
352 case NETMAP_REQ_VALE_POLLING_DISABLE: {
/*
 * Legacy ioctl entry point: translate old-API ioctls (NIOCREGIF/NIOCGINFO
 * and friends) into a NIOCCTRL call on the new API, then copy results back.
 *
 * NOTE(review): this function is truncated in this extract (the switch
 * statement's case labels, several braces, and everything past the final
 * ifioctl() call are not visible); comments describe only visible lines.
 */
361 netmap_ioctl_legacy(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
369 /* Request for the legacy control API. Convert it to a
370 * NIOCCTRL request. */
371 struct nmreq *nmr = (struct nmreq *) data;
372 struct nmreq_header *hdr;
/* Reject callers built against an API older than version 14. */
374 if (nmr->nr_version < 14) {
375 nm_prerr("Minimum supported API is 14 (requested %u)",
379 hdr = nmreq_from_legacy(nmr, cmd);
380 if (hdr == NULL) { /* out of memory */
/* nr_body_is_user=0: the converted request lives in kernel memory. */
383 error = netmap_ioctl(priv, NIOCCTRL, (caddr_t)hdr, td,
384 /*nr_body_is_user=*/0);
/* Copy results back into the caller's legacy struct. */
386 nmreq_to_legacy(hdr, nmr);
/* nmreq_from_legacy() allocated the body; release it (and,
 * presumably, hdr — the free of hdr itself is not visible here). */
389 nm_os_free((void *)(uintptr_t)hdr->nr_body);
396 struct nm_ifreq *nr = (struct nm_ifreq *)data;
397 error = netmap_bdg_config(nr);
404 /* FIONBIO/FIOASYNC are no-ops. */
411 /* Ignore these commands. */
414 default: /* allow device-specific ioctls */
/* Forward unknown ioctls to the interface named in the request,
 * via a zeroed dummy socket (FreeBSD-specific path). */
416 struct nmreq *nmr = (struct nmreq *)data;
417 struct ifnet *ifp = ifunit_ref(nmr->nr_name);
423 bzero(&so, sizeof(so));
424 so.so_vnet = ifp->if_vnet;
425 // so->so_proto not null.
426 error = ifioctl(&so, cmd, data, td);