2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (C) 2018 Universita` di Pisa
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/types.h>
34 #include <sys/ioctl.h>
44 #include <net/netmap_user.h>
45 #define LIBNETMAP_NOTHREADSAFE
46 #include "libnetmap.h"
/* Node in a per-port LIFO list of cleanup actions. Each setup step that
 * needs undoing (extmem attach, file mmap, offset option, ...) pushes one
 * of these; the 'cleanup' callback reverses that step.
 * (struct is truncated in this view — closing lines elided) */
48 struct nmport_cleanup_d {
49 struct nmport_cleanup_d *next;
50 void (*cleanup)(struct nmport_cleanup_d *, struct nmport_d *);
/* Push cleanup node 'c' onto the head of d's cleanup list.
 * (body elided in this view) */
54 nmport_push_cleanup(struct nmport_d *d, struct nmport_cleanup_d *c)
/* Pop the most recently pushed cleanup node, run its callback, and free
 * the node via the port's context allocator. */
61 nmport_pop_cleanup(struct nmport_d *d)
63 struct nmport_cleanup_d *top;
66 d->clist = d->clist->next; /* unlink head before running the callback */
67 (*top->cleanup)(top, d);
68 nmctx_free(d->ctx, top);
/* Run and discard every pending cleanup action for 'd' (LIFO order). */
71 void nmport_do_cleanup(struct nmport_d *d)
73 while (d->clist != NULL) {
74 nmport_pop_cleanup(d);
/* Allocate a zeroed nmport descriptor bound to context 'ctx' and
 * initialize its request header for a NETMAP_REQ_REGISTER request whose
 * body is the embedded 'reg' struct. Returns NULL on allocation failure
 * (error reported through the context). */
78 static struct nmport_d *
79 nmport_new_with_ctx(struct nmctx *ctx)
83 /* allocate a descriptor */
84 d = nmctx_malloc(ctx, sizeof(*d));
86 nmctx_ferror(ctx, "cannot allocate nmport descriptor"); /* allocation failed */
89 memset(d, 0, sizeof(*d));
91 nmreq_header_init(&d->hdr, NETMAP_REQ_REGISTER, &d->reg);
/* NOTE(review): the enclosing definition's opening lines are elided in this
 * view; this fragment presumably belongs to nmport_new() — fetch the
 * default context and delegate to nmport_new_with_ctx(). */
103 struct nmctx *ctx = nmctx_get();
104 return nmport_new_with_ctx(ctx);
/* Free descriptor 'd' through the context allocator it was created with. */
109 nmport_delete(struct nmport_d *d)
111 nmctx_free(d->ctx, d);
/* Cleanup callback paired with nmport_extmem(): detach the extmem option
 * from the request header and free it. No-op when no extmem is attached. */
115 nmport_extmem_cleanup(struct nmport_cleanup_d *c, struct nmport_d *d)
119 if (d->extmem == NULL)
122 nmreq_remove_option(&d->hdr, &d->extmem->nro_opt);
123 nmctx_free(d->ctx, d->extmem);
/* Attach an external memory region [base, base+size) to the port by
 * pushing a NETMAP_REQ_OPT_EXTMEM option onto the request header.
 * Must be called before registration, and at most once per port; a
 * matching cleanup action (nmport_extmem_cleanup) is pushed on success. */
129 nmport_extmem(struct nmport_d *d, void *base, size_t size)
131 struct nmctx *ctx = d->ctx;
132 struct nmport_cleanup_d *clnup = NULL;
134 if (d->register_done) {
135 nmctx_ferror(ctx, "%s: cannot set extmem of an already registered port", d->hdr.nr_name);
140 if (d->extmem != NULL) {
141 nmctx_ferror(ctx, "%s: extmem already in use", d->hdr.nr_name);
146 clnup = (struct nmport_cleanup_d *)nmctx_malloc(ctx, sizeof(*clnup));
148 nmctx_ferror(ctx, "failed to allocate cleanup descriptor");
153 d->extmem = nmctx_malloc(ctx, sizeof(*d->extmem));
154 if (d->extmem == NULL) {
155 nmctx_ferror(ctx, "%s: cannot allocate extmem option", d->hdr.nr_name);
156 nmctx_free(ctx, clnup); /* undo the cleanup-node allocation */
160 memset(d->extmem, 0, sizeof(*d->extmem));
161 d->extmem->nro_usrptr = (uintptr_t)base;
162 d->extmem->nro_opt.nro_reqtype = NETMAP_REQ_OPT_EXTMEM;
163 d->extmem->nro_info.nr_memsize = size;
164 nmreq_push_option(&d->hdr, &d->extmem->nro_opt);
166 clnup->cleanup = nmport_extmem_cleanup;
167 nmport_push_cleanup(d, clnup);
/* Cleanup node for nmport_extmem_from_file(): extends the generic node
 * with the mmap'd pointer and size to unmap (those fields are elided in
 * this view; see the munmap(cc->p, cc->size) in the callback below). */
172 struct nmport_extmem_from_file_cleanup_d {
173 struct nmport_cleanup_d up;
/* Undo nmport_extmem_from_file(): unmap the file-backed region. */
178 void nmport_extmem_from_file_cleanup(struct nmport_cleanup_d *c,
182 struct nmport_extmem_from_file_cleanup_d *cc =
183 (struct nmport_extmem_from_file_cleanup_d *)c;
185 munmap(cc->p, cc->size);
/* mmap the whole of file 'fname' (read/write, shared) and attach the
 * mapping to the port as external memory via nmport_extmem(). A cleanup
 * action that unmaps the file is pushed before the extmem attach, so a
 * later failure unwinds the mapping too.
 * NOTE(review): no close(fd) is visible in this view — presumably the
 * elided lines close the descriptor on both paths; confirm in the full
 * source. */
189 nmport_extmem_from_file(struct nmport_d *d, const char *fname)
191 struct nmctx *ctx = d->ctx;
195 struct nmport_extmem_from_file_cleanup_d *clnup = NULL;
197 clnup = nmctx_malloc(ctx, sizeof(*clnup));
199 nmctx_ferror(ctx, "cannot allocate cleanup descriptor");
204 fd = open(fname, O_RDWR);
206 nmctx_ferror(ctx, "cannot open '%s': %s", fname, strerror(errno));
209 mapsize = lseek(fd, 0, SEEK_END); /* file size == mapping size */
211 nmctx_ferror(ctx, "failed to obtain filesize of '%s': %s", fname, strerror(errno));
214 p = mmap(0, mapsize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
215 if (p == MAP_FAILED) {
216 nmctx_ferror(ctx, "cannot mmap '%s': %s", fname, strerror(errno));
222 clnup->size = mapsize;
223 clnup->up.cleanup = nmport_extmem_from_file_cleanup;
224 nmport_push_cleanup(d, &clnup->up);
226 if (nmport_extmem(d, p, mapsize) < 0)
/* error path: pop the unmap action only if the mmap succeeded */
235 if (clnup->p != MAP_FAILED)
236 nmport_pop_cleanup(d);
238 nmctx_free(ctx, clnup);
/* Return a pointer to the pools-info of the attached extmem option, so the
 * caller can tweak pool sizes before registering; NULL if no extmem set. */
243 struct nmreq_pools_info*
244 nmport_extmem_getinfo(struct nmport_d *d)
246 if (d->extmem == NULL)
248 return &d->extmem->nro_info;
/* Cleanup node for nmport_offset(): remembers the allocated offsets option
 * so it can be detached and freed on unwind. */
251 struct nmport_offset_cleanup_d {
252 struct nmport_cleanup_d up;
253 struct nmreq_opt_offsets *opt;
/* Undo nmport_offset(): remove the offsets option from the header and
 * free it. */
257 nmport_offset_cleanup(struct nmport_cleanup_d *c,
260 struct nmport_offset_cleanup_d *cc =
261 (struct nmport_offset_cleanup_d *)c;
263 nmreq_remove_option(&d->hdr, &cc->opt->nro_opt);
264 nmctx_free(d->ctx, cc->opt);
/* Push a NETMAP_REQ_OPT_OFFSETS option onto the request header, asking
 * for slot offsets with the given initial value, maximum, bit width and
 * minimum gap. A matching cleanup action is pushed on success. */
268 nmport_offset(struct nmport_d *d, uint64_t initial,
269 uint64_t maxoff, uint64_t bits, uint64_t mingap)
271 struct nmctx *ctx = d->ctx;
272 struct nmreq_opt_offsets *opt;
273 struct nmport_offset_cleanup_d *clnup = NULL;
275 clnup = nmctx_malloc(ctx, sizeof(*clnup));
277 nmctx_ferror(ctx, "cannot allocate cleanup descriptor");
282 opt = nmctx_malloc(ctx, sizeof(*opt));
284 nmctx_ferror(ctx, "%s: cannot allocate offset option", d->hdr.nr_name);
285 nmctx_free(ctx, clnup); /* undo the cleanup-node allocation */
289 memset(opt, 0, sizeof(*opt));
290 opt->nro_opt.nro_reqtype = NETMAP_REQ_OPT_OFFSETS;
291 opt->nro_offset_bits = bits;
292 opt->nro_initial_offset = initial;
293 opt->nro_max_offset = maxoff;
294 opt->nro_min_gap = mingap;
295 nmreq_push_option(&d->hdr, &opt->nro_opt);
297 clnup->up.cleanup = nmport_offset_cleanup;
299 nmport_push_cleanup(d, &clnup->up);
304 /* head of the list of options */
/* Singly-linked list of registered option parsers; populated at program
 * startup by the per-option constructors generated by NPOPT_DECL below. */
305 static struct nmreq_opt_parser *nmport_opt_parsers;
/* Name-building helpers for per-option parser functions and descriptors. */
307 #define NPOPT_PARSER(o) nmport_opt_##o##_parser
308 #define NPOPT_DESC(o) nmport_opt_##o##_desc
309 #define NPOPT_NRKEYS(o) (NPOPT_DESC(o).nr_keys)
/* NPOPT_DECL(o, f): declare option 'o' — the parser prototype, its static
 * descriptor (flags f), and a constructor that prepends the descriptor to
 * the global nmport_opt_parsers list before main() runs. No comments may
 * be inserted below: the lines form one backslash-continued macro. */
310 #define NPOPT_DECL(o, f) \
311 static int NPOPT_PARSER(o)(struct nmreq_parse_ctx *); \
312 static struct nmreq_opt_parser NPOPT_DESC(o) = { \
314 .parse = NPOPT_PARSER(o), \
320 static void __attribute__((constructor)) \
321 nmport_opt_##o##_ctor(void) \
323 NPOPT_DESC(o).next = nmport_opt_parsers; \
324 nmport_opt_parsers = &NPOPT_DESC(o); \
/* Static description of one key belonging to an option parser; consumed
 * once at startup by nmport_opt_key_ctor(). (truncated in this view) */
326 struct nmport_key_desc {
327 struct nmreq_opt_parser *option;
/* Startup-time constructor body: copy key description 'k' into its
 * option's key table, and record it as the option's default key when the
 * NMREQ_OPTK_DEFAULT flag is set. */
333 nmport_opt_key_ctor(struct nmport_key_desc *k)
335 struct nmreq_opt_parser *o = k->option;
336 struct nmreq_opt_key *ok;
339 ok = &o->keys[k->id];
342 ok->flags = k->flags;
344 if (ok->flags & NMREQ_OPTK_DEFAULT)
345 o->default_key = ok->id;
/* Name-building helpers for per-key descriptors. */
347 #define NPKEY_DESC(o, k) nmport_opt_##o##_key_##k##_desc
348 #define NPKEY_ID(o, k) (NPKEY_DESC(o, k).id)
/* NPKEY_DECL(o, k, f): declare key 'k' (flags f) of option 'o' and a
 * constructor that registers it via nmport_opt_key_ctor(). No comments may
 * be inserted below: the lines form one backslash-continued macro. */
349 #define NPKEY_DECL(o, k, f) \
350 static struct nmport_key_desc NPKEY_DESC(o, k) = { \
351 .option = &NPOPT_DESC(o), \
356 static void __attribute__((constructor)) \
357 nmport_opt_##o##_key_##k##_ctor(void) \
359 nmport_opt_key_ctor(&NPKEY_DESC(o, k)); \
361 #define nmport_key(p, o, k) ((p)->keys[NPKEY_ID(o, k)]) /* parsed value of key k */
362 #define nmport_defkey(p, o) ((p)->keys[NPOPT_DESC(o).default_key]) /* value of the default key */
/* Declarations of the supported options and their keys. Each NPOPT_DECL /
 * NPKEY_DECL expands to descriptors plus startup constructors (see macros
 * above). NOTE(review): the NPOPT_DECL lines for 'share' and 'conf' are
 * elided in this view. */
365 NPKEY_DECL(share, port, NMREQ_OPTK_DEFAULT|NMREQ_OPTK_MUSTSET)
/* extmem: memory-pool sizing keys plus the mandatory backing file */
366 NPOPT_DECL(extmem, 0)
367 NPKEY_DECL(extmem, file, NMREQ_OPTK_DEFAULT|NMREQ_OPTK_MUSTSET)
368 NPKEY_DECL(extmem, if_num, 0)
369 NPKEY_DECL(extmem, if_size, 0)
370 NPKEY_DECL(extmem, ring_num, 0)
371 NPKEY_DECL(extmem, ring_size, 0)
372 NPKEY_DECL(extmem, buf_num, 0)
373 NPKEY_DECL(extmem, buf_size, 0)
/* conf: ring/slot count overrides for the register request */
375 NPKEY_DECL(conf, rings, 0)
376 NPKEY_DECL(conf, host_rings, 0)
377 NPKEY_DECL(conf, slots, 0)
378 NPKEY_DECL(conf, tx_rings, 0)
379 NPKEY_DECL(conf, rx_rings, 0)
380 NPKEY_DECL(conf, host_tx_rings, 0)
381 NPKEY_DECL(conf, host_rx_rings, 0)
382 NPKEY_DECL(conf, tx_slots, 0)
383 NPKEY_DECL(conf, rx_slots, 0)
/* offset: disabled by default; must be enabled via nmport_enable_option() */
384 NPOPT_DECL(offset, NMREQ_OPTF_DISABLED)
385 NPKEY_DECL(offset, initial, NMREQ_OPTK_DEFAULT|NMREQ_OPTK_MUSTSET)
386 NPKEY_DECL(offset, bits, 0)
/* Parser for the "share" option: resolve the named/numbered memory region
 * and store its mem_id in the register request. Fails if a different
 * mem_id was already set on the port. */
390 NPOPT_PARSER(share)(struct nmreq_parse_ctx *p)
392 struct nmctx *ctx = p->ctx;
393 struct nmport_d *d = p->token; /* token carries the port descriptor */
395 const char *v = nmport_defkey(p, share);
397 mem_id = nmreq_get_mem_id(&v, ctx);
400 if (d->reg.nr_mem_id && d->reg.nr_mem_id != mem_id) {
401 nmctx_ferror(ctx, "cannot set mem_id to %"PRId32", already set to %"PRIu16"",
402 mem_id, d->reg.nr_mem_id);
406 d->reg.nr_mem_id = mem_id;
/* Parser for the "extmem" option: mmap the file given by the default
 * 'file' key, then apply any pool-sizing keys (if/ring/buf num and size)
 * to the pools-info of the freshly attached extmem option. */
411 NPOPT_PARSER(extmem)(struct nmreq_parse_ctx *p)
414 struct nmreq_pools_info *pi;
419 if (nmport_extmem_from_file(d, nmport_key(p, extmem, file)) < 0)
422 pi = &d->extmem->nro_info;
/* walk every key of the option; unset keys are skipped (elided check) */
424 for (i = 0; i < NPOPT_NRKEYS(extmem); i++) {
425 const char *k = p->keys[i];
432 if (i == NPKEY_ID(extmem, if_num)) {
433 pi->nr_if_pool_objtotal = v;
434 } else if (i == NPKEY_ID(extmem, if_size)) {
435 pi->nr_if_pool_objsize = v;
436 } else if (i == NPKEY_ID(extmem, ring_num)) {
437 pi->nr_ring_pool_objtotal = v;
438 } else if (i == NPKEY_ID(extmem, ring_size)) {
439 pi->nr_ring_pool_objsize = v;
440 } else if (i == NPKEY_ID(extmem, buf_num)) {
441 pi->nr_buf_pool_objtotal = v;
442 } else if (i == NPKEY_ID(extmem, buf_size)) {
443 pi->nr_buf_pool_objsize = v;
/* Parser for the "conf" option: copy ring/slot count keys into the
 * register request. 'rings'/'host_rings'/'slots' set both the tx and rx
 * values; the direction-specific keys then override individually.
 * NOTE(review): values are converted with atoi(), which neither detects
 * malformed input nor range-checks against the uint16_t/uint32_t fields —
 * consider strtol with validation. */
450 NPOPT_PARSER(conf)(struct nmreq_parse_ctx *p)
456 if (nmport_key(p, conf, rings) != NULL) {
457 uint16_t nr_rings = atoi(nmport_key(p, conf, rings));
458 d->reg.nr_tx_rings = nr_rings;
459 d->reg.nr_rx_rings = nr_rings;
461 if (nmport_key(p, conf, host_rings) != NULL) {
462 uint16_t nr_rings = atoi(nmport_key(p, conf, host_rings));
463 d->reg.nr_host_tx_rings = nr_rings;
464 d->reg.nr_host_rx_rings = nr_rings;
466 if (nmport_key(p, conf, slots) != NULL) {
467 uint32_t nr_slots = atoi(nmport_key(p, conf, slots));
468 d->reg.nr_tx_slots = nr_slots;
469 d->reg.nr_rx_slots = nr_slots;
471 if (nmport_key(p, conf, tx_rings) != NULL) {
472 d->reg.nr_tx_rings = atoi(nmport_key(p, conf, tx_rings));
474 if (nmport_key(p, conf, rx_rings) != NULL) {
475 d->reg.nr_rx_rings = atoi(nmport_key(p, conf, rx_rings));
477 if (nmport_key(p, conf, host_tx_rings) != NULL) {
478 d->reg.nr_host_tx_rings = atoi(nmport_key(p, conf, host_tx_rings));
480 if (nmport_key(p, conf, host_rx_rings) != NULL) {
481 d->reg.nr_host_rx_rings = atoi(nmport_key(p, conf, host_rx_rings));
483 if (nmport_key(p, conf, tx_slots) != NULL) {
484 d->reg.nr_tx_slots = atoi(nmport_key(p, conf, tx_slots));
486 if (nmport_key(p, conf, rx_slots) != NULL) {
487 d->reg.nr_rx_slots = atoi(nmport_key(p, conf, rx_slots));
/* Parser for the "offset" option: read 'initial' (mandatory default key)
 * and the optional 'bits' key, then install the option via nmport_offset()
 * with max_offset == initial and no minimum gap.
 * NOTE(review): atoi() returns int, so values above INT_MAX are truncated
 * even though the fields are uint64_t — strtoull would be safer. */
493 NPOPT_PARSER(offset)(struct nmreq_parse_ctx *p)
496 uint64_t initial, bits;
500 initial = atoi(nmport_key(p, offset, initial));
502 if (nmport_key(p, offset, bits) != NULL)
503 bits = atoi(nmport_key(p, offset, bits));
505 return nmport_offset(d, initial, initial, bits, 0);
/* Mark the option parser whose prefix matches 'opt' as disabled, so
 * nmport_parse() will reject it. */
510 nmport_disable_option(const char *opt)
512 struct nmreq_opt_parser *p;
514 for (p = nmport_opt_parsers; p != NULL; p = p->next) {
515 if (!strcmp(p->prefix, opt)) {
516 p->flags |= NMREQ_OPTF_DISABLED;
/* Clear the disabled flag on the option parser whose prefix matches 'opt'
 * (e.g. to enable the default-disabled "offset" option). */
522 nmport_enable_option(const char *opt)
524 struct nmreq_opt_parser *p;
526 for (p = nmport_opt_parsers; p != NULL; p = p->next) {
527 if (!strcmp(p->prefix, opt)) {
528 p->flags &= ~NMREQ_OPTF_DISABLED;
/* Parse a full port name into 'd' in three stages: header (port name),
 * register body (mode/flags), then any trailing options against the
 * registered parser list. On failure, undo everything done so far. */
538 nmport_parse(struct nmport_d *d, const char *ifname)
540 const char *scan = ifname;
542 if (nmreq_header_decode(&scan, &d->hdr, d->ctx) < 0) {
546 /* parse the register request */
547 if (nmreq_register_decode(&scan, &d->reg, d->ctx) < 0) {
551 /* parse the options, if any */
552 if (nmreq_options_decode(scan, nmport_opt_parsers, d, d->ctx) < 0) {
/* error path: unwind partial parse state */
558 nmport_undo_parse(d);
/* Reverse nmport_parse(): run all pending cleanup actions, then zero the
 * register body and request header. */
563 nmport_undo_parse(struct nmport_d *d)
565 nmport_do_cleanup(d);
566 memset(&d->reg, 0, sizeof(d->reg));
567 memset(&d->hdr, 0, sizeof(d->hdr));
/* Allocate a new descriptor and parse 'ifname' into it; returns the
 * prepared (but not yet registered) descriptor, or NULL on error after
 * undoing partial work. */
571 nmport_prepare(const char *ifname)
575 /* allocate a descriptor */
580 /* parse the header */
581 if (nmport_parse(d, ifname) < 0)
587 nmport_undo_prepare(d); /* error path */
/* Reverse nmport_prepare(): undo the parse (descriptor free is presumably
 * in the elided lines). */
592 nmport_undo_prepare(struct nmport_d *d)
596 nmport_undo_parse(d);
/* Open /dev/netmap and issue the NIOCCTRL register ioctl built up in
 * d->hdr. On ioctl failure, report any per-option error status before the
 * generic errno message; unwind via nmport_undo_register() on error. */
601 nmport_register(struct nmport_d *d)
603 struct nmctx *ctx = d->ctx;
605 if (d->register_done) {
607 nmctx_ferror(ctx, "%s: already registered", d->hdr.nr_name);
611 d->fd = open("/dev/netmap", O_RDWR);
613 nmctx_ferror(ctx, "/dev/netmap: %s", strerror(errno));
617 if (ioctl(d->fd, NIOCCTRL, &d->hdr) < 0) {
618 struct nmreq_option *o;
619 int option_errors = 0;
/* report each option the kernel rejected */
621 nmreq_foreach_option(&d->hdr, o) {
623 nmctx_ferror(ctx, "%s: option %s: %s",
625 nmreq_option_name(o->nro_reqtype),
626 strerror(o->nro_status));
/* no per-option failure: report the ioctl errno itself */
632 nmctx_ferror(ctx, "%s: %s", d->hdr.nr_name, strerror(errno));
636 d->register_done = 1;
/* error path */
641 nmport_undo_register(d);
/* Reverse nmport_register(): clear the registered flag (fd close is
 * presumably in the elided lines). */
646 nmport_undo_register(struct nmport_d *d)
651 d->register_done = 0;
654 /* lookup the mem_id in the mem-list: do a new mmap() if
655 * not found, reuse existing otherwise
/* Map the netmap memory region for a registered port. Memory regions are
 * shared per-context and refcounted via struct nmem_d: an existing entry
 * with the same mem_id is reused; otherwise a new one is created, backed
 * either by the port's extmem (no mmap needed) or by mmap'ing d->fd.
 * Finally locate the port's netmap_if and scan ring_ofs[] to compute the
 * first/last tx and rx ring indices owned by this port. */
658 nmport_mmap(struct nmport_d *d)
660 struct nmctx *ctx = d->ctx;
661 struct nmem_d *m = NULL;
662 u_int num_tx, num_rx;
667 nmctx_ferror(ctx, "%s: already mapped", d->hdr.nr_name);
671 if (!d->register_done) {
673 nmctx_ferror(ctx, "cannot map unregistered port");
/* look for an existing mapping with the same mem_id */
679 for (m = ctx->mem_descs; m != NULL; m = m->next)
680 if (m->mem_id == d->reg.nr_mem_id)
684 m = nmctx_malloc(ctx, sizeof(*m));
686 nmctx_ferror(ctx, "cannot allocate memory descriptor");
689 memset(m, 0, sizeof(*m));
690 if (d->extmem != NULL) {
/* external memory: reuse the user-supplied region, no mmap */
691 m->mem = (void *)((uintptr_t)d->extmem->nro_usrptr);
692 m->size = d->extmem->nro_info.nr_memsize;
695 m->mem = mmap(NULL, d->reg.nr_memsize, PROT_READ|PROT_WRITE,
696 MAP_SHARED, d->fd, 0);
697 if (m->mem == MAP_FAILED) {
698 nmctx_ferror(ctx, "mmap: %s", strerror(errno));
701 m->size = d->reg.nr_memsize;
703 m->mem_id = d->reg.nr_mem_id;
/* prepend to the context's doubly-linked list of regions */
704 m->next = ctx->mem_descs;
705 if (ctx->mem_descs != NULL)
706 ctx->mem_descs->prev = m;
715 d->nifp = NETMAP_IF(m->mem, d->reg.nr_offset);
/* tx rings: skip leading zero offsets, then count the non-zero run */
717 num_tx = d->reg.nr_tx_rings + d->nifp->ni_host_tx_rings;
718 for (i = 0; i < num_tx && !d->nifp->ring_ofs[i]; i++)
720 d->cur_tx_ring = d->first_tx_ring = i;
721 for ( ; i < num_tx && d->nifp->ring_ofs[i]; i++)
723 d->last_tx_ring = i - 1;
/* rx rings: same scan, offset past the tx entries in ring_ofs[] */
725 num_rx = d->reg.nr_rx_rings + d->nifp->ni_host_rx_rings;
726 for (i = 0; i < num_rx && !d->nifp->ring_ofs[i + num_tx]; i++)
728 d->cur_rx_ring = d->first_rx_ring = i;
729 for ( ; i < num_rx && d->nifp->ring_ofs[i + num_tx]; i++)
731 d->last_rx_ring = i - 1;
/* Reverse nmport_mmap(): drop this port's reference on the shared memory
 * region; when the refcount hits zero, munmap it (unless it is extmem,
 * which the caller owns), unlink it from the context list and free it.
 * Ring bookkeeping fields are reset. */
744 nmport_undo_mmap(struct nmport_d *d)
747 struct nmctx *ctx = d->ctx;
754 if (m->refcount <= 0) {
755 if (!m->is_extmem && m->mem != MAP_FAILED)
756 munmap(m->mem, m->size);
757 /* extract from the list and free */
759 m->next->prev = m->prev;
761 m->prev->next = m->next;
763 ctx->mem_descs = m->next; /* m was the list head */
771 d->first_tx_ring = 0;
773 d->first_rx_ring = 0;
/* Register and mmap an already-prepared descriptor; on failure undo both
 * steps via nmport_undo_open_desc(). */
780 nmport_open_desc(struct nmport_d *d)
782 if (nmport_register(d) < 0)
785 if (nmport_mmap(d) < 0)
790 nmport_undo_open_desc(d); /* error path */
/* Reverse nmport_open_desc(): unregister (the unmap step is presumably in
 * the elided lines). */
795 nmport_undo_open_desc(struct nmport_d *d)
798 nmport_undo_register(d);
/* One-shot open: prepare (parse) 'ifname', then register and mmap the
 * resulting descriptor. Returns the ready-to-use descriptor, or NULL. */
803 nmport_open(const char *ifname)
807 /* prepare the descriptor */
808 d = nmport_prepare(ifname);
812 /* open netmap and register */
813 if (nmport_open_desc(d) < 0)
/* Full teardown of a port obtained from nmport_open(): undo the open
 * (unmap + unregister) and the prepare (cleanups + descriptor free). */
824 nmport_close(struct nmport_d *d)
828 nmport_undo_open_desc(d);
829 nmport_undo_prepare(d);
/* Create a new unregistered, unmapped descriptor that copies the parse
 * results of 'd' (name, register body including mem_id). Options are not
 * cloned; a port using extmem can only be cloned after registration,
 * since before that the extmem option still belongs to d's header. */
833 nmport_clone(struct nmport_d *d)
840 if (d->extmem != NULL && !d->register_done) {
842 nmctx_ferror(ctx, "cannot clone unregistered port that is using extmem");
846 c = nmport_new_with_ctx(ctx);
849 /* copy the output of parse */
851 /* redirect the pointer to the body */
852 c->hdr.nr_body = (uintptr_t)&c->reg;
853 /* options are not cloned */
854 c->hdr.nr_options = 0;
855 c->reg = d->reg; /* this also copies the mem_id */
856 /* put the new port in an un-registered, unmapped state */
859 c->register_done = 0;
863 c->first_tx_ring = 0;
865 c->first_rx_ring = 0;
/* Try to transmit one packet of 'size' bytes on some tx ring of the port,
 * starting from the current ring and wrapping across all owned tx rings.
 * Packets larger than one buffer are split across consecutive slots
 * chained with NS_MOREFRAG; the final fragment clears the flag and the
 * ring head/cur are advanced past it.
 * (function is truncated at the end of this view) */
874 nmport_inject(struct nmport_d *d, const void *buf, size_t size)
876 u_int c, n = d->last_tx_ring - d->first_tx_ring + 1,
879 for (c = 0; c < n ; c++, ri++) {
880 /* compute current ring to use */
881 struct netmap_ring *ring;
885 if (ri > d->last_tx_ring)
886 ri = d->first_tx_ring; /* wrap around to the first owned ring */
887 ring = NETMAP_TXRING(d->nifp, ri);
/* check there are enough free slots for all fragments of the packet */
890 while (rem > ring->nr_buf_size && j != ring->tail) {
891 rem -= ring->nr_buf_size;
892 j = nm_ring_next(ring, j);
894 if (j == ring->tail && rem > 0) /* not enough room: try next ring */
/* emit all full-buffer fragments, each flagged NS_MOREFRAG */
898 idx = ring->slot[i].buf_idx;
899 ring->slot[i].len = ring->nr_buf_size;
900 ring->slot[i].flags = NS_MOREFRAG;
901 nm_pkt_copy(buf, NETMAP_BUF(ring, idx), ring->nr_buf_size);
902 i = nm_ring_next(ring, i);
903 buf = (char *)buf + ring->nr_buf_size;
/* last fragment: remaining bytes, no NS_MOREFRAG */
905 idx = ring->slot[i].buf_idx;
906 ring->slot[i].len = rem;
907 ring->slot[i].flags = 0;
908 nm_pkt_copy(buf, NETMAP_BUF(ring, idx), rem);
909 ring->head = ring->cur = nm_ring_next(ring, i);