14 #include <net/netmap_user.h>
15 #define LIBNETMAP_NOTHREADSAFE
16 #include "libnetmap.h"
/*
 * Node in a per-port cleanup stack (d->clist): each entry carries a
 * callback invoked with (entry, port) during port teardown.
 */
18 struct nmport_cleanup_d {
19 struct nmport_cleanup_d *next;
20 void (*cleanup)(struct nmport_cleanup_d *, struct nmport_d *);
/* Push cleanup entry 'c' onto the port's cleanup stack (d->clist).
 * NOTE(review): function body not visible in this view. */
24 nmport_push_cleanup(struct nmport_d *d, struct nmport_cleanup_d *c)
/* Pop the top cleanup entry: unlink it, run its callback, then free it
 * through the port's context allocator.
 * NOTE(review): the assignment of 'top' (presumably top = d->clist) is
 * on a line not visible in this view. */
31 nmport_pop_cleanup(struct nmport_d *d)
33 struct nmport_cleanup_d *top;
36 d->clist = d->clist->next;
37 (*top->cleanup)(top, d);
38 nmctx_free(d->ctx, top);
/* Run and free every pending cleanup entry for the port. */
41 void nmport_do_cleanup(struct nmport_d *d)
43 while (d->clist != NULL) {
44 nmport_pop_cleanup(d);
/* Allocate and zero a new nmport descriptor bound to 'ctx', and
 * pre-initialize its request header for NETMAP_REQ_REGISTER with
 * d->reg as the request body. Reports an error via the context on
 * allocation failure (return statements not visible in this view). */
48 static struct nmport_d *
49 nmport_new_with_ctx(struct nmctx *ctx)
53 /* allocate a descriptor */
54 d = nmctx_malloc(ctx, sizeof(*d));
56 nmctx_ferror(ctx, "cannot allocate nmport descriptor");
59 memset(d, 0, sizeof(*d));
61 nmreq_header_init(&d->hdr, NETMAP_REQ_REGISTER, &d->reg);
/* Allocate a descriptor bound to the default context from nmctx_get().
 * NOTE(review): the enclosing function's signature (presumably
 * nmport_new) is on lines not visible in this view. */
73 struct nmctx *ctx = nmctx_get();
74 return nmport_new_with_ctx(ctx);
/* Free a descriptor allocated by nmport_new_with_ctx(). */
79 nmport_delete(struct nmport_d *d)
81 nmctx_free(d->ctx, d)
/* Cleanup hook installed by nmport_extmem(): remove the extmem option
 * from the request chain and free it. No-op if no extmem is set. */
85 nmport_extmem_cleanup(struct nmport_cleanup_d *c, struct nmport_d *d)
89 if (d->extmem == NULL)
92 nmreq_remove_option(&d->hdr, &d->extmem->nro_opt);
93 nmctx_free(d->ctx, d->extmem);
/* Attach a caller-supplied memory region [base, base+size) to the port
 * as a NETMAP_REQ_OPT_EXTMEM option, and push a cleanup hook that will
 * detach/free it on teardown. Must be called before registration, and
 * at most once per port. Error returns are on lines not visible here. */
99 nmport_extmem(struct nmport_d *d, void *base, size_t size)
101 struct nmctx *ctx = d->ctx;
102 struct nmport_cleanup_d *clnup = NULL;
/* extmem must be configured before the port is registered */
104 if (d->register_done) {
105 nmctx_ferror(ctx, "%s: cannot set extmem of an already registered port", d->hdr.nr_name);
/* only one extmem region per port */
110 if (d->extmem != NULL) {
111 nmctx_ferror(ctx, "%s: extmem already in use", d->hdr.nr_name);
/* allocate the cleanup node first, so a later failure can free it
 * without leaving partial state behind */
116 clnup = (struct nmport_cleanup_d *)nmctx_malloc(ctx, sizeof(*clnup));
118 nmctx_ferror(ctx, "failed to allocate cleanup descriptor");
123 d->extmem = nmctx_malloc(ctx, sizeof(*d->extmem));
124 if (d->extmem == NULL) {
125 nmctx_ferror(ctx, "%s: cannot allocate extmem option", d->hdr.nr_name);
126 nmctx_free(ctx, clnup);
/* fill the option and link it into the request's option chain */
130 memset(d->extmem, 0, sizeof(*d->extmem));
131 d->extmem->nro_usrptr = (uintptr_t)base;
132 d->extmem->nro_opt.nro_reqtype = NETMAP_REQ_OPT_EXTMEM;
133 d->extmem->nro_info.nr_memsize = size;
134 nmreq_push_option(&d->hdr, &d->extmem->nro_opt);
/* register the undo handler */
136 clnup->cleanup = nmport_extmem_cleanup;
137 nmport_push_cleanup(d, clnup);
/* Cleanup descriptor for nmport_extmem_from_file(): extends the base
 * nmport_cleanup_d ('up') with the mmap'ed region. The 'p'/'size'
 * members used below are declared on lines not visible in this view. */
142 struct nmport_extmem_from_file_cleanup_d {
143 struct nmport_cleanup_d up;
/* Cleanup hook: unmap the file-backed region recorded by
 * nmport_extmem_from_file(). */
148 void nmport_extmem_from_file_cleanup(struct nmport_cleanup_d *c,
151 struct nmport_extmem_from_file_cleanup_d *cc =
152 (struct nmport_extmem_from_file_cleanup_d *)c;
154 munmap(cc->p, cc->size);
/* Map the whole of file 'fname' read/write (MAP_SHARED) and attach it
 * to the port as extmem via nmport_extmem(). A cleanup hook is pushed
 * so the mapping is released on teardown. Error returns and some
 * unwinding lines are not visible in this view. */
158 nmport_extmem_from_file(struct nmport_d *d, const char *fname)
160 struct nmctx *ctx = d->ctx;
164 struct nmport_extmem_from_file_cleanup_d *clnup = NULL;
166 clnup = nmctx_malloc(ctx, sizeof(*clnup));
168 nmctx_ferror(ctx, "cannot allocate cleanup descriptor");
173 fd = open(fname, O_RDWR);
175 nmctx_ferror(ctx, "cannot open '%s': %s", fname, strerror(errno));
/* the file size determines the size of the mapping */
178 mapsize = lseek(fd, 0, SEEK_END);
180 nmctx_ferror(ctx, "failed to obtain filesize of '%s': %s", fname, strerror(errno));
183 p = mmap(0, mapsize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
184 if (p == MAP_FAILED) {
185 nmctx_ferror(ctx, "cannot mmap '%s': %s", fname, strerror(errno));
/* record the mapping so the cleanup hook can munmap it later */
191 clnup->size = mapsize;
192 clnup->up.cleanup = nmport_extmem_from_file_cleanup;
193 nmport_push_cleanup(d, &clnup->up);
195 if (nmport_extmem(d, p, mapsize) < 0)
/* error unwind: pop the cleanup only if the mapping succeeded,
 * otherwise just free the unused cleanup node */
204 if (clnup->p != MAP_FAILED)
205 nmport_pop_cleanup(d);
207 nmctx_free(ctx, clnup);
/* Return the pools-info of the port's extmem option. When no extmem is
 * configured the function bails out early (the value returned on that
 * path is on a line not visible in this view). */
212 struct nmreq_pools_info*
213 nmport_extmem_getinfo(struct nmport_d *d)
215 if (d->extmem == NULL)
217 return &d->extmem->nro_info;
220 /* head of the list of options */
221 static struct nmreq_opt_parser *nmport_opt_parsers;
/* Name helpers for the per-option parser function and its descriptor. */
223 #define NPOPT_PARSER(o) nmport_opt_##o##_parser
224 #define NPOPT_DESC(o) nmport_opt_##o##_desc
225 #define NPOPT_NRKEYS(o) (NPOPT_DESC(o).nr_keys)
/* Declare an option parser plus its descriptor, and register the
 * descriptor in nmport_opt_parsers through a GCC/Clang
 * __attribute__((constructor)) hook that runs before main(). Some
 * initializer fields of the descriptor are on lines not visible here. */
226 #define NPOPT_DECL(o, f) \
227 static int NPOPT_PARSER(o)(struct nmreq_parse_ctx *); \
228 static struct nmreq_opt_parser NPOPT_DESC(o) = { \
230 .parse = NPOPT_PARSER(o), \
236 static void __attribute__((constructor)) \
237 nmport_opt_##o##_ctor(void) \
239 NPOPT_DESC(o).next = nmport_opt_parsers; \
240 nmport_opt_parsers = &NPOPT_DESC(o); \
242 struct nmport_key_desc {
/* the option parser this key belongs to (see nmport_opt_key_ctor);
 * further members (id, flags used below) are on lines not visible
 * in this view */
243 struct nmreq_opt_parser *option;
/* Install key descriptor 'k' into its owning option's key table; a key
 * flagged NMREQ_OPTK_DEFAULT becomes that option's default key. */
249 nmport_opt_key_ctor(struct nmport_key_desc *k)
251 struct nmreq_opt_parser *o = k->option;
252 struct nmreq_opt_key *ok;
255 ok = &o->keys[k->id];
258 ok->flags = k->flags;
/* the default key is the one used when no explicit key= is given */
260 if (ok->flags & NMREQ_OPTK_DEFAULT)
261 o->default_key = ok->id;
/* Name helpers for per-key descriptors. */
263 #define NPKEY_DESC(o, k) nmport_opt_##o##_key_##k##_desc
264 #define NPKEY_ID(o, k) (NPKEY_DESC(o, k).id)
/* Declare a key for option 'o' and register it at startup via a
 * constructor hook calling nmport_opt_key_ctor(). */
265 #define NPKEY_DECL(o, k, f) \
266 static struct nmport_key_desc NPKEY_DESC(o, k) = { \
267 .option = &NPOPT_DESC(o), \
272 static void __attribute__((constructor)) \
273 nmport_opt_##o##_key_##k##_ctor(void) \
275 nmport_opt_key_ctor(&NPKEY_DESC(o, k)); \
277 #define nmport_key(p, o, k) ((p)->keys[NPKEY_ID(o, k)])
278 #define nmport_defkey(p, o) ((p)->keys[NPOPT_DESC(o).default_key])
/* "share" option: single mandatory default key "port"; its value is
 * resolved to a mem_id by NPOPT_PARSER(share) below. */
281 NPKEY_DECL(share, port, NMREQ_OPTK_DEFAULT|NMREQ_OPTK_MUSTSET)
/* "extmem" option: mandatory backing "file" plus optional pool
 * geometry overrides, consumed by NPOPT_PARSER(extmem) below. */
282 NPOPT_DECL(extmem, 0)
283 NPKEY_DECL(extmem, file, NMREQ_OPTK_DEFAULT|NMREQ_OPTK_MUSTSET)
284 NPKEY_DECL(extmem, if_num, 0)
285 NPKEY_DECL(extmem, if_size, 0)
286 NPKEY_DECL(extmem, ring_num, 0)
287 NPKEY_DECL(extmem, ring_size, 0)
288 NPKEY_DECL(extmem, buf_num, 0)
289 NPKEY_DECL(extmem, buf_size, 0)
/* "conf" option keys: ring/slot count overrides, consumed by
 * NPOPT_PARSER(conf) below. */
291 NPKEY_DECL(conf, rings, 0)
292 NPKEY_DECL(conf, host_rings, 0)
293 NPKEY_DECL(conf, slots, 0)
294 NPKEY_DECL(conf, tx_rings, 0)
295 NPKEY_DECL(conf, rx_rings, 0)
296 NPKEY_DECL(conf, host_tx_rings, 0)
297 NPKEY_DECL(conf, host_rx_rings, 0)
298 NPKEY_DECL(conf, tx_slots, 0)
299 NPKEY_DECL(conf, rx_slots, 0)
/* Parser for the "share" option: resolve the key value to a memory
 * region id via nmreq_get_mem_id() and store it in the register
 * request, refusing to override a different mem_id already set. */
303 NPOPT_PARSER(share)(struct nmreq_parse_ctx *p)
305 struct nmctx *ctx = p->ctx;
306 struct nmport_d *d = p->token;
308 const char *v = nmport_defkey(p, share);
310 mem_id = nmreq_get_mem_id(&v, ctx);
/* a conflicting mem_id (e.g. set by an earlier option) is an error */
313 if (d->reg.nr_mem_id && d->reg.nr_mem_id != mem_id) {
314 nmctx_ferror(ctx, "cannot set mem_id to %"PRId32", already set to %"PRIu16"",
315 mem_id, d->reg.nr_mem_id);
319 d->reg.nr_mem_id = mem_id;
/* Parser for the "extmem" option: map the backing file, then apply any
 * pool-geometry keys (if/ring/buf num and size) to the option's
 * nmreq_pools_info. */
324 NPOPT_PARSER(extmem)(struct nmreq_parse_ctx *p)
327 struct nmreq_pools_info *pi;
332 if (nmport_extmem_from_file(d, nmport_key(p, extmem, file)) < 0)
335 pi = &d->extmem->nro_info;
/* walk all keys of the option; NOTE(review): the conversion of the
 * key string 'k' into the numeric value 'v' happens on lines not
 * visible in this view */
337 for (i = 0; i < NPOPT_NRKEYS(extmem); i++) {
338 const char *k = p->keys[i];
345 if (i == NPKEY_ID(extmem, if_num)) {
346 pi->nr_if_pool_objtotal = v;
347 } else if (i == NPKEY_ID(extmem, if_size)) {
348 pi->nr_if_pool_objsize = v;
349 } else if (i == NPKEY_ID(extmem, ring_num)) {
350 pi->nr_ring_pool_objtotal = v;
351 } else if (i == NPKEY_ID(extmem, ring_size)) {
352 pi->nr_ring_pool_objsize = v;
353 } else if (i == NPKEY_ID(extmem, buf_num)) {
354 pi->nr_buf_pool_objtotal = v;
355 } else if (i == NPKEY_ID(extmem, buf_size)) {
356 pi->nr_buf_pool_objsize = v;
/* Parser for the "conf" option: copy ring/slot overrides into the
 * register request. "rings"/"host_rings"/"slots" set both the tx and
 * rx variants; the direction-specific keys below override only one.
 * NOTE(review): atoi() silently yields 0 on malformed input — no
 * validation is visible here. */
363 NPOPT_PARSER(conf)(struct nmreq_parse_ctx *p)
369 if (nmport_key(p, conf, rings) != NULL) {
370 uint16_t nr_rings = atoi(nmport_key(p, conf, rings));
371 d->reg.nr_tx_rings = nr_rings;
372 d->reg.nr_rx_rings = nr_rings;
374 if (nmport_key(p, conf, host_rings) != NULL) {
375 uint16_t nr_rings = atoi(nmport_key(p, conf, host_rings));
376 d->reg.nr_host_tx_rings = nr_rings;
377 d->reg.nr_host_rx_rings = nr_rings;
379 if (nmport_key(p, conf, slots) != NULL) {
380 uint32_t nr_slots = atoi(nmport_key(p, conf, slots));
381 d->reg.nr_tx_slots = nr_slots;
382 d->reg.nr_rx_slots = nr_slots;
/* direction-specific overrides take precedence by being applied last */
384 if (nmport_key(p, conf, tx_rings) != NULL) {
385 d->reg.nr_tx_rings = atoi(nmport_key(p, conf, tx_rings));
387 if (nmport_key(p, conf, rx_rings) != NULL) {
388 d->reg.nr_rx_rings = atoi(nmport_key(p, conf, rx_rings));
390 if (nmport_key(p, conf, host_tx_rings) != NULL) {
391 d->reg.nr_host_tx_rings = atoi(nmport_key(p, conf, host_tx_rings));
393 if (nmport_key(p, conf, host_rx_rings) != NULL) {
394 d->reg.nr_host_rx_rings = atoi(nmport_key(p, conf, host_rx_rings));
396 if (nmport_key(p, conf, tx_slots) != NULL) {
397 d->reg.nr_tx_slots = atoi(nmport_key(p, conf, tx_slots));
399 if (nmport_key(p, conf, rx_slots) != NULL) {
400 d->reg.nr_rx_slots = atoi(nmport_key(p, conf, rx_slots));
/* Set NMREQ_OPTF_DISABLED on the parser whose prefix matches 'opt'. */
406 nmport_disable_option(const char *opt)
408 struct nmreq_opt_parser *p;
410 for (p = nmport_opt_parsers; p != NULL; p = p->next) {
411 if (!strcmp(p->prefix, opt)) {
412 p->flags |= NMREQ_OPTF_DISABLED;
/* Clear NMREQ_OPTF_DISABLED on the parser whose prefix matches 'opt'. */
418 nmport_enable_option(const char *opt)
420 struct nmreq_opt_parser *p;
422 for (p = nmport_opt_parsers; p != NULL; p = p->next) {
423 if (!strcmp(p->prefix, opt)) {
424 p->flags &= ~NMREQ_OPTF_DISABLED;
/* Parse 'ifname' into the descriptor in three stages — header,
 * register request, then trailing options — undoing any partial
 * state on failure. */
434 nmport_parse(struct nmport_d *d, const char *ifname)
436 const char *scan = ifname;
/* parse the header (port name etc.) */
438 if (nmreq_header_decode(&scan, &d->hdr, d->ctx) < 0) {
442 /* parse the register request */
443 if (nmreq_register_decode(&scan, &d->reg, d->ctx) < 0) {
447 /* parse the options, if any */
448 if (nmreq_options_decode(scan, nmport_opt_parsers, d, d->ctx) < 0) {
/* error path: revert whatever the earlier stages set up */
454 nmport_undo_parse(d);
/* Revert nmport_parse(): run pending cleanups, then zero the register
 * request and the request header. */
459 nmport_undo_parse(struct nmport_d *d)
461 nmport_do_cleanup(d);
462 memset(&d->reg, 0, sizeof(d->reg));
463 memset(&d->hdr, 0, sizeof(d->hdr));
/* Allocate a fresh descriptor and parse 'ifname' into it; the parse
 * failure path undoes the preparation (the allocation call and return
 * statements are on lines not visible in this view). */
467 nmport_prepare(const char *ifname)
471 /* allocate a descriptor */
476 /* parse the header */
477 if (nmport_parse(d, ifname) < 0)
483 nmport_undo_prepare(d);
/* Revert nmport_prepare(): undo the parse. NOTE(review): any freeing
 * of the descriptor itself happens on lines not visible in this view. */
488 nmport_undo_prepare(struct nmport_d *d)
492 nmport_undo_parse(d);
/* Open /dev/netmap and submit the NIOCCTRL register request built in
 * d->hdr. On ioctl failure, per-option error statuses are reported
 * before falling back to the generic errno message. */
497 nmport_register(struct nmport_d *d)
499 struct nmctx *ctx = d->ctx;
/* registering the same descriptor twice is an error */
501 if (d->register_done) {
503 nmctx_ferror(ctx, "%s: already registered", d->hdr.nr_name);
507 d->fd = open("/dev/netmap", O_RDWR);
509 nmctx_ferror(ctx, "/dev/netmap: %s", strerror(errno));
513 if (ioctl(d->fd, NIOCCTRL, &d->hdr) < 0) {
514 struct nmreq_option *o;
515 int option_errors = 0;
/* report the kernel's status for each option it rejected */
517 nmreq_foreach_option(&d->hdr, o) {
519 nmctx_ferror(ctx, "%s: option %s: %s",
521 nmreq_option_name(o->nro_reqtype),
522 strerror(o->nro_status));
/* no option-specific error: report the generic ioctl errno */
528 nmctx_ferror(ctx, "%s: %s", d->hdr.nr_name, strerror(errno));
532 d->register_done = 1;
/* error path */
537 nmport_undo_register(d);
/* Revert nmport_register(). NOTE(review): closing of d->fd presumably
 * happens on lines not visible in this view — confirm. */
542 nmport_undo_register(struct nmport_d *d)
547 d->register_done = 0;
550 /* lookup the mem_id in the mem-list: do a new mmap() if
551 * not found, reuse existing otherwise
 */
554 nmport_mmap(struct nmport_d *d)
556 struct nmctx *ctx = d->ctx;
557 struct nmem_d *m = NULL;
558 u_int num_tx, num_rx;
/* mapping the same descriptor twice is an error */
563 nmctx_ferror(ctx, "%s: already mapped", d->hdr.nr_name);
/* the register ioctl must have filled nr_memsize/nr_mem_id first */
567 if (!d->register_done) {
569 nmctx_ferror(ctx, "cannot map unregistered port");
/* reuse an existing mapping with the same mem_id, if any */
575 for (m = ctx->mem_descs; m != NULL; m = m->next)
576 if (m->mem_id == d->reg.nr_mem_id)
/* none found: create a new memory descriptor */
580 m = nmctx_malloc(ctx, sizeof(*m));
582 nmctx_ferror(ctx, "cannot allocate memory descriptor");
585 memset(m, 0, sizeof(*m));
/* extmem ports use the caller-supplied region; others mmap the fd */
586 if (d->extmem != NULL) {
587 m->mem = (void *)d->extmem->nro_usrptr;
588 m->size = d->extmem->nro_info.nr_memsize;
591 m->mem = mmap(NULL, d->reg.nr_memsize, PROT_READ|PROT_WRITE,
592 MAP_SHARED, d->fd, 0);
593 if (m->mem == MAP_FAILED) {
594 nmctx_ferror(ctx, "mmap: %s", strerror(errno));
597 m->size = d->reg.nr_memsize;
/* link the new descriptor at the head of the context's list */
599 m->mem_id = d->reg.nr_mem_id;
600 m->next = ctx->mem_descs;
601 if (ctx->mem_descs != NULL)
602 ctx->mem_descs->prev = m;
/* locate this port's netmap_if, then find the contiguous run of its
 * rings: skip leading zero ring_ofs[] entries, then span the
 * non-zero run, for tx and rx separately */
611 d->nifp = NETMAP_IF(m->mem, d->reg.nr_offset);
613 num_tx = d->reg.nr_tx_rings + d->nifp->ni_host_tx_rings;
614 for (i = 0; i < num_tx && !d->nifp->ring_ofs[i]; i++)
616 d->first_tx_ring = i;
617 for ( ; i < num_tx && d->nifp->ring_ofs[i]; i++)
619 d->last_tx_ring = i - 1;
/* rx ring offsets follow the tx ones in ring_ofs[] */
621 num_rx = d->reg.nr_rx_rings + d->nifp->ni_host_rx_rings;
622 for (i = 0; i < num_rx && !d->nifp->ring_ofs[i + num_tx]; i++)
624 d->first_rx_ring = i;
625 for ( ; i < num_rx && d->nifp->ring_ofs[i + num_tx]; i++)
627 d->last_rx_ring = i - 1;
/* Drop this port's use of its memory descriptor; once the refcount is
 * exhausted, unmap the region (unless it is caller-owned extmem) and
 * unlink/free the descriptor. Some lines (refcount decrement, m
 * lookup, free call) are not visible in this view. */
640 nmport_undo_mmap(struct nmport_d *d)
643 struct nmctx *ctx = d->ctx;
650 if (m->refcount <= 0) {
/* extmem regions belong to the caller; only munmap our own maps */
651 if (!m->is_extmem && m->mem != MAP_FAILED)
652 munmap(m->mem, m->size);
653 /* extract from the list and free */
655 m->next->prev = m->prev;
657 m->prev->next = m->next;
659 ctx->mem_descs = m->next;
/* reset the ring bookkeeping in the port descriptor */
667 d->first_tx_ring = 0;
669 d->first_rx_ring = 0;
/* Register the prepared port and map its memory; undo both on failure. */
676 nmport_open_desc(struct nmport_d *d)
678 if (nmport_register(d) < 0)
681 if (nmport_mmap(d) < 0)
/* error path */
686 nmport_undo_open_desc(d);
/* Revert nmport_open_desc(): unregister the port. NOTE(review): the
 * matching unmap presumably happens on lines not visible here. */
691 nmport_undo_open_desc(struct nmport_d *d)
694 nmport_undo_register(d);
/* One-shot open: parse 'ifname' into a fresh descriptor, then register
 * and mmap it. */
699 nmport_open(const char *ifname)
703 /* prepare the descriptor */
704 d = nmport_prepare(ifname);
708 /* open netmap and register */
709 if (nmport_open_desc(d) < 0)
/* Full teardown: undo the open (register/map) then undo the prepare. */
720 nmport_close(struct nmport_d *d)
724 nmport_undo_open_desc(d);
725 nmport_undo_prepare(d);
/* Duplicate the parsed state of 'd' into a fresh descriptor left in an
 * un-registered, un-mapped state; options are not cloned. */
729 nmport_clone(struct nmport_d *d)
/* cloning an unregistered extmem port is refused — NOTE(review):
 * presumably because the pending extmem option cannot be safely
 * duplicated; confirm against the full source */
736 if (d->extmem != NULL && !d->register_done) {
738 nmctx_ferror(ctx, "cannot clone unregistered port that is using extmem");
742 c = nmport_new_with_ctx(ctx);
745 /* copy the output of parse */
747 /* redirect the pointer to the body */
748 c->hdr.nr_body = (uintptr_t)&c->reg;
749 /* options are not cloned */
750 c->hdr.nr_options = 0;
751 c->reg = d->reg; /* this also copies the mem_id */
752 /* put the new port in an un-registered, unmapped state */
755 c->register_done = 0;
759 c->first_tx_ring = 0;
761 c->first_rx_ring = 0;
/* Transmit one packet of 'size' bytes, splitting it across as many tx
 * slots as needed (an NS_MOREFRAG chain terminated by a flags==0
 * slot), trying each of the port's tx rings in turn. Several lines
 * (initialization of ri/i/j/rem, the fragment-copy loop header, and
 * the return statements) are not visible in this view. */
770 nmport_inject(struct nmport_d *d, const void *buf, size_t size)
772 u_int c, n = d->last_tx_ring - d->first_tx_ring + 1,
775 for (c = 0; c < n ; c++, ri++) {
776 /* compute current ring to use */
777 struct netmap_ring *ring;
/* wrap around past the last tx ring */
781 if (ri > d->last_tx_ring)
782 ri = d->first_tx_ring;
783 ring = NETMAP_TXRING(d->nifp, ri);
/* check there are enough free slots for all fragments before writing */
786 while (rem > ring->nr_buf_size && j != ring->tail) {
787 rem -= ring->nr_buf_size;
788 j = nm_ring_next(ring, j);
790 if (j == ring->tail && rem > 0)
/* copy a full-buffer fragment, flagged NS_MOREFRAG to chain it */
794 idx = ring->slot[i].buf_idx;
795 ring->slot[i].len = ring->nr_buf_size;
796 ring->slot[i].flags = NS_MOREFRAG;
797 nm_pkt_copy(buf, NETMAP_BUF(ring, idx), ring->nr_buf_size);
798 i = nm_ring_next(ring, i);
799 buf = (char *)buf + ring->nr_buf_size;
/* last fragment: remaining bytes, flags=0 ends the chain */
801 idx = ring->slot[i].buf_idx;
802 ring->slot[i].len = rem;
803 ring->slot[i].flags = 0;
804 nm_pkt_copy(buf, NETMAP_BUF(ring, idx), rem);
/* publish the filled slots to the kernel */
805 ring->head = ring->cur = nm_ring_next(ring, i);