 * Copyright (C) 2012-2013 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
#endif /* __APPLE__ */

#include <sys/cdefs.h>		/* prerequisite */
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/malloc.h>
#include <vm/vm.h>		/* vtophys */
#include <vm/pmap.h>		/* vtophys */
#include <sys/socket.h>		/* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <net/if_var.h>
#include <machine/bus.h>	/* bus_dmamap_* */
#endif /* __FreeBSD__ */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include "netmap_mem2.h"
#define NMA_LOCK_INIT(n)	sema_init(&(n)->nm_mtx, 1)
#define NMA_LOCK_DESTROY(n)
#define NMA_LOCK(n)		down(&(n)->nm_mtx)
#define NMA_UNLOCK(n)		up(&(n)->nm_mtx)

#define NMA_LOCK_INIT(n)	mtx_init(&(n)->nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF)
#define NMA_LOCK_DESTROY(n)	mtx_destroy(&(n)->nm_mtx)
#define NMA_LOCK(n)		mtx_lock(&(n)->nm_mtx)
#define NMA_UNLOCK(n)		mtx_unlock(&(n)->nm_mtx)
struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
	[NETMAP_RING_POOL] = {
		.num = NETMAP_BUF_MAX_NUM,
 * nm_mem is the memory allocator used for all physical interfaces
 * running in netmap mode.
 * Virtual (VALE) ports each have their own allocator.
static int netmap_mem_global_config(struct netmap_mem_d *nmd);
static int netmap_mem_global_finalize(struct netmap_mem_d *nmd);
static void netmap_mem_global_deref(struct netmap_mem_d *nmd);

struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
		.objminsize = sizeof(struct netmap_if),
		.nummin = 10,		/* don't be stingy */
		.nummax = 10000,	/* XXX very large */
	[NETMAP_RING_POOL] = {
		.name = "netmap_ring",
		.objminsize = sizeof(struct netmap_ring),
		.objmaxsize = 32*PAGE_SIZE,
	[NETMAP_BUF_POOL] = {
		.name = "netmap_buf",
		.nummax = 1000000,	/* one million! */
	.config = netmap_mem_global_config,
	.finalize = netmap_mem_global_finalize,
	.deref = netmap_mem_global_deref,
// XXX logically belongs to nm_mem
struct lut_entry *netmap_buffer_lut;	/* exported */
/* blueprint for the private memory allocators */
static int netmap_mem_private_config(struct netmap_mem_d *nmd);
static int netmap_mem_private_finalize(struct netmap_mem_d *nmd);
static void netmap_mem_private_deref(struct netmap_mem_d *nmd);

const struct netmap_mem_d nm_blueprint = {
		.objminsize = sizeof(struct netmap_if),
	[NETMAP_RING_POOL] = {
		.objminsize = sizeof(struct netmap_ring),
		.objmaxsize = 32*PAGE_SIZE,
	[NETMAP_BUF_POOL] = {
		.nummax = 1000000,	/* one million! */
	.config = netmap_mem_private_config,
	.finalize = netmap_mem_private_finalize,
	.deref = netmap_mem_private_deref,

	.flags = NETMAP_MEM_PRIVATE,
/* memory allocator related sysctls */

#define STRINGIFY(x) #x

#define DECLARE_SYSCTLS(id, name) \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s")

SYSCTL_DECL(_dev_netmap);
DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
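/*
 * Each DECLARE_SYSCTLS(id, name) invocation above expands to four nodes;
 * e.g. DECLARE_SYSCTLS(NETMAP_IF_POOL, if) creates dev.netmap.if_size,
 * dev.netmap.if_curr_size, dev.netmap.if_num and dev.netmap.if_curr_num.
 */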
 * First, find the allocator that contains the requested offset,
 * then locate the cluster through a lookup table.
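 *
 * The pools are laid out back to back, so the offset is consumed pool by
 * pool. E.g. (made-up sizes) with an 8 KB if pool, offset 8192+100 falls
 * in the ring pool at internal offset 100, and resolves to
 * lut[100 / _objsize].paddr + 100 % _objsize.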
netmap_mem_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
	vm_ooffset_t o = offset;
	struct netmap_obj_pool *p;

	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
		if (offset >= p[i].memtotal)
		// now lookup the cluster's address
		pa = p[i].lut[offset / p[i]._objsize].paddr +
			offset % p[i]._objsize;

	/* this is only in case of errors */
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
		p[NETMAP_IF_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal
			+ p[NETMAP_BUF_POOL].memtotal);
	return 0; // XXX bad address
netmap_mem_get_info(struct netmap_mem_d* nmd, u_int* size, u_int *memflags)
	error = nmd->config(nmd);
	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		*size = nmd->nm_totalsize;
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			struct netmap_obj_pool *p = nmd->pools + i;
			*size += (p->_numclusters * p->_clustsize);
	*memflags = nmd->flags;
 * we store objects by kernel address; we need to find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster.
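 * (E.g., made-up sizes: with 4 KB clusters holding 2 objects each, an
 * address 100 bytes into the third cluster returns ofs = 2 * 4096 + 100.)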
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
	int i, k = p->_clustentries, n = p->objtotal;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)

		ND("%s: return offset %d (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);

	D("address %p is not contained inside any cluster (%s)",
	return 0; /* An error occurred */
/* Helper macros that convert virtual addresses to offsets */
#define netmap_if_offset(n, v)					\
	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))

#define netmap_ring_offset(n, v)				\
	((n)->pools[NETMAP_IF_POOL].memtotal +			\
	netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))

#define netmap_buf_offset(n, v)					\
	((n)->pools[NETMAP_IF_POOL].memtotal +			\
	(n)->pools[NETMAP_RING_POOL].memtotal +			\
	netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)))
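/*
 * The three pools live back to back in the shared region, in pool order:
 * ring offsets are biased by the size of the if pool, and buffer offsets
 * by the if and ring pools together, which is what the macros above add.
 */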
netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *addr)
	v = netmap_if_offset(nmd, addr);
 * report the index, and use the start position as a hint;
 * otherwise buffer allocation becomes terribly expensive.
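 * (netmap_new_bufs() below grabs a whole ring's worth of buffers one at
 * a time; restarting the bitmap scan from slot 0 on each call would make
 * that quadratic in the number of buffers.)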
netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
	uint32_t i = 0;		/* index in the bitmap */
	uint32_t mask, j;	/* slot counter */

	if (len > p->_objsize) {
		D("%s request size %d too large", p->name, len);
		// XXX cannot reduce the size
	if (p->objfree == 0) {
		D("%s allocator: out of memory", p->name);

	/* termination is guaranteed by p->objfree, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots) {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
		p->bitmap[i] &= ~mask; /* mark object as in use */
		vaddr = p->lut[i * 32 + j].vaddr;
	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",
	    p->name, i, j, vaddr);
 * free by index, not by address. This is slow, but is only used
 * for a small number of objects (rings, nifp)
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
	if (j >= p->objtotal) {
		D("invalid index %u, max %u", j, p->objtotal);
	p->bitmap[j / 32] |= (1 << (j % 32));

netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
	u_int i, j, n = p->numclusters;

	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
		void *base = p->lut[i * p->_clustentries].vaddr;
		ssize_t relofs = (const char *) vaddr - (const char *) base;

		/* the given address is outside the scope of the current cluster */
		if (vaddr < base || relofs >= p->_clustsize)

		j = j + relofs / p->_objsize;
		/* KASSERT(j != 0, ("Cannot free object 0")); */
		netmap_obj_free(p, j);

	D("address %p is not contained inside any cluster (%s)",
#define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))

#define netmap_buf_malloc(n, _pos, _index)			\
	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], NETMAP_BDG_BUF_SIZE(n), _pos, _index)

/* Return the index associated with the given packet buffer */
#define netmap_buf_index(n, v)					\
	(netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
/* Return nonzero on error */
netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_if *nifp,
		struct netmap_slot *slot, u_int n)
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i = 0;		/* slot counter */
	uint32_t pos = 0;	/* slot in p->bitmap */
	uint32_t index = 0;	/* buffer index */

	(void)nifp;	/* UNUSED */
	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
			D("unable to locate empty packet buffer");
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		/* XXX setting flags=NS_BUF_CHANGED forces a pointer reload
		 * in the NIC ring. This is a hack that hides missing
		 * initializations in the drivers, and should go away.
		 */
		// slot[i].flags = NS_BUF_CHANGED;

	ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos);

		netmap_obj_free(p, slot[i].buf_idx);
	bzero(slot, n * sizeof(slot[0]));
netmap_free_buf(struct netmap_mem_d *nmd, struct netmap_if *nifp, uint32_t i)
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];

	if (i < 2 || i >= p->objtotal) {
		D("Cannot free buf#%d: should be in [2, %d)", i, p->objtotal);
	netmap_obj_free(p, i);
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
		free(p->bitmap, M_NETMAP);
		size_t sz = p->_clustsize;

		for (i = 0; i < p->objtotal; i += p->_clustentries) {
				contigfree(p->lut[i].vaddr, sz, M_NETMAP);
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
		free(p->lut, M_NETMAP);

 * Free all resources related to an allocator.
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
	netmap_reset_obj_allocator(p);
 * We receive a request for objtotal objects of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters whose size is a multiple of the page size.
 * We need to keep track of objtotal and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 * so we cannot afford gaps at the end of a cluster.
/* call with NMA_LOCK held */
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */

	/* we store the current request, so we can
	 * detect configuration changes later */
	p->r_objtotal = objtotal;
	p->r_objsize = objsize;

#define MAX_CLUSTSIZE	(1<<17)
#define LINE_ROUND	64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);

	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		D("requested objsize %d out of range [%d, %d]",
			objsize, p->objminsize, p->objmaxsize);
	if (objtotal < p->nummin || objtotal > p->nummax) {
		D("requested objtotal %d out of range [%d, %d]",
			objtotal, p->nummin, p->nummax);
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size, we try to fill it with objects,
	 * keeping track of the wasted space to the next page boundary.
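	 * E.g. with PAGE_SIZE 4096: objsize 2048 divides a page evenly,
	 * so the loop stops at i = 2 with delta == 0; objsize 1536 first
	 * gives delta == 0 at i = 8, filling three pages exactly.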
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)

		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution

		if (delta > ((clustentries * objsize) % PAGE_SIZE))

	// D("XXX --- ouch, delta %d (bad for buffers)", delta);

	/* compute clustsize and round to the next page */
	clustsize = clustentries * objsize;
	i = (clustsize & (PAGE_SIZE - 1));
		clustsize += PAGE_SIZE - i;
	D("objsize %d clustsize %d objects %d",
		objsize, clustsize, clustentries);
	 * The number of clusters is n = ceil(objtotal / clustentries);
	 * objtotal' = n * clustentries.
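	 * (The ceiling is computed below in integer form as
	 * (objtotal + clustentries - 1) / clustentries.)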
	p->_clustentries = clustentries;
	p->_clustsize = clustsize;
	p->_numclusters = (objtotal + clustentries - 1) / clustentries;

	/* actual values (may be larger than requested) */
	p->_objsize = objsize;
	p->_objtotal = p->_numclusters * clustentries;
/* call with NMA_LOCK held */
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
	int i;	/* must be signed */

	/* optimistically assume we have enough memory */
	p->numclusters = p->_numclusters;
	p->objtotal = p->_objtotal;

	n = sizeof(struct lut_entry) * p->objtotal;
	p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO);
	if (p->lut == NULL) {
		D("Unable to create lookup table (%d bytes) for '%s'", (int)n, p->name);
	/* Allocate the bitmap */
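	/* one bit per object, packed into 32-bit words, hence the round-up below */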
	n = (p->objtotal + 31) / 32;
	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO);
	if (p->bitmap == NULL) {
		D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
	 * Allocate clusters, init pointers and bitmap
	for (i = 0; i < (int)p->objtotal;) {
		int lim = i + p->_clustentries;

		clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
		    (size_t)0, -1UL, PAGE_SIZE, 0);
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			D("Unable to create cluster at %d for '%s' allocator",
			if (i < 2) /* nothing to halve */
			for (i--; i >= lim; i--) {
				p->bitmap[i >> 5] &= ~(1 << (i & 31));
				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
			/* we may have stopped in the middle of a cluster */
			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;

		for (; i < lim; i++, clust += p->_objsize) {
			p->bitmap[i >> 5] |= (1 << (i & 31));
			p->lut[i].vaddr = clust;
			p->lut[i].paddr = vtophys(clust);

	p->objfree = p->objtotal;
	p->memtotal = p->numclusters * p->_clustsize;

	D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
		p->numclusters, p->_clustsize >> 10,
		p->memtotal >> 10, p->name);
	netmap_reset_obj_allocator(p);
/* call with lock held */
netmap_memory_config_changed(struct netmap_mem_d *nmd)
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		if (nmd->pools[i].r_objsize != netmap_params[i].size ||
		    nmd->pools[i].r_objtotal != netmap_params[i].num)
netmap_mem_reset_all(struct netmap_mem_d *nmd)
	D("resetting %p", nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_reset_obj_allocator(&nmd->pools[i]);
	nmd->flags &= ~NETMAP_MEM_FINALIZED;

netmap_mem_finalize_all(struct netmap_mem_d *nmd)
	if (nmd->flags & NETMAP_MEM_FINALIZED)
	nmd->nm_totalsize = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
		nmd->nm_totalsize += nmd->pools[i].memtotal;
	/* buffers 0 and 1 are reserved */
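	/* ~3 leaves bits 0 and 1 clear (in use) and every other bit set (free) */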
	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
	nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3;
	nmd->flags |= NETMAP_MEM_FINALIZED;

	D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers",
		nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
		nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
		nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);
	D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);
	netmap_mem_reset_all(nmd);
netmap_mem_private_delete(struct netmap_mem_d *nmd)
	D("deleting %p", nmd);
	if (nmd->refcount > 0)
		D("bug: deleting mem allocator with refcount=%d!", nmd->refcount);
	D("done deleting %p", nmd);
	NMA_LOCK_DESTROY(nmd);
netmap_mem_private_config(struct netmap_mem_d *nmd)
	/* nothing to do, we are configured on creation
	 * and the configuration never changes thereafter
	 */

netmap_mem_private_finalize(struct netmap_mem_d *nmd)
	err = netmap_mem_finalize_all(nmd);

static void netmap_mem_private_deref(struct netmap_mem_d *nmd)
	if (--nmd->refcount <= 0)
		netmap_mem_reset_all(nmd);
struct netmap_mem_d *
netmap_mem_private_new(const char *name, u_int txr, u_int txd, u_int rxr, u_int rxd)
	struct netmap_mem_d *d = NULL;
	struct netmap_obj_params p[NETMAP_POOLS_NR];

	d = malloc(sizeof(struct netmap_mem_d),
			M_DEVBUF, M_NOWAIT | M_ZERO);

	/* XXX the rest of the code assumes the stack rings are always present */
	p[NETMAP_IF_POOL].size = sizeof(struct netmap_if) +
		sizeof(ssize_t) * (txr + rxr);
	p[NETMAP_IF_POOL].num = 2;
	maxd = (txd > rxd) ? txd : rxd;
	p[NETMAP_RING_POOL].size = sizeof(struct netmap_ring) +
		sizeof(struct netmap_slot) * maxd;
	p[NETMAP_RING_POOL].num = txr + rxr;
	p[NETMAP_BUF_POOL].size = 2048; /* XXX find a way to let the user choose this */
	p[NETMAP_BUF_POOL].num = rxr * (rxd + 2) + txr * (txd + 2);
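	/* i.e. ndesc + 2 buffers per ring: e.g. one tx and one rx ring of
	 * 1024 slots each request 2 * 1026 = 2052 buffers */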
799 D("req if %d*%d ring %d*%d buf %d*%d",
800 p[NETMAP_IF_POOL].num,
801 p[NETMAP_IF_POOL].size,
802 p[NETMAP_RING_POOL].num,
803 p[NETMAP_RING_POOL].size,
804 p[NETMAP_BUF_POOL].num,
805 p[NETMAP_BUF_POOL].size);
807 for (i = 0; i < NETMAP_POOLS_NR; i++) {
808 snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
809 nm_blueprint.pools[i].name,
811 if (netmap_config_obj_allocator(&d->pools[i],
812 p[i].num, p[i].size))
816 d->flags &= ~NETMAP_MEM_FINALIZED;
822 netmap_mem_private_delete(d);
/* call with lock held */
netmap_mem_global_config(struct netmap_mem_d *nmd)
	/* already in use, we cannot change the configuration */
	if (!netmap_memory_config_changed(nmd))

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			netmap_reset_obj_allocator(&nmd->pools[i]);
		nmd->flags &= ~NETMAP_MEM_FINALIZED;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
			netmap_params[i].num, netmap_params[i].size);
netmap_mem_global_finalize(struct netmap_mem_d *nmd)
	/* update configuration if changed */
	if (netmap_mem_global_config(nmd))

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* may happen if the configuration is not changed */
	if (netmap_mem_finalize_all(nmd))

	/* backward compatibility */
	netmap_buf_size = nmd->pools[NETMAP_BUF_POOL]._objsize;
	netmap_total_buffers = nmd->pools[NETMAP_BUF_POOL].objtotal;

	netmap_buffer_lut = nmd->pools[NETMAP_BUF_POOL].lut;
	netmap_buffer_base = nmd->pools[NETMAP_BUF_POOL].lut[0].vaddr;
netmap_mem_init(void)
	NMA_LOCK_INIT(&nm_mem);

netmap_mem_fini(void)
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_destroy_obj_allocator(&nm_mem.pools[i]);
	NMA_LOCK_DESTROY(&nm_mem);
netmap_free_rings(struct netmap_adapter *na)
	for (i = 0; i < na->num_tx_rings + 1; i++) {
		if (na->tx_rings[i].ring) {
			netmap_ring_free(na->nm_mem, na->tx_rings[i].ring);
			na->tx_rings[i].ring = NULL;
	for (i = 0; i < na->num_rx_rings + 1; i++) {
		if (na->rx_rings[i].ring) {
			netmap_ring_free(na->nm_mem, na->rx_rings[i].ring);
			na->rx_rings[i].ring = NULL;
	free(na->tx_rings, M_DEVBUF);
	na->tx_rings = na->rx_rings = NULL;
/* call with NMA_LOCK held */
 * Allocate the per-fd structure netmap_if.
 * If this is the first instance, also allocate the krings, rings etc.
 * We assume that the configuration stored in na
 * (number of tx/rx rings and descs) does not change while
 * the interface is in netmap mode.
extern int nma_is_vp(struct netmap_adapter *na);
netmap_mem_if_new(const char *ifname, struct netmap_adapter *na)
	struct netmap_if *nifp;
	struct netmap_ring *ring;
	ssize_t base;	/* handy for relative offsets between rings and nifp */
	u_int i, len, ndesc, ntx, nrx;
	struct netmap_kring *kring;
	uint32_t *tx_leases = NULL, *rx_leases = NULL;

	 * verify whether virtual ports need the stack ring
	ntx = na->num_tx_rings + 1;	/* shorthand, include stack ring */
	nrx = na->num_rx_rings + 1;	/* shorthand, include stack ring */
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 * For virtual rx rings we also allocate an array of
	 * pointers to assign to nkr_leases.
	NMA_LOCK(na->nm_mem);

	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
	nifp = netmap_if_malloc(na->nm_mem, len);
		NMA_UNLOCK(na->nm_mem);

	/* initialize base fields -- override const */
	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	strncpy(nifp->ni_name, ifname, (size_t)IFNAMSIZ);
	if (na->refcount) { /* already setup, we are done */

	len = (ntx + nrx) * sizeof(struct netmap_kring);
	 * Leases are attached to TX rings on NIC/host ports,
	 * and to RX rings on VALE ports.
	if (nma_is_vp(na)) {
		len += sizeof(uint32_t) * na->num_rx_desc * na->num_rx_rings;
		len += sizeof(uint32_t) * na->num_tx_desc * ntx;

	na->tx_rings = malloc((size_t)len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (na->tx_rings == NULL) {
		D("Cannot allocate krings for %s", ifname);
	na->rx_rings = na->tx_rings + ntx;

	if (nma_is_vp(na)) {
		rx_leases = (uint32_t *)(na->rx_rings + nrx);
		tx_leases = (uint32_t *)(na->rx_rings + nrx);
	 * First instance, allocate netmap rings and buffers for this card.
	 * The rings are contiguous, but have variable size.
	for (i = 0; i < ntx; i++) { /* Transmit rings */
		kring = &na->tx_rings[i];
		ndesc = na->num_tx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
			ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(na->nm_mem, len);
			D("Cannot allocate tx_ring[%d] for %s", i, ifname);
		ND("txring[%d] at %p", i, ring);
		kring->nkr_leases = tx_leases;

		*(uint32_t *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
			na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
			netmap_ring_offset(na->nm_mem, ring);
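		/*
		 * buf_ofs is the distance from this ring to the start of
		 * the buffer pool, so userspace can reach buffer idx at
		 * roughly (char *)ring + ring->buf_ofs + idx * nr_buf_size
		 * (cf. the NETMAP_BUF() macro in netmap_user.h).
		 */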
		 * Always keep one slot empty, so we can detect new
		 * transmissions comparing cur and nr_hwcur (they are
		 * the same only if there are no new transmissions).
		ring->avail = kring->nr_hwavail = ndesc - 1;
		ring->cur = kring->nr_hwcur = 0;
		*(uint16_t *)(uintptr_t)&ring->nr_buf_size =
			NETMAP_BDG_BUF_SIZE(na->nm_mem);
		ND("initializing slots for txring[%d]", i);
		if (netmap_new_bufs(na->nm_mem, nifp, ring->slot, ndesc)) {
			D("Cannot allocate buffers for tx_ring[%d] for %s", i, ifname);
	for (i = 0; i < nrx; i++) { /* Receive rings */
		kring = &na->rx_rings[i];
		ndesc = na->num_rx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
			ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(na->nm_mem, len);
			D("Cannot allocate rx_ring[%d] for %s", i, ifname);
		ND("rxring[%d] at %p", i, ring);

		if (rx_leases && i < na->num_rx_rings) {
			kring->nkr_leases = rx_leases;

		*(uint32_t *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
			na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
			netmap_ring_offset(na->nm_mem, ring);

		ring->cur = kring->nr_hwcur = 0;
		ring->avail = kring->nr_hwavail = 0; /* empty */
		*(int *)(uintptr_t)&ring->nr_buf_size =
			NETMAP_BDG_BUF_SIZE(na->nm_mem);
		ND("initializing slots for rxring[%d]", i);
		if (netmap_new_bufs(na->nm_mem, nifp, ring->slot, ndesc)) {
			D("Cannot allocate buffers for rx_ring[%d] for %s", i, ifname);
	// XXX initialize the selrecord structs.
	for (i = 0; i < ntx; i++)
		init_waitqueue_head(&na->tx_rings[i].si);
	for (i = 0; i < nrx; i++)
		init_waitqueue_head(&na->rx_rings[i].si);
	init_waitqueue_head(&na->tx_si);
	init_waitqueue_head(&na->rx_si);
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
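	 * Userspace then recovers ring i roughly as
	 * (struct netmap_ring *)((char *)nifp + nifp->ring_ofs[i])
	 * (cf. the NETMAP_TXRING()/NETMAP_RXRING() macros in netmap_user.h).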
	base = netmap_if_offset(na->nm_mem, nifp);
	for (i = 0; i < ntx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
			netmap_ring_offset(na->nm_mem, na->tx_rings[i].ring) - base;
	for (i = 0; i < nrx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i + ntx] =
			netmap_ring_offset(na->nm_mem, na->rx_rings[i].ring) - base;

	NMA_UNLOCK(na->nm_mem);

	netmap_free_rings(na);
	netmap_if_free(na->nm_mem, nifp);
	NMA_UNLOCK(na->nm_mem);
netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
	NMA_LOCK(na->nm_mem);

	if (na->refcount <= 0) {
		/* last instance, release bufs and rings */
		struct netmap_ring *ring;

		for (i = 0; i < na->num_tx_rings + 1; i++) {
			ring = na->tx_rings[i].ring;
			lim = na->tx_rings[i].nkr_num_slots;
			for (j = 0; j < lim; j++)
				netmap_free_buf(na->nm_mem, nifp, ring->slot[j].buf_idx);
		for (i = 0; i < na->num_rx_rings + 1; i++) {
			ring = na->rx_rings[i].ring;
			lim = na->rx_rings[i].nkr_num_slots;
			for (j = 0; j < lim; j++)
				netmap_free_buf(na->nm_mem, nifp, ring->slot[j].buf_idx);
		netmap_free_rings(na);
	netmap_if_free(na->nm_mem, nifp);

	NMA_UNLOCK(na->nm_mem);
netmap_mem_global_deref(struct netmap_mem_d *nmd)
	D("refcount = %d", nmd->refcount);

int netmap_mem_finalize(struct netmap_mem_d *nmd)
	return nmd->finalize(nmd);

void netmap_mem_deref(struct netmap_mem_d *nmd)
	nmd->deref(nmd);