/*
 * Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifdef linux
#include "bsd_glue.h"
#endif /* linux */

#ifdef __APPLE__
#include "osx_glue.h"
#endif /* __APPLE__ */

#ifdef __FreeBSD__
#include <sys/cdefs.h>		/* prerequisite */
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/malloc.h>
#include <vm/vm.h>		/* vtophys */
#include <vm/pmap.h>		/* vtophys */
#include <sys/socket.h>		/* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <net/if.h>		/* needed by if_var.h */
#include <net/if_var.h>
#include <machine/bus.h>	/* bus_dmamap_* */

#endif /* __FreeBSD__ */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include "netmap_mem2.h"
#define NETMAP_BUF_MAX_NUM	20*4096*2	/* large machine */

#define NETMAP_POOL_MAX_NAMSZ	32
struct netmap_obj_params {
	u_int size;
	u_int num;
};
struct netmap_obj_pool {
	char name[NETMAP_POOL_MAX_NAMSZ];	/* name of the allocator */

	/* ---------------------------------------------------*/
	/* these are only meaningful if the pool is finalized */
	/* (see 'finalized' field in netmap_mem_d)            */
	u_int objtotal;		/* actual total number of objects. */
	u_int memtotal;		/* actual total memory space */
	u_int numclusters;	/* actual number of clusters */

	u_int objfree;		/* number of free objects. */

	struct lut_entry *lut;	/* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */
	/* ---------------------------------------------------*/

	/* limits, used in validation */
	u_int objminsize;	/* minimum object size */
	u_int objmaxsize;	/* maximum object size */
	u_int nummin;		/* minimum number of objects */
	u_int nummax;		/* maximum number of objects */

	/* these are changed only by config */
	u_int _objtotal;	/* total number of objects */
	u_int _objsize;		/* object size */
	u_int _clustsize;	/* cluster size */
	u_int _clustentries;	/* objects per cluster */
	u_int _numclusters;	/* number of clusters */

	/* requested values */
	u_int r_objtotal;
	u_int r_objsize;
};
#ifdef linux
// XXX a mtx would suffice here 20130415 lr
#define NMA_LOCK_T	struct semaphore
#else /* !linux */
#define NMA_LOCK_T	struct mtx
#endif /* linux */
typedef int (*netmap_mem_config_t)(struct netmap_mem_d*);
typedef int (*netmap_mem_finalize_t)(struct netmap_mem_d*);
typedef void (*netmap_mem_deref_t)(struct netmap_mem_d*);

typedef uint16_t nm_memid_t;
struct netmap_mem_d {
	NMA_LOCK_T nm_mtx;	/* protect the allocator */
	u_int nm_totalsize;	/* shorthand */

	u_int flags;
#define NETMAP_MEM_FINALIZED	0x1	/* preallocation done */
	int lasterr;		/* last error for curr config */
	int refcount;		/* existing priv structures */
	/* the three allocators */
	struct netmap_obj_pool pools[NETMAP_POOLS_NR];

	netmap_mem_config_t   config;	/* called with NMA_LOCK held */
	netmap_mem_finalize_t finalize;	/* called with NMA_LOCK held */
	netmap_mem_deref_t    deref;	/* called with NMA_LOCK held */

	nm_memid_t nm_id;	/* allocator identifier */
	int nm_grp;		/* iommu group id */

	/* list of all existing allocators, sorted by nm_id */
	struct netmap_mem_d *prev, *next;
};
/* accessor functions */
struct lut_entry *
netmap_mem_get_lut(struct netmap_mem_d *nmd)
{
	return nmd->pools[NETMAP_BUF_POOL].lut;
}

u_int
netmap_mem_get_buftotal(struct netmap_mem_d *nmd)
{
	return nmd->pools[NETMAP_BUF_POOL].objtotal;
}

size_t
netmap_mem_get_bufsize(struct netmap_mem_d *nmd)
{
	return nmd->pools[NETMAP_BUF_POOL]._objsize;
}
#ifdef linux
#define NMA_LOCK_INIT(n)	sema_init(&(n)->nm_mtx, 1)
#define NMA_LOCK_DESTROY(n)
#define NMA_LOCK(n)		down(&(n)->nm_mtx)
#define NMA_UNLOCK(n)		up(&(n)->nm_mtx)
#else /* !linux */
#define NMA_LOCK_INIT(n)	mtx_init(&(n)->nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF)
#define NMA_LOCK_DESTROY(n)	mtx_destroy(&(n)->nm_mtx)
#define NMA_LOCK(n)		mtx_lock(&(n)->nm_mtx)
#define NMA_UNLOCK(n)		mtx_unlock(&(n)->nm_mtx)
#endif /* linux */
struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 100,
	},
	[NETMAP_RING_POOL] = {
		.size = 9*PAGE_SIZE,
		.num  = 200,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = NETMAP_BUF_MAX_NUM,
	},
};

struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 128,
		.num  = 2,
	},
	[NETMAP_RING_POOL] = {
		.size = 5*PAGE_SIZE,
		.num  = 4,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = 4098,
	},
};
/*
 * nm_mem is the memory allocator used for all physical interfaces
 * running in netmap mode.
 * Virtual (VALE) ports will each have their own allocator.
 */
static int netmap_mem_global_config(struct netmap_mem_d *nmd);
static int netmap_mem_global_finalize(struct netmap_mem_d *nmd);
static void netmap_mem_global_deref(struct netmap_mem_d *nmd);
struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin  = 10,		/* don't be stingy */
			.nummax  = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name	= "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin  = 2,
			.nummax  = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "netmap_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin  = 4,
			.nummax  = 1000000,	/* one million! */
		},
	},
	.config   = netmap_mem_global_config,
	.finalize = netmap_mem_global_finalize,
	.deref    = netmap_mem_global_deref,

	.nm_id = 1,
	.nm_grp = -1,

	.prev = &nm_mem,
	.next = &nm_mem,
};

struct netmap_mem_d *netmap_last_mem_d = &nm_mem;
/* blueprint for the private memory allocators */
static int netmap_mem_private_config(struct netmap_mem_d *nmd);
static int netmap_mem_private_finalize(struct netmap_mem_d *nmd);
static void netmap_mem_private_deref(struct netmap_mem_d *nmd);
const struct netmap_mem_d nm_blueprint = {
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "%s_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin  = 1,
			.nummax  = 100,
		},
		[NETMAP_RING_POOL] = {
			.name	= "%s_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin  = 2,
			.nummax  = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "%s_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin  = 4,
			.nummax  = 1000000,	/* one million! */
		},
	},
	.config   = netmap_mem_private_config,
	.finalize = netmap_mem_private_finalize,
	.deref    = netmap_mem_private_deref,

	.flags = NETMAP_MEM_PRIVATE,
};
/* memory allocator related sysctls */

#define STRINGIFY(x) #x

#define DECLARE_SYSCTLS(id, name) \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \
	    "Default size of private netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \
	    "Default number of private netmap " STRINGIFY(name) "s")
SYSCTL_DECL(_dev_netmap);
DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
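/*
 * For reference, each DECLARE_SYSCTLS() invocation above expands to six
 * sysctl entries under dev.netmap; e.g. DECLARE_SYSCTLS(NETMAP_IF_POOL, if)
 * creates:
 *
 *	dev.netmap.if_size	(RW) requested object size
 *	dev.netmap.if_curr_size	(RD) current object size
 *	dev.netmap.if_num	(RW) requested number of objects
 *	dev.netmap.if_curr_num	(RD) current number of objects
 *	dev.netmap.priv_if_size	(RW) default size for private allocators
 *	dev.netmap.priv_if_num	(RW) default number for private allocators
 */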
static int
nm_mem_assign_id(struct netmap_mem_d *nmd)
{
	nm_memid_t id;
	struct netmap_mem_d *scan = netmap_last_mem_d;
	int error = ENOMEM;

	NMA_LOCK(&nm_mem);

	do {
		/* we rely on unsigned wrap around */
		id = scan->nm_id + 1;
		if (id == 0) /* reserve 0 as error value */
			id = 1;
		scan = scan->next;
		if (id != scan->nm_id) {
			nmd->nm_id = id;
			nmd->prev = scan->prev;
			nmd->next = scan;
			scan->prev->next = nmd;
			scan->prev = nmd;
			netmap_last_mem_d = nmd;
			error = 0;
			break;
		}
	} while (scan != netmap_last_mem_d);

	NMA_UNLOCK(&nm_mem);
	return error;
}
static void
nm_mem_release_id(struct netmap_mem_d *nmd)
{
	NMA_LOCK(&nm_mem);

	nmd->prev->next = nmd->next;
	nmd->next->prev = nmd->prev;

	if (netmap_last_mem_d == nmd)
		netmap_last_mem_d = nmd->prev;

	nmd->prev = nmd->next = NULL;

	NMA_UNLOCK(&nm_mem);
}
static int
nm_mem_assign_group(struct netmap_mem_d *nmd, struct device *dev)
{
	int err = 0, id;

	id = nm_iommu_group_id(dev);
	if (netmap_verbose)
		D("iommu_group %d", id);

	NMA_LOCK(nmd);

	if (nmd->nm_grp < 0)
		nmd->nm_grp = id;

	if (nmd->nm_grp != id)
		nmd->lasterr = err = ENOMEM;

	NMA_UNLOCK(nmd);
	return err;
}
/*
 * First, find the allocator that contains the requested offset,
 * then locate the cluster through a lookup table.
 */
vm_paddr_t
netmap_mem_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
{
	int i;
	vm_ooffset_t o = offset;
	vm_paddr_t pa;
	struct netmap_obj_pool *p;

	NMA_LOCK(nmd);
	p = nmd->pools;

	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
		if (offset >= p[i].memtotal)
			continue;
		// now lookup the cluster's address
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) +
			offset % p[i]._objsize;
		NMA_UNLOCK(nmd);
		return pa;
	}
	/* this is only in case of errors */
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
		p[NETMAP_IF_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal
			+ p[NETMAP_BUF_POOL].memtotal);
	NMA_UNLOCK(nmd);
	return 0;	// XXX bad address
}
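/*
 * Worked example (numbers illustrative only): with if.memtotal = 8KB and
 * ring.memtotal = 512KB, an offset of 12288 skips the if pool (12288 >=
 * 8192, subtract) and lands in the ring pool at relative offset 4096;
 * with a ring _objsize of 36864 this selects lut[4096/36864] == lut[0],
 * so pa = vtophys(ring.lut[0].vaddr) + 4096.
 */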
int
netmap_mem_get_info(struct netmap_mem_d* nmd, u_int* size, u_int *memflags,
	nm_memid_t *id)
{
	int error = 0;

	NMA_LOCK(nmd);
	error = nmd->config(nmd);
	if (error)
		goto out;
	if (size) {
		if (nmd->flags & NETMAP_MEM_FINALIZED) {
			*size = nmd->nm_totalsize;
		} else {
			/* not finalized yet: compute from the config */
			int i;
			*size = 0;
			for (i = 0; i < NETMAP_POOLS_NR; i++) {
				struct netmap_obj_pool *p = nmd->pools + i;
				*size += (p->_numclusters * p->_clustsize);
			}
		}
	}
	if (memflags)
		*memflags = nmd->flags;
	if (id)
		*id = nmd->nm_id;
out:
	NMA_UNLOCK(nmd);
	return error;
}
/*
 * we store objects by kernel address, need to find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->_clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		ND("%s: return offset %d (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}
/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(n, v)					\
	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))

#define netmap_ring_offset(n, v)				\
	((n)->pools[NETMAP_IF_POOL].memtotal +			\
	netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))

#define netmap_buf_offset(n, v)					\
	((n)->pools[NETMAP_IF_POOL].memtotal +			\
	(n)->pools[NETMAP_RING_POOL].memtotal +			\
	netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)))
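/*
 * The three pools are laid out back to back in the exported memory
 * region, which is why the macros above add the sizes of the
 * preceding pools:
 *
 *	offset 0                                      nm_totalsize
 *	|-- if pool --|-- ring pool --|-- buf pool --------------|
 *
 * so an object's global offset is its pool-relative offset plus the
 * memtotal of every pool placed before it.
 */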
ssize_t
netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *addr)
{
	ssize_t v;

	NMA_LOCK(nmd);
	v = netmap_if_offset(nmd, addr);
	NMA_UNLOCK(nmd);
	return v;
}
/*
 * report the index, and use start position as a hint,
 * otherwise buffer allocation becomes terribly expensive.
 */
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
{
	uint32_t i = 0;		/* index in the bitmap */
	uint32_t mask, j;	/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		D("%s request size %d too large", p->name, len);
		// XXX cannot reduce the size
		return NULL;
	}

	if (p->objfree == 0) {
		D("no more %s objects", p->name);
		return NULL;
	}
	if (start)
		i = *start;

	/* termination is guaranteed by p->free, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots) {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask;	/* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
		if (index)
			*index = i * 32 + j;
	}
	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",
	    p->name, i, j, vaddr);

	if (start)
		*start = i;
	return vaddr;
}
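/*
 * Example of the bitmap arithmetic above (values illustrative): with
 * bitmap[0] == 0x00000008 the inner for loop stops at j == 3 (mask ==
 * 0x8), so the allocator hands out object i*32 + j == 3, clears bit 3,
 * and leaves *start == 0 as the hint for the next call, which resumes
 * scanning from the same word instead of restarting from 0.
 */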
/*
 * free by index, not by address.
 * XXX should we also cleanup the content ?
 */
static int
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	uint32_t *ptr, mask;

	if (j >= p->objtotal) {
		D("invalid index %u, max %u", j, p->objtotal);
		return 1;
	}
	ptr = &p->bitmap[j / 32];
	mask = (1 << (j % 32));
	if (*ptr & mask) {
		D("ouch, double free on buffer %d", j);
		return 1;
	} else {
		*ptr |= mask;
		p->objfree++;
		return 0;
	}
}
/*
 * free by address. This is slow but is only used for a few
 * objects (rings, nifp)
 */
static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	u_int i, j, n = p->numclusters;

	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
		void *base = p->lut[i * p->_clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* the given address is out of the scope of the current cluster */
		if (vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		/* KASSERT(j != 0, ("Cannot free object 0")); */
		netmap_obj_free(p, j);
		return;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}
#define netmap_mem_bufsize(n)	\
	((n)->pools[NETMAP_BUF_POOL]._objsize)

#define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(n, _pos, _index)			\
	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index)


/* Return the index associated to the given packet buffer */
#define netmap_buf_index(n, v)						\
	(netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
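/*
 * E.g. with NETMAP_BDG_BUF_SIZE(n) == 2048 (illustrative), a vaddr whose
 * buf-pool-relative offset is 8192 maps to buffer index 8192/2048 == 4.
 */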
/*
 * allocate extra buffers in a linked list.
 * returns the actual number.
 */
uint32_t
netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
{
	struct netmap_mem_d *nmd = na->nm_mem;
	uint32_t i, pos = 0; /* opaque, scan position in the bitmap */

	NMA_LOCK(nmd);

	*head = 0;	/* default, 'null' index ie empty list */
	for (i = 0 ; i < n; i++) {
		uint32_t cur = *head;	/* save current head */
		uint32_t *p = netmap_buf_malloc(nmd, &pos, head);
		if (p == NULL) {
			D("no more buffers after %d of %d", i, n);
			*head = cur; /* restore */
			break;
		}
		RD(5, "allocate buffer %d -> %d", *head, cur);
		*p = cur; /* link to previous head */
	}

	NMA_UNLOCK(nmd);

	return i;
}
static void
netmap_extra_free(struct netmap_adapter *na, uint32_t head)
{
	struct lut_entry *lut = na->na_lut;
	struct netmap_mem_d *nmd = na->nm_mem;
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	uint32_t i, cur, *buf;

	D("freeing the extra list");
	for (i = 0; head >= 2 && head < p->objtotal; i++) {
		cur = head;
		buf = lut[head].vaddr;
		head = *buf;	/* advance to the next buffer in the list */
		*buf = 0;
		if (netmap_obj_free(p, cur))
			break;
	}
	if (head != 0)
		D("breaking with head %d", head);
	D("freed %d buffers", i);
}
/* Return nonzero on error */
static int
netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i = 0;		/* slot counter */
	uint32_t pos = 0;	/* slot in p->bitmap */
	uint32_t index = 0;	/* buffer index */

	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
		if (vaddr == NULL) {
			D("no more buffers after %d of %d", i, n);
			goto cleanup;
		}
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		slot[i].flags = 0;
	}

	ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos);
	return (0);

cleanup:
	while (i > 0) {
		i--;
		netmap_obj_free(p, slot[i].buf_idx);
	}
	bzero(slot, n * sizeof(slot[0]));
	return (ENOMEM);
}
static void
netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i;

	for (i = 0; i < n; i++) {
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		slot[i].flags = 0;
	}
}
static void
netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];

	if (i < 2 || i >= p->objtotal) {
		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(p, i);
}


static void
netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	u_int i;

	for (i = 0; i < n; i++) {
		if (slot[i].buf_idx > 2)
			netmap_free_buf(nmd, slot[i].buf_idx);
	}
}
static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	if (p->bitmap)
		free(p->bitmap, M_NETMAP);
	p->bitmap = NULL;
	if (p->lut) {
		u_int i;
		size_t sz = p->_clustsize;

		/*
		 * Free each cluster allocated in
		 * netmap_finalize_obj_allocator().  The cluster start
		 * addresses are stored at multiples of p->_clustentries
		 * in the lut.
		 */
		for (i = 0; i < p->objtotal; i += p->_clustentries) {
			if (p->lut[i].vaddr)
				contigfree(p->lut[i].vaddr, sz, M_NETMAP);
		}
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
		free(p->lut, M_NETMAP);
	}
	p->lut = NULL;
	p->objtotal = 0;
	p->memtotal = 0;
	p->numclusters = 0;
	p->objfree = 0;
}

/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	netmap_reset_obj_allocator(p);
}
/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * We need to keep track of objtotal and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 * so we cannot afford gaps at the end of a cluster.
 */

/* call with NMA_LOCK held */
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
	int i;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */

	/* we store the current request, so we can
	 * detect configuration changes later */
	p->r_objtotal = objtotal;
	p->r_objsize = objsize;

#define MAX_CLUSTSIZE	(1<<22)		// 4 MB
#define LINE_ROUND	NM_CACHE_ALIGN	// 64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		D("requested objsize %d out of range [%d, %d]",
			objsize, p->objminsize, p->objmaxsize);
		return EINVAL;
	}
	if (objtotal < p->nummin || objtotal > p->nummax) {
		D("requested objtotal %d out of range [%d, %d]",
			objtotal, p->nummin, p->nummax);
		return EINVAL;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution found
			clustentries = i;
			break;
		}
	}
	/* exact solution not found */
	if (clustentries == 0) {
		D("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* compute clustsize */
	clustsize = clustentries * objsize;
	if (netmap_verbose)
		D("objsize %d clustsize %d objects %d",
			objsize, clustsize, clustentries);

	/*
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	p->_clustentries = clustentries;
	p->_clustsize = clustsize;
	p->_numclusters = (objtotal + clustentries - 1) / clustentries;

	/* actual values (may be larger than requested) */
	p->_objsize = objsize;
	p->_objtotal = p->_numclusters * clustentries;

	return 0;
}
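/*
 * Worked example (illustrative): objsize = 2048 and PAGE_SIZE = 4096.
 * The loop finds used % PAGE_SIZE == 0 at i == 2, so clustentries = 2
 * and clustsize = 4096. A request for objtotal = 163840 buffers then
 * gives _numclusters = ceil(163840/2) = 81920 and _objtotal = 163840.
 */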
/* call with NMA_LOCK held */
static int
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
{
	int i;	/* must be signed */
	size_t n;

	/* optimistically assume we have enough memory */
	p->numclusters = p->_numclusters;
	p->objtotal = p->_objtotal;

	n = sizeof(struct lut_entry) * p->objtotal;
	p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO);
	if (p->lut == NULL) {
		D("Unable to create lookup table (%d bytes) for '%s'", (int)n, p->name);
		goto clean;
	}

	/* Allocate the bitmap */
	n = (p->objtotal + 31) / 32;
	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO);
	if (p->bitmap == NULL) {
		D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
		    p->name);
		goto clean;
	}
	p->bitmap_slots = n;

	/*
	 * Allocate clusters, init pointers and bitmap
	 */
	n = p->_clustsize;
	for (i = 0; i < (int)p->objtotal;) {
		int lim = i + p->_clustentries;
		char *clust;

		clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
		    (size_t)0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			D("Unable to create cluster at %d for '%s' allocator",
			    i, p->name);
			if (i < 2) /* nothing to halve */
				goto out;
			lim = i / 2;
			for (i--; i >= lim; i--) {
				p->bitmap[ (i>>5) ] &= ~( 1 << (i & 31) );
				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
						n, M_NETMAP);
				p->lut[i].vaddr = NULL;
			}
		out:
			p->objtotal = i;
			/* we may have stopped in the middle of a cluster */
			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
			break;
		}
		/*
		 * Set bitmap and lut state for all buffers in the current
		 * cluster.
		 *
		 * [i, lim) is the set of buffer indexes that cover the
		 * current cluster.
		 *
		 * 'clust' is really the address of the current buffer in
		 * the current cluster as we index through it with a stride
		 * of p->_objsize.
		 */
		for (; i < lim; i++, clust += p->_objsize) {
			p->bitmap[ (i>>5) ] |= ( 1 << (i & 31) );
			p->lut[i].vaddr = clust;
			p->lut[i].paddr = vtophys(clust);
		}
	}
	p->objfree = p->objtotal;
	p->memtotal = p->numclusters * p->_clustsize;
	if (p->objfree == 0)
		goto clean;
	if (netmap_verbose)
		D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
		    p->numclusters, p->_clustsize >> 10,
		    p->memtotal >> 10, p->name);

	return 0;

clean:
	netmap_reset_obj_allocator(p);
	return ENOMEM;
}
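/*
 * E.g. a cluster of 2 objects of 2048 bytes (values illustrative):
 * lut[i].vaddr = clust, lut[i+1].vaddr = clust + 2048, each with its
 * own vtophys() translation, so userspace offsets and DMA addresses
 * can be resolved per object even though the backing memory was
 * allocated as one contiguous cluster.
 */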
/* call with lock held */
static int
netmap_memory_config_changed(struct netmap_mem_d *nmd)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		if (nmd->pools[i].r_objsize != netmap_params[i].size ||
		    nmd->pools[i].r_objtotal != netmap_params[i].num)
			return 1;
	}
	return 0;
}
static void
netmap_mem_reset_all(struct netmap_mem_d *nmd)
{
	int i;

	if (netmap_verbose)
		D("resetting %p", nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_reset_obj_allocator(&nmd->pools[i]);
	}
	nmd->flags &= ~NETMAP_MEM_FINALIZED;
}
static int
netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na)
{
	int i, lim = p->_objtotal;

	if (na->pdev == NULL)
		return 0;

#ifdef __FreeBSD__
	(void)i;
	(void)lim;
	D("unsupported on FreeBSD");
#else /* linux */
	for (i = 2; i < lim; i++) {
		netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &p->lut[i].paddr);
	}
#endif /* linux */

	return 0;
}
static int
netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
{
#ifdef __FreeBSD__
	D("unsupported on FreeBSD");
#else /* linux */
	int i, lim = p->_objtotal;

	if (na->pdev == NULL)
		return 0;

	for (i = 2; i < lim; i++) {
		netmap_load_map(na, (bus_dma_tag_t) na->pdev, &p->lut[i].paddr,
				p->lut[i].vaddr);
	}
#endif /* linux */

	return 0;
}
static int
netmap_mem_finalize_all(struct netmap_mem_d *nmd)
{
	int i;

	if (nmd->flags & NETMAP_MEM_FINALIZED)
		return 0;
	nmd->lasterr = 0;
	nmd->nm_totalsize = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
		if (nmd->lasterr)
			goto error;
		nmd->nm_totalsize += nmd->pools[i].memtotal;
	}
	/* buffers 0 and 1 are reserved: clearing the two low bits of
	 * bitmap[0] (~3 == 0xfffffffc) marks them as in use */
	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
	nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3;
	nmd->flags |= NETMAP_MEM_FINALIZED;

	if (netmap_verbose)
		D("interfaces %d KB, rings %d KB, buffers %d MB",
		    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);

	if (netmap_verbose)
		D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);

	return 0;
error:
	netmap_mem_reset_all(nmd);
	return nmd->lasterr;
}
static void
netmap_mem_private_delete(struct netmap_mem_d *nmd)
{
	if (nmd == NULL)
		return;
	if (netmap_verbose)
		D("deleting %p", nmd);
	if (nmd->refcount > 0)
		D("bug: deleting mem allocator with refcount=%d!", nmd->refcount);
	nm_mem_release_id(nmd);
	if (netmap_verbose)
		D("done deleting %p", nmd);
	NMA_LOCK_DESTROY(nmd);
	free(nmd, M_DEVBUF);
}
static int
netmap_mem_private_config(struct netmap_mem_d *nmd)
{
	/* nothing to do, we are configured on creation
	 * and configuration never changes thereafter
	 */
	return 0;
}

static int
netmap_mem_private_finalize(struct netmap_mem_d *nmd)
{
	int err;

	NMA_LOCK(nmd);
	nmd->refcount++;
	err = netmap_mem_finalize_all(nmd);
	NMA_UNLOCK(nmd);
	return err;
}

static void
netmap_mem_private_deref(struct netmap_mem_d *nmd)
{
	NMA_LOCK(nmd);
	if (--nmd->refcount <= 0)
		netmap_mem_reset_all(nmd);
	NMA_UNLOCK(nmd);
}
/*
 * allocator for private memory
 */
struct netmap_mem_d *
netmap_mem_private_new(const char *name, u_int txr, u_int txd,
	u_int rxr, u_int rxd, u_int extra_bufs, u_int npipes, int *perr)
{
	struct netmap_mem_d *d = NULL;
	struct netmap_obj_params p[NETMAP_POOLS_NR];
	int i, err;
	u_int v, maxd;

	d = malloc(sizeof(struct netmap_mem_d),
			M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		err = ENOMEM;
		goto error;
	}

	*d = nm_blueprint;

	err = nm_mem_assign_id(d);
	if (err)
		goto error;

	/* account for the fake host rings */
	txr++;
	rxr++;

	/* copy the min values */
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		p[i] = netmap_min_priv_params[i];
	}

	/* possibly increase them to fit user request */
	v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr);
	if (p[NETMAP_IF_POOL].size < v)
		p[NETMAP_IF_POOL].size = v;
	v = 2 + 4 * npipes;
	if (p[NETMAP_IF_POOL].num < v)
		p[NETMAP_IF_POOL].num = v;
	maxd = (txd > rxd) ? txd : rxd;
	v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd;
	if (p[NETMAP_RING_POOL].size < v)
		p[NETMAP_RING_POOL].size = v;
	/* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
	 * and two rx rings (again, 1 normal and 1 fake host)
	 */
	v = txr + rxr + 8 * npipes;
	if (p[NETMAP_RING_POOL].num < v)
		p[NETMAP_RING_POOL].num = v;
	/* for each pipe we only need the buffers for the 4 "real" rings.
	 * On the other hand, the pipe ring dimension may be different from
	 * the parent port ring dimension. As a compromise, we allocate twice
	 * the space actually needed if the pipe rings were the same size as
	 * the parent rings
	 */
	v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
		/* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
	if (p[NETMAP_BUF_POOL].num < v)
		p[NETMAP_BUF_POOL].num = v;
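	/*
	 * Worked example (illustrative): txr = rxr = 1 on entry (2 each
	 * after the fake host rings), txd = rxd = 1024, npipes = 2 and
	 * extra_bufs = 0 give
	 * v = (8 + 2)*1024 + (8 + 2)*1024 + 2 = 20482 buffers.
	 */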
1189 D("req if %d*%d ring %d*%d buf %d*%d",
1190 p[NETMAP_IF_POOL].num,
1191 p[NETMAP_IF_POOL].size,
1192 p[NETMAP_RING_POOL].num,
1193 p[NETMAP_RING_POOL].size,
1194 p[NETMAP_BUF_POOL].num,
1195 p[NETMAP_BUF_POOL].size);
1197 for (i = 0; i < NETMAP_POOLS_NR; i++) {
1198 snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
1199 nm_blueprint.pools[i].name,
1201 err = netmap_config_obj_allocator(&d->pools[i],
1202 p[i].num, p[i].size);
1207 d->flags &= ~NETMAP_MEM_FINALIZED;
1213 netmap_mem_private_delete(d);
/* call with lock held */
static int
netmap_mem_global_config(struct netmap_mem_d *nmd)
{
	int i;

	if (nmd->refcount)
		/* already in use, we cannot change the configuration */
		goto out;

	if (!netmap_memory_config_changed(nmd))
		goto out;

	ND("reconfiguring");

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			netmap_reset_obj_allocator(&nmd->pools[i]);
		}
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
				netmap_params[i].num, netmap_params[i].size);
		if (nmd->lasterr)
			goto out;
	}

out:
	return nmd->lasterr;
}
static int
netmap_mem_global_finalize(struct netmap_mem_d *nmd)
{
	int err;

	/* update configuration if changed */
	if (netmap_mem_global_config(nmd))
		goto out;

	nmd->refcount++;

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* may happen if config is not changed */
		ND("nothing to do");
		goto out;
	}

	if (netmap_mem_finalize_all(nmd))
		goto out;

	nmd->lasterr = 0;

out:
	if (nmd->lasterr)
		nmd->refcount--;
	err = nmd->lasterr;

	return err;
}
int
netmap_mem_init(void)
{
	NMA_LOCK_INIT(&nm_mem);
	return (0);
}

void
netmap_mem_fini(void)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_destroy_obj_allocator(&nm_mem.pools[i]);
	}
	NMA_LOCK_DESTROY(&nm_mem);
}
static void
netmap_free_rings(struct netmap_adapter *na)
{
	struct netmap_kring *kring;
	struct netmap_ring *ring;

	if (!na->tx_rings)
		return;
	for (kring = na->tx_rings; kring != na->rx_rings; kring++) {
		ring = kring->ring;
		if (ring == NULL)
			continue;
		netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
		netmap_ring_free(na->nm_mem, ring);
		kring->ring = NULL;
	}
	for (/* cont'd from above */; kring != na->tailroom; kring++) {
		ring = kring->ring;
		if (ring == NULL)
			continue;
		netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
		netmap_ring_free(na->nm_mem, ring);
		kring->ring = NULL;
	}
}
/*
 * call with NMA_LOCK held.
 *
 * Allocate netmap rings and buffers for this card.
 * The rings are contiguous, but have variable size.
 * The kring array must follow the layout described
 * in netmap_krings_create().
 */
int
netmap_mem_rings_create(struct netmap_adapter *na)
{
	struct netmap_ring *ring;
	u_int len, ndesc;
	struct netmap_kring *kring;
	u_int i;

	NMA_LOCK(na->nm_mem);
	/* transmit rings */
	for (i = 0, kring = na->tx_rings; kring != na->rx_rings; kring++, i++) {
		if (kring->ring) {
			ND("%s %ld already created", kring->name, kring - na->tx_rings);
			continue; /* already created by somebody else */
		}
		ndesc = kring->nkr_num_slots;
		len = sizeof(struct netmap_ring) +
			  ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(na->nm_mem, len);
		if (ring == NULL) {
			D("Cannot allocate tx_ring");
			goto cleanup;
		}
		ND("txring at %p", ring);
		kring->ring = ring;
		*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
		*(int64_t *)(uintptr_t)&ring->buf_ofs =
		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
			na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
			netmap_ring_offset(na->nm_mem, ring);

		/* copy values from kring */
		ring->head = kring->rhead;
		ring->cur = kring->rcur;
		ring->tail = kring->rtail;
		*(uint16_t *)(uintptr_t)&ring->nr_buf_size =
			netmap_mem_bufsize(na->nm_mem);
		ND("%s h %d c %d t %d", kring->name,
			ring->head, ring->cur, ring->tail);
		ND("initializing slots for txring");
		if (i != na->num_tx_rings || (na->na_flags & NAF_HOST_RINGS)) {
			/* this is a real ring */
			if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
				D("Cannot allocate buffers for tx_ring");
				goto cleanup;
			}
		} else {
			/* this is a fake tx ring, set all indices to 0 */
			netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0);
		}
	}
	/* receive rings */
	for (i = 0 /* kring cont'd from above */; kring != na->tailroom; kring++, i++) {
		if (kring->ring) {
			ND("%s %ld already created", kring->name, kring - na->rx_rings);
			continue; /* already created by somebody else */
		}
		ndesc = kring->nkr_num_slots;
		len = sizeof(struct netmap_ring) +
			  ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(na->nm_mem, len);
		if (ring == NULL) {
			D("Cannot allocate rx_ring");
			goto cleanup;
		}
		ND("rxring at %p", ring);
		kring->ring = ring;
		*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
		*(int64_t *)(uintptr_t)&ring->buf_ofs =
		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
			na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
			netmap_ring_offset(na->nm_mem, ring);

		/* copy values from kring */
		ring->head = kring->rhead;
		ring->cur = kring->rcur;
		ring->tail = kring->rtail;
		*(int *)(uintptr_t)&ring->nr_buf_size =
			netmap_mem_bufsize(na->nm_mem);
		ND("%s h %d c %d t %d", kring->name,
			ring->head, ring->cur, ring->tail);
		ND("initializing slots for rxring %p", ring);
		if (i != na->num_rx_rings || (na->na_flags & NAF_HOST_RINGS)) {
			/* this is a real ring */
			if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
				D("Cannot allocate buffers for rx_ring");
				goto cleanup;
			}
		} else {
			/* this is a fake rx ring, set all indices to 1 */
			netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 1);
		}
	}

	NMA_UNLOCK(na->nm_mem);

	return 0;

cleanup:
	netmap_free_rings(na);

	NMA_UNLOCK(na->nm_mem);

	return ENOMEM;
}
void
netmap_mem_rings_delete(struct netmap_adapter *na)
{
	/* last instance, release bufs and rings */
	NMA_LOCK(na->nm_mem);

	netmap_free_rings(na);

	NMA_UNLOCK(na->nm_mem);
}
/* call with NMA_LOCK held */
/*
 * Allocate the per-fd structure netmap_if.
 *
 * We assume that the configuration stored in na
 * (number of tx/rx rings and descs) does not change while
 * the interface is in netmap mode.
 */
struct netmap_if *
netmap_mem_if_new(struct netmap_adapter *na)
{
	struct netmap_if *nifp;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, ntx, nrx;

	/* account for the (possibly fake) host rings */
	ntx = na->num_tx_rings + 1;
	nrx = na->num_rx_rings + 1;
	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */

	NMA_LOCK(na->nm_mem);

	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
	nifp = netmap_if_malloc(na->nm_mem, len);
	if (nifp == NULL) {
		NMA_UNLOCK(na->nm_mem);
		return NULL;
	}
	/* initialize base fields -- override const */
	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	strncpy(nifp->ni_name, na->name, (size_t)IFNAMSIZ);

	/*
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
	base = netmap_if_offset(na->nm_mem, nifp);
	for (i = 0; i < ntx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
			netmap_ring_offset(na->nm_mem, na->tx_rings[i].ring) - base;
	}
	for (i = 0; i < nrx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
			netmap_ring_offset(na->nm_mem, na->rx_rings[i].ring) - base;
	}

	NMA_UNLOCK(na->nm_mem);

	return (nifp);
}
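/*
 * Userspace then reaches ring i with the following sketch (this mirrors
 * the NETMAP_TXRING()/NETMAP_RXRING() macros in net/netmap_user.h):
 *
 *	struct netmap_ring *ring = (struct netmap_ring *)
 *		((char *)nifp + nifp->ring_ofs[i]);
 *
 * i.e. the stored offsets are relative to the nifp itself, which is the
 * address userspace obtains from the mmap()ed region.
 */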
void
netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
{
	if (nifp == NULL)
		/* nothing to do */
		return;
	NMA_LOCK(na->nm_mem);
	if (nifp->ni_bufs_head)
		netmap_extra_free(na, nifp->ni_bufs_head);
	netmap_if_free(na->nm_mem, nifp);

	NMA_UNLOCK(na->nm_mem);
}
static void
netmap_mem_global_deref(struct netmap_mem_d *nmd)
{
	NMA_LOCK(nmd);

	nmd->refcount--;
	if (netmap_verbose)
		D("refcount = %d", nmd->refcount);

	NMA_UNLOCK(nmd);
}
int
netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	if (nm_mem_assign_group(nmd, na->pdev) < 0) {
		return ENOMEM;
	} else {
		nmd->finalize(nmd);
	}

	if (!nmd->lasterr && na->pdev)
		netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na);

	return nmd->lasterr;
}
void
netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	NMA_LOCK(nmd);
	netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na);
	if (nmd->refcount == 1) {
		u_int i;

		/*
		 * Reset the allocator when it falls out of use so that any
		 * pool resources leaked by unclean application exits are
		 * reclaimed.
		 */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			struct netmap_obj_pool *p;
			u_int j;

			p = &nmd->pools[i];
			p->objfree = p->objtotal;
			/*
			 * Reproduce the net effect of the M_ZERO malloc()
			 * and marking of free entries in the bitmap that
			 * occur in finalize_obj_allocator()
			 */
			memset(p->bitmap, 0,
				sizeof(uint32_t) * ((p->objtotal + 31) / 32));

			/*
			 * Set all the bits in the bitmap that have
			 * corresponding buffers to 1 to indicate they are
			 * free.
			 */
			for (j = 0; j < p->objtotal; j++) {
				if (p->lut[j].vaddr != NULL) {
					p->bitmap[ (j>>5) ] |= ( 1 << (j & 31) );
				}
			}
		}

		/*
		 * Per netmap_mem_finalize_all(),
		 * buffers 0 and 1 are reserved
		 */
		nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
		nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3;
	}
	nmd->deref(nmd);
	NMA_UNLOCK(nmd);
}