/*
 * Copyright (C) 2012 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $Id: netmap_mem2.c 11881 2012-10-18 23:24:15Z luigi $
 *
 * (New) memory allocator for netmap
 *
 * This allocator creates three memory regions:
 *	nm_if_pool	for the struct netmap_if
 *	nm_ring_pool	for the struct netmap_ring
 *	nm_buf_pool	for the packet buffers.
 *
 * All regions need to be a multiple of the page size as we export them to
 * userspace through mmap. Only the latter needs to be dma-able,
 * but for convenience we use the same type of allocator for all.
 *
 * Once mapped, the three regions are exported to userspace
 * as a contiguous block, starting from nm_if_pool. Each
 * cluster (and pool) is an integral number of pages.
 *   [ . . . ][ . . . . . .][ . . . . . . . . . .]
 *    nm_if     nm_ring            nm_buf
 *
 * The userspace areas contain offsets of the objects in userspace.
 * When (at init time) we write these offsets, we find out the index
 * of the object, and from there locate the offset from the beginning
 * of the region.
 *
 * The individual allocators manage a pool of memory for objects of
 * the same size.
 * The pool is split into smaller clusters, whose size is a
 * multiple of the page size. The cluster size is chosen
 * to minimize the waste for a given max cluster size
 * (we do it by brute force, as we have relatively few object
 * sizes in the system).
 *
 * Objects are aligned to the cache line (64 bytes), rounding up object
 * sizes when needed. A bitmap contains the state of each object.
 * Allocation scans the bitmap; this is done only on attach, so we are not
 * too worried about performance.
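 *
 * For example (illustrative arithmetic, not taken from the code below):
 * object j is tracked by bit (j % 32) of bitmap word (j / 32), so
 * object 70 lives in bitmap[2], bit 6, and
 *	bitmap[j / 32] & (1 << (j % 32))
 * is nonzero exactly when object j is free.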
 *
 * For each allocator we can define (through sysctl) the size and
 * number of each object. Memory is allocated at the first use of a
 * netmap file descriptor, and can be freed when all such descriptors
 * have been released (including unmapping the memory).
 * If memory is scarce, the system tries to get as much as possible
 * and the sysctl values reflect the actual allocation.
 * Together with the desired values, the sysctls also export absolute
 * minimum and maximum values that cannot be overridden.
 *
 * struct netmap_if:
 *	variable size, max 16 bytes per ring pair plus some fixed amount.
 *	1024 bytes should be large enough in practice.
 *
 *	In the worst case we have one netmap_if per ring in the system.
 *
 * struct netmap_ring
 *	variable size too, 8 bytes per slot plus some fixed amount.
 *	Rings can be large (e.g. 4k slots, or >32Kbytes).
 *	We default to 36 KB (9 pages), and a few hundred rings.
 *
 * struct netmap_buffer
 *	The more the better, both because fast interfaces tend to have
 *	many slots, and because we may want to use buffers to store
 *	packets in userspace avoiding copies.
 *	Must contain a full frame (e.g. 1518, or more for VLANs, jumbo
 *	frames etc.), plus be nicely aligned, plus some NICs restrict
 *	the size to a multiple of 1K or so. Default to 2K.
 */

#ifndef CONSERVATIVE
#define NETMAP_BUF_MAX_NUM	20*4096*2	/* large machine */
#else /* CONSERVATIVE */
#define NETMAP_BUF_MAX_NUM	20000	/* 40MB */
#endif

#ifdef linux
#define NMA_LOCK_T		struct semaphore
#define NMA_LOCK_INIT()		sema_init(&nm_mem.nm_mtx, 1)
#define NMA_LOCK_DESTROY()
#define NMA_LOCK()		down(&nm_mem.nm_mtx)
#define NMA_UNLOCK()		up(&nm_mem.nm_mtx)
#else /* !linux */
#define NMA_LOCK_T		struct mtx
#define NMA_LOCK_INIT()		mtx_init(&nm_mem.nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF)
#define NMA_LOCK_DESTROY()	mtx_destroy(&nm_mem.nm_mtx)
#define NMA_LOCK()		mtx_lock(&nm_mem.nm_mtx)
#define NMA_UNLOCK()		mtx_unlock(&nm_mem.nm_mtx)
#endif /* linux */
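
/*
 * Typical use of the macros above (illustrative sketch only; the
 * actual call sites are in the driver code, and the functions below
 * that say "call with lock held" expect exactly this bracketing):
 *
 *	NMA_LOCK();
 *	error = netmap_memory_finalize();
 *	NMA_UNLOCK();
 */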

struct netmap_obj_params {
	u_int size;
	u_int num;
};

struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 100,
	},
	[NETMAP_RING_POOL] = {
		.size = 9*PAGE_SIZE,
		.num  = 200,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = NETMAP_BUF_MAX_NUM,
	},
};

struct netmap_obj_pool {
	char name[16];		/* name of the allocator */
	u_int objtotal;		/* actual total number of objects. */
	u_int objfree;		/* number of free objects. */
	u_int clustentries;	/* actual objects per cluster */

	/* limits */
	u_int objminsize;	/* minimum object size */
	u_int objmaxsize;	/* maximum object size */
	u_int nummin;		/* minimum number of objects */
	u_int nummax;		/* maximum number of objects */

	/* the total memory space is _numclusters*_clustsize */
	u_int _numclusters;	/* how many clusters */
	u_int _clustsize;	/* cluster size */
	u_int _objsize;		/* actual object size */

	u_int _memtotal;	/* _numclusters*_clustsize */
	struct lut_entry *lut;	/* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */
};

struct netmap_mem_d {
	NMA_LOCK_T nm_mtx;	/* protect the allocator */
	u_int nm_totalsize;	/* shorthand */

	int finalized;		/* !=0 iff preallocation done */
	int lasterr;		/* last error for curr config */
	int refcount;		/* existing priv structures */
	/* the three allocators */
	struct netmap_obj_pool pools[NETMAP_POOLS_NR];
};

static struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name = "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.nummin = 10,		/* don't be stingy */
			.nummax = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name = "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
		},
		[NETMAP_BUF_POOL] = {
			.name = "netmap_buf",
			.nummax = 1000000, /* one million! */
		},
	},
};

struct lut_entry *netmap_buffer_lut;	/* exported */

/* memory allocator related sysctls */

#define STRINGIFY(x) #x

#define DECLARE_SYSCTLS(id, name) \
	/* TUNABLE_INT("hw.netmap." STRINGIFY(name) "_size", &netmap_params[id].size); */ \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	/* TUNABLE_INT("hw.netmap." STRINGIFY(name) "_num", &netmap_params[id].num); */ \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s")

DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
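
/*
 * The macro above exposes, e.g. for the buffer pool, sysctls such as
 * the following (illustrative shell usage, values made up):
 *
 *	sysctl dev.netmap.buf_size=4096		# request 4 KB buffers
 *	sysctl dev.netmap.buf_num=100000	# request 100k buffers
 *	sysctl dev.netmap.buf_curr_size		# read back the actual size
 *
 * Requested values only take effect at the next reallocation, i.e.
 * when no netmap file descriptor is holding the memory.
 */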

/*
 * Convert a userspace offset to a physical address.
 * XXX re-do in a simpler way.
 *
 * The idea here is to hide from userspace applications the fact that
 * pre-allocated memory is not contiguous, but fragmented across different
 * clusters and smaller memory allocators. Consequently, first of all we need
 * to find which allocator owns the provided offset, then we need to find out
 * the physical address associated with the target page (this is done using
 * the look-up table).
 */
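/*
 * Worked example (made-up sizes): with a 4096-byte if pool and a
 * 65536-byte ring pool, offset 70000 falls in the buffer pool at
 * relative offset 70000 - 4096 - 65536 = 368; with 2048-byte buffers
 * that is object 0, so we return lut[0].paddr + 368.
 */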
static inline vm_paddr_t
netmap_ofstophys(vm_offset_t offset)
{
	int i;
	vm_offset_t o = offset;
	struct netmap_obj_pool *p = nm_mem.pools;

	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i]._memtotal, i++) {
		if (offset >= p[i]._memtotal)
			continue;
		// XXX now scan the clusters
		return p[i].lut[offset / p[i]._objsize].paddr +
			offset % p[i]._objsize;
	}
	/* this is only in case of errors */
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
	    p[NETMAP_IF_POOL]._memtotal,
	    p[NETMAP_IF_POOL]._memtotal
		+ p[NETMAP_RING_POOL]._memtotal,
	    p[NETMAP_IF_POOL]._memtotal
		+ p[NETMAP_RING_POOL]._memtotal
		+ p[NETMAP_BUF_POOL]._memtotal);
	return 0;	// XXX bad address
}

/*
 * We store objects by kernel address; we need to find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster.
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		ND("%s: return offset %d (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(v) \
	netmap_obj_offset(&nm_mem.pools[NETMAP_IF_POOL], (v))

#define netmap_ring_offset(v) \
	(nm_mem.pools[NETMAP_IF_POOL]._memtotal + \
	netmap_obj_offset(&nm_mem.pools[NETMAP_RING_POOL], (v)))

#define netmap_buf_offset(v) \
	(nm_mem.pools[NETMAP_IF_POOL]._memtotal + \
	nm_mem.pools[NETMAP_RING_POOL]._memtotal + \
	netmap_obj_offset(&nm_mem.pools[NETMAP_BUF_POOL], (v)))
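
/*
 * Example (using the same made-up sizes as the netmap_ofstophys()
 * example above): a buffer at kernel address v, 368 bytes into its
 * pool, gets exported as
 *	netmap_buf_offset(v) == 4096 + 65536 + 368 == 70000
 * i.e. exactly the global offset that netmap_ofstophys() maps back.
 */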

/*
 * Allocate an object from the pool;
 * report the index, and use start position as a hint,
 * otherwise buffer allocation becomes terribly expensive.
 */
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, int len, uint32_t *start, uint32_t *index)
{
	uint32_t i = 0;		/* index in the bitmap */
	uint32_t mask, j;	/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		D("%s request size %d too large", p->name, len);
		// XXX cannot reduce the size
		return NULL;
	}
	if (p->objfree == 0) {
		D("%s allocator: out of memory", p->name);
		return NULL;
	}
	if (start)
		i = *start;

	/* termination is guaranteed by p->objfree, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots) {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate the first free slot in this word */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
		if (index)
			*index = i * 32 + j;
	}
	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",
	    p->name, i, j, vaddr);

	if (start)
		*start = i;
	return vaddr;
}

/*
 * free by index, not by address
 */
static void
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	if (j >= p->objtotal) {
		D("invalid index %u, max %u", j, p->objtotal);
		return;
	}
	p->bitmap[j / 32] |= (1 << (j % 32));
	p->objfree++;
}

static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	int i, j, n = p->_memtotal / p->_clustsize;

	for (i = 0, j = 0; i < n; i++, j += p->clustentries) {
		void *base = p->lut[i * p->clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* Given address is out of the scope of the current cluster. */
		if (vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		KASSERT(j != 0, ("Cannot free object 0"));
		netmap_obj_free(p, j);
		return;
	}
	ND("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}

#define netmap_if_malloc(len)	netmap_obj_malloc(&nm_mem.pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(v)	netmap_obj_free_va(&nm_mem.pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(len)	netmap_obj_malloc(&nm_mem.pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(v)	netmap_obj_free_va(&nm_mem.pools[NETMAP_RING_POOL], (v))

#define netmap_buf_malloc(_pos, _index) \
	netmap_obj_malloc(&nm_mem.pools[NETMAP_BUF_POOL], NETMAP_BUF_SIZE, _pos, _index)

/* Return the index associated to the given packet buffer */
#define netmap_buf_index(v) \
	(netmap_obj_offset(&nm_mem.pools[NETMAP_BUF_POOL], (v)) / nm_mem.pools[NETMAP_BUF_POOL]._objsize)
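
/*
 * Illustrative: with 2048-byte buffers, a buffer starting 8192 bytes
 * into the pool has netmap_buf_index(v) == 8192 / 2048 == 4.
 */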

/* Return nonzero on error */
static int
netmap_new_bufs(struct netmap_if *nifp,
		struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = &nm_mem.pools[NETMAP_BUF_POOL];
	int i = 0;		/* slot counter */
	uint32_t pos = 0;	/* slot in p->bitmap */
	uint32_t index = 0;	/* buffer index */

	(void)nifp;	/* UNUSED */
	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc(&pos, &index);
		if (vaddr == NULL) {
			D("unable to locate empty packet buffer");
			goto cleanup;
		}
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		/* XXX setting flags=NS_BUF_CHANGED forces a pointer reload
		 * in the NIC ring. This is a hack that hides missing
		 * initializations in the drivers, and should go away.
		 */
		slot[i].flags = NS_BUF_CHANGED;
	}

	ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos);
	return (0);

cleanup:
	while (i > 0) {
		i--;
		netmap_obj_free(p, slot[i].buf_idx);
	}
	bzero(slot, n * sizeof(slot[0]));
	return (ENOMEM);
}

static void
netmap_free_buf(struct netmap_if *nifp, uint32_t i)
{
	struct netmap_obj_pool *p = &nm_mem.pools[NETMAP_BUF_POOL];

	if (i < 2 || i >= p->objtotal) {
		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(p, i);
}
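
/*
 * Note: indices 0 and 1 are never handed out or freed; they are
 * reserved at configuration time (p->objfree = p->objtotal - 2) and
 * kept busy by bitmap[0] = ~3 in netmap_finalize_obj_allocator().
 */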

static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{
	if (p->bitmap)
		free(p->bitmap, M_NETMAP);
	p->bitmap = NULL;
	if (p->lut) {
		int i;
		for (i = 0; i < p->objtotal; i += p->clustentries) {
			if (p->lut[i].vaddr)
				contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
		}
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
		free(p->lut, M_NETMAP);
	}
	p->lut = NULL;
}

/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	netmap_reset_obj_allocator(p);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * In the allocator we don't need to store the objsize,
 * but we do need to keep track of objtotal' and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 * so we cannot afford gaps at the end of a cluster.
 */

/* call with NMA_LOCK held */
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
	int i, n;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */

#define MAX_CLUSTSIZE	(1<<17)
#define LINE_ROUND	64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);
		goto error;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		D("requested objsize %d out of range [%d, %d]",
		    objsize, p->objminsize, p->objmaxsize);
		goto error;
	}
	if (objtotal < p->nummin || objtotal > p->nummax) {
		D("requested objtotal %d out of range [%d, %d]",
		    objtotal, p->nummin, p->nummax);
		goto error;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
		if (delta > ( (clustentries*objsize) % PAGE_SIZE) )
			clustentries = i;
	}
	// D("XXX --- ouch, delta %d (bad for buffers)", delta);
	/* compute clustsize and round to the next page */
	clustsize = clustentries * objsize;
	i = (clustsize & (PAGE_SIZE - 1));
	if (i)
		clustsize += PAGE_SIZE - i;
	D("objsize %d clustsize %d objects %d",
	    objsize, clustsize, clustentries);
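
	/*
	 * Worked example of the search above (illustrative, assuming
	 * PAGE_SIZE 4096): objsize 2048 hits the exact solution
	 * clustentries 2, clustsize 4096 (one page, no waste);
	 * objsize 1536 settles on clustentries 8, clustsize 12288
	 * (three pages, no waste).
	 */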

	/*
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	p->clustentries = clustentries;
	p->_clustsize = clustsize;
	n = (objtotal + clustentries - 1) / clustentries;
	p->_numclusters = n;
	p->objtotal = n * clustentries;
	p->objfree = p->objtotal - 2; /* obj 0 and 1 are reserved */
	p->_memtotal = p->_numclusters * p->_clustsize;
	p->_objsize = objsize;

	return 0;

error:
	p->_objsize = objsize;
	p->objtotal = objtotal;

	return EINVAL;
}

/* call with NMA_LOCK held */
static int
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
{
	int i, n;

	n = sizeof(struct lut_entry) * p->objtotal;
	p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO);
	if (p->lut == NULL) {
		D("Unable to create lookup table (%d bytes) for '%s'", n, p->name);
		goto clean;
	}

	/* Allocate the bitmap */
	n = (p->objtotal + 31) / 32;
	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO);
	if (p->bitmap == NULL) {
		D("Unable to create bitmap (%d entries) for allocator '%s'", n,
		    p->name);
		goto clean;
	}
	p->bitmap_slots = n;

	/*
	 * Allocate clusters, init pointers and bitmap
	 */
	for (i = 0; i < p->objtotal;) {
		int lim = i + p->clustentries;
		char *clust;

		clust = contigmalloc(p->_clustsize, M_NETMAP, M_NOWAIT | M_ZERO,
		    0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 * XXX check boundaries
			 */
			D("Unable to create cluster at %d for '%s' allocator",
			    i, p->name);
			lim = i / 2;
			for (i--; i >= lim; i--) {
				p->bitmap[ (i>>5) ] &= ~( 1 << (i & 31) );
				if (i % p->clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
						p->_clustsize, M_NETMAP);
			}
			p->objtotal = i;
			p->objfree = p->objtotal - 2;
			p->_numclusters = i / p->clustentries;
			p->_memtotal = p->_numclusters * p->_clustsize;
			break;
		}
		for (; i < lim; i++, clust += p->_objsize) {
			p->bitmap[ (i>>5) ] |= ( 1 << (i & 31) );
			p->lut[i].vaddr = clust;
			p->lut[i].paddr = vtophys(clust);
		}
	}
	p->bitmap[0] = ~3; /* objs 0 and 1 are always busy */
	D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
	    p->_numclusters, p->_clustsize >> 10,
	    p->_memtotal >> 10, p->name);

	return 0;

clean:
	netmap_reset_obj_allocator(p);
	return ENOMEM;
}

/* call with lock held */
static int
netmap_memory_config_changed(void)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		if (nm_mem.pools[i]._objsize != netmap_params[i].size ||
		    nm_mem.pools[i].objtotal != netmap_params[i].num)
			return 1;
	}
	return 0;
}

/* call with lock held */
static int
netmap_memory_config(void)
{
	int i;

	if (!netmap_memory_config_changed())
		goto out;

	if (nm_mem.finalized) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			netmap_reset_obj_allocator(&nm_mem.pools[i]);
		}
		nm_mem.finalized = 0;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nm_mem.lasterr = netmap_config_obj_allocator(&nm_mem.pools[i],
			netmap_params[i].num, netmap_params[i].size);
		if (nm_mem.lasterr)
			goto out;
	}

	D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers",
	    nm_mem.pools[NETMAP_IF_POOL]._memtotal >> 10,
	    nm_mem.pools[NETMAP_RING_POOL]._memtotal >> 10,
	    nm_mem.pools[NETMAP_BUF_POOL]._memtotal >> 20);

out:
	return nm_mem.lasterr;
}

/* call with lock held */
static int
netmap_memory_finalize(void)
{
	int i;
	u_int totalsize = 0;

	nm_mem.refcount++;
	if (nm_mem.refcount > 1) {
		D("busy (refcount %d)", nm_mem.refcount);
		goto out;
	}

	/* update configuration if changed */
	if (netmap_memory_config())
		goto out;

	if (nm_mem.finalized) {
		/* may happen if config is not changed */
		goto out;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nm_mem.lasterr = netmap_finalize_obj_allocator(&nm_mem.pools[i]);
		if (nm_mem.lasterr)
			goto cleanup;
		totalsize += nm_mem.pools[i]._memtotal;
	}
	nm_mem.nm_totalsize = totalsize;

	/* backward compatibility */
	netmap_buf_size = nm_mem.pools[NETMAP_BUF_POOL]._objsize;
	netmap_total_buffers = nm_mem.pools[NETMAP_BUF_POOL].objtotal;

	netmap_buffer_lut = nm_mem.pools[NETMAP_BUF_POOL].lut;
	netmap_buffer_base = nm_mem.pools[NETMAP_BUF_POOL].lut[0].vaddr;

	nm_mem.finalized = 1;
	nm_mem.lasterr = 0;

	/* make sysctl values match actual values in the pools */
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_params[i].size = nm_mem.pools[i]._objsize;
		netmap_params[i].num = nm_mem.pools[i].objtotal;
	}

out:
	if (nm_mem.lasterr)
		nm_mem.refcount--;

	return nm_mem.lasterr;

cleanup:
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_reset_obj_allocator(&nm_mem.pools[i]);
	}
	nm_mem.refcount--;

	return nm_mem.lasterr;
}

static int
netmap_memory_init(void)
{
	NMA_LOCK_INIT();
	return (0);
}

static void
netmap_memory_fini(void)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_destroy_obj_allocator(&nm_mem.pools[i]);
	}
	NMA_LOCK_DESTROY();
}

static void
netmap_free_rings(struct netmap_adapter *na)
{
	u_int i;

	for (i = 0; i < na->num_tx_rings + 1; i++) {
		netmap_ring_free(na->tx_rings[i].ring);
		na->tx_rings[i].ring = NULL;
	}
	for (i = 0; i < na->num_rx_rings + 1; i++) {
		netmap_ring_free(na->rx_rings[i].ring);
		na->rx_rings[i].ring = NULL;
	}
}
811 /* call with NMA_LOCK held */
813 netmap_if_new(const char *ifname, struct netmap_adapter *na)
815 struct netmap_if *nifp;
816 struct netmap_ring *ring;
817 ssize_t base; /* handy for relative offsets between rings and nifp */
819 u_int ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */
820 u_int nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */
821 struct netmap_kring *kring;
824 * the descriptor is followed inline by an array of offsets
825 * to the tx and rx rings in the shared memory region.
827 len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
828 nifp = netmap_if_malloc(len);
833 /* initialize base fields -- override const */
834 *(int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
835 *(int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
836 strncpy(nifp->ni_name, ifname, IFNAMSIZ);
838 (na->refcount)++; /* XXX atomic ? we are under lock */
839 if (na->refcount > 1) { /* already setup, we are done */
844 * First instance, allocate netmap rings and buffers for this card
845 * The rings are contiguous, but have variable size.
	for (i = 0; i < ntx; i++) { /* Transmit rings */
		kring = &na->tx_rings[i];
		ndesc = na->num_tx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
			ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate tx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("txring[%d] at %p", i, ring);
		kring->na = na;
		kring->ring = ring;
		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (nm_mem.pools[NETMAP_IF_POOL]._memtotal +
			nm_mem.pools[NETMAP_RING_POOL]._memtotal) -
			netmap_ring_offset(ring);

		/*
		 * Always keep one slot empty, so we can detect new
		 * transmissions comparing cur and nr_hwcur (they are
		 * the same only if there are no new transmissions).
		 */
		ring->avail = kring->nr_hwavail = ndesc - 1;
		ring->cur = kring->nr_hwcur = 0;
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for txring[%d]", i);
		if (netmap_new_bufs(nifp, ring->slot, ndesc)) {
			D("Cannot allocate buffers for tx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
	}

	for (i = 0; i < nrx; i++) { /* Receive rings */
		kring = &na->rx_rings[i];
		ndesc = na->num_rx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
			ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate rx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("rxring[%d] at %p", i, ring);
		kring->na = na;
		kring->ring = ring;
		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (nm_mem.pools[NETMAP_IF_POOL]._memtotal +
			nm_mem.pools[NETMAP_RING_POOL]._memtotal) -
			netmap_ring_offset(ring);

		ring->cur = kring->nr_hwcur = 0;
		ring->avail = kring->nr_hwavail = 0; /* empty */
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for rxring[%d]", i);
		if (netmap_new_bufs(nifp, ring->slot, ndesc)) {
			D("Cannot allocate buffers for rx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
	}

#ifdef linux
	// XXX initialize the selrecord structs.
	for (i = 0; i < ntx; i++)
		init_waitqueue_head(&na->tx_rings[i].si);
	for (i = 0; i < nrx; i++)
		init_waitqueue_head(&na->rx_rings[i].si);
	init_waitqueue_head(&na->tx_si);
	init_waitqueue_head(&na->rx_si);
#endif /* linux */

final:
	/*
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
	base = netmap_if_offset(nifp);
	for (i = 0; i < ntx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
			netmap_ring_offset(na->tx_rings[i].ring) - base;
	}
	for (i = 0; i < nrx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
			netmap_ring_offset(na->rx_rings[i].ring) - base;
	}
	return (nifp);

cleanup:
	netmap_free_rings(na);
	netmap_if_free(nifp);
	(na->refcount)--;
	return NULL;
}
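
/*
 * Userspace reaches the rings through the offsets stored above,
 * roughly as follows (illustrative sketch using the NETMAP_IF()
 * convention from the public netmap headers):
 *
 *	struct netmap_if *nifp = NETMAP_IF(mmap_base, req.nr_offset);
 *	struct netmap_ring *txring = (struct netmap_ring *)
 *		((char *)nifp + nifp->ring_ofs[0]);
 *
 * i.e. every ring is reachable from nifp by a relative offset, so the
 * address at which the region is mapped in userspace does not matter.
 */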

/* call with NMA_LOCK held */
static void
netmap_memory_deref(void)
{
	nm_mem.refcount--;
	D("refcount = %d", nm_mem.refcount);
}