/*
 * Copyright (C) 2012-2013 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * (New) memory allocator for netmap
 *
 * This allocator creates three memory pools:
 *	nm_if_pool	for the struct netmap_if
 *	nm_ring_pool	for the struct netmap_ring
 *	nm_buf_pool	for the packet buffers.
 *
 * All pools contain netmap objects. Each pool is made of a number of
 * clusters, each a multiple of the page size and each containing an
 * integer number of objects.
 * The clusters are contiguous in user space but not in the kernel.
 * Only nm_buf_pool needs to be DMA-able,
 * but for convenience we use the same type of allocator for all three.
 *
 * Once mapped, the three pools are exported to userspace
 * as a contiguous block, starting from nm_if_pool. Each
 * cluster (and pool) is an integral number of pages.
 *   [ . . . ][ . . . . . .][ . . . . . . . . . .]
 *    nm_if     nm_ring            nm_buf
 *
 * The userspace areas contain offsets of the objects in userspace.
 * When (at init time) we write these offsets, we find out the index
 * of the object, and from there locate the offset from the beginning
 * of the region.
 *
 * The individual allocators manage a pool of memory for objects of
 * the same size.
 * The pool is split into smaller clusters, whose size is a
 * multiple of the page size. The cluster size is chosen
 * to minimize the waste for a given max cluster size
 * (we do it by brute force, as we have relatively few objects
 * per cluster).
 *
 * Objects are aligned to the cache line (64 bytes), rounding up object
 * sizes when needed. A bitmap contains the state of each object.
 * Allocation scans the bitmap; this is done only on attach, so we are not
 * too worried about performance.
 *
 * For each allocator we can define (through sysctl) the size and
 * number of each object. Memory is allocated at the first use of a
 * netmap file descriptor, and can be freed when all such descriptors
 * have been released (including unmapping the memory).
 * If memory is scarce, the system tries to get as much as possible
 * and the sysctl values reflect the actual allocation.
 * Together with the desired values, the sysctls also export absolute
 * minimum and maximum values that cannot be overridden.
 *
 * struct netmap_if:
 *	variable size, max 16 bytes per ring pair plus some fixed amount.
 *	1024 bytes should be large enough in practice.
 *
 *	In the worst case we have one netmap_if per ring in the system.
 *
 * struct netmap_ring
 *	variable size, 8 bytes per slot plus some fixed amount.
 *	Rings can be large (e.g. 4k slots, or >32Kbytes).
 *	We default to 36 KB (9 pages), and a few hundred rings.
 *
 * struct netmap_buffer
 *	The more the better, both because fast interfaces tend to have
 *	many slots, and because we may want to use buffers to store
 *	packets in userspace avoiding copies.
 *	Must contain a full frame (e.g. 1518 bytes, or more for VLANs and
 *	jumbo frames), should be nicely aligned, and some NICs restrict
 *	the size to a multiple of 1K or so. Defaults to 2K.
 */

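/*
 * Illustration only (not compiled): given the contiguous layout above,
 * the userspace offset of an object is the total size of the preceding
 * pools plus its offset inside its own pool; this is exactly what the
 * netmap_*_offset() helpers further down compute, e.g. for a ring:
 *
 *	ring_ofs = nm_mem.pools[NETMAP_IF_POOL]._memtotal +
 *	    netmap_obj_offset(&nm_mem.pools[NETMAP_RING_POOL], ring);
 */
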
#define NETMAP_BUF_MAX_NUM	20*4096*2	/* large machine */

#ifdef linux
// XXX a mtx would suffice here 20130415 lr
// #define NMA_LOCK_T		safe_spinlock_t
#define NMA_LOCK_T		struct semaphore
#define NMA_LOCK_INIT()		sema_init(&nm_mem.nm_mtx, 1)
#define NMA_LOCK_DESTROY()
#define NMA_LOCK()		down(&nm_mem.nm_mtx)
#define NMA_UNLOCK()		up(&nm_mem.nm_mtx)
#else /* !linux */
#define NMA_LOCK_T		struct mtx
#define NMA_LOCK_INIT()		mtx_init(&nm_mem.nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF)
#define NMA_LOCK_DESTROY()	mtx_destroy(&nm_mem.nm_mtx)
#define NMA_LOCK()		mtx_lock(&nm_mem.nm_mtx)
#define NMA_UNLOCK()		mtx_unlock(&nm_mem.nm_mtx)
#endif /* linux */

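/*
 * Illustration only (not compiled): callers bracket the allocator entry
 * points below with these macros, independent of the platform, e.g.
 *
 *	NMA_LOCK();
 *	error = netmap_memory_finalize();	// "call with lock held"
 *	NMA_UNLOCK();
 */
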
enum {
	NETMAP_IF_POOL = 0,
	NETMAP_RING_POOL,
	NETMAP_BUF_POOL,
	NETMAP_POOLS_NR
};

struct netmap_obj_params {
	u_int size;
	u_int num;
};

struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 100,
	},
	[NETMAP_RING_POOL] = {
		.size = 9*PAGE_SIZE,
		.num  = 200,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = NETMAP_BUF_MAX_NUM,
	},
};

struct netmap_obj_pool {
	char name[16];		/* name of the allocator */
	u_int objtotal;		/* actual total number of objects. */
	u_int objfree;		/* number of free objects. */
	u_int clustentries;	/* actual objects per cluster */

	/* limits */
	u_int objminsize;	/* minimum object size */
	u_int objmaxsize;	/* maximum object size */
	u_int nummin;		/* minimum number of objects */
	u_int nummax;		/* maximum number of objects */

	/* the total memory space is _numclusters*_clustsize */
	u_int _numclusters;	/* how many clusters */
	u_int _clustsize;	/* cluster size */
	u_int _objsize;		/* actual object size */

	u_int _memtotal;	/* _numclusters*_clustsize */
	struct lut_entry *lut;	/* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */
};

struct netmap_mem_d {
	NMA_LOCK_T nm_mtx;	/* protect the allocator */
	u_int nm_totalsize;	/* shorthand */

	int finalized;		/* !=0 iff preallocation done */
	int lasterr;		/* last error for curr config */
	int refcount;		/* existing priv structures */
	/* the three allocators */
	struct netmap_obj_pool pools[NETMAP_POOLS_NR];
};

/*
 * nm_mem is the memory allocator used for all physical interfaces
 * running in netmap mode.
 * Virtual (VALE) ports will each have their own allocator.
 */
static struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 10,	/* don't be stingy */
			.nummax	    = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name	= "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "netmap_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},
};

// XXX logically belongs to nm_mem
struct lut_entry *netmap_buffer_lut;	/* exported */

/* memory allocator related sysctls */

#define STRINGIFY(x) #x

#define DECLARE_SYSCTLS(id, name) \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s")

DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);

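/*
 * Illustration only: with the declarations above the knobs appear under
 * dev.netmap, and can be tuned from the shell before the first netmap
 * file descriptor is opened (the values below are just examples):
 *
 *	sysctl dev.netmap.buf_size=2048
 *	sysctl dev.netmap.ring_num=200
 *	sysctl dev.netmap.if_curr_num	# read-only, actual allocation
 */
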
/*
 * Convert a userspace offset to a physical address.
 * XXX only called in FreeBSD's netmap_mmap(),
 * because in linux we map everything at once.
 *
 * First, find the allocator that contains the requested offset,
 * then locate the cluster through a lookup table.
 */
static inline vm_paddr_t
netmap_ofstophys(vm_offset_t offset)
{
	int i;
	vm_offset_t o = offset;
	struct netmap_obj_pool *p = nm_mem.pools;

	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i]._memtotal, i++) {
		if (offset >= p[i]._memtotal)
			continue;
		// now lookup the cluster's address
		return p[i].lut[offset / p[i]._objsize].paddr +
			offset % p[i]._objsize;
	}
	/* this is only in case of errors */
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
	    p[NETMAP_IF_POOL]._memtotal,
	    p[NETMAP_IF_POOL]._memtotal
	        + p[NETMAP_RING_POOL]._memtotal,
	    p[NETMAP_IF_POOL]._memtotal
	        + p[NETMAP_RING_POOL]._memtotal
	        + p[NETMAP_BUF_POOL]._memtotal);
	return 0;	// XXX bad address
}

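/*
 * Illustration only (made-up sizes): if the if pool spans the first
 * 0x4000 bytes and a request comes in for offset 0x5000, the loop above
 * subtracts 0x4000, lands in the ring pool with a residual offset of
 * 0x1000, and resolves it as
 *	lut[0x1000 / _objsize].paddr + 0x1000 % _objsize
 */
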
/*
 * We store objects by kernel address; we need to find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster.
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		ND("%s: return offset %d (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(v)					\
	netmap_obj_offset(&nm_mem.pools[NETMAP_IF_POOL], (v))

#define netmap_ring_offset(v)					\
	(nm_mem.pools[NETMAP_IF_POOL]._memtotal +		\
	netmap_obj_offset(&nm_mem.pools[NETMAP_RING_POOL], (v)))

#define netmap_buf_offset(v)					\
	(nm_mem.pools[NETMAP_IF_POOL]._memtotal +		\
	nm_mem.pools[NETMAP_RING_POOL]._memtotal +		\
	netmap_obj_offset(&nm_mem.pools[NETMAP_BUF_POOL], (v)))

/*
 * Report the index, and use the start position as a hint;
 * otherwise buffer allocation becomes terribly expensive.
 */
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, int len, uint32_t *start, uint32_t *index)
{
	uint32_t i = 0;			/* index in the bitmap */
	uint32_t mask, j;		/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		D("%s request size %d too large", p->name, len);
		// XXX cannot reduce the size
		return NULL;
	}

	if (p->objfree == 0) {
		D("%s allocator: out of memory", p->name);
		return NULL;
	}
	if (start)
		i = *start;

	/* termination is guaranteed by p->free, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots) {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
		if (index)
			*index = i * 32 + j;
	}
	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",
	    p->name, i, j, vaddr);

	if (start)
		*start = i;
	return vaddr;
}

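/*
 * Note (illustration only): the inner for loop above is a plain
 * find-first-set; since cur is known to be nonzero at that point, on
 * FreeBSD it could equivalently be written as
 *	j = ffs(cur) - 1;
 */
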
/*
 * Free by index, not by address. This is slow, but is only used
 * for a small number of objects (rings, nifp).
 */
static void
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	if (j >= p->objtotal) {
		D("invalid index %u, max %u", j, p->objtotal);
		return;
	}
	p->bitmap[j / 32] |= (1 << (j % 32));
	p->objfree++;
	return;
}

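/*
 * Illustration only: freeing, say, index 37 sets bit 37 % 32 = 5 of
 * bitmap word 37 / 32 = 1, i.e. bitmap[1] |= 1 << 5, marking the
 * object free again for the scan in netmap_obj_malloc().
 */
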
static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	int i, j, n = p->_memtotal / p->_clustsize;

	for (i = 0, j = 0; i < n; i++, j += p->clustentries) {
		void *base = p->lut[i * p->clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* The given address is out of the scope of the current cluster. */
		if (vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		KASSERT(j != 0, ("Cannot free object 0"));
		netmap_obj_free(p, j);
		return;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}

#define netmap_if_malloc(len)	netmap_obj_malloc(&nm_mem.pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(v)	netmap_obj_free_va(&nm_mem.pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(len)	netmap_obj_malloc(&nm_mem.pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(v)	netmap_obj_free_va(&nm_mem.pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(_pos, _index)			\
	netmap_obj_malloc(&nm_mem.pools[NETMAP_BUF_POOL], NETMAP_BUF_SIZE, _pos, _index)


/* Return the index associated to the given packet buffer */
#define netmap_buf_index(v)						\
	(netmap_obj_offset(&nm_mem.pools[NETMAP_BUF_POOL], (v)) / nm_mem.pools[NETMAP_BUF_POOL]._objsize)

/* Return nonzero on error */
static int
netmap_new_bufs(struct netmap_if *nifp,
                struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = &nm_mem.pools[NETMAP_BUF_POOL];
	int i = 0;	/* slot counter */
	uint32_t pos = 0;	/* slot in p->bitmap */
	uint32_t index = 0;	/* buffer index */

	(void)nifp;	/* UNUSED */
	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc(&pos, &index);
		if (vaddr == NULL) {
			D("unable to locate empty packet buffer");
			goto cleanup;
		}
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		/* XXX setting flags=NS_BUF_CHANGED forces a pointer reload
		 * in the NIC ring. This is a hack that hides missing
		 * initializations in the drivers, and should go away.
		 */
		// slot[i].flags = NS_BUF_CHANGED;
	}

	ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos);
	return (0);

cleanup:
	while (i > 0) {
		i--;
		netmap_obj_free(p, slot[i].buf_idx);
	}
	bzero(slot, n * sizeof(slot[0]));
	return (ENOMEM);
}

static void
netmap_free_buf(struct netmap_if *nifp, uint32_t i)
{
	struct netmap_obj_pool *p = &nm_mem.pools[NETMAP_BUF_POOL];

	if (i < 2 || i >= p->objtotal) {
		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(p, i);
}

static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	if (p->bitmap)
		free(p->bitmap, M_NETMAP);
	p->bitmap = NULL;
	if (p->lut) {
		int i;
		for (i = 0; i < p->objtotal; i += p->clustentries) {
			if (p->lut[i].vaddr)
				contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
		}
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
#ifdef linux
		vfree(p->lut);
#else
		free(p->lut, M_NETMAP);
#endif
	}
	p->lut = NULL;
}

/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	netmap_reset_obj_allocator(p);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * In the allocator we don't need to store the objsize,
 * but we do need to keep track of objtotal' and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 * so we cannot afford gaps at the end of a cluster.
 */


/* call with NMA_LOCK held */
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
	int i, n;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */

#define MAX_CLUSTSIZE	(1<<17)
#define LINE_ROUND	64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);
		goto error;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		D("requested objsize %d out of range [%d, %d]",
		    objsize, p->objminsize, p->objmaxsize);
		goto error;
	}
	if (objtotal < p->nummin || objtotal > p->nummax) {
		D("requested objtotal %d out of range [%d, %d]",
		    objtotal, p->nummin, p->nummax);
		goto error;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
		if (delta > ( (clustentries*objsize) % PAGE_SIZE) )
			clustentries = i;
	}
	// D("XXX --- ouch, delta %d (bad for buffers)", delta);
	/* compute clustsize and round to the next page */
	clustsize = clustentries * objsize;
	i = (clustsize & (PAGE_SIZE - 1));
	if (i)
		clustsize += PAGE_SIZE - i;
	if (netmap_verbose)
		D("objsize %d clustsize %d objects %d",
		    objsize, clustsize, clustentries);

	/*
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	p->clustentries = clustentries;
	p->_clustsize = clustsize;
	n = (objtotal + clustentries - 1) / clustentries;
	p->_numclusters = n;
	p->objtotal = n * clustentries;
	p->objfree = p->objtotal - 2; /* obj 0 and 1 are reserved */
	p->_memtotal = p->_numclusters * p->_clustsize;
	p->_objsize = objsize;

	return 0;

error:
	p->_objsize = objsize;
	p->objtotal = objtotal;

	return EINVAL;
}

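/*
 * Illustration only (assuming PAGE_SIZE 4096): for objsize 2048 the
 * brute-force loop above finds an exact fit at i = 2 (used = 4096,
 * delta = 0), so clustentries = 2 and clustsize = 4096; for objsize
 * 1024 it settles on 4 objects per page, again with no waste.
 */
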
/* call with NMA_LOCK held */
static int
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
{
	int i, n;

	n = sizeof(struct lut_entry) * p->objtotal;
#ifdef linux
	p->lut = vmalloc(n);
#else
	p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO);
#endif
	if (p->lut == NULL) {
		D("Unable to create lookup table (%d bytes) for '%s'", n, p->name);
		goto clean;
	}

	/* Allocate the bitmap */
	n = (p->objtotal + 31) / 32;
	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO);
	if (p->bitmap == NULL) {
		D("Unable to create bitmap (%d entries) for allocator '%s'", n,
		    p->name);
		goto clean;
	}
	p->bitmap_slots = n;

	/*
	 * Allocate clusters, init pointers and bitmap
	 */
	for (i = 0; i < p->objtotal;) {
		int lim = i + p->clustentries;
		char *clust;

		clust = contigmalloc(p->_clustsize, M_NETMAP, M_NOWAIT | M_ZERO,
		    0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 * XXX check boundaries
			 */
			D("Unable to create cluster at %d for '%s' allocator",
			    i, p->name);
			lim = i / 2;
			for (i--; i >= lim; i--) {
				p->bitmap[ (i>>5) ] &= ~( 1 << (i & 31) );
				if (i % p->clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
					    p->_clustsize, M_NETMAP);
			}
			p->objtotal = i;
			p->objfree = p->objtotal - 2;
			p->_numclusters = i / p->clustentries;
			p->_memtotal = p->_numclusters * p->_clustsize;
			break;
		}
		for (; i < lim; i++, clust += p->_objsize) {
			p->bitmap[ (i>>5) ] |= ( 1 << (i & 31) );
			p->lut[i].vaddr = clust;
			p->lut[i].paddr = vtophys(clust);
		}
	}
	p->bitmap[0] = ~3; /* objs 0 and 1 are always busy */
	if (netmap_verbose)
		D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
		    p->_numclusters, p->_clustsize >> 10,
		    p->_memtotal >> 10, p->name);

	return 0;

clean:
	netmap_reset_obj_allocator(p);
	return ENOMEM;
}

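/*
 * Illustration only: ~3 is 0xfffffffc, i.e. all objects covered by the
 * first bitmap word are marked free except objects 0 and 1, which stay
 * permanently busy (presumably so a buf_idx of 0 never refers to a
 * usable buffer; cf. the KASSERT in netmap_obj_free_va()).
 */
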
/* call with lock held */
static int
netmap_memory_config_changed(void)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		if (nm_mem.pools[i]._objsize != netmap_params[i].size ||
		    nm_mem.pools[i].objtotal != netmap_params[i].num)
			return 1;
	}
	return 0;
}

/* call with lock held */
static int
netmap_memory_config(void)
{
	int i;

	if (!netmap_memory_config_changed())
		goto out;

	D("reconfiguring");

	if (nm_mem.finalized) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			netmap_reset_obj_allocator(&nm_mem.pools[i]);
		}
		nm_mem.finalized = 0;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nm_mem.lasterr = netmap_config_obj_allocator(&nm_mem.pools[i],
		    netmap_params[i].num, netmap_params[i].size);
		if (nm_mem.lasterr)
			goto out;
	}

	D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers",
	    nm_mem.pools[NETMAP_IF_POOL]._memtotal >> 10,
	    nm_mem.pools[NETMAP_RING_POOL]._memtotal >> 10,
	    nm_mem.pools[NETMAP_BUF_POOL]._memtotal >> 20);

out:

	return nm_mem.lasterr;
}

/* call with lock held */
static int
netmap_memory_finalize(void)
{
	int i;
	u_int totalsize = 0;

	nm_mem.refcount++;
	if (nm_mem.refcount > 1) {
		ND("busy (refcount %d)", nm_mem.refcount);
		goto out;
	}

	/* update configuration if changed */
	if (netmap_memory_config())
		goto out;

	if (nm_mem.finalized) {
		/* may happen if config is not changed */
		goto out;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nm_mem.lasterr = netmap_finalize_obj_allocator(&nm_mem.pools[i]);
		if (nm_mem.lasterr)
			goto cleanup;
		totalsize += nm_mem.pools[i]._memtotal;
	}
	nm_mem.nm_totalsize = totalsize;

	/* backward compatibility */
	netmap_buf_size = nm_mem.pools[NETMAP_BUF_POOL]._objsize;
	netmap_total_buffers = nm_mem.pools[NETMAP_BUF_POOL].objtotal;

	netmap_buffer_lut = nm_mem.pools[NETMAP_BUF_POOL].lut;
	netmap_buffer_base = nm_mem.pools[NETMAP_BUF_POOL].lut[0].vaddr;

	nm_mem.finalized = 1;
	nm_mem.lasterr = 0;

	/* make sysctl values match actual values in the pools */
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_params[i].size = nm_mem.pools[i]._objsize;
		netmap_params[i].num  = nm_mem.pools[i].objtotal;
	}

out:
	if (nm_mem.lasterr)
		nm_mem.refcount--;

	return nm_mem.lasterr;

cleanup:
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_reset_obj_allocator(&nm_mem.pools[i]);
	}
	nm_mem.refcount--;

	return nm_mem.lasterr;
}

static int
netmap_memory_init(void)
{
	NMA_LOCK_INIT();
	return (0);
}

static void
netmap_memory_fini(void)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_destroy_obj_allocator(&nm_mem.pools[i]);
	}
	NMA_LOCK_DESTROY();
}

static void
netmap_free_rings(struct netmap_adapter *na)
{
	int i;

	for (i = 0; i < na->num_tx_rings + 1; i++) {
		netmap_ring_free(na->tx_rings[i].ring);
		na->tx_rings[i].ring = NULL;
	}
	for (i = 0; i < na->num_rx_rings + 1; i++) {
		netmap_ring_free(na->rx_rings[i].ring);
		na->rx_rings[i].ring = NULL;
	}
	free(na->tx_rings, M_DEVBUF);
	na->tx_rings = na->rx_rings = NULL;
}

/* call with NMA_LOCK held */
/*
 * Allocate the per-fd structure netmap_if.
 * If this is the first instance, also allocate the krings, rings etc.
 */
static void *
netmap_if_new(const char *ifname, struct netmap_adapter *na)
{
	struct netmap_if *nifp;
	struct netmap_ring *ring;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, ndesc, ntx, nrx;
	struct netmap_kring *kring;

	if (netmap_update_config(na)) {
		/* configuration mismatch, report and fail */
		return NULL;
	}
	ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */
	nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */
	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */
	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
	nifp = netmap_if_malloc(len);
	if (nifp == NULL) {
		return NULL;
	}

	/* initialize base fields -- override const */
	*(int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	strncpy(nifp->ni_name, ifname, IFNAMSIZ);

	(na->refcount)++;	/* XXX atomic ? we are under lock */
	if (na->refcount > 1) { /* already setup, we are done */
		goto final;
	}

	len = (ntx + nrx) * sizeof(struct netmap_kring);
	na->tx_rings = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (na->tx_rings == NULL) {
		D("Cannot allocate krings for %s", ifname);
		goto cleanup;
	}
	na->rx_rings = na->tx_rings + ntx;

	/*
	 * First instance, allocate netmap rings and buffers for this card.
	 * The rings are contiguous, but have variable size.
	 */
	for (i = 0; i < ntx; i++) { /* Transmit rings */
		kring = &na->tx_rings[i];
		ndesc = na->num_tx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
		    ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate tx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("txring[%d] at %p", i, ring);
		kring->na = na;
		kring->ring = ring;
		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (nm_mem.pools[NETMAP_IF_POOL]._memtotal +
		     nm_mem.pools[NETMAP_RING_POOL]._memtotal) -
		     netmap_ring_offset(ring);

		/*
		 * IMPORTANT:
		 * Always keep one slot empty, so we can detect new
		 * transmissions comparing cur and nr_hwcur (they are
		 * the same only if there are no new transmissions).
		 */
		ring->avail = kring->nr_hwavail = ndesc - 1;
		ring->cur = kring->nr_hwcur = 0;
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for txring[%d]", i);
		if (netmap_new_bufs(nifp, ring->slot, ndesc)) {
			D("Cannot allocate buffers for tx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
	}

	for (i = 0; i < nrx; i++) { /* Receive rings */
		kring = &na->rx_rings[i];
		ndesc = na->num_rx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
		    ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate rx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("rxring[%d] at %p", i, ring);
		kring->na = na;
		kring->ring = ring;

		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
		    (nm_mem.pools[NETMAP_IF_POOL]._memtotal +
		     nm_mem.pools[NETMAP_RING_POOL]._memtotal) -
		     netmap_ring_offset(ring);

		ring->cur = kring->nr_hwcur = 0;
		ring->avail = kring->nr_hwavail = 0; /* empty */
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for rxring[%d]", i);
		if (netmap_new_bufs(nifp, ring->slot, ndesc)) {
			D("Cannot allocate buffers for rx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
	}

	// XXX initialize the selrecord structs.
	for (i = 0; i < ntx; i++)
		init_waitqueue_head(&na->tx_rings[i].si);
	for (i = 0; i < nrx; i++)
		init_waitqueue_head(&na->rx_rings[i].si);
	init_waitqueue_head(&na->tx_si);
	init_waitqueue_head(&na->rx_si);

final:
	/*
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
	base = netmap_if_offset(nifp);
	for (i = 0; i < ntx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
			netmap_ring_offset(na->tx_rings[i].ring) - base;
	}
	for (i = 0; i < nrx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
			netmap_ring_offset(na->rx_rings[i].ring) - base;
	}
	return (nifp);
cleanup:
	netmap_free_rings(na);
	netmap_if_free(nifp);
	(na->refcount)--;
	return NULL;
}

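/*
 * Illustration only (not compiled): the relative offsets stored above
 * are what lets userspace navigate the shared region, essentially what
 * the NETMAP_TXRING()/NETMAP_BUF() macros of netmap_user.h expand to:
 *
 *	struct netmap_ring *ring = (struct netmap_ring *)
 *	    ((char *)nifp + nifp->ring_ofs[i]);
 *	char *buf = (char *)ring + ring->buf_ofs +
 *	    ring->slot[j].buf_idx * ring->nr_buf_size;
 */
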
/* call with NMA_LOCK held */
static void
netmap_memory_deref(void)
{
	nm_mem.refcount--;
	if (netmap_verbose)
		D("refcount = %d", nm_mem.refcount);
}