/*
 * Copyright (C) 2012 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $Id: netmap_mem2.c 10830 2012-03-22 18:06:01Z luigi $
 *
 * New memory allocator for netmap
 *
 * The new version allocates three regions:
 *	nm_if_pool	for the struct netmap_if
 *	nm_ring_pool	for the struct netmap_ring
 *	nm_buf_pool	for the packet buffers.
 *
 * All regions need to be page-sized as we export them to
 * userspace through mmap. Only the latter needs to be dma-able,
 * but for convenience we use the same type of allocator for all.
 *
 * Once mapped, the three regions are exported to userspace
 * as a contiguous block, starting from nm_if_pool. Each
 * cluster (and pool) is an integral number of pages.
 *   [ . . . ][ . . . . . .][ . . . . . . . . . .]
 *    nm_if     nm_ring            nm_buf
 *
 * The userspace areas contain offsets of the objects in userspace.
 * When (at init time) we write these offsets, we find out the index
 * of the object, and from there locate the offset from the beginning
 * of the region.
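 *
 * A sketch of how userspace reaches these objects, following the
 * public netmap API from netmap.h and netmap_user.h (error handling
 * omitted; "em0" is just an example interface):
 *
 *	struct nmreq req;
 *	bzero(&req, sizeof(req));
 *	strcpy(req.nr_name, "em0");
 *	int fd = open("/dev/netmap", O_RDWR);
 *	ioctl(fd, NIOCREGIF, &req);	// fills nr_offset, nr_memsize
 *	char *mem = mmap(0, req.nr_memsize, PROT_READ | PROT_WRITE,
 *		MAP_SHARED, fd, 0);	// the three pools, contiguous
 *	struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);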
 *
 * Allocator for a pool of memory objects of the same size.
 * The pool is split into smaller clusters, whose size is a
 * multiple of the page size. The cluster size is chosen
 * to minimize the waste for a given max cluster size
 * (we do it by brute force, as we have relatively few objects
 * per cluster).
 *
 * To be polite with the cache, objects are aligned to
 * the cache line, or 64 bytes. Sizes are rounded to a multiple of 64.
 * For each object we have
 * one entry in the bitmap to signal its state. Allocation scans
 * the bitmap, but since this is done only on attach, we are not
 * too worried about performance.
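 *
 * For instance (illustrative only), a 120-byte request is rounded
 * up to 128 bytes, and object i is tracked by bit (i % 32) of
 * bitmap word (i / 32), roughly:
 *
 *	objsize = (objsize + 63) & ~63;			// round to 64
 *	free = p->bitmap[i / 32] & (1 << (i % 32));	// 1 means free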
 *
 * (all the parameters below will become tunables)
 *
 * struct netmap_if is variable size but small.
 * Assuming each NIC has 8+2 rings (4+1 tx, 4+1 rx), the netmap_if
 * uses 120 bytes on a 64-bit machine.
 * We allocate NETMAP_IF_MAX_SIZE (1024) which should work even for
 * cards with 48 ring pairs.
 * The total number of 'struct netmap_if' could be slightly larger
 * than the total number of rings on all interfaces on the system.
 */
#define NETMAP_IF_MAX_SIZE	1024
#define NETMAP_IF_MAX_NUM	512
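
/*
 * Size check (illustrative): netmap_if_new() below computes
 *	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t)
 * so, assuming a 40-byte fixed part (which matches the 120-byte
 * figure above for 5+5 rings), even 48 ring pairs plus the two
 * stack rings need only 40 + 98*8 = 824 bytes, within the 1024.
 */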

/*
 * netmap rings are up to 2..4k descriptors, 8 bytes each,
 * plus some glue at the beginning (32 bytes).
 * We set the default ring size to 9 pages (36K) and enable
 * a few hundred of them.
 */
#define NETMAP_RING_MAX_SIZE	(9*PAGE_SIZE)
#define NETMAP_RING_MAX_NUM	200	/* approx 8MB */
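
/*
 * Sanity check (illustrative, 4KB pages): a 4k-slot ring takes
 * 32 bytes of glue plus 4096 * 8 = 32768 bytes of slots, i.e.
 * 32800 bytes, which fits in the 9-page (36864-byte) objects
 * created from NETMAP_RING_MAX_SIZE.
 */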

/*
 * Buffers: the more the better. Buffer size is NETMAP_BUF_SIZE,
 * 2k or slightly less, aligned to 64 bytes.
 * A large 10G interface can have 2k*18 = 36k buffers per interface,
 * or about 72MB of memory. Up to us to use more.
 */
#ifndef CONSERVATIVE
#define NETMAP_BUF_MAX_NUM	100000	/* 200MB */
#else /* CONSERVATIVE */
#define NETMAP_BUF_MAX_NUM	 20000	/* 40MB */
#endif

struct netmap_obj_pool {
	char name[16];		/* name of the allocator */
	u_int objtotal;		/* actual total number of objects */
	u_int objfree;		/* number of free objects */
	u_int clustentries;	/* actual objects per cluster */

	/* the total memory space is _numclusters*_clustsize */
	u_int _numclusters;	/* how many clusters */
	u_int _clustsize;	/* cluster size */
	u_int _objsize;		/* actual object size */

	u_int _memtotal;	/* _numclusters*_clustsize */
	struct lut_entry *lut;	/* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
};
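
/*
 * Sketch of the intended invariants (not enforced at runtime):
 * lut[] has one entry per object, so for object index i
 *
 *	vaddr = p->lut[i].vaddr;	// kernel virtual address
 *	paddr = p->lut[i].paddr;	// physical address
 *	is_free = p->bitmap[i / 32] & (1 << (i % 32));
 *
 * and _memtotal == _numclusters * _clustsize
 *		 == (objtotal / clustentries) * _clustsize.
 */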

struct netmap_mem_d {
	NM_LOCK_T nm_mtx;	/* protect the allocator ? */
	u_int nm_totalsize;	/* shorthand */

	/* pointers to the three allocators */
	struct netmap_obj_pool *nm_if_pool;
	struct netmap_obj_pool *nm_ring_pool;
	struct netmap_obj_pool *nm_buf_pool;
};

struct lut_entry *netmap_buffer_lut;	/* exported */

static struct netmap_mem_d *nm_mem;	/* our memory allocator */

/*
 * Convert a userspace offset to a physical address.
 * XXX re-do in a simpler way.
 *
 * The idea here is to hide from userspace applications the fact that
 * pre-allocated memory is not contiguous, but fragmented across
 * different clusters and smaller memory allocators. Consequently,
 * first of all we need to find the allocator that owns the provided
 * offset, then we need to find out the physical address associated
 * with the target page (this is done using the look-up table).
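 *
 * Worked example with the default sizes above: the if pool spans
 * 512 * 1KB = 524288 bytes, so an offset of 530000 is past it and
 * we retry with 530000 - 524288 = 5712 in the ring pool; there
 * 5712 / 36864 = 0 selects lut[0], and the result is
 * lut[0].paddr + 5712.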
 */
static inline vm_paddr_t
netmap_ofstophys(vm_offset_t offset)
{
	const struct netmap_obj_pool *p[] = {
		nm_mem->nm_if_pool,
		nm_mem->nm_ring_pool,
		nm_mem->nm_buf_pool };
	vm_offset_t o = offset;
	int i;

	for (i = 0; i < 3; offset -= p[i]->_memtotal, i++) {
		if (offset >= p[i]->_memtotal)
			continue;	/* not in this pool, try the next */
		// XXX now scan the clusters
		return p[i]->lut[offset / p[i]->_objsize].paddr +
			offset % p[i]->_objsize;
	}
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
		p[0]->_memtotal, p[0]->_memtotal + p[1]->_memtotal,
		p[0]->_memtotal + p[1]->_memtotal + p[2]->_memtotal);
	return 0;	// XXX bad address
}

/*
 * We store objects by kernel address, so we need to find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster.
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		ND("%s: return offset %d (cluster %d) for pointer %p",
			p->name, ofs, i, vaddr);
		return ofs;
	}
	D("address %p is not contained inside any cluster (%s)",
		vaddr, p->name);
	return 0; /* An error occurred */
}

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(v)				\
	netmap_obj_offset(nm_mem->nm_if_pool, (v))

#define netmap_ring_offset(v)				\
	(nm_mem->nm_if_pool->_memtotal +		\
	netmap_obj_offset(nm_mem->nm_ring_pool, (v)))

#define netmap_buf_offset(v)				\
	(nm_mem->nm_if_pool->_memtotal +		\
	nm_mem->nm_ring_pool->_memtotal +		\
	netmap_obj_offset(nm_mem->nm_buf_pool, (v)))
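
/*
 * Illustrative use of the helpers above (this is what netmap_if_new()
 * does below): the per-ring entries exported to userspace are offsets
 * of each ring relative to the netmap_if, essentially
 *
 *	base = netmap_if_offset(nifp);
 *	ring_ofs = netmap_ring_offset(ring) - base;
 */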

static void *
netmap_obj_malloc(struct netmap_obj_pool *p, int len)
{
	void *vaddr = NULL;
	uint32_t i = 0;		/* index in the bitmap */
	uint32_t mask, j;	/* slot counter */

	if (len > p->_objsize) {
		D("%s request size %d too large", p->name, len);
		// XXX cannot reduce the size
		return NULL;
	}

	if (p->objfree == 0) {
		D("%s allocator: ran out of memory", p->name);
		return NULL;
	}

	/* termination is guaranteed by p->objfree */
	while (vaddr == NULL) {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) {	/* bitmask is fully used */
			i++;
			continue;
		}
		/* locate the first set bit */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask;	/* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
	}
	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",
		p->name, i, j, vaddr);

	return vaddr;
}

/*
 * free by index, not by address
 */
static void
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	if (j >= p->objtotal) {
		D("invalid index %u, max %u", j, p->objtotal);
		return;
	}
	p->bitmap[j / 32] |= (1 << (j % 32));
	p->objfree++;
	return;
}
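
/*
 * Example of the index arithmetic above: freeing object j = 70
 * sets bit 70 % 32 = 6 in word 70 / 32 = 2, i.e.
 *
 *	p->bitmap[2] |= 1 << 6;
 */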

static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	int i, j, n = p->_memtotal / p->_clustsize;

	for (i = 0, j = 0; i < n; i++, j += p->clustentries) {
		void *base = p->lut[i * p->clustentries].vaddr;
		ssize_t relofs = (char *) vaddr - (char *) base;

		/* the given address is out of the scope of this cluster */
		if (vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		KASSERT(j != 0, ("Cannot free object 0"));
		netmap_obj_free(p, j);
		return;
	}
	ND("address %p is not contained inside any cluster (%s)",
		vaddr, p->name);
}

#define netmap_if_malloc(len)	netmap_obj_malloc(nm_mem->nm_if_pool, len)
#define netmap_if_free(v)	netmap_obj_free_va(nm_mem->nm_if_pool, (v))
#define netmap_ring_malloc(len)	netmap_obj_malloc(nm_mem->nm_ring_pool, len)
#define netmap_buf_malloc()				\
	netmap_obj_malloc(nm_mem->nm_buf_pool, NETMAP_BUF_SIZE)

/* Return the index associated to the given packet buffer */
#define netmap_buf_index(v)				\
	(netmap_obj_offset(nm_mem->nm_buf_pool, (v)) / nm_mem->nm_buf_pool->_objsize)
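
/*
 * Round trip (sketch): netmap_buf_index() turns a kernel virtual
 * address into the index stored in slot->buf_idx; userspace gets the
 * buffer back with the public NETMAP_BUF() macro, essentially
 *
 *	buf = (char *)ring + ring->buf_ofs + idx * ring->nr_buf_size;
 *
 * which works because buf_ofs (set in netmap_if_new() below) points
 * from the ring back to the start of the buffer pool.
 */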

/*
 * Allocate n buffers and fill the slot array; on failure, release
 * the buffers allocated so far.
 */
static void
netmap_new_bufs(struct netmap_if *nifp __unused,
		struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = nm_mem->nm_buf_pool;
	uint32_t i = 0;	/* slot counter */

	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc();
		if (vaddr == NULL) {
			D("unable to locate empty packet buffer");
			goto cleanup;
		}

		slot[i].buf_idx = netmap_buf_index(vaddr);
		KASSERT(slot[i].buf_idx != 0,
			("Assigning buf_idx=0 to just created slot"));
		slot[i].len = p->_objsize;
		slot[i].flags = NS_BUF_CHANGED;	// XXX GAETANO hack
	}

	ND("allocated %d buffers, %d available", n, p->objfree);
	return;

cleanup:
	/* i is unsigned, so count down with an explicit check */
	while (i > 0) {
		i--;
		netmap_obj_free(nm_mem->nm_buf_pool, slot[i].buf_idx);
	}
}

static void
netmap_free_buf(struct netmap_if *nifp, uint32_t i)
{
	struct netmap_obj_pool *p = nm_mem->nm_buf_pool;

	if (i < 2 || i >= p->objtotal) {
		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(nm_mem->nm_buf_pool, i);
}

/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	if (p->bitmap)
		free(p->bitmap, M_NETMAP);
	if (p->lut) {
		int i;
		for (i = 0; i < p->objtotal; i += p->clustentries) {
			if (p->lut[i].vaddr)
				contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
		}
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
		free(p->lut, M_NETMAP);
	}
	bzero(p, sizeof(*p));
	free(p, M_NETMAP);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * In the allocator we don't need to store the objsize,
 * but we do need to keep track of objtotal' and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 * so we cannot afford gaps at the end of a cluster.
 */
static struct netmap_obj_pool *
netmap_new_obj_allocator(const char *name, u_int objtotal, u_int objsize)
{
	struct netmap_obj_pool *p;
	int i, n;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */

#define MAX_CLUSTSIZE	(1<<17)
#define LINE_ROUND	64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);
		return NULL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
		if (delta > ( (clustentries*objsize) % PAGE_SIZE) )
			clustentries = i;
	}
	// D("XXX --- ouch, delta %d (bad for buffers)", delta);
	/* compute clustsize and round to the next page */
	clustsize = clustentries * objsize;
	i = (clustsize & (PAGE_SIZE - 1));
	if (i)
		clustsize += PAGE_SIZE - i;
	D("objsize %d clustsize %d objects %d",
		objsize, clustsize, clustentries);
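
	/*
	 * Worked example of the search above (4KB pages): for
	 * objsize 2048, i = 2 gives used = 4096 and delta = 0, an
	 * exact fit, so clustentries = 2 and clustsize = 4096.
	 * For objsize 1536, the first exact fit is clustentries = 8
	 * (8 * 1536 = 12288, three whole pages).
	 */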

	p = malloc(sizeof(struct netmap_obj_pool), M_NETMAP,
		M_WAITOK | M_ZERO);
	if (p == NULL) {
		D("Unable to create '%s' allocator", name);
		return NULL;
	}
	/*
	 * Allocate and initialize the lookup table.
	 *
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	strncpy(p->name, name, sizeof(p->name));
	p->clustentries = clustentries;
	p->_clustsize = clustsize;
	n = (objtotal + clustentries - 1) / clustentries;
	p->_numclusters = n;
	p->objtotal = n * clustentries;
	p->objfree = p->objtotal - 2;	/* obj 0 and 1 are reserved */
	p->_objsize = objsize;
	p->_memtotal = p->_numclusters * p->_clustsize;

	p->lut = malloc(sizeof(struct lut_entry) * p->objtotal,
		M_NETMAP, M_WAITOK | M_ZERO);
	if (p->lut == NULL) {
		D("Unable to create lookup table for '%s' allocator", name);
		goto clean;
	}

	/* Allocate the bitmap */
	n = (p->objtotal + 31) / 32;
	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_WAITOK | M_ZERO);
	if (p->bitmap == NULL) {
		D("Unable to create bitmap (%d entries) for allocator '%s'", n,
			name);
		goto clean;
	}

	/*
	 * Allocate clusters, init pointers and bitmap
	 */
	for (i = 0; i < p->objtotal;) {
		int lim = i + clustentries;
		char *clust;

		clust = contigmalloc(clustsize, M_NETMAP, M_WAITOK | M_ZERO,
			0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			D("Unable to create cluster at %d for '%s' allocator",
				i, name);
			lim = i / 2;
			for (; i >= lim; i--) {
				p->bitmap[ (i>>5) ] &= ~( 1 << (i & 31) );
				if (i % clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
						p->_clustsize, M_NETMAP);
			}
			p->objtotal = i;
			p->objfree = p->objtotal - 2;
			p->_numclusters = i / clustentries;
			p->_memtotal = p->_numclusters * p->_clustsize;
			break;
		}
		for (; i < lim; i++, clust += objsize) {
			p->bitmap[ (i>>5) ] |= ( 1 << (i & 31) );
			p->lut[i].vaddr = clust;
			p->lut[i].paddr = vtophys(clust);
		}
	}
	p->bitmap[0] = ~3;	/* objs 0 and 1 are always busy */
	D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
		p->_numclusters, p->_clustsize >> 10,
		p->_memtotal >> 10, name);

	return p;

clean:
	netmap_destroy_obj_allocator(p);
	return NULL;
}

static int
netmap_memory_init(void)
{
	struct netmap_obj_pool *p;

	nm_mem = malloc(sizeof(struct netmap_mem_d), M_NETMAP,
		M_WAITOK | M_ZERO);
	if (nm_mem == NULL)
		goto clean;

	p = netmap_new_obj_allocator("netmap_if",
		NETMAP_IF_MAX_NUM, NETMAP_IF_MAX_SIZE);
	if (p == NULL)
		goto clean;
	nm_mem->nm_if_pool = p;

	p = netmap_new_obj_allocator("netmap_ring",
		NETMAP_RING_MAX_NUM, NETMAP_RING_MAX_SIZE);
	if (p == NULL)
		goto clean;
	nm_mem->nm_ring_pool = p;

	p = netmap_new_obj_allocator("netmap_buf",
		NETMAP_BUF_MAX_NUM, NETMAP_BUF_SIZE);
	if (p == NULL)
		goto clean;
	netmap_total_buffers = p->objtotal;
	netmap_buffer_lut = p->lut;
	nm_mem->nm_buf_pool = p;
	netmap_buffer_base = p->lut[0].vaddr;

	mtx_init(&nm_mem->nm_mtx, "netmap memory allocator lock", NULL,
		MTX_DEF);
	nm_mem->nm_totalsize =
		nm_mem->nm_if_pool->_memtotal +
		nm_mem->nm_ring_pool->_memtotal +
		nm_mem->nm_buf_pool->_memtotal;
	D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers",
		nm_mem->nm_if_pool->_memtotal >> 10,
		nm_mem->nm_ring_pool->_memtotal >> 10,
		nm_mem->nm_buf_pool->_memtotal >> 20);
	return 0;

clean:
	if (nm_mem) {
		netmap_destroy_obj_allocator(nm_mem->nm_ring_pool);
		netmap_destroy_obj_allocator(nm_mem->nm_if_pool);
		free(nm_mem, M_NETMAP);
	}
	return ENOMEM;
}
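
/*
 * Back-of-the-envelope figures for the defaults above (4KB pages,
 * 2KB buffers): the if pool is 512 * 1KB = 512KB, the ring pool
 * 200 * 36KB ~= 7MB, and the buffer pool 100000 * 2KB ~= 195MB,
 * which is what the D() message in netmap_memory_init() reports.
 */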

static void
netmap_memory_fini(void)
{
	if (!nm_mem)
		return;
	netmap_destroy_obj_allocator(nm_mem->nm_if_pool);
	netmap_destroy_obj_allocator(nm_mem->nm_ring_pool);
	netmap_destroy_obj_allocator(nm_mem->nm_buf_pool);
	mtx_destroy(&nm_mem->nm_mtx);
	free(nm_mem, M_NETMAP);
}

static void *
netmap_if_new(const char *ifname, struct netmap_adapter *na)
{
	struct netmap_if *nifp;
	struct netmap_ring *ring;
	ssize_t base;	/* handy for relative offsets between rings and nifp */
	u_int i, len, ndesc;
	u_int ntx = na->num_tx_rings + 1;	/* shorthand, include stack ring */
	u_int nrx = na->num_rx_rings + 1;	/* shorthand, include stack ring */
	struct netmap_kring *kring;

	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */
	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
	nifp = netmap_if_malloc(len);
	if (nifp == NULL)
		return NULL;

	/* initialize base fields -- override const */
	*(int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	strncpy(nifp->ni_name, ifname, IFNAMSIZ);

	(na->refcount)++;	/* XXX atomic ? we are under lock */
	if (na->refcount > 1) {	/* already setup, we are done */
		goto final;
	}

	/*
	 * First instance, allocate netmap rings and buffers for this card.
	 * The rings are contiguous, but have variable size.
	 */
	for (i = 0; i < ntx; i++) { /* Transmit rings */
		kring = &na->tx_rings[i];
		ndesc = na->num_tx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
			ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate tx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("txring[%d] at %p", i, ring);
		kring->na = na;
		kring->ring = ring;
		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
			(nm_mem->nm_if_pool->_memtotal +
			nm_mem->nm_ring_pool->_memtotal) -
			netmap_ring_offset(ring);

		/*
		 * IMPORTANT:
		 * Always keep one slot empty, so we can detect new
		 * transmissions comparing cur and nr_hwcur (they are
		 * the same only if there are no new transmissions).
		 */
		ring->avail = kring->nr_hwavail = ndesc - 1;
		ring->cur = kring->nr_hwcur = 0;
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for txring[%d]", i);
		netmap_new_bufs(nifp, ring->slot, ndesc);
	}

	for (i = 0; i < nrx; i++) { /* Receive rings */
		kring = &na->rx_rings[i];
		ndesc = na->num_rx_desc;
		bzero(kring, sizeof(*kring));
		len = sizeof(struct netmap_ring) +
			ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(len);
		if (ring == NULL) {
			D("Cannot allocate rx_ring[%d] for %s", i, ifname);
			goto cleanup;
		}
		ND("rxring[%d] at %p", i, ring);
		kring->na = na;
		kring->ring = ring;
		*(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
			(nm_mem->nm_if_pool->_memtotal +
			nm_mem->nm_ring_pool->_memtotal) -
			netmap_ring_offset(ring);

		ring->cur = kring->nr_hwcur = 0;
		ring->avail = kring->nr_hwavail = 0;	/* empty */
		*(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		ND("initializing slots for rxring[%d]", i);
		netmap_new_bufs(nifp, ring->slot, ndesc);
	}

	// XXX initialize the selrecord structs.
	for (i = 0; i < ntx; i++)
		init_waitqueue_head(&na->tx_rings[i].si);
	for (i = 0; i < nrx; i++)
		init_waitqueue_head(&na->rx_rings[i].si);
	init_waitqueue_head(&na->rx_si);
	init_waitqueue_head(&na->tx_si);

final:
	/*
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
	base = netmap_if_offset(nifp);
	for (i = 0; i < ntx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
			netmap_ring_offset(na->tx_rings[i].ring) - base;
	}
	for (i = 0; i < nrx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
			netmap_ring_offset(na->rx_rings[i].ring) - base;
	}
	return (nifp);

cleanup:
	/* XXX rings already allocated are not recovered here */
	netmap_if_free(nifp);
	(na->refcount)--;
	return (NULL);
}
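
/*
 * Userspace counterpart (sketch, using the public netmap.h macros):
 * the ring_ofs[] entries written above are what NETMAP_TXRING() and
 * NETMAP_RXRING() add back to the netmap_if pointer, essentially
 *
 *	struct netmap_ring *txring = (struct netmap_ring *)
 *		((char *)nifp + nifp->ring_ofs[i]);	// tx ring i
 *	struct netmap_ring *rxring = (struct netmap_ring *)
 *		((char *)nifp + nifp->ring_ofs[i + nifp->ni_tx_rings + 1]);
 */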

/* Release the netmap rings of an adapter back to the ring pool. */
static void
netmap_free_rings(struct netmap_adapter *na)
{
	int i;

	for (i = 0; i < na->num_tx_rings + 1; i++)
		netmap_obj_free_va(nm_mem->nm_ring_pool,
			na->tx_rings[i].ring);
	for (i = 0; i < na->num_rx_rings + 1; i++)
		netmap_obj_free_va(nm_mem->nm_ring_pool,
			na->rx_rings[i].ring);
}