 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 * Copyright (C) 2012-2014 Matteo Landi
 * Copyright (C) 2012-2016 Luigi Rizzo
 * Copyright (C) 2012-2016 Giuseppe Lettieri
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
#endif /* __APPLE__ */
#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>	/* MALLOC_DEFINE */
#include <vm/vm.h>	/* vtophys */
#include <vm/pmap.h>	/* vtophys */
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <net/if_var.h>
#include <machine/bus.h>	/* bus_dmamap_* */
/* M_NETMAP only used in here */
MALLOC_DECLARE(M_NETMAP);
MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");
#endif /* __FreeBSD__ */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include "netmap_mem2.h"
#ifdef _WIN32_USE_SMALL_GENERIC_DEVICES_MEMORY
#define NETMAP_BUF_MAX_NUM	8*4096	/* if too big, it takes too long to allocate */
#define NETMAP_BUF_MAX_NUM	20*4096*2	/* large machine */
#define NETMAP_POOL_MAX_NAMSZ	32
struct netmap_obj_params {
struct netmap_obj_pool {
	char name[NETMAP_POOL_MAX_NAMSZ];	/* name of the allocator */
	/* ---------------------------------------------------*/
	/* these are only meaningful if the pool is finalized */
	/* (see 'finalized' field in netmap_mem_d)            */
	size_t memtotal;	/* actual total memory space */
	struct lut_entry *lut;	/* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
	uint32_t *invalid_bitmap;	/* one bit per buffer, 1 means invalid */
	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */
	u_int objtotal;		/* actual total number of objects. */
	u_int numclusters;	/* actual number of clusters */
	u_int objfree;		/* number of free objects. */
	int alloc_done;		/* we have allocated the memory */
	/* ---------------------------------------------------*/
	u_int objminsize;	/* minimum object size */
	u_int objmaxsize;	/* maximum object size */
	u_int nummin;		/* minimum number of objects */
	u_int nummax;		/* maximum number of objects */
	/* these are changed only by config */
	u_int _objtotal;	/* total number of objects */
	u_int _objsize;		/* object size */
	u_int _clustsize;	/* cluster size */
	u_int _clustentries;	/* objects per cluster */
	u_int _numclusters;	/* number of clusters */
	/* requested values */
#define NMA_LOCK_T		NM_MTX_T
#define NMA_LOCK_INIT(n)	NM_MTX_INIT((n)->nm_mtx)
#define NMA_LOCK_DESTROY(n)	NM_MTX_DESTROY((n)->nm_mtx)
#define NMA_LOCK(n)		NM_MTX_LOCK((n)->nm_mtx)
#define NMA_SPINLOCK(n)		NM_MTX_SPINLOCK((n)->nm_mtx)
#define NMA_UNLOCK(n)		NM_MTX_UNLOCK((n)->nm_mtx)
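/*
 * Illustration (added note, not from the original source): the wrappers
 * above are used in the usual lock/use/unlock pattern, e.g.
 *
 *	NMA_LOCK(nmd);
 *	... inspect or update nmd->pools[] ...
 *	NMA_UNLOCK(nmd);
 */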
struct netmap_mem_ops {
	int (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut*);
	int (*nmd_get_info)(struct netmap_mem_d *, uint64_t *size,
			u_int *memflags, uint16_t *id);
	vm_paddr_t (*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t);
	int (*nmd_config)(struct netmap_mem_d *);
	int (*nmd_finalize)(struct netmap_mem_d *);
	void (*nmd_deref)(struct netmap_mem_d *);
	ssize_t (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr);
	void (*nmd_delete)(struct netmap_mem_d *);
	struct netmap_if * (*nmd_if_new)(struct netmap_adapter *,
			struct netmap_priv_d *);
	void (*nmd_if_delete)(struct netmap_adapter *, struct netmap_if *);
	int (*nmd_rings_create)(struct netmap_adapter *);
	void (*nmd_rings_delete)(struct netmap_adapter *);
struct netmap_mem_d {
	NMA_LOCK_T nm_mtx;	/* protect the allocator */
	size_t nm_totalsize;	/* shorthand */
#define NETMAP_MEM_FINALIZED	0x1	/* preallocation done */
#define NETMAP_MEM_HIDDEN	0x8	/* being prepared */
	int lasterr;		/* last error for curr config */
	int active;		/* active users */
	/* the three allocators */
	struct netmap_obj_pool pools[NETMAP_POOLS_NR];
	nm_memid_t nm_id;	/* allocator identifier */
	int nm_grp;		/* iommu group id */
	/* list of all existing allocators, sorted by nm_id */
	struct netmap_mem_d *prev, *next;
	struct netmap_mem_ops *ops;
	struct netmap_obj_params params[NETMAP_POOLS_NR];
#define NM_MEM_NAMESZ	16
	char name[NM_MEM_NAMESZ];
netmap_mem_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
	rv = nmd->ops->nmd_get_lut(nmd, lut);
netmap_mem_get_info(struct netmap_mem_d *nmd, uint64_t *size,
		u_int *memflags, nm_memid_t *memid)
	rv = nmd->ops->nmd_get_info(nmd, size, memflags, memid);
netmap_mem_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
#if defined(__FreeBSD__)
	/* This function is called by netmap_dev_pager_fault(), which holds a
	 * non-sleepable lock since FreeBSD 12. Since we cannot sleep, we
	 * spin on the trylock. */
	pa = nmd->ops->nmd_ofstophys(nmd, off);
netmap_mem_config(struct netmap_mem_d *nmd)
		/* already in use. Not fatal, but we
		 * cannot change the configuration
	return nmd->ops->nmd_config(nmd);
netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *off)
	rv = nmd->ops->nmd_if_offset(nmd, off);
netmap_mem_delete(struct netmap_mem_d *nmd)
	nmd->ops->nmd_delete(nmd);
netmap_mem_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
	struct netmap_if *nifp;
	struct netmap_mem_d *nmd = na->nm_mem;
	nifp = nmd->ops->nmd_if_new(na, priv);
netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nif)
	struct netmap_mem_d *nmd = na->nm_mem;
	nmd->ops->nmd_if_delete(na, nif);
netmap_mem_rings_create(struct netmap_adapter *na)
	struct netmap_mem_d *nmd = na->nm_mem;
	rv = nmd->ops->nmd_rings_create(na);
netmap_mem_rings_delete(struct netmap_adapter *na)
	struct netmap_mem_d *nmd = na->nm_mem;
	nmd->ops->nmd_rings_delete(na);
static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *);
static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *);
static int nm_mem_assign_group(struct netmap_mem_d *, struct device *);
static void nm_mem_release_id(struct netmap_mem_d *);
netmap_mem_get_id(struct netmap_mem_d *nmd)
#ifdef NM_DEBUG_MEM_PUTGET
#define NM_DBG_REFC(nmd, func, line)	\
	nm_prinf("%d mem[%d] -> %d", line, (nmd)->nm_id, (nmd)->refcount);
#define NM_DBG_REFC(nmd, func, line)
/* circular list of all existing allocators */
static struct netmap_mem_d *netmap_last_mem_d = &nm_mem;
NM_MTX_T nm_mem_list_lock;
struct netmap_mem_d *
__netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line)
	NM_MTX_LOCK(nm_mem_list_lock);
	NM_DBG_REFC(nmd, func, line);
	NM_MTX_UNLOCK(nm_mem_list_lock);
__netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line)
	NM_MTX_LOCK(nm_mem_list_lock);
	last = (--nmd->refcount == 0);
	nm_mem_release_id(nmd);
	NM_DBG_REFC(nmd, func, line);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	netmap_mem_delete(nmd);
netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
	if (nm_mem_assign_group(nmd, na->pdev) < 0) {
	if (netmap_mem_config(nmd))
	nmd->lasterr = nmd->ops->nmd_finalize(nmd);
	if (!nmd->lasterr && na->pdev) {
		nmd->lasterr = netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na);
	lasterr = nmd->lasterr;
	netmap_mem_deref(nmd, na);
nm_isset(uint32_t *bitmap, u_int i)
	return bitmap[ (i>>5) ] & ( 1U << (i & 31U) );
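/*
 * For illustration only (not in the original allocator): the companion
 * set/clear operations implied by nm_isset()'s layout. Bit i lives in
 * word i >> 5, at position i & 31; the code below open-codes them.
 */
static inline void
nm_set(uint32_t *bitmap, u_int i)
{
	bitmap[i >> 5] |= (1U << (i & 31U));
}

static inline void
nm_clear(uint32_t *bitmap, u_int i)
{
	bitmap[i >> 5] &= ~(1U << (i & 31U));
}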
netmap_init_obj_allocator_bitmap(struct netmap_obj_pool *p)
	if (p->bitmap == NULL) {
		/* Allocate the bitmap */
		n = (p->objtotal + 31) / 32;
		p->bitmap = nm_os_malloc(sizeof(p->bitmap[0]) * n);
		if (p->bitmap == NULL) {
			nm_prerr("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
	memset(p->bitmap, 0, p->bitmap_slots * sizeof(p->bitmap[0]));
	 * Set all the bits in the bitmap that have
	 * corresponding buffers to 1 to indicate they are
	for (j = 0; j < p->objtotal; j++) {
		if (p->invalid_bitmap && nm_isset(p->invalid_bitmap, j)) {
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("skipping %s %d", p->name, j);
		p->bitmap[ (j>>5) ] |= ( 1U << (j & 31U) );
		nm_prinf("%s free %u", p->name, p->objfree);
	if (p->objfree == 0) {
		nm_prerr("%s: no objects available", p->name);
netmap_mem_init_bitmaps(struct netmap_mem_d *nmd)
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nmd->pools[i];
		error = netmap_init_obj_allocator_bitmap(p);
	 * buffers 0 and 1 are reserved
	if (nmd->pools[NETMAP_BUF_POOL].objfree < 2) {
		nm_prerr("%s: not enough buffers", nmd->pools[NETMAP_BUF_POOL].name);
	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
	if (nmd->pools[NETMAP_BUF_POOL].bitmap) {
		/* XXX This check is a workaround that prevents a
		 * NULL pointer crash which currently happens only
		 * with ptnetmap guests.
		 * Removed shared-info --> is the bug still there? */
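		/* Note (added for clarity): ~3U leaves bits 0 and 1 clear,
		 * so the two reserved buffers stay marked as in use. */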
		nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3U;
netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
	if (na->active_fds <= 0)
		netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na);
	if (nmd->active == 1) {
		 * Reset the allocator when it falls out of use so that any
		 * pool resources leaked by unclean application exits are
		netmap_mem_init_bitmaps(nmd);
	nmd->ops->nmd_deref(nmd);
/* accessor functions */
netmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
	lut->lut = nmd->pools[NETMAP_BUF_POOL].lut;
	lut->plut = lut->lut;
	lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
	lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
static struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = {
	[NETMAP_RING_POOL] = {
	[NETMAP_BUF_POOL] = {
 * nm_mem is the memory allocator used for all physical interfaces
 * running in netmap mode.
 * Each virtual (VALE) port will have its own allocator.
extern struct netmap_mem_ops netmap_mem_global_ops; /* forward */
struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
		.objminsize = sizeof(struct netmap_if),
		.nummin     = 10,	/* don't be stingy */
		.nummax	    = 10000,	/* XXX very large */
	[NETMAP_RING_POOL] = {
		.name	= "netmap_ring",
		.objminsize = sizeof(struct netmap_ring),
		.objmaxsize = 32*PAGE_SIZE,
	[NETMAP_BUF_POOL] = {
		.name	= "netmap_buf",
		.nummax	    = 1000000, /* one million! */
	[NETMAP_RING_POOL] = {
	[NETMAP_BUF_POOL] = {
		.num = NETMAP_BUF_MAX_NUM,
	.ops = &netmap_mem_global_ops,
/* blueprint for the private memory allocators */
/* XXX clang is not happy about using name as a print format */
static const struct netmap_mem_d nm_blueprint = {
		.objminsize = sizeof(struct netmap_if),
	[NETMAP_RING_POOL] = {
		.objminsize = sizeof(struct netmap_ring),
		.objmaxsize = 32*PAGE_SIZE,
	[NETMAP_BUF_POOL] = {
		.nummax	    = 1000000, /* one million! */
	.flags = NETMAP_MEM_PRIVATE,
	.ops = &netmap_mem_global_ops,
/* memory allocator related sysctls */
#define STRINGIFY(x) #x
#define DECLARE_SYSCTLS(id, name) \
	SYSBEGIN(mem2_ ## name); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &nm_mem.params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &nm_mem.params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \
	    "Default size of private netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \
	    "Default number of private netmap " STRINGIFY(name) "s"); \
SYSCTL_DECL(_dev_netmap);
DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
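/*
 * Usage example (illustrative, added note): with the declarations
 * above, the knobs appear under dev.netmap, e.g. from a FreeBSD shell:
 *
 *	sysctl dev.netmap.buf_size=4096	 # request a new buffer size
 *	sysctl dev.netmap.buf_curr_num	 # read-only view of the active pool
 *
 * The *_curr_* variants are CTLFLAG_RD views of the finalized pools,
 * while the plain and priv_* variants are requested values.
 */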
/* call with nm_mem_list_lock held */
nm_mem_assign_id_locked(struct netmap_mem_d *nmd)
	struct netmap_mem_d *scan = netmap_last_mem_d;
		/* we rely on unsigned wrap around */
		id = scan->nm_id + 1;
		if (id == 0) /* reserve 0 as error value */
		if (id != scan->nm_id) {
			nmd->prev = scan->prev;
			scan->prev->next = nmd;
				netmap_last_mem_d = nmd;
			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
	} while (scan != netmap_last_mem_d);
/* call with nm_mem_list_lock *not* held */
nm_mem_assign_id(struct netmap_mem_d *nmd)
	NM_MTX_LOCK(nm_mem_list_lock);
	ret = nm_mem_assign_id_locked(nmd);
	NM_MTX_UNLOCK(nm_mem_list_lock);
/* call with nm_mem_list_lock held */
nm_mem_release_id(struct netmap_mem_d *nmd)
	nmd->prev->next = nmd->next;
	nmd->next->prev = nmd->prev;
	if (netmap_last_mem_d == nmd)
		netmap_last_mem_d = nmd->prev;
	nmd->prev = nmd->next = NULL;
struct netmap_mem_d *
netmap_mem_find(nm_memid_t id)
	struct netmap_mem_d *nmd;
	NM_MTX_LOCK(nm_mem_list_lock);
	nmd = netmap_last_mem_d;
		if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_id == id) {
			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
			NM_MTX_UNLOCK(nm_mem_list_lock);
	} while (nmd != netmap_last_mem_d);
	NM_MTX_UNLOCK(nm_mem_list_lock);
nm_mem_assign_group(struct netmap_mem_d *nmd, struct device *dev)
	id = nm_iommu_group_id(dev);
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("iommu_group %d", id);
	if (nmd->nm_grp != id) {
			nm_prerr("iommu group mismatch: %u vs %u",
		nmd->lasterr = err = ENOMEM;
static struct lut_entry *
nm_alloc_lut(u_int nobj)
	size_t n = sizeof(struct lut_entry) * nobj;
	struct lut_entry *lut;
	lut = nm_os_malloc(n);
nm_free_lut(struct lut_entry *lut, u_int objtotal)
	bzero(lut, sizeof(struct lut_entry) * objtotal);
#if defined(linux) || defined(_WIN32)
static struct plut_entry *
nm_alloc_plut(u_int nobj)
	size_t n = sizeof(struct plut_entry) * nobj;
	struct plut_entry *lut;
nm_free_plut(struct plut_entry * lut)
#endif /* linux or _WIN32 */
 * First, find the allocator that contains the requested offset,
 * then locate the cluster through a lookup table.
netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
	vm_ooffset_t o = offset;
	struct netmap_obj_pool *p;
	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
		if (offset >= p[i].memtotal)
		// now look up the cluster's address
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) +
			offset % p[i]._objsize;
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr);
		pa.QuadPart += offset % p[i]._objsize;
	/* this is only in case of errors */
	nm_prerr("invalid ofs 0x%x out of 0x%zx 0x%zx 0x%zx", (u_int)o,
		p[NETMAP_IF_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal
			+ p[NETMAP_BUF_POOL].memtotal);
	return 0;	/* bad address */
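/*
 * Worked example (added for clarity): the pools are laid out back to
 * back, if pool first, then rings, then buffers. An offset o falling
 * in the ring pool, i.e. memtotal(if) <= o < memtotal(if) +
 * memtotal(ring), is translated with rel = o - memtotal(if) as
 *
 *	pa = vtophys(p[NETMAP_RING_POOL].lut[rel / _objsize].vaddr)
 *		+ rel % _objsize;
 */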
 * win32_build_virtual_memory_for_userspace
 * This function gathers all the objects that make up the pools and maps
 * a contiguous virtual memory space for userspace
 * 1 - allocate a Memory Descriptor List as wide as the sum
 *     of the memory needed for the pools
 * 2 - cycle through all the objects in every pool and for every object do
 *	2a - cycle through all the objects in every pool, get the list
 *	     of the physical address descriptors
 *	2b - calculate the offset in the array of page descriptors in the
 *	2c - copy the descriptors of the object into the main MDL
 * 3 - return the resulting MDL that needs to be mapped in userland
 * In this way we will have an MDL that describes all the memory for the
 * objects in a single object
win32_build_user_vm_map(struct netmap_mem_d* nmd)
	u_int memflags, ofs = 0;
	PMDL mainMdl, tempMdl;
	if (netmap_mem_get_info(nmd, &memsize, &memflags, NULL)) {
		nm_prerr("memory not finalized yet");
	mainMdl = IoAllocateMdl(NULL, memsize, FALSE, FALSE, NULL);
	if (mainMdl == NULL) {
		nm_prerr("failed to allocate mdl");
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nmd->pools[i];
		int clsz = p->_clustsize;
		int clobjs = p->_clustentries; /* objects per cluster */
		int mdl_len = sizeof(PFN_NUMBER) * BYTES_TO_PAGES(clsz);
		PPFN_NUMBER pSrc, pDst;
		/* each pool has a different cluster size so we need to reallocate */
		tempMdl = IoAllocateMdl(p->lut[0].vaddr, clsz, FALSE, FALSE, NULL);
		if (tempMdl == NULL) {
			nm_prerr("failed to allocate tempMdl");
		pSrc = MmGetMdlPfnArray(tempMdl);
		/* create one entry per cluster, the lut[] has one entry per object */
		for (j = 0; j < p->numclusters; j++, ofs += clsz) {
			pDst = &MmGetMdlPfnArray(mainMdl)[BYTES_TO_PAGES(ofs)];
			MmInitializeMdl(tempMdl, p->lut[j*clobjs].vaddr, clsz);
			MmBuildMdlForNonPagedPool(tempMdl); /* compute physical page addresses */
			RtlCopyMemory(pDst, pSrc, mdl_len); /* copy the page descriptors */
			mainMdl->MdlFlags = tempMdl->MdlFlags; /* XXX what is in here ? */
 * helper function for OS-specific mmap routines (currently only Windows).
 * Given an nmd and a pool index, returns the cluster size and number of clusters.
 * Returns 0 if memory is finalized and the pool is valid, otherwise 1.
 * It should be called under NMA_LOCK(nmd); otherwise the underlying info can change.
netmap_mem2_get_pool_info(struct netmap_mem_d* nmd, u_int pool, u_int *clustsize, u_int *numclusters)
	if (!nmd || !clustsize || !numclusters || pool >= NETMAP_POOLS_NR)
		return 1; /* invalid arguments */
	// NMA_LOCK_ASSERT(nmd);
	if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
		*clustsize = *numclusters = 0;
		return 1; /* not ready yet */
	*clustsize = nmd->pools[pool]._clustsize;
	*numclusters = nmd->pools[pool].numclusters;
	return 0; /* success */
netmap_mem2_get_info(struct netmap_mem_d* nmd, uint64_t* size,
		u_int *memflags, nm_memid_t *id)
	error = netmap_mem_config(nmd);
	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		*size = nmd->nm_totalsize;
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			struct netmap_obj_pool *p = nmd->pools + i;
			*size += ((size_t)p->_numclusters * (size_t)p->_clustsize);
	*memflags = nmd->flags;
 * we store objects by kernel address, need to find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
	int i, k = p->_clustentries, n = p->objtotal;
	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;
		if (relofs < 0 || relofs >= p->_clustsize)
		nm_prdis("%s: return offset %d (cluster %d) for pointer %p",
			p->name, ofs, i, vaddr);
	nm_prerr("address %p is not contained inside any cluster (%s)",
	return 0; /* An error occurred */
/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(n, v)					\
	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))
#define netmap_ring_offset(n, v)				\
	((n)->pools[NETMAP_IF_POOL].memtotal +			\
	    netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))
netmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr)
	return netmap_if_offset(nmd, addr);
 * report the index, and use start position as a hint,
 * otherwise buffer allocation becomes terribly expensive.
netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
	uint32_t i = 0;			/* index in the bitmap */
	uint32_t mask, j = 0;		/* slot counter */
	if (len > p->_objsize) {
		nm_prerr("%s request size %d too large", p->name, len);
	if (p->objfree == 0) {
		nm_prerr("no more %s objects", p->name);
	/* termination is guaranteed by p->objfree, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots) {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
		p->bitmap[i] &= ~mask; /* mark object as in use */
		vaddr = p->lut[i * 32 + j].vaddr;
		*index = i * 32 + j;
	nm_prdis("%s allocator: allocated object @ [%d][%d]: vaddr %p", p->name, i, j, vaddr);
 * free by index, not by address.
 * XXX should we also clean up the content?
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
	uint32_t *ptr, mask;
	if (j >= p->objtotal) {
		nm_prerr("invalid index %u, max %u", j, p->objtotal);
	ptr = &p->bitmap[j / 32];
	mask = (1 << (j % 32));
		nm_prerr("ouch, double free on buffer %d", j);
 * free by address. This is slow but is only used for a few
 * objects (rings, nifp)
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
	u_int i, j, n = p->numclusters;
	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
		void *base = p->lut[i * p->_clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;
		/* the given address is out of the scope of the current cluster */
		if (base == NULL || vaddr < base || relofs >= p->_clustsize)
		j = j + relofs / p->_objsize;
		/* KASSERT(j != 0, ("Cannot free object 0")); */
		netmap_obj_free(p, j);
	nm_prerr("address %p is not contained inside any cluster (%s)",
netmap_mem_bufsize(struct netmap_mem_d *nmd)
	return nmd->pools[NETMAP_BUF_POOL]._objsize;
#define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(n, _pos, _index)			\
	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index)
#if 0 /* currently unused */
/* Return the index associated to the given packet buffer */
#define netmap_buf_index(n, v)						\
	(netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
 * allocate extra buffers in a linked list.
 * returns the actual number.
netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
	struct netmap_mem_d *nmd = na->nm_mem;
	uint32_t i, pos = 0; /* opaque, scan position in the bitmap */
	*head = 0;	/* default, 'null' index, i.e. empty list */
	for (i = 0; i < n; i++) {
		uint32_t cur = *head;	/* save current head */
		uint32_t *p = netmap_buf_malloc(nmd, &pos, head);
			nm_prerr("no more buffers after %d of %d", i, n);
			*head = cur; /* restore */
		nm_prdis(5, "allocate buffer %d -> %d", *head, cur);
		*p = cur; /* link to previous head */
netmap_extra_free(struct netmap_adapter *na, uint32_t head)
	struct lut_entry *lut = na->na_lut.lut;
	struct netmap_mem_d *nmd = na->nm_mem;
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	uint32_t i, cur, *buf;
	nm_prdis("freeing the extra list");
	for (i = 0; head >= 2 && head < p->objtotal; i++) {
		buf = lut[head].vaddr;
		if (netmap_obj_free(p, cur))
		nm_prerr("breaking with head %d", head);
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("freed %d buffers", i);
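/*
 * Illustration (not part of the driver): the extra-buffer list is
 * threaded through the buffers themselves, each buffer's first
 * uint32_t holding the index of the next one; indices below 2
 * terminate the walk, as in netmap_extra_free() above. A minimal
 * sketch of a list walker:
 */
static inline uint32_t
netmap_extra_count(struct lut_entry *lut, uint32_t head, uint32_t objtotal)
{
	uint32_t n = 0;

	while (head >= 2 && head < objtotal) {
		head = *(uint32_t *)lut[head].vaddr; /* follow the link */
		n++;
	}
	return n;
}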
/* Return nonzero on error */
netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i = 0;	/* slot counter */
	uint32_t pos = 0;	/* slot in p->bitmap */
	uint32_t index = 0;	/* buffer index */
	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
		if (vaddr == NULL) {
			nm_prerr("no more buffers after %d of %d", i, n);
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
	nm_prdis("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos);
		netmap_obj_free(p, slot[i].buf_idx);
	bzero(slot, n * sizeof(slot[0]));
netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index)
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	for (i = 0; i < n; i++) {
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	if (i < 2 || i >= p->objtotal) {
		nm_prerr("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
	netmap_obj_free(p, i);
netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
	for (i = 0; i < n; i++) {
		if (slot[i].buf_idx > 1)
			netmap_free_buf(nmd, slot[i].buf_idx);
	nm_prdis("%s: released some buffers, available: %u",
		p->name, p->objfree);
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
	nm_os_free(p->bitmap);
	if (p->invalid_bitmap)
		nm_os_free(p->invalid_bitmap);
	p->invalid_bitmap = NULL;
	if (!p->alloc_done) {
		/* allocation was done by somebody else.
		 * Let them clean up after themselves.
	 * Free each cluster allocated in
	 * netmap_finalize_obj_allocator(). The cluster start
	 * addresses are stored at multiples of p->_clustentries
	for (i = 0; i < p->objtotal; i += p->_clustentries) {
		contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
	nm_free_lut(p->lut, p->objtotal);
 * Free all resources related to an allocator.
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
	netmap_reset_obj_allocator(p);
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * We need to keep track of objtotal and clustentries,
 * as they are needed when freeing memory.
 * XXX note -- userspace needs the buffers to be contiguous,
 * so we cannot afford gaps at the end of a cluster.
/* call with NMA_LOCK held */
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */
	/* we store the current request, so we can
	 * detect configuration changes later */
	p->r_objtotal = objtotal;
	p->r_objsize = objsize;
#define MAX_CLUSTSIZE	(1<<22)		// 4 MB
#define LINE_ROUND	NM_CACHE_ALIGN	// 64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		nm_prerr("unsupported allocation for %d bytes", objsize);
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
		nm_prinf("aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		nm_prerr("requested objsize %d out of range [%d, %d]",
			objsize, p->objminsize, p->objmaxsize);
	if (objtotal < p->nummin || objtotal > p->nummax) {
		nm_prerr("requested objtotal %d out of range [%d, %d]",
			objtotal, p->nummin, p->nummax);
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
	/* exact solution not found */
	if (clustentries == 0) {
		nm_prerr("unsupported allocation for %d bytes", objsize);
	/* compute clustsize */
	clustsize = clustentries * objsize;
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("objsize %d clustsize %d objects %d",
			objsize, clustsize, clustentries);
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	p->_clustentries = clustentries;
	p->_clustsize = clustsize;
	p->_numclusters = (objtotal + clustentries - 1) / clustentries;
	/* actual values (may be larger than requested) */
	p->_objsize = objsize;
	p->_objtotal = p->_numclusters * clustentries;
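	/*
	 * Worked example (added for clarity): with PAGE_SIZE 4096,
	 * objsize 2048 hits an exact solution at i = 2 (one page per
	 * cluster), while objsize 3072 first lands on a page boundary
	 * at i = 4 (12288 bytes = 3 pages). Requesting objtotal = 1000
	 * objects of 3072 bytes then gives _numclusters = 250 and
	 * _objtotal = 1000.
	 */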
/* call with NMA_LOCK held */
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
	int i; /* must be signed */
	/* if the lut is already there we assume that also all the
	 * clusters have already been allocated, possibly by somebody
	 * else (e.g., extmem). In the latter case, the alloc_done flag
	 * will remain at zero, so that we will not attempt to
	 * deallocate the clusters by ourselves in
	 * netmap_reset_obj_allocator.
	/* optimistically assume we have enough memory */
	p->numclusters = p->_numclusters;
	p->objtotal = p->_objtotal;
	p->lut = nm_alloc_lut(p->objtotal);
	if (p->lut == NULL) {
		nm_prerr("Unable to create lookup table for '%s'", p->name);
	 * Allocate clusters, init pointers
	for (i = 0; i < (int)p->objtotal;) {
		int lim = i + p->_clustentries;
		 * XXX Note, we only need contigmalloc() for buffers attached
		 * to native interfaces. In all other cases (nifp, netmap rings
		 * and even buffers for VALE ports or emulated interfaces) we
		 * can live with standard malloc, because the hardware will not
		 * access the pages directly.
		clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
			(size_t)0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			nm_prerr("Unable to create cluster at %d for '%s' allocator",
			if (i < 2) /* nothing to halve */
			for (i--; i >= lim; i--) {
				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
				p->lut[i].vaddr = NULL;
			/* we may have stopped in the middle of a cluster */
			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
		 * Set lut state for all buffers in the current cluster.
		 * [i, lim) is the set of buffer indexes that cover the
		 * 'clust' is really the address of the current buffer in
		 * the current cluster as we index through it with a stride
		for (; i < lim; i++, clust += p->_objsize) {
			p->lut[i].vaddr = clust;
#if !defined(linux) && !defined(_WIN32)
			p->lut[i].paddr = vtophys(clust);
	p->memtotal = (size_t)p->numclusters * (size_t)p->_clustsize;
	nm_prinf("Pre-allocated %d clusters (%d/%zuKB) for '%s'",
		p->numclusters, p->_clustsize >> 10,
		p->memtotal >> 10, p->name);
	netmap_reset_obj_allocator(p);
/* call with lock held */
netmap_mem_params_changed(struct netmap_obj_params* p)
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		if (p[i].last_size != p[i].size || p[i].last_num != p[i].num) {
			p[i].last_size = p[i].size;
			p[i].last_num = p[i].num;
netmap_mem_reset_all(struct netmap_mem_d *nmd)
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("resetting %p", nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_reset_obj_allocator(&nmd->pools[i]);
	nmd->flags &= ~NETMAP_MEM_FINALIZED;
netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na)
	int i, lim = p->objtotal;
	struct netmap_lut *lut;
	if (na == NULL || na->pdev == NULL)
#if defined(__FreeBSD__)
	/* On FreeBSD mapping and unmapping are performed by the txsync
	 * and rxsync routines, packet by packet. */
#elif defined(_WIN32)
	nm_prerr("unsupported on Windows");
	nm_prdis("unmapping and freeing plut for %s", na->name);
	if (lut->plut == NULL)
	for (i = 0; i < lim; i += p->_clustentries) {
		if (lut->plut[i].paddr)
			netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, p->_clustsize);
	nm_free_plut(lut->plut);
netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
	int i, lim = p->objtotal;
	struct netmap_lut *lut = &na->na_lut;
	if (na->pdev == NULL)
#if defined(__FreeBSD__)
	/* On FreeBSD mapping and unmapping are performed by the txsync
	 * and rxsync routines, packet by packet. */
#elif defined(_WIN32)
	nm_prerr("unsupported on Windows");
	if (lut->plut != NULL) {
		nm_prdis("plut already allocated for %s", na->name);
	nm_prdis("allocating physical lut for %s", na->name);
	lut->plut = nm_alloc_plut(lim);
	if (lut->plut == NULL) {
		nm_prerr("Failed to allocate physical lut for %s", na->name);
	for (i = 0; i < lim; i += p->_clustentries) {
		lut->plut[i].paddr = 0;
	for (i = 0; i < lim; i += p->_clustentries) {
		if (p->lut[i].vaddr == NULL)
		error = netmap_load_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr,
			p->lut[i].vaddr, p->_clustsize);
			nm_prerr("Failed to map cluster #%d from the %s pool", i, p->name);
		for (j = 1; j < p->_clustentries; j++) {
			lut->plut[i + j].paddr = lut->plut[i + j - 1].paddr + p->_objsize;
	netmap_mem_unmap(p, na);
netmap_mem_finalize_all(struct netmap_mem_d *nmd)
	if (nmd->flags & NETMAP_MEM_FINALIZED)
	nmd->nm_totalsize = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
		nmd->nm_totalsize += nmd->pools[i].memtotal;
	nmd->lasterr = netmap_mem_init_bitmaps(nmd);
	nmd->flags |= NETMAP_MEM_FINALIZED;
	nm_prinf("interfaces %zd KB, rings %zd KB, buffers %zd MB",
		nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
		nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
		nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);
	nm_prinf("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);
	netmap_mem_reset_all(nmd);
	return nmd->lasterr;
 * allocator for private memory
_netmap_mem_private_new(size_t size, struct netmap_obj_params *p,
		struct netmap_mem_ops *ops, int *perr)
	struct netmap_mem_d *d = NULL;
	d = nm_os_malloc(size);
	err = nm_mem_assign_id(d);
	snprintf(d->name, NM_MEM_NAMESZ, "%d", d->nm_id);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
				nm_blueprint.pools[i].name,
		d->params[i].num = p[i].num;
		d->params[i].size = p[i].size;
	err = netmap_mem_config(d);
	d->flags &= ~NETMAP_MEM_FINALIZED;
	NMA_LOCK_DESTROY(d);
	nm_mem_release_id(d);
struct netmap_mem_d *
netmap_mem_private_new(u_int txr, u_int txd, u_int rxr, u_int rxd,
		u_int extra_bufs, u_int npipes, int *perr)
	struct netmap_mem_d *d = NULL;
	struct netmap_obj_params p[NETMAP_POOLS_NR];
	/* account for the fake host rings */
	/* copy the min values */
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		p[i] = netmap_min_priv_params[i];
	/* possibly increase them to fit user request */
	v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr);
	if (p[NETMAP_IF_POOL].size < v)
		p[NETMAP_IF_POOL].size = v;
	if (p[NETMAP_IF_POOL].num < v)
		p[NETMAP_IF_POOL].num = v;
	maxd = (txd > rxd) ? txd : rxd;
	v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd;
	if (p[NETMAP_RING_POOL].size < v)
		p[NETMAP_RING_POOL].size = v;
	/* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
	 * and two rx rings (again, 1 normal and 1 fake host)
	v = txr + rxr + 8 * npipes;
	if (p[NETMAP_RING_POOL].num < v)
		p[NETMAP_RING_POOL].num = v;
	/* for each pipe we only need the buffers for the 4 "real" rings.
	 * On the other hand, the pipe ring dimension may be different from
	 * the parent port ring dimension. As a compromise, we allocate twice the
	 * space actually needed if the pipe rings were the same size as the parent rings
	v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
	/* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
	if (p[NETMAP_BUF_POOL].num < v)
		p[NETMAP_BUF_POOL].num = v;
	nm_prinf("req if %d*%d ring %d*%d buf %d*%d",
		p[NETMAP_IF_POOL].num,
		p[NETMAP_IF_POOL].size,
		p[NETMAP_RING_POOL].num,
		p[NETMAP_RING_POOL].size,
		p[NETMAP_BUF_POOL].num,
		p[NETMAP_BUF_POOL].size);
	d = _netmap_mem_private_new(sizeof(*d), p, &netmap_mem_global_ops, perr);
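/*
 * Usage sketch (illustrative, not from the original source): a private
 * allocator for a port with 2+2 rings of 1024 slots, no pipes and no
 * extra buffers would be created as
 *
 *	int error;
 *	struct netmap_mem_d *nmd =
 *		netmap_mem_private_new(2, 1024, 2, 1024, 0, 0, &error);
 */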
/* call with lock held */
netmap_mem2_config(struct netmap_mem_d *nmd)
	if (!netmap_mem_params_changed(nmd->params))
	nm_prdis("reconfiguring");
	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			netmap_reset_obj_allocator(&nmd->pools[i]);
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
				nmd->params[i].num, nmd->params[i].size);
	return nmd->lasterr;
netmap_mem2_finalize(struct netmap_mem_d *nmd)
	if (nmd->flags & NETMAP_MEM_FINALIZED)
	if (netmap_mem_finalize_all(nmd))
	return nmd->lasterr;
netmap_mem2_delete(struct netmap_mem_d *nmd)
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_destroy_obj_allocator(&nmd->pools[i]);
	NMA_LOCK_DESTROY(nmd);
/* doubly linked list of all existing external allocators */
static struct netmap_mem_ext *netmap_mem_ext_list = NULL;
NM_MTX_T nm_mem_ext_list_lock;
#endif /* WITH_EXTMEM */
netmap_mem_init(void)
	NM_MTX_INIT(nm_mem_list_lock);
	NMA_LOCK_INIT(&nm_mem);
	netmap_mem_get(&nm_mem);
	NM_MTX_INIT(nm_mem_ext_list_lock);
#endif /* WITH_EXTMEM */
netmap_mem_fini(void)
	netmap_mem_put(&nm_mem);
netmap_free_rings(struct netmap_adapter *na)
	for (i = 0; i < netmap_all_rings(na, t); i++) {
		struct netmap_kring *kring = NMR(na, t)[i];
		struct netmap_ring *ring = kring->ring;
		if (ring == NULL || kring->users > 0 || (kring->nr_kflags & NKR_NEEDRING)) {
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("NOT deleting ring %s (ring %p, users %d needring %d)",
					kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
		if (netmap_debug & NM_DEBUG_MEM)
			nm_prinf("deleting ring %s", kring->name);
		if (!(kring->nr_kflags & NKR_FAKERING)) {
			nm_prdis("freeing bufs for %s", kring->name);
			netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
			nm_prdis("NOT freeing bufs for %s", kring->name);
		netmap_ring_free(na->nm_mem, ring);
/* call with NMA_LOCK held.
 * Allocate netmap rings and buffers for this card.
 * The rings are contiguous, but have variable size.
 * The kring array must follow the layout described
 * in netmap_krings_create().
netmap_mem2_rings_create(struct netmap_adapter *na)
	for (i = 0; i < netmap_all_rings(na, t); i++) {
		struct netmap_kring *kring = NMR(na, t)[i];
		struct netmap_ring *ring = kring->ring;
		if (ring || (!kring->users && !(kring->nr_kflags & NKR_NEEDRING))) {
			/* unneeded, or already created by somebody else */
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("NOT creating ring %s (ring %p, users %d needring %d)",
					kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
		if (netmap_debug & NM_DEBUG_MEM)
			nm_prinf("creating %s", kring->name);
		ndesc = kring->nkr_num_slots;
		len = sizeof(struct netmap_ring) +
			ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(na->nm_mem, len);
			nm_prerr("Cannot allocate %s_ring", nm_txrx2str(t));
		nm_prdis("txring at %p", ring);
		*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
		*(int64_t *)(uintptr_t)&ring->buf_ofs =
		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
			na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
			netmap_ring_offset(na->nm_mem, ring);
		/* copy values from kring */
		ring->head = kring->rhead;
		ring->cur = kring->rcur;
		ring->tail = kring->rtail;
		*(uint32_t *)(uintptr_t)&ring->nr_buf_size =
			netmap_mem_bufsize(na->nm_mem);
		nm_prdis("%s h %d c %d t %d", kring->name,
			ring->head, ring->cur, ring->tail);
		nm_prdis("initializing slots for %s_ring", nm_txrx2str(t));
		if (!(kring->nr_kflags & NKR_FAKERING)) {
			/* this is a real ring */
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("allocating buffers for %s", kring->name);
			if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
				nm_prerr("Cannot allocate buffers for %s_ring", nm_txrx2str(t));
			/* this is a fake ring, set all indices to 0 */
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("NOT allocating buffers for %s", kring->name);
			netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0);
		*(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id;
		*(uint16_t *)(uintptr_t)&ring->dir = kring->tx;
	/* we cannot actually clean up here, since we don't own kring->users
	 * and kring->nr_kflags & NKR_NEEDRING. The caller must decrement
	 * the first or zero-out the second, then call netmap_free_rings()
netmap_mem2_rings_delete(struct netmap_adapter *na)
	/* last instance, release bufs and rings */
	netmap_free_rings(na);
/* call with NMA_LOCK held */
 * Allocate the per-fd structure netmap_if.
 * We assume that the configuration stored in na
 * (number of tx/rx rings and descs) does not change while
 * the interface is in netmap mode.
static struct netmap_if *
netmap_mem2_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
	struct netmap_if *nifp;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, n[NR_TXRX], ntot;
		/* account for the (possibly fake) host rings */
		n[t] = netmap_all_rings(na, t);
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t));
	nifp = netmap_if_malloc(na->nm_mem, len);
		NMA_UNLOCK(na->nm_mem);
	/* initialize base fields -- override const */
	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	*(u_int *)(uintptr_t)&nifp->ni_host_tx_rings =
		(na->num_host_tx_rings ? na->num_host_tx_rings : 1);
	*(u_int *)(uintptr_t)&nifp->ni_host_rx_rings =
		(na->num_host_rx_rings ? na->num_host_rx_rings : 1);
	strlcpy(nifp->ni_name, na->name, sizeof(nifp->ni_name));
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	base = netmap_if_offset(na->nm_mem, nifp);
	for (i = 0; i < n[NR_TX]; i++) {
		/* XXX instead of ofs == 0 maybe use the offset of an error
		 * ring, like we do for buffers? */
		if (na->tx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_TX]
				&& i < priv->np_qlast[NR_TX]) {
			ofs = netmap_ring_offset(na->nm_mem,
						 na->tx_rings[i]->ring) - base;
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = ofs;
	for (i = 0; i < n[NR_RX]; i++) {
		/* XXX instead of ofs == 0 maybe use the offset of an error
		 * ring, like we do for buffers? */
		if (na->rx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_RX]
				&& i < priv->np_qlast[NR_RX]) {
			ofs = netmap_ring_offset(na->nm_mem,
						 na->rx_rings[i]->ring) - base;
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = ofs;
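	/*
	 * Illustration (added note): userspace reaches a ring from the
	 * nifp through the stored offset, which is essentially what the
	 * NETMAP_TXRING()/NETMAP_RXRING() macros in net/netmap_user.h do:
	 *
	 *	struct netmap_ring *ring = (struct netmap_ring *)
	 *		((char *)nifp + nifp->ring_ofs[i]);
	 */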
netmap_mem2_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
	if (nifp->ni_bufs_head)
		netmap_extra_free(na, nifp->ni_bufs_head);
	netmap_if_free(na->nm_mem, nifp);
netmap_mem2_deref(struct netmap_mem_d *nmd)
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("active = %d", nmd->active);
struct netmap_mem_ops netmap_mem_global_ops = {
	.nmd_get_lut = netmap_mem2_get_lut,
	.nmd_get_info = netmap_mem2_get_info,
	.nmd_ofstophys = netmap_mem2_ofstophys,
	.nmd_config = netmap_mem2_config,
	.nmd_finalize = netmap_mem2_finalize,
	.nmd_deref = netmap_mem2_deref,
	.nmd_delete = netmap_mem2_delete,
	.nmd_if_offset = netmap_mem2_if_offset,
	.nmd_if_new = netmap_mem2_if_new,
	.nmd_if_delete = netmap_mem2_if_delete,
	.nmd_rings_create = netmap_mem2_rings_create,
	.nmd_rings_delete = netmap_mem2_rings_delete
netmap_mem_pools_info_get(struct nmreq_pools_info *req,
				struct netmap_mem_d *nmd)
	ret = netmap_mem_get_info(nmd, &req->nr_memsize, NULL,
	req->nr_if_pool_offset = 0;
	req->nr_if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal;
	req->nr_if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize;
	req->nr_ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal;
	req->nr_ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal;
	req->nr_ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize;
	req->nr_buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal +
			nmd->pools[NETMAP_RING_POOL].memtotal;
	req->nr_buf_pool_objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
	req->nr_buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
struct netmap_mem_ext {
	struct netmap_mem_d up;
	struct nm_os_extmem *os;
	struct netmap_mem_ext *next, *prev;
/* call with nm_mem_list_lock held */
netmap_mem_ext_register(struct netmap_mem_ext *e)
	NM_MTX_LOCK(nm_mem_ext_list_lock);
	if (netmap_mem_ext_list)
		netmap_mem_ext_list->prev = e;
	e->next = netmap_mem_ext_list;
	netmap_mem_ext_list = e;
	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
/* call with nm_mem_list_lock held */
netmap_mem_ext_unregister(struct netmap_mem_ext *e)
		e->prev->next = e->next;
		netmap_mem_ext_list = e->next;
		e->next->prev = e->prev;
	e->prev = e->next = NULL;
static struct netmap_mem_ext *
netmap_mem_ext_search(struct nm_os_extmem *os)
	struct netmap_mem_ext *e;
	NM_MTX_LOCK(nm_mem_ext_list_lock);
	for (e = netmap_mem_ext_list; e; e = e->next) {
		if (nm_os_extmem_isequal(e->os, os)) {
			netmap_mem_get(&e->up);
	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
netmap_mem_ext_delete(struct netmap_mem_d *d)
	struct netmap_mem_ext *e =
		(struct netmap_mem_ext *)d;
	netmap_mem_ext_unregister(e);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &d->pools[i];
			nm_free_lut(p->lut, p->objtotal);
	nm_os_extmem_delete(e->os);
	netmap_mem2_delete(d);
netmap_mem_ext_config(struct netmap_mem_d *nmd)
struct netmap_mem_ops netmap_mem_ext_ops = {
	.nmd_get_lut = netmap_mem2_get_lut,
	.nmd_get_info = netmap_mem2_get_info,
	.nmd_ofstophys = netmap_mem2_ofstophys,
	.nmd_config = netmap_mem_ext_config,
	.nmd_finalize = netmap_mem2_finalize,
	.nmd_deref = netmap_mem2_deref,
	.nmd_delete = netmap_mem_ext_delete,
	.nmd_if_offset = netmap_mem2_if_offset,
	.nmd_if_new = netmap_mem2_if_new,
	.nmd_if_delete = netmap_mem2_if_delete,
	.nmd_rings_create = netmap_mem2_rings_create,
	.nmd_rings_delete = netmap_mem2_rings_delete
struct netmap_mem_d *
netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
	struct netmap_mem_ext *nme;
	struct nm_os_extmem *os = NULL;
	// XXX sanity checks
	if (pi->nr_if_pool_objtotal == 0)
		pi->nr_if_pool_objtotal = netmap_min_priv_params[NETMAP_IF_POOL].num;
	if (pi->nr_if_pool_objsize == 0)
		pi->nr_if_pool_objsize = netmap_min_priv_params[NETMAP_IF_POOL].size;
	if (pi->nr_ring_pool_objtotal == 0)
		pi->nr_ring_pool_objtotal = netmap_min_priv_params[NETMAP_RING_POOL].num;
	if (pi->nr_ring_pool_objsize == 0)
		pi->nr_ring_pool_objsize = netmap_min_priv_params[NETMAP_RING_POOL].size;
	if (pi->nr_buf_pool_objtotal == 0)
		pi->nr_buf_pool_objtotal = netmap_min_priv_params[NETMAP_BUF_POOL].num;
	if (pi->nr_buf_pool_objsize == 0)
		pi->nr_buf_pool_objsize = netmap_min_priv_params[NETMAP_BUF_POOL].size;
	if (netmap_verbose & NM_DEBUG_MEM)
		nm_prinf("if %d %d ring %d %d buf %d %d",
			pi->nr_if_pool_objtotal, pi->nr_if_pool_objsize,
			pi->nr_ring_pool_objtotal, pi->nr_ring_pool_objsize,
			pi->nr_buf_pool_objtotal, pi->nr_buf_pool_objsize);
	os = nm_os_extmem_create(usrptr, pi, &error);
		nm_prerr("os extmem creation failed");
	nme = netmap_mem_ext_search(os);
		nm_os_extmem_delete(os);
	if (netmap_verbose & NM_DEBUG_MEM)
		nm_prinf("not found, creating new");
	nme = _netmap_mem_private_new(sizeof(*nme),
			(struct netmap_obj_params[]){
				{ pi->nr_if_pool_objsize, pi->nr_if_pool_objtotal },
				{ pi->nr_ring_pool_objsize, pi->nr_ring_pool_objtotal },
				{ pi->nr_buf_pool_objsize, pi->nr_buf_pool_objtotal }},
			&netmap_mem_ext_ops,
	nr_pages = nm_os_extmem_nr_pages(os);
	/* from now on pages will be released by nme destructor;
	 * we let res = 0 to prevent release in out_unmap below
	os = NULL; /* pass ownership */
	clust = nm_os_extmem_nextpage(nme->os);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nme->up.pools[i];
		struct netmap_obj_params *o = &nme->up.params[i];
		p->_objsize = o->size;
		p->_clustsize = o->size;
		p->_clustentries = 1;
		p->lut = nm_alloc_lut(o->num);
		if (p->lut == NULL) {
		p->bitmap_slots = (o->num + sizeof(uint32_t) - 1) / sizeof(uint32_t);
		p->invalid_bitmap = nm_os_malloc(sizeof(uint32_t) * p->bitmap_slots);
		if (p->invalid_bitmap == NULL) {
		if (nr_pages == 0) {
		for (j = 0; j < o->num && nr_pages > 0; j++) {
			p->lut[j].vaddr = clust + off;
#if !defined(linux) && !defined(_WIN32)
			p->lut[j].paddr = vtophys(p->lut[j].vaddr);
			nm_prdis("%s %d at %p", p->name, j, p->lut[j].vaddr);
			noff = off + p->_objsize;
			if (noff < PAGE_SIZE) {
			nm_prdis("too big, recomputing offset...");
			while (noff >= PAGE_SIZE) {
				char *old_clust = clust;
				clust = nm_os_extmem_nextpage(nme->os);
				nm_prdis("noff %zu page %p nr_pages %d", noff,
					page_to_virt(*pages), nr_pages);
				if (noff > 0 && !nm_isset(p->invalid_bitmap, j) &&
						old_clust + PAGE_SIZE != clust))
					/* out of space or non contiguous,
					p->invalid_bitmap[ (j>>5) ] |= 1U << (j & 31U);
					nm_prdis("non contiguous at off %zu, drop", noff);
		p->numclusters = p->objtotal;
		p->memtotal = j * (size_t)p->_objsize;
		nm_prdis("%d memtotal %zu", j, p->memtotal);
	netmap_mem_ext_register(nme);
	netmap_mem_put(&nme->up);
	nm_os_extmem_delete(os);
#endif /* WITH_EXTMEM */
#ifdef WITH_PTNETMAP
	struct mem_pt_if *next;
	unsigned int nifp_offset;
/* Netmap allocator for ptnetmap guests. */
struct netmap_mem_ptg {
	struct netmap_mem_d up;
	vm_paddr_t nm_paddr;		/* physical address in the guest */
	void *nm_addr;			/* virtual address in the guest */
	struct netmap_lut buf_lut;	/* lookup table for BUF pool in the guest */
	nm_memid_t host_mem_id;		/* allocator identifier in the host */
	struct ptnetmap_memdev *ptn_dev;/* ptnetmap memdev */
	struct mem_pt_if *pt_ifs;	/* list of interfaces in passthrough */
/* Link a passthrough interface to a passthrough netmap allocator. */
netmap_mem_pt_guest_ifp_add(struct netmap_mem_d *nmd, struct ifnet *ifp,
			unsigned int nifp_offset)
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *ptif = nm_os_malloc(sizeof(*ptif));
	ptif->nifp_offset = nifp_offset;
	if (ptnmd->pt_ifs) {
		ptif->next = ptnmd->pt_ifs;
	ptnmd->pt_ifs = ptif;
	nm_prinf("ifp=%s,nifp_offset=%u",
		ptif->ifp->if_xname, ptif->nifp_offset);
/* Called with NMA_LOCK(nmd) held. */
static struct mem_pt_if *
netmap_mem_pt_guest_ifp_lookup(struct netmap_mem_d *nmd, struct ifnet *ifp)
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *curr;
	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
		if (curr->ifp == ifp) {
/* Unlink a passthrough interface from a passthrough netmap allocator. */
netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, struct ifnet *ifp)
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *prev = NULL;
	struct mem_pt_if *curr;
	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
		if (curr->ifp == ifp) {
				prev->next = curr->next;
				ptnmd->pt_ifs = curr->next;
			nm_prinf("removed (ifp=%s,nifp_offset=%u)",
				curr->ifp->if_xname, curr->nifp_offset);
netmap_mem_pt_guest_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
	*lut = ptnmd->buf_lut;
netmap_mem_pt_guest_get_info(struct netmap_mem_d *nmd, uint64_t *size,
				u_int *memflags, uint16_t *id)
	error = nmd->ops->nmd_config(nmd);
	*size = nmd->nm_totalsize;
	*memflags = nmd->flags;
netmap_mem_pt_guest_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	/* if the offset is valid, just return csb->base_addr + off */
	paddr = (vm_paddr_t)(ptnmd->nm_paddr + off);
	nm_prdis("off %lx padr %lx", off, (unsigned long)paddr);
netmap_mem_pt_guest_config(struct netmap_mem_d *nmd)
	/* nothing to do, we are configured on creation
	 * and configuration never changes thereafter
netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd)
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	if (nmd->flags & NETMAP_MEM_FINALIZED)
	if (ptnmd->ptn_dev == NULL) {
		nm_prerr("ptnetmap memdev not attached");
	/* Map memory through ptnetmap-memdev BAR. */
	error = nm_os_pt_memdev_iomap(ptnmd->ptn_dev, &ptnmd->nm_paddr,
				&ptnmd->nm_addr, &mem_size);
	/* Initialize the lut using the information contained in the
	 * ptnetmap memory device. */
	bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
				PTNET_MDEV_IO_BUF_POOL_OBJSZ);
	nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
				PTNET_MDEV_IO_BUF_POOL_OBJNUM);
	/* allocate the lut */
	if (ptnmd->buf_lut.lut == NULL) {
		nm_prinf("allocating lut");
		ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers);
		if (ptnmd->buf_lut.lut == NULL) {
			nm_prerr("lut allocation failed");
	/* we have physically contiguous memory mapped through PCI BAR */
	poolofs = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
				PTNET_MDEV_IO_BUF_POOL_OFS);
	vaddr = (char *)(ptnmd->nm_addr) + poolofs;
	paddr = ptnmd->nm_paddr + poolofs;
	for (i = 0; i < nbuffers; i++) {
		ptnmd->buf_lut.lut[i].vaddr = vaddr;
	ptnmd->buf_lut.objtotal = nbuffers;
	ptnmd->buf_lut.objsize = bufsize;
	nmd->nm_totalsize = mem_size;
	/* Initialize these fields as they are needed by
	 * netmap_mem_bufsize().
	 * XXX please improve this: why do we need this
	 * replication? Maybe nmd->pools[] should not be
	 * there for the guest allocator? */
	nmd->pools[NETMAP_BUF_POOL]._objsize = bufsize;
	nmd->pools[NETMAP_BUF_POOL]._objtotal = nbuffers;
	nmd->flags |= NETMAP_MEM_FINALIZED;
netmap_mem_pt_guest_deref(struct netmap_mem_d *nmd)
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	if (nmd->active == 1 &&
		(nmd->flags & NETMAP_MEM_FINALIZED)) {
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
		/* unmap ptnetmap-memdev memory */
		if (ptnmd->ptn_dev) {
			nm_os_pt_memdev_iounmap(ptnmd->ptn_dev);
		ptnmd->nm_addr = NULL;
		ptnmd->nm_paddr = 0;
netmap_mem_pt_guest_if_offset(struct netmap_mem_d *nmd, const void *vaddr)
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	return (const char *)(vaddr) - (char *)(ptnmd->nm_addr);
netmap_mem_pt_guest_delete(struct netmap_mem_d *nmd)
		nm_prinf("deleting %p", nmd);
	if (nmd->active > 0)
		nm_prerr("bug: deleting mem allocator with active=%d!", nmd->active);
		nm_prinf("done deleting %p", nmd);
	NMA_LOCK_DESTROY(nmd);
static struct netmap_if *
netmap_mem_pt_guest_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem;
	struct mem_pt_if *ptif;
	struct netmap_if *nifp = NULL;
	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
		nm_prerr("interface %s is not in passthrough", na->name);
	nifp = (struct netmap_if *)((char *)(ptnmd->nm_addr) +
netmap_mem_pt_guest_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
	struct mem_pt_if *ptif;
	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
		nm_prerr("interface %s is not in passthrough", na->name);
netmap_mem_pt_guest_rings_create(struct netmap_adapter *na)
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem;
	struct mem_pt_if *ptif;
	struct netmap_if *nifp;
	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
		nm_prerr("interface %s is not in passthrough", na->name);
	/* point each kring to the corresponding backend ring */
	nifp = (struct netmap_if *)((char *)ptnmd->nm_addr + ptif->nifp_offset);
	for (i = 0; i < netmap_all_rings(na, NR_TX); i++) {
		struct netmap_kring *kring = na->tx_rings[i];
		kring->ring = (struct netmap_ring *)
			((char *)nifp + nifp->ring_ofs[i]);
	for (i = 0; i < netmap_all_rings(na, NR_RX); i++) {
		struct netmap_kring *kring = na->rx_rings[i];
		kring->ring = (struct netmap_ring *)
			nifp->ring_ofs[netmap_all_rings(na, NR_TX) + i]);
netmap_mem_pt_guest_rings_delete(struct netmap_adapter *na)
	for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
		struct netmap_kring *kring = &NMR(na, t)[i];
static struct netmap_mem_ops netmap_mem_pt_guest_ops = {
	.nmd_get_lut = netmap_mem_pt_guest_get_lut,
	.nmd_get_info = netmap_mem_pt_guest_get_info,
	.nmd_ofstophys = netmap_mem_pt_guest_ofstophys,
	.nmd_config = netmap_mem_pt_guest_config,
	.nmd_finalize = netmap_mem_pt_guest_finalize,
	.nmd_deref = netmap_mem_pt_guest_deref,
	.nmd_if_offset = netmap_mem_pt_guest_if_offset,
	.nmd_delete = netmap_mem_pt_guest_delete,
	.nmd_if_new = netmap_mem_pt_guest_if_new,
	.nmd_if_delete = netmap_mem_pt_guest_if_delete,
	.nmd_rings_create = netmap_mem_pt_guest_rings_create,
	.nmd_rings_delete = netmap_mem_pt_guest_rings_delete
/* Called with nm_mem_list_lock held. */
static struct netmap_mem_d *
netmap_mem_pt_guest_find_memid(nm_memid_t mem_id)
	struct netmap_mem_d *mem = NULL;
	struct netmap_mem_d *scan = netmap_last_mem_d;
		/* find ptnetmap allocator through host ID */
		if (scan->ops->nmd_deref == netmap_mem_pt_guest_deref &&
			((struct netmap_mem_ptg *)(scan))->host_mem_id == mem_id) {
			NM_DBG_REFC(mem, __FUNCTION__, __LINE__);
	} while (scan != netmap_last_mem_d);
/* Called with nm_mem_list_lock held. */
static struct netmap_mem_d *
netmap_mem_pt_guest_create(nm_memid_t mem_id)
	struct netmap_mem_ptg *ptnmd;
	ptnmd = nm_os_malloc(sizeof(struct netmap_mem_ptg));
	if (ptnmd == NULL) {
	ptnmd->up.ops = &netmap_mem_pt_guest_ops;
	ptnmd->host_mem_id = mem_id;
	ptnmd->pt_ifs = NULL;
	/* Assign new id in the guest (we have the lock) */
	err = nm_mem_assign_id_locked(&ptnmd->up);
	ptnmd->up.flags &= ~NETMAP_MEM_FINALIZED;
	ptnmd->up.flags |= NETMAP_MEM_IO;
	NMA_LOCK_INIT(&ptnmd->up);
	snprintf(ptnmd->up.name, NM_MEM_NAMESZ, "%d", ptnmd->up.nm_id);
	netmap_mem_pt_guest_delete(&ptnmd->up);
 * find host id in guest allocators and create guest allocator
 * if it is not there
static struct netmap_mem_d *
netmap_mem_pt_guest_get(nm_memid_t mem_id)
	struct netmap_mem_d *nmd;
	NM_MTX_LOCK(nm_mem_list_lock);
	nmd = netmap_mem_pt_guest_find_memid(mem_id);
		nmd = netmap_mem_pt_guest_create(mem_id);
	NM_MTX_UNLOCK(nm_mem_list_lock);
 * The guest allocator can be created by ptnetmap_memdev (during the device
 * attach) or by the ptnet device (during netmap_attach).
 * The order is not important (we have a different order on Linux and FreeBSD).
 * The first one to run creates the allocator; the second one simply attaches to it.
/* Called when ptnetmap_memdev is attaching, to attach a new allocator in
struct netmap_mem_d *
netmap_mem_pt_guest_attach(struct ptnetmap_memdev *ptn_dev, nm_memid_t mem_id)
	struct netmap_mem_d *nmd;
	struct netmap_mem_ptg *ptnmd;
	nmd = netmap_mem_pt_guest_get(mem_id);
	/* assign this device to the guest allocator */
	ptnmd = (struct netmap_mem_ptg *)nmd;
	ptnmd->ptn_dev = ptn_dev;
/* Called when the ptnet device is attaching */
struct netmap_mem_d *
netmap_mem_pt_guest_new(struct ifnet *ifp,
			unsigned int nifp_offset,
	struct netmap_mem_d *nmd;
	nmd = netmap_mem_pt_guest_get((nm_memid_t)memid);
	netmap_mem_pt_guest_ifp_add(nmd, ifp, nifp_offset);
#endif /* WITH_PTNETMAP */