/*
 * Copyright (c) 2002-2007 Neterion, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
29 #include <dev/nxge/include/xge-os-pal.h>
30 #include <dev/nxge/include/xgehal-mm.h>
31 #include <dev/nxge/include/xge-debug.h>
36 * Will resize mempool up to %num_allocate value.
39 __hal_mempool_grow(xge_hal_mempool_t *mempool, int num_allocate,
42 int i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
43 int n_items = mempool->items_per_memblock;
47 if ((mempool->memblocks_allocated + num_allocate) >
48 mempool->memblocks_max) {
49 xge_debug_mm(XGE_ERR, "%s",
50 "__hal_mempool_grow: can grow anymore");
51 return XGE_HAL_ERR_OUT_OF_MEMORY;
54 for (i = mempool->memblocks_allocated;
55 i < mempool->memblocks_allocated + num_allocate; i++) {
58 ((mempool->memblocks_allocated+num_allocate-1) == i);
59 xge_hal_mempool_dma_t *dma_object =
60 mempool->memblocks_dma_arr + i;
64 dma_flags = XGE_OS_DMA_CACHELINE_ALIGNED;
65 #ifdef XGE_HAL_DMA_DTR_CONSISTENT
66 dma_flags |= XGE_OS_DMA_CONSISTENT;
68 dma_flags |= XGE_OS_DMA_STREAMING;
71 /* allocate DMA-capable memblock */
72 mempool->memblocks_arr[i] = xge_os_dma_malloc(mempool->pdev,
73 mempool->memblock_size,
76 &dma_object->acc_handle);
77 if (mempool->memblocks_arr[i] == NULL) {
79 "memblock[%d]: out of DMA memory", i);
80 return XGE_HAL_ERR_OUT_OF_MEMORY;
82 xge_os_memzero(mempool->memblocks_arr[i],
83 mempool->memblock_size);
84 the_memblock = mempool->memblocks_arr[i];
86 /* allocate memblock's private part. Each DMA memblock
87 * has a space allocated for item's private usage upon
88 * mempool's user request. Each time mempool grows, it will
89 * allocate new memblock and its private part at once.
90 * This helps to minimize memory usage a lot. */
91 mempool->memblocks_priv_arr[i] = xge_os_malloc(mempool->pdev,
92 mempool->items_priv_size * n_items);
93 if (mempool->memblocks_priv_arr[i] == NULL) {
94 xge_os_dma_free(mempool->pdev,
96 mempool->memblock_size,
97 &dma_object->acc_handle,
100 "memblock_priv[%d]: out of virtual memory, "
101 "requested %d(%d:%d) bytes", i,
102 mempool->items_priv_size * n_items,
103 mempool->items_priv_size, n_items);
104 return XGE_HAL_ERR_OUT_OF_MEMORY;
106 xge_os_memzero(mempool->memblocks_priv_arr[i],
107 mempool->items_priv_size * n_items);
109 /* map memblock to physical memory */
110 dma_object->addr = xge_os_dma_map(mempool->pdev,
113 mempool->memblock_size,
114 XGE_OS_DMA_DIR_BIDIRECTIONAL,
115 #ifdef XGE_HAL_DMA_DTR_CONSISTENT
116 XGE_OS_DMA_CONSISTENT
121 if (dma_object->addr == XGE_OS_INVALID_DMA_ADDR) {
122 xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i],
123 mempool->items_priv_size *
125 xge_os_dma_free(mempool->pdev,
127 mempool->memblock_size,
128 &dma_object->acc_handle,
129 &dma_object->handle);
130 return XGE_HAL_ERR_OUT_OF_MAPPING;
133 /* fill the items hash array */
134 for (j=0; j<n_items; j++) {
135 int index = i*n_items + j;
137 if (first_time && index >= mempool->items_initial) {
141 mempool->items_arr[index] =
142 ((char *)the_memblock + j*mempool->item_size);
144 /* let caller to do more job on each item */
145 if (mempool->item_func_alloc != NULL) {
146 xge_hal_status_e status;
148 if ((status = mempool->item_func_alloc(
153 mempool->items_arr[index],
156 mempool->userdata)) != XGE_HAL_OK) {
158 if (mempool->item_func_free != NULL) {
161 for (k=0; k<j; k++) {
163 index =i*n_items + k;
165 (void)mempool->item_func_free(
166 mempool, the_memblock,
168 mempool->items_arr[index],
174 xge_os_free(mempool->pdev,
175 mempool->memblocks_priv_arr[i],
176 mempool->items_priv_size *
178 xge_os_dma_unmap(mempool->pdev,
181 mempool->memblock_size,
182 XGE_OS_DMA_DIR_BIDIRECTIONAL);
183 xge_os_dma_free(mempool->pdev,
185 mempool->memblock_size,
186 &dma_object->acc_handle,
187 &dma_object->handle);
192 mempool->items_current = index + 1;
195 xge_debug_mm(XGE_TRACE,
196 "memblock%d: allocated %dk, vaddr 0x"XGE_OS_LLXFMT", "
197 "dma_addr 0x"XGE_OS_LLXFMT, i, mempool->memblock_size / 1024,
198 (unsigned long long)(ulong_t)mempool->memblocks_arr[i],
199 (unsigned long long)dma_object->addr);
203 if (first_time && mempool->items_current ==
204 mempool->items_initial) {
209 /* increment actual number of allocated memblocks */
210 mempool->memblocks_allocated += *num_allocated;
216 * xge_hal_mempool_create
223 * This function will create memory pool object. Pool may grow but will
224 * never shrink. Pool consists of number of dynamically allocated blocks
225 * with size enough to hold %items_initial number of items. Memory is
226 * DMA-able but client must map/unmap before interoperating with the device.
227 * See also: xge_os_dma_map(), xge_hal_dma_unmap(), xge_hal_status_e{}.
230 __hal_mempool_create(pci_dev_h pdev, int memblock_size, int item_size,
231 int items_priv_size, int items_initial, int items_max,
232 xge_hal_mempool_item_f item_func_alloc,
233 xge_hal_mempool_item_f item_func_free, void *userdata)
235 xge_hal_status_e status;
236 int memblocks_to_allocate;
237 xge_hal_mempool_t *mempool;
240 if (memblock_size < item_size) {
241 xge_debug_mm(XGE_ERR,
242 "memblock_size %d < item_size %d: misconfiguration",
243 memblock_size, item_size);
247 mempool = (xge_hal_mempool_t *) \
248 xge_os_malloc(pdev, sizeof(xge_hal_mempool_t));
249 if (mempool == NULL) {
250 xge_debug_mm(XGE_ERR, "mempool allocation failure");
253 xge_os_memzero(mempool, sizeof(xge_hal_mempool_t));
255 mempool->pdev = pdev;
256 mempool->memblock_size = memblock_size;
257 mempool->items_max = items_max;
258 mempool->items_initial = items_initial;
259 mempool->item_size = item_size;
260 mempool->items_priv_size = items_priv_size;
261 mempool->item_func_alloc = item_func_alloc;
262 mempool->item_func_free = item_func_free;
263 mempool->userdata = userdata;
265 mempool->memblocks_allocated = 0;
267 mempool->items_per_memblock = memblock_size / item_size;
269 mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
270 mempool->items_per_memblock;
272 /* allocate array of memblocks */
273 mempool->memblocks_arr = (void ** ) xge_os_malloc(mempool->pdev,
274 sizeof(void*) * mempool->memblocks_max);
275 if (mempool->memblocks_arr == NULL) {
276 xge_debug_mm(XGE_ERR, "memblocks_arr allocation failure");
277 __hal_mempool_destroy(mempool);
280 xge_os_memzero(mempool->memblocks_arr,
281 sizeof(void*) * mempool->memblocks_max);
283 /* allocate array of private parts of items per memblocks */
284 mempool->memblocks_priv_arr = (void **) xge_os_malloc(mempool->pdev,
285 sizeof(void*) * mempool->memblocks_max);
286 if (mempool->memblocks_priv_arr == NULL) {
287 xge_debug_mm(XGE_ERR, "memblocks_priv_arr allocation failure");
288 __hal_mempool_destroy(mempool);
291 xge_os_memzero(mempool->memblocks_priv_arr,
292 sizeof(void*) * mempool->memblocks_max);
294 /* allocate array of memblocks DMA objects */
295 mempool->memblocks_dma_arr =
296 (xge_hal_mempool_dma_t *) xge_os_malloc(mempool->pdev,
297 sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);
299 if (mempool->memblocks_dma_arr == NULL) {
300 xge_debug_mm(XGE_ERR, "memblocks_dma_arr allocation failure");
301 __hal_mempool_destroy(mempool);
304 xge_os_memzero(mempool->memblocks_dma_arr,
305 sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);
307 /* allocate hash array of items */
308 mempool->items_arr = (void **) xge_os_malloc(mempool->pdev,
309 sizeof(void*) * mempool->items_max);
310 if (mempool->items_arr == NULL) {
311 xge_debug_mm(XGE_ERR, "items_arr allocation failure");
312 __hal_mempool_destroy(mempool);
315 xge_os_memzero(mempool->items_arr, sizeof(void *) * mempool->items_max);
317 mempool->shadow_items_arr = (void **) xge_os_malloc(mempool->pdev,
318 sizeof(void*) * mempool->items_max);
319 if (mempool->shadow_items_arr == NULL) {
320 xge_debug_mm(XGE_ERR, "shadow_items_arr allocation failure");
321 __hal_mempool_destroy(mempool);
324 xge_os_memzero(mempool->shadow_items_arr,
325 sizeof(void *) * mempool->items_max);
327 /* calculate initial number of memblocks */
328 memblocks_to_allocate = (mempool->items_initial +
329 mempool->items_per_memblock - 1) /
330 mempool->items_per_memblock;
332 xge_debug_mm(XGE_TRACE, "allocating %d memblocks, "
333 "%d items per memblock", memblocks_to_allocate,
334 mempool->items_per_memblock);
336 /* pre-allocate the mempool */
337 status = __hal_mempool_grow(mempool, memblocks_to_allocate, &allocated);
338 xge_os_memcpy(mempool->shadow_items_arr, mempool->items_arr,
339 sizeof(void*) * mempool->items_max);
340 if (status != XGE_HAL_OK) {
341 xge_debug_mm(XGE_ERR, "mempool_grow failure");
342 __hal_mempool_destroy(mempool);
346 xge_debug_mm(XGE_TRACE,
347 "total: allocated %dk of DMA-capable memory",
348 mempool->memblock_size * allocated / 1024);
354 * xge_hal_mempool_destroy
357 __hal_mempool_destroy(xge_hal_mempool_t *mempool)
361 for (i=0; i<mempool->memblocks_allocated; i++) {
362 xge_hal_mempool_dma_t *dma_object;
364 xge_assert(mempool->memblocks_arr[i]);
365 xge_assert(mempool->memblocks_dma_arr + i);
367 dma_object = mempool->memblocks_dma_arr + i;
369 for (j=0; j<mempool->items_per_memblock; j++) {
370 int index = i*mempool->items_per_memblock + j;
372 /* to skip last partially filled(if any) memblock */
373 if (index >= mempool->items_current) {
377 /* let caller to do more job on each item */
378 if (mempool->item_func_free != NULL) {
380 mempool->item_func_free(mempool,
381 mempool->memblocks_arr[i],
383 mempool->shadow_items_arr[index],
384 index, /* unused */ -1,
389 xge_os_dma_unmap(mempool->pdev,
390 dma_object->handle, dma_object->addr,
391 mempool->memblock_size, XGE_OS_DMA_DIR_BIDIRECTIONAL);
393 xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i],
394 mempool->items_priv_size * mempool->items_per_memblock);
396 xge_os_dma_free(mempool->pdev, mempool->memblocks_arr[i],
397 mempool->memblock_size, &dma_object->acc_handle,
398 &dma_object->handle);
401 if (mempool->items_arr) {
402 xge_os_free(mempool->pdev, mempool->items_arr, sizeof(void*) *
406 if (mempool->shadow_items_arr) {
407 xge_os_free(mempool->pdev, mempool->shadow_items_arr,
408 sizeof(void*) * mempool->items_max);
411 if (mempool->memblocks_dma_arr) {
412 xge_os_free(mempool->pdev, mempool->memblocks_dma_arr,
413 sizeof(xge_hal_mempool_dma_t) *
414 mempool->memblocks_max);
417 if (mempool->memblocks_priv_arr) {
418 xge_os_free(mempool->pdev, mempool->memblocks_priv_arr,
419 sizeof(void*) * mempool->memblocks_max);
422 if (mempool->memblocks_arr) {
423 xge_os_free(mempool->pdev, mempool->memblocks_arr,
424 sizeof(void*) * mempool->memblocks_max);
427 xge_os_free(mempool->pdev, mempool, sizeof(xge_hal_mempool_t));