2 * Copyright (c) 2012 Mellanox Technologies, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
45 static int mlx5_bitmap_init(struct mlx5_bitmap *bitmap, uint32_t num,
53 bitmap->avail = bitmap->max;
54 bitmap->table = calloc(BITS_TO_LONGS(bitmap->max), sizeof(uint32_t));
/*
 * bitmap_free_range() - return `cnt` consecutive objects starting at `obj`
 * to the bitmap allocator.
 * NOTE(review): several lines of this function are elided in this view
 * (declarations, `avail` accounting, braces); comments cover visible code.
 */
static void bitmap_free_range(struct mlx5_bitmap *bitmap, uint32_t obj,
	/* Reduce the handle into table range; masking with max - 1
	 * presumably relies on max being a power of two — TODO confirm. */
	obj &= bitmap->max - 1;

	for (i = 0; i < cnt; i++)
		mlx5_clear_bit(obj + i, bitmap->table);
	/* Restart the next linear scan no later than the lowest freed slot. */
	bitmap->last = min(bitmap->last, obj);
	/* Advance `top` within mask — looks like a wrap/generation marker;
	 * confirm against the allocation path. */
	bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
75 static int bitmap_empty(struct mlx5_bitmap *bitmap)
77 return (bitmap->avail == bitmap->max) ? 1 : 0;
/*
 * bitmap_avail() - number of free objects remaining.
 * NOTE(review): the body is elided in this view — presumably returns
 * bitmap->avail; confirm against the full source.
 */
static int bitmap_avail(struct mlx5_bitmap *bitmap)
/*
 * mlx5_bitmap_cleanup() - release the allocator's resources.
 * NOTE(review): the body is elided in this view — presumably frees
 * bitmap->table; confirm against the full source.
 */
static void mlx5_bitmap_cleanup(struct mlx5_bitmap *bitmap)
/*
 * free_huge_mem() - tear down a hugetlb segment: drop its bitmap, detach
 * the SysV shared-memory mapping, and mark the segment for removal.
 * NOTE(review): enclosing braces (and possibly a trailing free of `hmem`)
 * are elided in this view.
 */
static void free_huge_mem(struct mlx5_hugetlb_mem *hmem)
	mlx5_bitmap_cleanup(&hmem->bitmap);
	/* Detach failure is only logged — nothing useful to do about it. */
	if (shmdt(hmem->shmaddr) == -1)
		mlx5_dbg(stderr, MLX5_DBG_CONTIG, "%s\n", strerror(errno));
	/* IPC_RMID destroys the segment once all attachments are gone. */
	shmctl(hmem->shmid, IPC_RMID, NULL);
/*
 * mlx5_bitmap_alloc() - allocate a single object from the bitmap.
 * NOTE(review): lines are elided in this view (wrap handling, `avail`
 * accounting, the not-found path and the return statements).
 */
static int mlx5_bitmap_alloc(struct mlx5_bitmap *bitmap)
	/* Linear scan for the first free slot. */
	obj = mlx5_find_first_zero_bit(bitmap->table, bitmap->max);
	if (obj < bitmap->max) {
		mlx5_set_bit(obj, bitmap->table);
		/* Remember where to resume the next scan. */
		bitmap->last = (obj + 1);
		if (bitmap->last == bitmap->max)
/*
 * find_aligned_range() - search `bitmap` from `start` for a run of `len`
 * consecutive clear bits whose first bit is `alignment`-aligned.
 * NOTE(review): parts of the scan loop, `end` computation and the return
 * paths are elided in this view.
 */
static uint32_t find_aligned_range(unsigned long *bitmap,
				   uint32_t start, uint32_t nbits,
				   int len, int alignment)
	/* Round the starting index up to the requested alignment. */
	start = align(start, alignment);

	/* Skip over currently-set (allocated) bits. */
	while ((start < nbits) && mlx5_test_bit(start, bitmap))

	/* Verify the remainder of the candidate range is free. */
	for (i = start + 1; i < end; i++) {
		if (mlx5_test_bit(i, bitmap)) {
/*
 * bitmap_alloc_range() - allocate `cnt` consecutive objects with the given
 * alignment; a single unaligned object takes the fast path through
 * mlx5_bitmap_alloc().
 * NOTE(review): lines are elided in this view (declarations, the failure
 * returns, closing braces); comments cover visible code only.
 */
static int bitmap_alloc_range(struct mlx5_bitmap *bitmap, int cnt,
	if (cnt == 1 && align == 1)
		return mlx5_bitmap_alloc(bitmap);

	/* Request larger than the whole bitmap can never succeed. */
	if (cnt > bitmap->max)

	/* First try from the last allocation point... */
	obj = find_aligned_range(bitmap->table, bitmap->last,
				 bitmap->max, cnt, align);
	if (obj >= bitmap->max) {
		/* ...then wrap and retry from the beginning. */
		bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
		obj = find_aligned_range(bitmap->table, 0, bitmap->max,

	if (obj < bitmap->max) {
		/* Claim the whole run. */
		for (i = 0; i < cnt; i++)
			mlx5_set_bit(obj + i, bitmap->table);
		if (obj == bitmap->last) {
			bitmap->last = (obj + cnt);
			if (bitmap->last >= bitmap->max)

	bitmap->avail -= cnt;
/*
 * alloc_huge_mem() - create a SysV hugetlb shared-memory segment big
 * enough for `size` bytes and wrap it with a per-chunk bitmap.
 * NOTE(review): lines are elided in this view (NULL checks, goto labels,
 * return statements); comments cover visible code only.
 */
static struct mlx5_hugetlb_mem *alloc_huge_mem(size_t size)
	struct mlx5_hugetlb_mem *hmem;

	hmem = malloc(sizeof(*hmem));

	/* Segment length rounds up to the hugetlb granularity. */
	shm_len = align(size, MLX5_SHM_LENGTH);
	hmem->shmid = shmget(IPC_PRIVATE, shm_len, SHM_HUGETLB | SHM_R | SHM_W);
	if (hmem->shmid == -1) {
		mlx5_dbg(stderr, MLX5_DBG_CONTIG, "%s\n", strerror(errno));

	hmem->shmaddr = shmat(hmem->shmid, MLX5_SHM_ADDR, MLX5_SHMAT_FLAGS);
	if (hmem->shmaddr == (void *)-1) {
		mlx5_dbg(stderr, MLX5_DBG_CONTIG, "%s\n", strerror(errno));

	/* One bitmap bit per MLX5_Q_CHUNK_SIZE chunk of the segment. */
	if (mlx5_bitmap_init(&hmem->bitmap, shm_len / MLX5_Q_CHUNK_SIZE,
			     shm_len / MLX5_Q_CHUNK_SIZE - 1)) {
		mlx5_dbg(stderr, MLX5_DBG_CONTIG, "%s\n", strerror(errno));

	/*
	 * Marked to be destroyed when process detaches from shmget segment
	 */
	shmctl(hmem->shmid, IPC_RMID, NULL);

	/* Error unwind: detach and destroy the segment. */
	if (shmdt(hmem->shmaddr) == -1)
		mlx5_dbg(stderr, MLX5_DBG_CONTIG, "%s\n", strerror(errno));

	shmctl(hmem->shmid, IPC_RMID, NULL);
/*
 * alloc_huge_buf() - back `buf` with chunks carved out of a hugetlb
 * segment, creating a new segment when no existing one has room.
 * NOTE(review): many lines are elided in this view (declarations, gotos,
 * labels, returns); comments cover visible code only.
 */
static int alloc_huge_buf(struct mlx5_context *mctx, struct mlx5_buf *buf,
			  size_t size, int page_size)
	struct mlx5_hugetlb_mem *hmem;

	/* Round the request up to whole chunks. */
	buf->length = align(size, MLX5_Q_CHUNK_SIZE);
	nchunk = buf->length / MLX5_Q_CHUNK_SIZE;

	/* First look for room in an already-attached segment. */
	mlx5_spin_lock(&mctx->hugetlb_lock);
	TAILQ_FOREACH(hmem, &mctx->hugetlb_list, entry) {
		if (bitmap_avail(&hmem->bitmap)) {
			buf->base = bitmap_alloc_range(&hmem->bitmap, nchunk, 1);
			if (buf->base != -1) {
	mlx5_spin_unlock(&mctx->hugetlb_lock);

	/* No room anywhere: create a fresh segment sized for this buffer. */
	hmem = alloc_huge_mem(buf->length);

	buf->base = bitmap_alloc_range(&hmem->bitmap, nchunk, 1);
	if (buf->base == -1) {
		/* A brand-new segment must be able to satisfy the request. */
		/* TBD: remove after proven stability */
		fprintf(stderr, "BUG: huge allocation\n");

	/* Publish the new segment; segments with space stay at the head. */
	mlx5_spin_lock(&mctx->hugetlb_lock);
	if (bitmap_avail(&hmem->bitmap))
		TAILQ_INSERT_HEAD(&mctx->hugetlb_list, hmem, entry);

		TAILQ_INSERT_TAIL(&mctx->hugetlb_list, hmem, entry);
	mlx5_spin_unlock(&mctx->hugetlb_lock);

	/* Translate the chunk index into a user address. */
	buf->buf = hmem->shmaddr + buf->base * MLX5_Q_CHUNK_SIZE;

	ret = ibv_dontfork_range(buf->buf, buf->length);

		mlx5_dbg(stderr, MLX5_DBG_CONTIG, "\n");

	buf->type = MLX5_ALLOC_TYPE_HUGE;

	/* Error unwind: give the chunks back; drop segments that emptied. */
	mlx5_spin_lock(&mctx->hugetlb_lock);
	bitmap_free_range(&hmem->bitmap, buf->base, nchunk);
	if (bitmap_empty(&hmem->bitmap)) {
		TAILQ_REMOVE(&mctx->hugetlb_list, hmem, entry);
		mlx5_spin_unlock(&mctx->hugetlb_lock);

	mlx5_spin_unlock(&mctx->hugetlb_lock);
/*
 * free_huge_buf() - return `buf`'s chunks to their hugetlb segment and
 * release the segment entirely once its bitmap is empty.
 * NOTE(review): the else branch and closing braces are elided in this view.
 */
static void free_huge_buf(struct mlx5_context *ctx, struct mlx5_buf *buf)
	nchunk = buf->length / MLX5_Q_CHUNK_SIZE;
	mlx5_spin_lock(&ctx->hugetlb_lock);
	bitmap_free_range(&buf->hmem->bitmap, buf->base, nchunk);
	if (bitmap_empty(&buf->hmem->bitmap)) {
		/* Unlink under the lock, but drop it before free_huge_mem()
		 * since that detaches/destroys the shm segment. */
		TAILQ_REMOVE(&ctx->hugetlb_list, buf->hmem, entry);
		mlx5_spin_unlock(&ctx->hugetlb_lock);
		free_huge_mem(buf->hmem);

		mlx5_spin_unlock(&ctx->hugetlb_lock);
323 int mlx5_alloc_prefered_buf(struct mlx5_context *mctx,
324 struct mlx5_buf *buf,
325 size_t size, int page_size,
326 enum mlx5_alloc_type type,
327 const char *component)
332 * Fallback mechanism priority:
337 if (type == MLX5_ALLOC_TYPE_HUGE ||
338 type == MLX5_ALLOC_TYPE_PREFER_HUGE ||
339 type == MLX5_ALLOC_TYPE_ALL) {
340 ret = alloc_huge_buf(mctx, buf, size, page_size);
344 if (type == MLX5_ALLOC_TYPE_HUGE)
347 mlx5_dbg(stderr, MLX5_DBG_CONTIG,
348 "Huge mode allocation failed, fallback to %s mode\n",
349 MLX5_ALLOC_TYPE_ALL ? "contig" : "default");
352 if (type == MLX5_ALLOC_TYPE_CONTIG ||
353 type == MLX5_ALLOC_TYPE_PREFER_CONTIG ||
354 type == MLX5_ALLOC_TYPE_ALL) {
355 ret = mlx5_alloc_buf_contig(mctx, buf, size, page_size, component);
359 if (type == MLX5_ALLOC_TYPE_CONTIG)
361 mlx5_dbg(stderr, MLX5_DBG_CONTIG,
362 "Contig allocation failed, fallback to default mode\n");
365 return mlx5_alloc_buf(buf, size, page_size);
/*
 * mlx5_free_actual_buf() - dispatch to the matching free routine for the
 * way `buf` was allocated (buf->type).
 * NOTE(review): the switch header, the ANON case body, `break`s and the
 * return are elided in this view.
 */
int mlx5_free_actual_buf(struct mlx5_context *ctx, struct mlx5_buf *buf)
	case MLX5_ALLOC_TYPE_ANON:

	case MLX5_ALLOC_TYPE_HUGE:
		free_huge_buf(ctx, buf);

	case MLX5_ALLOC_TYPE_CONTIG:
		mlx5_free_buf_contig(ctx, buf);

		/* Unknown type indicates internal state corruption. */
		fprintf(stderr, "Bad allocation type\n");
/* This function computes log2(v) rounded up.
   We don't want to have a dependency to libm which exposes ceil & log2 APIs.
   Code was written based on public domain code:
   URL: http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog.

   Fix: the round-up test shifted a signed literal (1 << r), which is
   undefined behavior when r == 31; use an unsigned literal instead.
*/
static uint32_t mlx5_get_block_order(uint32_t v)
{
	static const uint32_t bits_arr[] = {0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000};
	static const uint32_t shift_arr[] = {1, 2, 4, 8, 16};
	const uint32_t input_val = v;
	uint32_t r = 0;	/* result of log2(v) will go here */
	int i;

	/* Binary-search the highest set bit, accumulating its position. */
	for (i = 4; i >= 0; i--) {
		if (v & bits_arr[i]) {
			v >>= shift_arr[i];
			r |= shift_arr[i];
		}
	}
	/* Rounding up if required: any bit below the MSB means v was not
	 * an exact power of two.  1U avoids signed overflow at r == 31. */
	r += !!(input_val & ((1U << r) - 1));

	return r;
}
/*
 * mlx5_get_alloc_type() - resolve the allocation type for `component`,
 * starting from `default_type` and overriding it from the environment
 * variable "<COMPONENT>_ALLOC_TYPE" when it is set to a known value.
 * NOTE(review): declarations, braces and the getenv() NULL check are
 * elided in this view.
 */
void mlx5_get_alloc_type(const char *component,
			 enum mlx5_alloc_type *alloc_type,
			 enum mlx5_alloc_type default_type)
	snprintf(name, sizeof(name), "%s_ALLOC_TYPE", component);

	*alloc_type = default_type;

	env_value = getenv(name);

	/* Case-insensitive match of the override value. */
	if (!strcasecmp(env_value, "ANON"))
		*alloc_type = MLX5_ALLOC_TYPE_ANON;
	else if (!strcasecmp(env_value, "HUGE"))
		*alloc_type = MLX5_ALLOC_TYPE_HUGE;
	else if (!strcasecmp(env_value, "CONTIG"))
		*alloc_type = MLX5_ALLOC_TYPE_CONTIG;
	else if (!strcasecmp(env_value, "PREFER_CONTIG"))
		*alloc_type = MLX5_ALLOC_TYPE_PREFER_CONTIG;
	else if (!strcasecmp(env_value, "PREFER_HUGE"))
		*alloc_type = MLX5_ALLOC_TYPE_PREFER_HUGE;
	else if (!strcasecmp(env_value, "ALL"))
		*alloc_type = MLX5_ALLOC_TYPE_ALL;
446 static void mlx5_alloc_get_env_info(int *max_block_log,
448 const char *component)
455 /* First set defaults */
456 *max_block_log = MLX5_MAX_LOG2_CONTIG_BLOCK_SIZE;
457 *min_block_log = MLX5_MIN_LOG2_CONTIG_BLOCK_SIZE;
459 snprintf(name, sizeof(name), "%s_MAX_LOG2_CONTIG_BSIZE", component);
463 if (value <= MLX5_MAX_LOG2_CONTIG_BLOCK_SIZE &&
464 value >= MLX5_MIN_LOG2_CONTIG_BLOCK_SIZE)
465 *max_block_log = value;
467 fprintf(stderr, "Invalid value %d for %s\n",
470 sprintf(name, "%s_MIN_LOG2_CONTIG_BSIZE", component);
474 if (value >= MLX5_MIN_LOG2_CONTIG_BLOCK_SIZE &&
475 value <= *max_block_log)
476 *min_block_log = value;
478 fprintf(stderr, "Invalid value %d for %s\n",
/*
 * mlx5_alloc_buf_contig() - request physically contiguous pages from the
 * kernel via a command-encoded mmap offset, reducing the block order
 * until the mapping succeeds or the minimum order is reached.
 * NOTE(review): declarations, one parameter line, the do-loop header,
 * EINVAL handling and error returns are elided in this view.
 */
int mlx5_alloc_buf_contig(struct mlx5_context *mctx,
			  struct mlx5_buf *buf, size_t size,
			  const char *component)
	void *addr = MAP_FAILED;

	struct ibv_context *context = &mctx->ibv_ctx;

	/* Environment-tunable bounds on the block order. */
	mlx5_alloc_get_env_info(&max_block_log,

	/* Start from ceil(log2(size)), clamped to the maximum order. */
	block_size_exp = mlx5_get_block_order(size);

	if (block_size_exp > max_block_log)
		block_size_exp = max_block_log;

	/* Encode the contig-pages command and order into the mmap offset;
	 * the kernel decodes them from the page offset. */
	set_command(MLX5_MMAP_GET_CONTIGUOUS_PAGES_CMD, &offset);
	set_order(block_size_exp, &offset);
	addr = mmap(NULL , size, PROT_WRITE | PROT_READ, MAP_SHARED,
		    context->cmd_fd, page_size * offset);
	if (addr != MAP_FAILED)

	/*
	 * The kernel returns EINVAL if not supported
	 */

	} while (block_size_exp >= min_block_log);
	mlx5_dbg(mctx->dbg_fp, MLX5_DBG_CONTIG, "block order %d, addr %p\n",
		 block_size_exp, addr);

	if (addr == MAP_FAILED)

	if (ibv_dontfork_range(addr, size)) {

	buf->type = MLX5_ALLOC_TYPE_CONTIG;
539 void mlx5_free_buf_contig(struct mlx5_context *mctx, struct mlx5_buf *buf)
541 ibv_dofork_range(buf->buf, buf->length);
542 munmap(buf->buf, buf->length);
/*
 * mlx5_alloc_buf() - default allocator: page-aligned anonymous memory
 * registered with ibv_dontfork_range().
 * NOTE(review): the error-handling lines between these statements and the
 * final return are elided in this view.
 */
int mlx5_alloc_buf(struct mlx5_buf *buf, size_t size, int page_size)
	al_size = align(size, page_size);
	/* posix_memalign() returns an errno value; 0 means success. */
	ret = posix_memalign(&buf->buf, page_size, al_size);

	ret = ibv_dontfork_range(buf->buf, al_size);

	buf->length = al_size;
	buf->type = MLX5_ALLOC_TYPE_ANON;
567 void mlx5_free_buf(struct mlx5_buf *buf)
569 ibv_dofork_range(buf->buf, buf->length);