/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $FreeBSD$
 */

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>
#include <linux/wait.h>

#include <rdma/ib_fmr_pool.h>

#include "core_priv.h"

#define PFX "fmr_pool: "

enum {
        IB_FMR_MAX_REMAPS = 32,

        IB_FMR_HASH_BITS  = 8,
        IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
        IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
};

/*
 * If an FMR is not in use, then the list member will point to either
 * its pool's free_list (if the FMR can be mapped again; that is,
 * remap_count < pool->max_remaps) or its pool's dirty_list (if the
 * FMR needs to be unmapped before being remapped).  In either of
 * these cases it is a bug if the ref_count is not 0.  In other words,
 * if ref_count is > 0, then the list member must not be linked into
 * either free_list or dirty_list.
 *
 * The cache_node member is used to link the FMR into a cache bucket
 * (if caching is enabled).  This is independent of the reference
 * count of the FMR.  When a valid FMR is released, its ref_count is
 * decremented, and if ref_count reaches 0, the FMR is placed in
 * either free_list or dirty_list as appropriate.  However, it is not
 * removed from the cache and may be "revived" if a call to
 * ib_fmr_pool_map_phys() occurs before the FMR is remapped.  In
 * this case we just increment the ref_count and remove the FMR from
 * free_list/dirty_list.
 *
 * Before we remap an FMR from free_list, we remove it from the cache
 * (to prevent another user from obtaining a stale FMR).  When an FMR
 * is released, we add it to the tail of the free list, so that our
 * cache eviction policy is "least recently used."
 *
 * All manipulation of ref_count, list and cache_node is protected by
 * pool_lock to maintain consistency.
 */

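/*
 * Illustrative sketch (added commentary, not part of the original
 * driver): the cache "revive" path described above, as a consumer
 * would see it.  "pool", "pages", "npages" and "iova" are
 * hypothetical, and the pool is assumed to have been created with
 * params->cache enabled.
 *
 *      struct ib_pool_fmr *a, *b;
 *
 *      a = ib_fmr_pool_map_phys(pool, pages, npages, iova);
 *      ib_fmr_pool_unmap(a);   // ref_count drops to 0; "a" is put on
 *                              // free_list but stays in the cache
 *      b = ib_fmr_pool_map_phys(pool, pages, npages, iova);
 *      // same pages and iova: the cache lookup revives the FMR, so
 *      // b == a and no extra remap was consumed
 *      ib_fmr_pool_unmap(b);
 */
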
struct ib_fmr_pool {
        spinlock_t                pool_lock;

        int                       pool_size;
        int                       max_pages;
        int                       max_remaps;
        int                       dirty_watermark;
        int                       dirty_len;
        struct list_head          free_list;
        struct list_head          dirty_list;
        struct hlist_head        *cache_bucket;

        void                     (*flush_function)(struct ib_fmr_pool *pool,
                                                   void *              arg);
        void                     *flush_arg;

        struct task_struct       *thread;

        atomic_t                  req_ser;
        atomic_t                  flush_ser;

        wait_queue_head_t         force_wait;
};

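/*
 * Hash the address of an FMR's first page into a cache bucket index,
 * feeding the low and high 32 bits of the page address to jhash.
 */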
static inline u32 ib_fmr_hash(u64 first_page)
{
        return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
                IB_FMR_HASH_MASK;
}

/* Caller must hold pool_lock */
static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
                                                      u64 *page_list,
                                                      int  page_list_len,
                                                      u64  io_virtual_address)
{
        struct hlist_head *bucket;
        struct ib_pool_fmr *fmr;

        if (!pool->cache_bucket)
                return NULL;

        bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

        hlist_for_each_entry(fmr, bucket, cache_node)
                if (io_virtual_address == fmr->io_virtual_address &&
                    page_list_len      == fmr->page_list_len      &&
                    !memcmp(page_list, fmr->page_list,
                            page_list_len * sizeof *page_list))
                        return fmr;

        return NULL;
}

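/*
 * Unmap every FMR on the pool's dirty list with a single ib_unmap_fmr()
 * call and return the now-reusable FMRs to the free list.  Called from
 * the cleanup thread and from ib_destroy_fmr_pool().
 */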
static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
        int                 ret;
        struct ib_pool_fmr *fmr;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);

        spin_lock_irq(&pool->pool_lock);

        list_for_each_entry(fmr, &pool->dirty_list, list) {
                hlist_del_init(&fmr->cache_node);
                fmr->remap_count = 0;
                list_add_tail(&fmr->fmr->list, &fmr_list);

#ifdef DEBUG
                if (fmr->ref_count != 0) {
                        pr_warn(PFX "Unmapping FMR %p with ref count %d\n",
                                fmr, fmr->ref_count);
                }
#endif
        }

        list_splice_init(&pool->dirty_list, &unmap_list);
        pool->dirty_len = 0;

        spin_unlock_irq(&pool->pool_lock);

        if (list_empty(&unmap_list))
                return;

        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
                pr_warn(PFX "ib_unmap_fmr returned %d\n", ret);

        spin_lock_irq(&pool->pool_lock);
        list_splice(&unmap_list, &pool->free_list);
        spin_unlock_irq(&pool->pool_lock);
}

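/*
 * Pool cleanup thread: whenever the request serial number (req_ser)
 * runs ahead of the completion serial number (flush_ser), release the
 * dirty FMRs, advance flush_ser and wake any waiters.  The signed
 * difference of the counters is tested, rather than the raw values,
 * so the comparison stays correct across wraparound.
 */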
static int ib_fmr_cleanup_thread(void *pool_ptr)
{
        struct ib_fmr_pool *pool = pool_ptr;

        do {
                if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
                        ib_fmr_batch_release(pool);

                        atomic_inc(&pool->flush_ser);
                        wake_up_interruptible(&pool->force_wait);

                        if (pool->flush_function)
                                pool->flush_function(pool, pool->flush_arg);
                }

                set_current_state(TASK_INTERRUPTIBLE);
                if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
                    !kthread_should_stop())
                        schedule();
                __set_current_state(TASK_RUNNING);
        } while (!kthread_should_stop());

        return 0;
}

/**
 * ib_create_fmr_pool - Create an FMR pool
 * @pd: Protection domain for FMRs
 * @params: FMR pool parameters
 *
 * Create a pool of FMRs.  Return value is pointer to new pool or
 * error code if creation failed.
 */
struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
                                       struct ib_fmr_pool_param *params)
{
        struct ib_device   *device;
        struct ib_fmr_pool *pool;
        int i;
        int ret;
        int max_remaps;

        if (!params)
                return ERR_PTR(-EINVAL);

        device = pd->device;
        if (!device->alloc_fmr    || !device->dealloc_fmr  ||
            !device->map_phys_fmr || !device->unmap_fmr) {
                pr_info(PFX "Device %s does not support FMRs\n", device->name);
                return ERR_PTR(-ENOSYS);
        }

        if (!device->attrs.max_map_per_fmr)
                max_remaps = IB_FMR_MAX_REMAPS;
        else
                max_remaps = device->attrs.max_map_per_fmr;

        pool = kmalloc(sizeof *pool, GFP_KERNEL);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        pool->cache_bucket   = NULL;
        pool->flush_function = params->flush_function;
        pool->flush_arg      = params->flush_arg;

        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->dirty_list);

        if (params->cache) {
                pool->cache_bucket =
                        kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
                                GFP_KERNEL);
                if (!pool->cache_bucket) {
                        pr_warn(PFX "Failed to allocate cache in pool\n");
                        ret = -ENOMEM;
                        goto out_free_pool;
                }

                for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
                        INIT_HLIST_HEAD(pool->cache_bucket + i);
        }

        pool->pool_size       = 0;
        pool->max_pages       = params->max_pages_per_fmr;
        pool->max_remaps      = max_remaps;
        pool->dirty_watermark = params->dirty_watermark;
        pool->dirty_len       = 0;
        spin_lock_init(&pool->pool_lock);
        atomic_set(&pool->req_ser,   0);
        atomic_set(&pool->flush_ser, 0);
        init_waitqueue_head(&pool->force_wait);

        pool->thread = kthread_run(ib_fmr_cleanup_thread,
                                   pool,
                                   "ib_fmr(%s)",
                                   device->name);
        if (IS_ERR(pool->thread)) {
                pr_warn(PFX "couldn't start cleanup thread\n");
                ret = PTR_ERR(pool->thread);
                goto out_free_pool;
        }

        {
                struct ib_pool_fmr *fmr;
                struct ib_fmr_attr fmr_attr = {
                        .max_pages  = params->max_pages_per_fmr,
                        .max_maps   = pool->max_remaps,
                        .page_shift = params->page_shift
                };
                int bytes_per_fmr = sizeof *fmr;

                if (pool->cache_bucket)
                        bytes_per_fmr += params->max_pages_per_fmr * sizeof (u64);

                for (i = 0; i < params->pool_size; ++i) {
                        fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
                        if (!fmr)
                                goto out_fail;

                        fmr->pool             = pool;
                        fmr->remap_count      = 0;
                        fmr->ref_count        = 0;
                        INIT_HLIST_NODE(&fmr->cache_node);

                        fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
                        if (IS_ERR(fmr->fmr)) {
                                pr_warn(PFX "fmr_create failed for FMR %d\n",
                                        i);
                                kfree(fmr);
                                goto out_fail;
                        }

                        list_add_tail(&fmr->list, &pool->free_list);
                        ++pool->pool_size;
                }
        }

        return pool;

 out_free_pool:
        kfree(pool->cache_bucket);
        kfree(pool);

        return ERR_PTR(ret);

 out_fail:
        ib_destroy_fmr_pool(pool);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_fmr_pool);
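
/*
 * Usage sketch (added here as illustration; not from the original
 * source): creating a small cached pool on an existing protection
 * domain "pd".  The parameter values are illustrative only.
 *
 *      struct ib_fmr_pool *pool;
 *      struct ib_fmr_pool_param params = {
 *              .max_pages_per_fmr = 64,
 *              .page_shift        = PAGE_SHIFT,
 *              .access            = IB_ACCESS_LOCAL_WRITE |
 *                                   IB_ACCESS_REMOTE_READ,
 *              .pool_size         = 32,
 *              .dirty_watermark   = 8,
 *              .cache             = 1,
 *      };
 *
 *      pool = ib_create_fmr_pool(pd, &params);
 *      if (IS_ERR(pool))
 *              return PTR_ERR(pool);
 */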

/**
 * ib_destroy_fmr_pool - Free FMR pool
 * @pool: FMR pool to free
 *
 * Destroy an FMR pool and free all associated resources.
 */
void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
        struct ib_pool_fmr *fmr;
        struct ib_pool_fmr *tmp;
        LIST_HEAD(fmr_list);
        int                 i;

        kthread_stop(pool->thread);
        ib_fmr_batch_release(pool);

        i = 0;
        list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
                if (fmr->remap_count) {
                        INIT_LIST_HEAD(&fmr_list);
                        list_add_tail(&fmr->fmr->list, &fmr_list);
                        ib_unmap_fmr(&fmr_list);
                }
                ib_dealloc_fmr(fmr->fmr);
                list_del(&fmr->list);
                kfree(fmr);
                ++i;
        }

        if (i < pool->pool_size)
                pr_warn(PFX "pool still has %d regions registered\n",
                        pool->pool_size - i);

        kfree(pool->cache_bucket);
        kfree(pool);
}
EXPORT_SYMBOL(ib_destroy_fmr_pool);

/**
 * ib_flush_fmr_pool - Invalidate all unmapped FMRs
 * @pool: FMR pool to flush
 *
 * Ensure that all unmapped FMRs are fully invalidated.
 */
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
        int serial;
        struct ib_pool_fmr *fmr, *next;

        /*
         * The free_list holds FMRs that may have been used
         * but have not been remapped enough times to be dirty.
         * Put them on the dirty list now so that the cleanup
         * thread will reap them too.
         */
        spin_lock_irq(&pool->pool_lock);
        list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
                if (fmr->remap_count > 0)
                        list_move(&fmr->list, &pool->dirty_list);
        }
        spin_unlock_irq(&pool->pool_lock);

        serial = atomic_inc_return(&pool->req_ser);
        wake_up_process(pool->thread);

        if (wait_event_interruptible(pool->force_wait,
                                     atomic_read(&pool->flush_ser) - serial >= 0))
                return -EINTR;

        return 0;
}
EXPORT_SYMBOL(ib_flush_fmr_pool);
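
/*
 * Usage sketch (illustrative, not original code): forcing a flush and
 * propagating a signal interruption to the caller.
 *
 *      int err = ib_flush_fmr_pool(pool);
 *      if (err)                // -EINTR if the wait was interrupted
 *              return err;
 */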

/**
 * ib_fmr_pool_map_phys - Map an FMR from an FMR pool
 * @pool_handle: FMR pool to allocate FMR from
 * @page_list: List of pages to map
 * @list_len: Number of pages in @page_list
 * @io_virtual_address: I/O virtual address for new FMR
 *
 * Map an FMR from an FMR pool.
 */
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
                                         u64                *page_list,
                                         int                 list_len,
                                         u64                 io_virtual_address)
{
        struct ib_fmr_pool *pool = pool_handle;
        struct ib_pool_fmr *fmr;
        unsigned long       flags;
        int                 result;

        if (list_len < 1 || list_len > pool->max_pages)
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&pool->pool_lock, flags);
        fmr = ib_fmr_cache_lookup(pool,
                                  page_list,
                                  list_len,
                                  io_virtual_address);
        if (fmr) {
                /* found in cache */
                ++fmr->ref_count;
                if (fmr->ref_count == 1)
                        list_del(&fmr->list);

                spin_unlock_irqrestore(&pool->pool_lock, flags);

                return fmr;
        }

        if (list_empty(&pool->free_list)) {
                spin_unlock_irqrestore(&pool->pool_lock, flags);
                return ERR_PTR(-EAGAIN);
        }

        fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
        list_del(&fmr->list);
        hlist_del_init(&fmr->cache_node);
        spin_unlock_irqrestore(&pool->pool_lock, flags);

        result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
                                 io_virtual_address);

        if (result) {
                spin_lock_irqsave(&pool->pool_lock, flags);
                list_add(&fmr->list, &pool->free_list);
                spin_unlock_irqrestore(&pool->pool_lock, flags);

                pr_warn(PFX "fmr_map returns %d\n", result);

                return ERR_PTR(result);
        }

        ++fmr->remap_count;
        fmr->ref_count = 1;

        if (pool->cache_bucket) {
                fmr->io_virtual_address = io_virtual_address;
                fmr->page_list_len      = list_len;
                memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));

                spin_lock_irqsave(&pool->pool_lock, flags);
                hlist_add_head(&fmr->cache_node,
                               pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
                spin_unlock_irqrestore(&pool->pool_lock, flags);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_fmr_pool_map_phys);

/**
 * ib_fmr_pool_unmap - Unmap FMR
 * @fmr: FMR to unmap
 *
 * Unmap an FMR.  The FMR mapping may remain valid until the FMR is
 * reused (or until ib_flush_fmr_pool() is called).
 */
int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
{
        struct ib_fmr_pool *pool;
        unsigned long flags;

        pool = fmr->pool;

        spin_lock_irqsave(&pool->pool_lock, flags);

        --fmr->ref_count;
        if (!fmr->ref_count) {
                if (fmr->remap_count < pool->max_remaps) {
                        list_add_tail(&fmr->list, &pool->free_list);
                } else {
                        list_add_tail(&fmr->list, &pool->dirty_list);
                        if (++pool->dirty_len >= pool->dirty_watermark) {
                                atomic_inc(&pool->req_ser);
                                wake_up_process(pool->thread);
                        }
                }
        }

#ifdef DEBUG
        if (fmr->ref_count < 0)
                pr_warn(PFX "FMR %p has ref count %d < 0\n",
                        fmr, fmr->ref_count);
#endif

        spin_unlock_irqrestore(&pool->pool_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);
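
/*
 * Usage sketch (illustrative; "pool", "pages", "npages" and "iova" are
 * hypothetical): a map/unmap round trip.  Note that, as documented
 * above, the mapping may remain valid after the unmap until the FMR is
 * reused or the pool is flushed.
 *
 *      struct ib_pool_fmr *fmr;
 *
 *      fmr = ib_fmr_pool_map_phys(pool, pages, npages, iova);
 *      if (IS_ERR(fmr))
 *              return PTR_ERR(fmr);    // -EAGAIN when no FMR is free
 *
 *      // ... post work requests using fmr->fmr->lkey / rkey ...
 *
 *      ib_fmr_pool_unmap(fmr);
 */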