/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/in6.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"
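
/*
 * Note: this file implements the per-port caches kept by the IB core:
 * the GID table cache, the P_Key table cache and the LMC cache. RoCE
 * GID entries are written through the provider's add_gid/del_gid
 * callbacks, while plain IB entries are refreshed from
 * ib_query_gid()/ib_query_pkey() on port events.
 */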

struct ib_pkey_cache {
        int             table_len;
        u16             table[0];
};

struct ib_update_work {
        struct work_struct work;
        struct ib_device  *device;
        u8                 port_num;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;

enum gid_attr_find_mask {
        GID_ATTR_FIND_MASK_GID          = 1UL << 0,
        GID_ATTR_FIND_MASK_NETDEV       = 1UL << 1,
        GID_ATTR_FIND_MASK_DEFAULT      = 1UL << 2,
        GID_ATTR_FIND_MASK_GID_TYPE     = 1UL << 3,
};

enum gid_table_entry_props {
        GID_TABLE_ENTRY_INVALID         = 1UL << 0,
        GID_TABLE_ENTRY_DEFAULT         = 1UL << 1,
};

enum gid_table_write_action {
        GID_TABLE_WRITE_ACTION_ADD,
        GID_TABLE_WRITE_ACTION_DEL,
        /* MODIFY only updates the GID table. Currently only used by
         * ib_cache_update.
         */
        GID_TABLE_WRITE_ACTION_MODIFY
};

struct ib_gid_table_entry {
        unsigned long       props;
        union ib_gid        gid;
        struct ib_gid_attr  attr;
        void               *context;
};

struct ib_gid_table {
        int                  sz;
        /* In RoCE, adding a GID to the table requires:
         * (a) Find if this GID already exists.
         * (b) Find a free space.
         * (c) Write the new GID.
         *
         * Delete requires a different set of operations:
         * (a) Find the GID.
         * (b) Delete it.
         *
         * Add/delete should be carried out atomically.
         * This is done by locking this mutex from multiple
         * writers. We don't need this lock for IB, as the MAD
         * layer replaces all entries. All data_vec entries
         * are locked by this lock.
         */
        struct mutex         lock;
        /* This lock protects the table entries from being
         * read and written simultaneously.
         */
        rwlock_t             rwlock;
        struct ib_gid_table_entry *data_vec;
};
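
/*
 * Note on lock ordering, as seen in the call sites below: writers take
 * table->lock (the mutex) first and then table->rwlock via
 * write_lock_irq(); readers take only table->rwlock.
 */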

static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
        if (rdma_cap_roce_gid_table(ib_dev, port)) {
                struct ib_event event;

                event.device            = ib_dev;
                event.element.port_num  = port;
                event.event             = IB_EVENT_GID_CHANGE;

                ib_dispatch_event(&event);
        }
}

static const char * const gid_type_str[] = {
        [IB_GID_TYPE_IB]                = "IB/RoCE v1",
        [IB_GID_TYPE_ROCE_UDP_ENCAP]    = "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
        if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
                return gid_type_str[gid_type];

        return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

int ib_cache_gid_parse_type_str(const char *buf)
{
        unsigned int i;
        size_t len;
        int err = -EINVAL;

        len = strlen(buf);
        if (len == 0)
                return -EINVAL;

        if (buf[len - 1] == '\n')
                len--;

        for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
                if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
                    len == strlen(gid_type_str[i])) {
                        err = i;
                        break;
                }

        return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);

/* This function expects that rwlock will be write locked in all
 * scenarios and that lock will be locked in sleep-able (RoCE)
 * scenarios.
 */
static int write_gid(struct ib_device *ib_dev, u8 port,
                     struct ib_gid_table *table, int ix,
                     const union ib_gid *gid,
                     const struct ib_gid_attr *attr,
                     enum gid_table_write_action action,
                     bool default_gid)
        __releases(&table->rwlock) __acquires(&table->rwlock)
{
        int ret = 0;
        struct net_device *old_net_dev;
        enum ib_gid_type old_gid_type;

        /* in rdma_cap_roce_gid_table, this function should be protected by a
         * sleep-able lock.
         */

        if (rdma_cap_roce_gid_table(ib_dev, port)) {
                table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
                write_unlock_irq(&table->rwlock);
                /* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
                 * RoCE providers and thus only updates the cache.
                 */
                if (action == GID_TABLE_WRITE_ACTION_ADD)
                        ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
                                              &table->data_vec[ix].context);
                else if (action == GID_TABLE_WRITE_ACTION_DEL)
                        ret = ib_dev->del_gid(ib_dev, port, ix,
                                              &table->data_vec[ix].context);
                write_lock_irq(&table->rwlock);
        }

        old_net_dev = table->data_vec[ix].attr.ndev;
        old_gid_type = table->data_vec[ix].attr.gid_type;
        if (old_net_dev && old_net_dev != attr->ndev)
                dev_put(old_net_dev);
        /* if modify_gid failed, just delete the old gid */
        if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
                gid = &zgid;
                attr = &zattr;
                table->data_vec[ix].context = NULL;
        }

        memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
        memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
        if (default_gid) {
                table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
                if (action == GID_TABLE_WRITE_ACTION_DEL)
                        table->data_vec[ix].attr.gid_type = old_gid_type;
        }
        if (table->data_vec[ix].attr.ndev &&
            table->data_vec[ix].attr.ndev != old_net_dev)
                dev_hold(table->data_vec[ix].attr.ndev);

        table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

        return ret;
}

static int add_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   const union ib_gid *gid,
                   const struct ib_gid_attr *attr,
                   bool default_gid)
{
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
                      struct ib_gid_table *table, int ix,
                      const union ib_gid *gid,
                      const struct ib_gid_attr *attr,
                      bool default_gid)
{
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}
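
/*
 * Note: a delete is expressed through the same write_gid() path by
 * writing the all-zero zgid/zattr pair, so the provider callback and
 * the cached entry stay in sync.
 */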
static int del_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   bool default_gid)
{
        return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
                         GID_TABLE_WRITE_ACTION_DEL, default_gid);
}

/* rwlock should be read locked */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                    const struct ib_gid_attr *val, bool default_gid,
                    unsigned long mask, int *pempty)
{
        int i = 0;
        int found = -1;
        int empty = pempty ? -1 : 0;

        while (i < table->sz && (found < 0 || empty < 0)) {
                struct ib_gid_table_entry *data = &table->data_vec[i];
                struct ib_gid_attr *attr = &data->attr;
                int curr_index = i;

                i++;

                if (data->props & GID_TABLE_ENTRY_INVALID)
                        continue;

                if (empty < 0)
                        if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
                            !memcmp(attr, &zattr, sizeof(*attr)) &&
                            !data->props)
                                empty = curr_index;

                if (found >= 0)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
                    attr->gid_type != val->gid_type)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_GID &&
                    memcmp(gid, &data->gid, sizeof(*gid)))
                        continue;

                if (mask & GID_ATTR_FIND_MASK_NETDEV &&
                    attr->ndev != val->ndev)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
                    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
                    default_gid)
                        continue;

                found = curr_index;
        }

        if (pempty)
                *pempty = empty;

        return found;
}
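
/*
 * Note: find_gid() returns the index of the first entry matching every
 * attribute selected in @mask, or -1 if none matches. When @pempty is
 * non-NULL it also reports the first free slot, which the add paths
 * use to place a new GID.
 */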

static void addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
{
        if (dev->if_addrlen != ETH_ALEN)
                return;
        memcpy(eui, IF_LLADDR(dev), 3);
        memcpy(eui + 5, IF_LLADDR(dev) + 3, 3);

        /* NOTE: The scope ID is added by the GID to IP conversion */

        eui[3] = 0xFF;
        eui[4] = 0xFE;
        eui[0] ^= 2;
}

static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
        gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
        addrconf_ifid_eui48(&gid->raw[8], dev);
}
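
/*
 * Note: the default GID is thus the IPv6 link-local pattern, fe80::/64,
 * followed by a modified EUI-64 interface ID derived from the netdev's
 * MAC address.
 */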

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;
        int ret = 0;
        int empty;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (!memcmp(gid, &zgid, sizeof(*gid)))
                return -EINVAL;

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
                      GID_ATTR_FIND_MASK_GID_TYPE |
                      GID_ATTR_FIND_MASK_NETDEV, &empty);
        if (ix >= 0)
                goto out_unlock;

        if (empty < 0) {
                ret = -ENOSPC;
                goto out_unlock;
        }

        ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
        if (!ret)
                dispatch_gid_change_event(ib_dev, port);

out_unlock:
        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);
        return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;

        table = ports_table[port - rdma_start_port(ib_dev)];

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        ix = find_gid(table, gid, attr, false,
                      GID_ATTR_FIND_MASK_GID      |
                      GID_ATTR_FIND_MASK_GID_TYPE |
                      GID_ATTR_FIND_MASK_NETDEV   |
                      GID_ATTR_FIND_MASK_DEFAULT,
                      NULL);
        if (ix < 0)
                goto out_unlock;

        if (!del_gid(ib_dev, port, table, ix, false))
                dispatch_gid_change_event(ib_dev, port);

out_unlock:
        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);
        return 0;
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                     struct net_device *ndev)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;
        bool deleted = false;

        table = ports_table[port - rdma_start_port(ib_dev)];

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        for (ix = 0; ix < table->sz; ix++)
                if (table->data_vec[ix].attr.ndev == ndev)
                        if (!del_gid(ib_dev, port, table, ix,
                                     !!(table->data_vec[ix].props &
                                        GID_TABLE_ENTRY_DEFAULT)))
                                deleted = true;

        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);

        return 0;
}

static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
                              union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (index < 0 || index >= table->sz)
                return -EINVAL;

        if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
                return -EAGAIN;

        memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
        if (attr) {
                memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
                /* make sure network device is valid and attached */
                if (attr->ndev != NULL &&
                    (attr->ndev->if_flags & IFF_DYING) == 0 &&
                    attr->ndev->if_addr != NULL)
                        dev_hold(attr->ndev);
                else
                        attr->ndev = NULL;
        }

        return 0;
}
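
/*
 * Note: the IFF_DYING/if_addr checks above are the FreeBSD equivalent
 * of "the netdev is still registered"; a reference is taken only while
 * the interface is alive, otherwise the caller sees a NULL ndev.
 */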

static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
                                    const union ib_gid *gid,
                                    const struct ib_gid_attr *val,
                                    unsigned long mask,
                                    u8 *port, u16 *index)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        u8 p;
        int local_index;
        unsigned long flags;

        for (p = 0; p < ib_dev->phys_port_cnt; p++) {
                table = ports_table[p];
                read_lock_irqsave(&table->rwlock, flags);
                local_index = find_gid(table, gid, val, false, mask, NULL);
                if (local_index >= 0) {
                        if (index)
                                *index = local_index;
                        if (port)
                                *port = p + rdma_start_port(ib_dev);
                        read_unlock_irqrestore(&table->rwlock, flags);
                        return 0;
                }
                read_unlock_irqrestore(&table->rwlock, flags);
        }

        return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
                             const union ib_gid *gid,
                             enum ib_gid_type gid_type,
                             struct net_device *ndev, u8 *port,
                             u16 *index)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
                                        mask, port, index);
}

int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
                               const union ib_gid *gid,
                               enum ib_gid_type gid_type,
                               u8 port, struct net_device *ndev,
                               u16 *index)
{
        int local_index;
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
        unsigned long flags;

        if (port < rdma_start_port(ib_dev) ||
            port > rdma_end_port(ib_dev))
                return -ENOENT;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        read_lock_irqsave(&table->rwlock, flags);
        local_index = find_gid(table, gid, &val, false, mask, NULL);
        if (local_index >= 0) {
                if (index)
                        *index = local_index;
                read_unlock_irqrestore(&table->rwlock, flags);
                return 0;
        }

        read_unlock_irqrestore(&table->rwlock, flags);
        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);

/**
 * ib_find_gid_by_filter - Returns the GID table index where a specified
 * GID value occurs
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * of which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
                                       const union ib_gid *gid,
                                       u8 port,
                                       bool (*filter)(const union ib_gid *,
                                                      const struct ib_gid_attr *,
                                                      void *),
                                       void *context,
                                       u16 *index)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        unsigned int i;
        unsigned long flags;
        bool found = false;

        if (!ports_table)
                return -EOPNOTSUPP;

        if (port < rdma_start_port(ib_dev) ||
            port > rdma_end_port(ib_dev) ||
            !rdma_protocol_roce(ib_dev, port))
                return -EPROTONOSUPPORT;

        table = ports_table[port - rdma_start_port(ib_dev)];

        read_lock_irqsave(&table->rwlock, flags);
        for (i = 0; i < table->sz; i++) {
                struct ib_gid_attr attr;

                if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
                        continue;

                if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
                        continue;

                memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

                if (filter(gid, &attr, context)) {
                        found = true;
                        break;
                }
        }
        read_unlock_irqrestore(&table->rwlock, flags);

        if (!found)
                return -ENOENT;

        if (index)
                *index = i;
        return 0;
}
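
/*
 * Note: an illustrative (hypothetical) filter would match on the
 * entry's GID type, for example:
 *
 *      static bool match_rocev2(const union ib_gid *gid,
 *                               const struct ib_gid_attr *attr, void *ctx)
 *      {
 *              return attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
 *      }
 */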

static struct ib_gid_table *alloc_gid_table(int sz)
{
        struct ib_gid_table *table =
                kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

        if (!table)
                return NULL;

        table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
        if (!table->data_vec)
                goto err_free_table;

        mutex_init(&table->lock);

        table->sz = sz;
        rwlock_init(&table->rwlock);

        return table;

err_free_table:
        kfree(table);
        return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
        if (table) {
                kfree(table->data_vec);
                kfree(table);
        }
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
                                   struct ib_gid_table *table)
{
        int i;
        bool deleted = false;

        if (!table)
                return;

        write_lock_irq(&table->rwlock);
        for (i = 0; i < table->sz; ++i) {
                if (memcmp(&table->data_vec[i].gid, &zgid,
                           sizeof(table->data_vec[i].gid)))
                        if (!del_gid(ib_dev, port, table, i,
                                     !!(table->data_vec[i].props &
                                        GID_TABLE_ENTRY_DEFAULT)))
                                deleted = true;
        }
        write_unlock_irq(&table->rwlock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
                                  struct net_device *ndev,
                                  unsigned long gid_type_mask,
                                  enum ib_cache_gid_default_mode mode)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        union ib_gid gid;
        struct ib_gid_attr gid_attr;
        struct ib_gid_attr zattr_type = zattr;
        struct ib_gid_table *table;
        unsigned int gid_type;

        table = ports_table[port - rdma_start_port(ib_dev)];

        make_default_gid(ndev, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        /* The default GID is created from the unique GUID and the local
         * subnet prefix, as described in sections 4.1.1 and 3.5.10 of the
         * IB spec 1.3. Therefore don't create a RoCEv2 default GID from it,
         * since that would resemble an IPv6 GID based on the link-local
         * address while IPv6 is disabled in the kernel.
         */
#ifndef INET6
        gid_type_mask &= ~BIT(IB_GID_TYPE_ROCE_UDP_ENCAP);
#endif

        for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
                int ix;
                union ib_gid current_gid;
                struct ib_gid_attr current_gid_attr = {};

                if (1UL << gid_type & ~gid_type_mask)
                        continue;

                gid_attr.gid_type = gid_type;

                mutex_lock(&table->lock);
                write_lock_irq(&table->rwlock);
                ix = find_gid(table, NULL, &gid_attr, true,
                              GID_ATTR_FIND_MASK_GID_TYPE |
                              GID_ATTR_FIND_MASK_DEFAULT,
                              NULL);

                /* Couldn't find the default GID location */
                if (WARN_ON(ix < 0))
                        goto release;

                zattr_type.gid_type = gid_type;

                if (!__ib_cache_gid_get(ib_dev, port, ix,
                                        &current_gid, &current_gid_attr) &&
                    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
                    !memcmp(&gid, &current_gid, sizeof(gid)) &&
                    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
                        goto release;

                if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
                    memcmp(&current_gid_attr, &zattr_type,
                           sizeof(current_gid_attr))) {
                        if (del_gid(ib_dev, port, table, ix, true)) {
                                pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
                                        ix, gid.raw);
                                goto release;
                        }
                        dispatch_gid_change_event(ib_dev, port);
                }

                if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
                        if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
                                pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
                                        gid.raw);
                        else
                                dispatch_gid_change_event(ib_dev, port);
                }

release:
                if (current_gid_attr.ndev)
                        dev_put(current_gid_attr.ndev);
                write_unlock_irq(&table->rwlock);
                mutex_unlock(&table->lock);
        }
}

static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
                                     struct ib_gid_table *table)
{
        unsigned int i;
        unsigned long roce_gid_type_mask;
        unsigned int num_default_gids;
        unsigned int current_gid = 0;

        roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
        num_default_gids = hweight_long(roce_gid_type_mask);
        for (i = 0; i < num_default_gids && i < table->sz; i++) {
                struct ib_gid_table_entry *entry =
                        &table->data_vec[i];

                entry->props |= GID_TABLE_ENTRY_DEFAULT;
                current_gid = find_next_bit(&roce_gid_type_mask,
                                            BITS_PER_LONG,
                                            current_gid);
                entry->attr.gid_type = current_gid++;
        }

        return 0;
}
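
/*
 * Note: one table slot is reserved per RoCE GID type the port supports
 * (the hweight of the type mask); each reserved entry is flagged
 * GID_TABLE_ENTRY_DEFAULT and pre-assigned the matching gid_type.
 */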

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
        u8 port;
        struct ib_gid_table **table;
        int err = 0;

        table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);

        if (!table) {
                pr_warn("failed to allocate ib gid cache for %s\n",
                        ib_dev->name);
                return -ENOMEM;
        }

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                u8 rdma_port = port + rdma_start_port(ib_dev);

                table[port] =
                        alloc_gid_table(
                                ib_dev->port_immutable[rdma_port].gid_tbl_len);
                if (!table[port]) {
                        err = -ENOMEM;
                        goto rollback_table_setup;
                }

                err = gid_table_reserve_default(ib_dev,
                                                port + rdma_start_port(ib_dev),
                                                table[port]);
                if (err)
                        goto rollback_table_setup;
        }

        ib_dev->cache.gid_cache = table;
        return 0;

rollback_table_setup:
        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table[port]);
                release_gid_table(table[port]);
        }

        kfree(table);
        return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
        struct ib_gid_table **table = ib_dev->cache.gid_cache;
        u8 port;

        if (!table)
                return;

        for (port = 0; port < ib_dev->phys_port_cnt; port++)
                release_gid_table(table[port]);

        kfree(table);
        ib_dev->cache.gid_cache = NULL;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
        struct ib_gid_table **table = ib_dev->cache.gid_cache;
        u8 port;

        if (!table)
                return;

        for (port = 0; port < ib_dev->phys_port_cnt; port++)
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table[port]);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
        int err;

        err = _gid_table_setup_one(ib_dev);

        if (err)
                return err;

        err = roce_rescan_device(ib_dev);

        if (err) {
                gid_table_cleanup_one(ib_dev);
                gid_table_release_one(ib_dev);
        }

        return err;
}

int ib_get_cached_gid(struct ib_device *device,
                      u8                port_num,
                      int               index,
                      union ib_gid     *gid,
                      struct ib_gid_attr *gid_attr)
{
        int res;
        unsigned long flags;
        struct ib_gid_table **ports_table = device->cache.gid_cache;
        struct ib_gid_table *table;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        /* index the port table only after the range check above */
        table = ports_table[port_num - rdma_start_port(device)];

        read_lock_irqsave(&table->rwlock, flags);
        res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
        read_unlock_irqrestore(&table->rwlock, flags);

        return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);

int ib_find_cached_gid(struct ib_device *device,
                       const union ib_gid *gid,
                       enum ib_gid_type gid_type,
                       struct net_device *ndev,
                       u8               *port_num,
                       u16              *index)
{
        return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_find_gid_by_filter(struct ib_device *device,
                          const union ib_gid *gid,
                          u8 port_num,
                          bool (*filter)(const union ib_gid *gid,
                                         const struct ib_gid_attr *,
                                         void *),
                          void *context, u16 *index)
{
        /* Only the RoCE GID table supports filter functions */
        if (!rdma_cap_roce_gid_table(device, port_num) && filter)
                return -EPROTONOSUPPORT;

        return ib_cache_gid_find_by_filter(device, gid,
                                           port_num, filter,
                                           context, index);
}
EXPORT_SYMBOL(ib_find_gid_by_filter);

int ib_get_cached_pkey(struct ib_device *device,
                       u8                port_num,
                       int               index,
                       u16              *pkey)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *pkey = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);
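
/*
 * Note: in ib_find_cached_pkey() below, bit 15 (0x8000) of a P_Key is
 * the full-membership bit. The search compares only the low 15 bits
 * and prefers a full-membership entry, falling back to a
 * partial-membership match if that is all the table holds.
 */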
int ib_find_cached_pkey(struct ib_device *device,
                        u8                port_num,
                        u16               pkey,
                        u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;
        int partial_ix = -1;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
                        if (cache->table[i] & 0x8000) {
                                *index = i;
                                ret = 0;
                                break;
                        } else
                                partial_ix = i;
                }

        if (ret && partial_ix >= 0) {
                *index = partial_ix;
                ret = 0;
        }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

int ib_find_exact_cached_pkey(struct ib_device *device,
                              u8                port_num,
                              u16               pkey,
                              u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if (cache->table[i] == pkey) {
                        *index = i;
                        ret = 0;
                        break;
                }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
                      u8                port_num,
                      u8               *lmc)
{
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

static void ib_cache_update(struct ib_device *device,
                            u8                port)
{
        struct ib_port_attr       *tprops = NULL;
        struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
        struct ib_gid_cache {
                int             table_len;
                union ib_gid    table[0];
        }                         *gid_cache = NULL;
        int                        i;
        int                        ret;
        struct ib_gid_table       *table;
        struct ib_gid_table      **ports_table = device->cache.gid_cache;
        bool                       use_roce_gid_table =
                                        rdma_cap_roce_gid_table(device, port);

        if (port < rdma_start_port(device) || port > rdma_end_port(device))
                return;

        table = ports_table[port - rdma_start_port(device)];

        tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
        if (!tprops)
                return;

        ret = ib_query_port(device, port, tprops);
        if (ret) {
                pr_warn("ib_query_port failed (%d) for %s\n",
                        ret, device->name);
                goto err;
        }

        pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
                             sizeof *pkey_cache->table, GFP_KERNEL);
        if (!pkey_cache)
                goto err;

        pkey_cache->table_len = tprops->pkey_tbl_len;

        if (!use_roce_gid_table) {
                gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
                                    sizeof(*gid_cache->table), GFP_KERNEL);
                if (!gid_cache)
                        goto err;

                gid_cache->table_len = tprops->gid_tbl_len;
        }

        for (i = 0; i < pkey_cache->table_len; ++i) {
                ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
                if (ret) {
                        pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
                                ret, device->name, i);
                        goto err;
                }
        }

        if (!use_roce_gid_table) {
                for (i = 0; i < gid_cache->table_len; ++i) {
                        ret = ib_query_gid(device, port, i,
                                           gid_cache->table + i, NULL);
                        if (ret) {
                                pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
                                        ret, device->name, i);
                                goto err;
                        }
                }
        }

        write_lock_irq(&device->cache.lock);

        old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];

        device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
        if (!use_roce_gid_table) {
                write_lock(&table->rwlock);
                for (i = 0; i < gid_cache->table_len; i++) {
                        modify_gid(device, port, table, i, gid_cache->table + i,
                                   &zattr, false);
                }
                write_unlock(&table->rwlock);
        }

        device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

        write_unlock_irq(&device->cache.lock);

        kfree(gid_cache);
        kfree(old_pkey_cache);
        kfree(tprops);
        return;

err:
        kfree(pkey_cache);
        kfree(gid_cache);
        kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
        struct ib_update_work *work =
                container_of(_work, struct ib_update_work, work);

        ib_cache_update(work->device, work->port_num);
        kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
                           struct ib_event *event)
{
        struct ib_update_work *work;

        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER ||
            event->event == IB_EVENT_GID_CHANGE) {
                work = kmalloc(sizeof *work, GFP_ATOMIC);
                if (work) {
                        INIT_WORK(&work->work, ib_cache_task);
                        work->device   = event->device;
                        work->port_num = event->element.port_num;
                        queue_work(ib_wq, &work->work);
                }
        }
}
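
/*
 * Note: ib_cache_event() may run in atomic context, hence the
 * GFP_ATOMIC allocation; the expensive port queries are deferred to
 * ib_wq via ib_cache_task().
 */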
int ib_cache_setup_one(struct ib_device *device)
{
        int p;
        int err;

        rwlock_init(&device->cache.lock);

        device->cache.pkey_cache =
                kzalloc(sizeof *device->cache.pkey_cache *
                        (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
        device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
                                          (rdma_end_port(device) -
                                           rdma_start_port(device) + 1),
                                          GFP_KERNEL);
        if (!device->cache.pkey_cache ||
            !device->cache.lmc_cache) {
                pr_warn("Couldn't allocate cache for %s\n", device->name);
                return -ENOMEM;
        }

        err = gid_table_setup_one(device);
        if (err)
                /* Allocated memory will be cleaned in the release function */
                return err;

        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                ib_cache_update(device, p + rdma_start_port(device));

        INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
                              device, ib_cache_event);
        err = ib_register_event_handler(&device->cache.event_handler);
        if (err)
                goto err;

        return 0;

err:
        gid_table_cleanup_one(device);
        return err;
}

void ib_cache_release_one(struct ib_device *device)
{
        int p;

        /*
         * The release function frees all the cache elements.
         * This function should be called as part of freeing
         * all the device's resources when the cache could no
         * longer be accessed.
         */
        if (device->cache.pkey_cache)
                for (p = 0;
                     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                        kfree(device->cache.pkey_cache[p]);

        gid_table_release_one(device);
        kfree(device->cache.pkey_cache);
        kfree(device->cache.lmc_cache);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
        /* The cleanup function unregisters the event handler,
         * waits for all in-progress workqueue elements and cleans
         * up the GID cache. This function should be called after
         * the device was removed from the devices list and all
         * clients were removed, so the cache exists but is
         * non-functional and shouldn't be updated anymore.
         */
        ib_unregister_event_handler(&device->cache.event_handler);
        flush_workqueue(ib_wq);
        gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
        roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
        roce_gid_mgmt_cleanup();
}