2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
/* Default IPv6 hop limit used when the caller does not supply one. */
30 #define IPV6_DEFAULT_HOPLIMIT 64
/*
 * mlx5_ib_resolve_grh() - derive a destination MAC address from the GRH DGID.
 * @ah_attr:  address-handle attributes; only grh.dgid is read here.
 * @mac:      output buffer receiving the resolved Ethernet MAC.
 * @is_mcast: out-flag for multicast destinations — presumably set in the
 *            truncated remainder of this function; not visible here.
 *
 * The DGID is copied into an in6 address and classified: link-local GIDs
 * map directly to a unicast MAC, multicast GIDs to a multicast MAC.
 * NOTE(review): the non-link-local, non-multicast path and the return
 * value are outside the visible extract — confirm against the full file.
 */
32 int mlx5_ib_resolve_grh(const struct ib_ah_attr *ah_attr, u8 *mac, int *is_mcast)
39 memcpy(&in6, ah_attr->grh.dgid.raw, sizeof in6);
40 if (rdma_link_local_addr(&in6)) {
/* Link-local GID: the MAC is embedded in the GID itself. */
41 rdma_get_ll_mac(&in6, mac);
42 } else if (rdma_is_multicast_addr(&in6)) {
/* Multicast GID: synthesize the corresponding multicast MAC. */
43 rdma_get_mcast_mac(&in6, mac);
/*
 * create_ib_ah() - fill a mlx5 address vector (ah->av) from ib_ah_attr.
 * @dev:     mlx5 device the AH belongs to.
 * @ah_attr: verbs address-handle attributes to translate.
 * @ah:      pre-allocated mlx5 AH whose ->av is populated in place.
 * @ll:      link layer of the target port (IB vs Ethernet/RoCE).
 *
 * GRH fields (GID, flow label, SGID index, hop limit, traffic class) are
 * packed first when IB_AH_GRH is set; flow_label occupies the low 20 bits
 * of grh_gid_fl and sgid_index bits 20+, matching the unpacking done by
 * mlx5_ib_query_ah().  NOTE(review): the GRH-valid bit (1 << 30) read back
 * by query_ah is presumably OR'ed in on a line missing from this extract.
 */
53 struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
54 struct ib_ah_attr *ah_attr,
55 struct mlx5_ib_ah *ah, enum rdma_link_layer ll)
60 if (ah_attr->ah_flags & IB_AH_GRH) {
61 memcpy(ah->av.rgid, &ah_attr->grh.dgid, 16);
62 ah->av.grh_gid_fl = cpu_to_be32(ah_attr->grh.flow_label |
64 ah_attr->grh.sgid_index << 20);
65 ah->av.hop_limit = ah_attr->grh.hop_limit;
66 ah->av.tclass = ah_attr->grh.traffic_class;
/* static_rate lives in the high nibble of stat_rate_sl; SL fills the rest below. */
69 ah->av.stat_rate_sl = (ah_attr->static_rate << 4);
71 if (ll == IB_LINK_LAYER_ETHERNET) {
/* RoCE: GID type lookup, then resolve the destination MAC and UDP sport. */
72 err = mlx5_get_roce_gid_type(dev, ah_attr->port_num,
73 ah_attr->grh.sgid_index,
78 mlx5_ib_resolve_grh(ah_attr, ah->av.rmac, NULL);
79 ah->av.udp_sport = mlx5_get_roce_udp_sport(
82 ah_attr->grh.sgid_index,
/* Ethernet encodes a 3-bit SL shifted left by one (bit 0 reserved). */
84 ah->av.stat_rate_sl |= (ah_attr->sl & 0x7) << 1;
85 ah->av.hop_limit = ah_attr->grh.hop_limit;
86 /* TODO: initialize other eth fields */
/* IB link layer: LID-routed addressing with a 4-bit SL in the low nibble. */
88 ah->av.rlid = cpu_to_be16(ah_attr->dlid);
89 ah->av.fl_mlid = ah_attr->src_path_bits & 0x7f;
90 ah->av.stat_rate_sl |= (ah_attr->sl & 0xf);
/*
 * mlx5_ib_create_ah() - verbs create-AH entry point for mlx5.
 * @pd:      protection domain the AH is created on.
 * @ah_attr: address attributes to encode into the new AH.
 *
 * Allocates the mlx5 AH (GFP_ATOMIC — callable from atomic context),
 * rejects Ethernet ports without a GRH (RoCE requires one; the -EINVAL
 * preset in @ret presumably covers that path), and delegates the address
 * vector packing to create_ib_ah().
 * NOTE(review): the ENOMEM early return's `if (!ah)` guard and the use of
 * @ret on the error path fall on lines missing from this extract.
 */
96 struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
98 struct mlx5_ib_ah *ah;
99 struct mlx5_ib_dev *dev = to_mdev(pd->device);
100 enum rdma_link_layer ll;
101 struct ib_ah *ret = ERR_PTR(-EINVAL);
103 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
105 return ERR_PTR(-ENOMEM);
107 ll = pd->device->get_link_layer(pd->device, ah_attr->port_num);
/* RoCE (Ethernet link layer) mandates a GRH to resolve the L2 address. */
109 if (ll == IB_LINK_LAYER_ETHERNET && !(ah_attr->ah_flags & IB_AH_GRH))
112 return create_ib_ah(dev, ah_attr, ah, ll); /* never fails */
/*
 * mlx5_ib_query_ah() - decode a mlx5 address vector back into ib_ah_attr.
 * @ibah:    the address handle to query.
 * @ah_attr: output attributes, zeroed first then filled from ah->av.
 *
 * Exact inverse of the packing in create_ib_ah(): bit 30 of grh_gid_fl is
 * the GRH-valid flag, bits 20-27 hold the SGID index, and the low 20 bits
 * hold the flow label.  stat_rate_sl splits into static_rate (high nibble)
 * and SL (low nibble).
 */
119 int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
121 struct mlx5_ib_ah *ah = to_mah(ibah);
124 memset(ah_attr, 0, sizeof(*ah_attr));
126 tmp = be32_to_cpu(ah->av.grh_gid_fl);
/* Bit 30 set => the AH was created with a GRH; unpack its fields. */
127 if (tmp & (1 << 30)) {
128 ah_attr->ah_flags = IB_AH_GRH;
129 ah_attr->grh.sgid_index = (tmp >> 20) & 0xff;
130 ah_attr->grh.flow_label = tmp & 0xfffff;
131 memcpy(&ah_attr->grh.dgid, ah->av.rgid, 16);
132 ah_attr->grh.hop_limit = ah->av.hop_limit;
133 ah_attr->grh.traffic_class = ah->av.tclass;
135 ah_attr->dlid = be16_to_cpu(ah->av.rlid);
136 ah_attr->static_rate = ah->av.stat_rate_sl >> 4;
137 ah_attr->sl = ah->av.stat_rate_sl & 0xf;
142 int mlx5_ib_destroy_ah(struct ib_ah *ah)