/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <dev/mlx5/vport.h>

#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "mlx5_ib.h"
struct net_device *mlx5_ib_get_netdev(struct ib_device *ib_dev, u8 port)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_dev);

	return mlx5_get_protocol_dev(dev->mdev, MLX5_INTERFACE_PROTOCOL_ETH);
}
static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
				     struct net_device *ndev,
				     void *mlx5_addr)
{
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
	char *mlx5_addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
					       source_l3_address);
	void *mlx5_addr_mac = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
					   source_mac_47_32);
	union ib_gid zgid;
	u16 vtag;

	/* The all-zero GID marks an invalid entry; leave the payload zeroed. */
	memset(&zgid, 0, sizeof(zgid));
	if (0 == memcmp(gid, &zgid, sizeof(zgid)))
		return;

	ether_addr_copy(mlx5_addr_mac, IF_LLADDR(ndev));

	if (VLAN_TAG(ndev, &vtag) == 0) {
		MLX5_SET_RA(mlx5_addr, vlan_valid, 1);
		MLX5_SET_RA(mlx5_addr, vlan_id, vtag);
	}

#ifndef MLX5_USE_ROCE_VERSION_2
	MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1);

	memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
#else
	MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2);

	if (ipv6_addr_v4mapped((void *)gid)) {
		MLX5_SET_RA(mlx5_addr, roce_l3_type,
			    MLX5_ROCE_L3_TYPE_IPV4);
		memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
	} else {
		MLX5_SET_RA(mlx5_addr, roce_l3_type,
			    MLX5_ROCE_L3_TYPE_IPV6);
		memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
	}
#endif
}
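/*
 * Worked example (illustration only): a RoCE v2 entry for the IPv4
 * address 192.0.2.1 arrives as the v4-mapped GID ::ffff:192.0.2.1,
 * i.e. gid->raw[] = { 0, ..., 0, 0xff, 0xff, 192, 0, 2, 1 }.
 * ipv6_addr_v4mapped() matches the ::ffff:0:0/96 prefix, so only
 * gid->raw[12..15] land in bytes 12..15 of source_l3_address and the
 * entry is typed MLX5_ROCE_L3_TYPE_IPV4; a native IPv6 GID is copied
 * whole as MLX5_ROCE_L3_TYPE_IPV6.
 */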
int modify_gid_roce(struct ib_device *ib_dev, u8 port, unsigned int index,
		    const union ib_gid *gid, struct net_device *ndev)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_dev);
	u32 in[MLX5_ST_SZ_DW(set_roce_address_in)];
	u32 out[MLX5_ST_SZ_DW(set_roce_address_out)];
	void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);

	memset(in, 0, sizeof(in));

	ib_gid_to_mlx5_roce_addr(gid, ndev, in_addr);

	MLX5_SET(set_roce_address_in, in, roce_address_index, index);
	MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);

	memset(out, 0, sizeof(out));
	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}
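/*
 * Usage sketch (an assumption, not the in-tree call path): a GID table
 * callback would program entry "index" for the bound net_device as
 *
 *	err = modify_gid_roce(ib_dev, port, index, &gid, ndev);
 *
 * and clear it again by passing the all-zero GID, for which
 * ib_gid_to_mlx5_roce_addr() leaves the SET_ROCE_ADDRESS payload
 * zeroed, i.e. an invalid address.
 */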
static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
				    u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5_PROT_MASK(MLX5_1000BASE_CX_SGMII):
	case MLX5_PROT_MASK(MLX5_1000BASE_KX):
	case MLX5_PROT_MASK(MLX5_100BASE_TX):
	case MLX5_PROT_MASK(MLX5_1000BASE_T):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5_PROT_MASK(MLX5_10GBASE_T):
	case MLX5_PROT_MASK(MLX5_10GBASE_CX4):
	case MLX5_PROT_MASK(MLX5_10GBASE_KX4):
	case MLX5_PROT_MASK(MLX5_10GBASE_KR):
	case MLX5_PROT_MASK(MLX5_10GBASE_CR):
	case MLX5_PROT_MASK(MLX5_10GBASE_SR):
	case MLX5_PROT_MASK(MLX5_10GBASE_ER):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5_PROT_MASK(MLX5_25GBASE_CR):
	case MLX5_PROT_MASK(MLX5_25GBASE_KR):
	case MLX5_PROT_MASK(MLX5_25GBASE_SR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5_PROT_MASK(MLX5_40GBASE_CR4):
	case MLX5_PROT_MASK(MLX5_40GBASE_KR4):
	case MLX5_PROT_MASK(MLX5_40GBASE_SR4):
	case MLX5_PROT_MASK(MLX5_40GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5_PROT_MASK(MLX5_50GBASE_CR2):
	case MLX5_PROT_MASK(MLX5_50GBASE_KR2):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR;
		break;
	case MLX5_PROT_MASK(MLX5_56GBASE_R4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR;
		break;
	case MLX5_PROT_MASK(MLX5_100GBASE_CR4):
	case MLX5_PROT_MASK(MLX5_100GBASE_SR4):
	case MLX5_PROT_MASK(MLX5_100GBASE_KR4):
	case MLX5_PROT_MASK(MLX5_100GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
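/*
 * The mapping above follows "Ethernet rate = IB per-lane speed x lane
 * count": 40GBASE-*4 reports as QDR (10 Gb/s) x 4X, 56GBASE-R4 as FDR
 * (14 Gb/s) x 4X, and 100GBASE-*4 as EDR (25 Gb/s) x 4X, while the
 * single-lane 10G and 25G modes report as QDR x 1X and EDR x 1X.
 */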
static int mlx5_query_roce_port_ptys(struct ib_device *ib_dev,
				     struct ib_port_attr *props, u8 port)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_dev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ptys_reg *ptys;
	int err;

	ptys = kzalloc(sizeof(*ptys), GFP_KERNEL);
	if (!ptys)
		return -ENOMEM;

	ptys->proto_mask |= MLX5_PTYS_EN;
	ptys->local_port = port;

	err = mlx5_core_access_ptys(mdev, ptys, 0 /* read */);
	if (err)
		goto out;

	err = translate_eth_proto_oper(ptys->eth_proto_oper,
				       &props->active_speed,
				       &props->active_width);
out:
	kfree(ptys);
	return err;
}
int mlx5_query_port_roce(struct ib_device *ib_dev, u8 port,
			 struct ib_port_attr *props)
{
	struct net_device *netdev = mlx5_ib_get_netdev(ib_dev, port);
	struct mlx5_ib_dev *dev = to_mdev(ib_dev);
	enum ib_mtu netdev_ib_mtu;

	memset(props, 0, sizeof(*props));

	props->port_cap_flags |= IB_PORT_CM_SUP;

	props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
					   roce_address_table_size);
	props->max_mtu = IB_MTU_4096;
	props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
	props->pkey_tbl_len = 1;
	props->state = IB_PORT_DOWN;
	props->phys_state = 3;		/* Disabled */

	if (mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev,
						(u16 *)&props->qkey_viol_cntr))
		printf("mlx5_ib: WARN: "
		    "%s failed to query qkey violations counter\n", __func__);

	/* Without an attached net_device the port stays DOWN. */
	if (netdev == NULL)
		return 0;

	if (netif_running(netdev) && netif_carrier_ok(netdev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;	/* LinkUp */
	}

	netdev_ib_mtu = iboe_get_mtu(netdev->if_mtu);
	props->active_mtu = min(props->max_mtu, netdev_ib_mtu);

	mlx5_query_roce_port_ptys(ib_dev, props, port);

	return 0;
}
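/*
 * Worked example: iboe_get_mtu() subtracts the RoCE transport header
 * overhead (GRH/BTH/etc.) from the Ethernet MTU before rounding down to
 * an IB MTU enum, so a standard 1500-byte if_mtu yields IB_MTU_1024
 * while a 9000-byte jumbo if_mtu yields IB_MTU_4096, which the min()
 * above then clamps to max_mtu.
 */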
__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port,
			       int index, __be16 ah_s_udp_port)
{
#ifndef MLX5_USE_ROCE_VERSION_2
	return 0;
#else
	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
#endif
}
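/*
 * Note on the value above: RoCE v2 encapsulates IB traffic in UDP with
 * the well-known destination port 4791; the source port, taken here
 * from the device's advertised r_roce_min_src_udp_port, mainly serves
 * to spread flows across ECMP paths and RSS queues. RoCE v1 carries no
 * UDP header at all, hence the 0 in that configuration.
 */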
int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port,
			   int index, int *gid_type)
{
	union ib_gid gid;
	int ret;

	ret = ib_get_cached_gid(&dev->ib_dev, port, index, &gid);
	if (ret)
		return ret;

	/*
	 * Assumption: the GID type mirrors the compile-time RoCE version
	 * selection used throughout this file.
	 */
#ifndef MLX5_USE_ROCE_VERSION_2
	*gid_type = MLX5_ROCE_VERSION_1;
#else
	*gid_type = MLX5_ROCE_VERSION_2;
#endif

	return 0;
}
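/*
 * Usage sketch (an assumption, for illustration): address-handle setup
 * would combine the two helpers above roughly as
 *
 *	int gid_type;
 *	__be16 sport;
 *
 *	if (mlx5_get_roce_gid_type(dev, port, index, &gid_type) == 0)
 *		sport = mlx5_get_roce_udp_sport(dev, port, index, 0);
 *
 * selecting the wire format (v1 vs. v2 UDP encapsulation) that matches
 * the compile-time RoCE version used throughout this file.
 */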