/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
28 #include <dev/mlx5/driver.h>
29 #include <linux/module.h>
30 #include "mlx5_core.h"
32 static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
35 u32 in[MLX5_ST_SZ_DW(query_adapter_in)];
38 memset(in, 0, sizeof(in));
40 MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
42 err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
46 int mlx5_query_board_id(struct mlx5_core_dev *dev)
49 int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
52 out = kzalloc(outlen, GFP_KERNEL);
54 err = mlx5_cmd_query_adapter(dev, out, outlen);
59 MLX5_ADDR_OF(query_adapter_out, out,
60 query_adapter_struct.vsd_contd_psid),
61 MLX5_FLD_SZ_BYTES(query_adapter_out,
62 query_adapter_struct.vsd_contd_psid));
70 int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
73 int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
76 out = kzalloc(outlen, GFP_KERNEL);
78 err = mlx5_cmd_query_adapter(mdev, out, outlen);
82 *vendor_id = MLX5_GET(query_adapter_out, out,
83 query_adapter_struct.ieee_vendor_id);
90 EXPORT_SYMBOL(mlx5_core_query_vendor_id);
92 static int mlx5_core_query_special_contexts(struct mlx5_core_dev *dev)
94 u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)];
95 u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)];
98 memset(in, 0, sizeof(in));
99 memset(out, 0, sizeof(out));
101 MLX5_SET(query_special_contexts_in, in, opcode,
102 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
103 err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
108 dev->special_contexts.resd_lkey = MLX5_GET(query_special_contexts_out,
114 int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
118 err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
122 err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
126 if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
127 err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
128 HCA_CAP_OPMOD_GET_CUR);
131 err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
132 HCA_CAP_OPMOD_GET_MAX);
137 if (MLX5_CAP_GEN(dev, pg)) {
138 err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
139 HCA_CAP_OPMOD_GET_CUR);
142 err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
143 HCA_CAP_OPMOD_GET_MAX);
148 if (MLX5_CAP_GEN(dev, atomic)) {
149 err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
150 HCA_CAP_OPMOD_GET_CUR);
153 err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
154 HCA_CAP_OPMOD_GET_MAX);
159 if (MLX5_CAP_GEN(dev, roce)) {
160 err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
161 HCA_CAP_OPMOD_GET_CUR);
164 err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
165 HCA_CAP_OPMOD_GET_MAX);
170 if ((MLX5_CAP_GEN(dev, port_type) ==
171 MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET &&
172 MLX5_CAP_GEN(dev, nic_flow_table)) ||
173 (MLX5_CAP_GEN(dev, port_type) == MLX5_CMD_HCA_CAP_PORT_TYPE_IB &&
174 MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) {
175 err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
176 HCA_CAP_OPMOD_GET_CUR);
179 err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
180 HCA_CAP_OPMOD_GET_MAX);
186 MLX5_CAP_GEN(dev, eswitch_flow_table)) {
187 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE,
188 HCA_CAP_OPMOD_GET_CUR);
191 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE,
192 HCA_CAP_OPMOD_GET_MAX);
197 if (MLX5_CAP_GEN(dev, vport_group_manager)) {
198 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH,
199 HCA_CAP_OPMOD_GET_CUR);
202 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH,
203 HCA_CAP_OPMOD_GET_MAX);
208 if (MLX5_CAP_GEN(dev, snapshot)) {
209 err = mlx5_core_get_caps(dev, MLX5_CAP_SNAPSHOT,
210 HCA_CAP_OPMOD_GET_CUR);
213 err = mlx5_core_get_caps(dev, MLX5_CAP_SNAPSHOT,
214 HCA_CAP_OPMOD_GET_MAX);
219 if (MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
220 err = mlx5_core_get_caps(dev, MLX5_CAP_EOIB_OFFLOADS,
221 HCA_CAP_OPMOD_GET_CUR);
224 err = mlx5_core_get_caps(dev, MLX5_CAP_EOIB_OFFLOADS,
225 HCA_CAP_OPMOD_GET_MAX);
230 if (MLX5_CAP_GEN(dev, debug)) {
231 err = mlx5_core_get_caps(dev, MLX5_CAP_DEBUG,
232 HCA_CAP_OPMOD_GET_CUR);
235 err = mlx5_core_get_caps(dev, MLX5_CAP_DEBUG,
236 HCA_CAP_OPMOD_GET_MAX);
241 if (MLX5_CAP_GEN(dev, qos)) {
242 err = mlx5_core_get_caps(dev, MLX5_CAP_QOS,
243 HCA_CAP_OPMOD_GET_CUR);
246 err = mlx5_core_get_caps(dev, MLX5_CAP_QOS,
247 HCA_CAP_OPMOD_GET_MAX);
252 err = mlx5_core_query_special_contexts(dev);
259 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
261 u32 in[MLX5_ST_SZ_DW(init_hca_in)];
262 u32 out[MLX5_ST_SZ_DW(init_hca_out)];
264 memset(in, 0, sizeof(in));
266 MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
268 memset(out, 0, sizeof(out));
269 return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
273 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
275 u32 in[MLX5_ST_SZ_DW(teardown_hca_in)];
276 u32 out[MLX5_ST_SZ_DW(teardown_hca_out)];
278 memset(in, 0, sizeof(in));
280 MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
282 memset(out, 0, sizeof(out));
283 return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
/*
 * Enable or disable DC CNAK tracing via SET_DC_CNAK_TRACE.
 *
 * @enable: nonzero to enable tracing (encoded as bit 7 of the mailbox
 *          `enable` byte — `!!enable << 7`); zero to disable.
 * The trailing parameter(s) of the signature are not visible in this
 * fragment; presumably a u64 trace-buffer address written to in->pa —
 * TODO confirm against the full file.
 *
 * NOTE(review): this function runs past the end of the visible source;
 * the kfree(in)/return path after mlx5_cmd_status_to_err() is not shown.
 */
287 int mlx5_core_set_dc_cnak_trace(struct mlx5_core_dev *dev, int enable,
290 struct mlx5_cmd_set_dc_cnak_mbox_in *in;
291 struct mlx5_cmd_set_dc_cnak_mbox_out out;
/* Heap-allocate the input mailbox; the output mailbox lives on the stack. */
294 in = kzalloc(sizeof(*in), GFP_KERNEL);
298 memset(&out, 0, sizeof(out));
299 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_DC_CNAK_TRACE);
/* Firmware expects the enable flag in the MSB of this byte. */
300 in->enable = !!enable << 7;
301 in->pa = cpu_to_be64(addr);
302 err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
/* Transport succeeded; now translate the FW status in the reply header. */
307 err = mlx5_cmd_status_to_err(&out.hdr);