/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <dev/mlx5/driver.h>
#include <linux/module.h>
#include "mlx5_core.h"
32 static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
35 u32 in[MLX5_ST_SZ_DW(query_adapter_in)];
38 memset(in, 0, sizeof(in));
40 MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
42 err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
46 int mlx5_query_board_id(struct mlx5_core_dev *dev)
49 int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
52 out = kzalloc(outlen, GFP_KERNEL);
54 err = mlx5_cmd_query_adapter(dev, out, outlen);
59 MLX5_ADDR_OF(query_adapter_out, out,
60 query_adapter_struct.vsd_contd_psid),
61 MLX5_FLD_SZ_BYTES(query_adapter_out,
62 query_adapter_struct.vsd_contd_psid));
70 int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
73 int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
76 out = kzalloc(outlen, GFP_KERNEL);
78 err = mlx5_cmd_query_adapter(mdev, out, outlen);
82 *vendor_id = MLX5_GET(query_adapter_out, out,
83 query_adapter_struct.ieee_vendor_id);
90 EXPORT_SYMBOL(mlx5_core_query_vendor_id);
92 static int mlx5_core_query_special_contexts(struct mlx5_core_dev *dev)
94 u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)];
95 u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)];
98 memset(in, 0, sizeof(in));
99 memset(out, 0, sizeof(out));
101 MLX5_SET(query_special_contexts_in, in, opcode,
102 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
103 err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
108 dev->special_contexts.resd_lkey = MLX5_GET(query_special_contexts_out,
114 int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
118 err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
122 err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
126 if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
127 err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
128 HCA_CAP_OPMOD_GET_CUR);
131 err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
132 HCA_CAP_OPMOD_GET_MAX);
137 if (MLX5_CAP_GEN(dev, pg)) {
138 err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
139 HCA_CAP_OPMOD_GET_CUR);
142 err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
143 HCA_CAP_OPMOD_GET_MAX);
148 if (MLX5_CAP_GEN(dev, atomic)) {
149 err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
150 HCA_CAP_OPMOD_GET_CUR);
153 err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
154 HCA_CAP_OPMOD_GET_MAX);
159 if (MLX5_CAP_GEN(dev, roce)) {
160 err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
161 HCA_CAP_OPMOD_GET_CUR);
164 err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
165 HCA_CAP_OPMOD_GET_MAX);
170 if (MLX5_CAP_GEN(dev, nic_flow_table)) {
171 err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
172 HCA_CAP_OPMOD_GET_CUR);
175 err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
176 HCA_CAP_OPMOD_GET_MAX);
182 MLX5_CAP_GEN(dev, eswitch_flow_table)) {
183 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE,
184 HCA_CAP_OPMOD_GET_CUR);
187 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE,
188 HCA_CAP_OPMOD_GET_MAX);
193 if (MLX5_CAP_GEN(dev, vport_group_manager)) {
194 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH,
195 HCA_CAP_OPMOD_GET_CUR);
198 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH,
199 HCA_CAP_OPMOD_GET_MAX);
204 err = mlx5_core_query_special_contexts(dev);
211 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
213 u32 in[MLX5_ST_SZ_DW(init_hca_in)];
214 u32 out[MLX5_ST_SZ_DW(init_hca_out)];
216 memset(in, 0, sizeof(in));
218 MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
220 memset(out, 0, sizeof(out));
221 return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
225 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
227 u32 in[MLX5_ST_SZ_DW(teardown_hca_in)];
228 u32 out[MLX5_ST_SZ_DW(teardown_hca_out)];
230 memset(in, 0, sizeof(in));
232 MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
234 memset(out, 0, sizeof(out));
235 return mlx5_cmd_exec_check_status(dev, in, sizeof(in),