/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define	LINUXKPI_PARAM_PREFIX mlx4_

#include <linux/etherdevice.h>
#include <dev/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>

enum {
	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
};

extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);

static u8 enable_qos;
module_param(enable_qos, byte, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
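/*
 * Illustrative note (not in the original source): with the linuxkpi
 * parameter prefix defined above, this parameter is expected to show
 * up on FreeBSD as a tunable along the lines of
 * "compat.linuxkpi.mlx4_enable_qos"; the exact name depends on the
 * linuxkpi version in use.
 */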

#define MLX4_GET(dest, source, offset)				      \
	do {							      \
		void *__p = (char *) (source) + (offset);	      \
		switch (sizeof (dest)) {			      \
		case 1: (dest) = *(u8 *) __p;	    break;	      \
		case 2: (dest) = be16_to_cpup(__p); break;	      \
		case 4: (dest) = be32_to_cpup(__p); break;	      \
		case 8: (dest) = be64_to_cpup(__p); break;	      \
		default: __buggy_use_of_MLX4_GET();		      \
		}						      \
	} while (0)

#define MLX4_PUT(dest, source, offset)				      \
	do {							      \
		void *__d = ((char *) (dest) + (offset));	      \
		switch (sizeof(source)) {			      \
		case 1: *(u8 *) __d = (source);		       break; \
		case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
		case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
		case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT();		      \
		}						      \
	} while (0)
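
/*
 * Usage sketch (illustrative, not part of the original source): both
 * macros dispatch on the size of the CPU-side variable and convert
 * between the big-endian mailbox layout and host endianness; an
 * unsupported size becomes a link error via the undefined
 * __buggy_use_of_MLX4_GET/PUT() externs above.
 *
 *	u16 lim;
 *	MLX4_GET(lim, mailbox->buf, 0x02);	// read a __be16 at offset 2
 *	lim += 1;
 *	MLX4_PUT(mailbox->buf, lim, 0x02);	// write it back as cpu_to_be16
 */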

static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
{
	static const char *fname[] = {
		[ 0] = "RC transport",
		[ 1] = "UC transport",
		[ 2] = "UD transport",
		[ 3] = "XRC transport",
		[ 4] = "reliable multicast",
		[ 5] = "FCoIB support",
		[ 6] = "SRQ support",
		[ 7] = "IPoIB checksum offload",
		[ 8] = "P_Key violation counter",
		[ 9] = "Q_Key violation counter",
		[15] = "Big LSO headers",
		[16] = "MW support",
		[17] = "APM support",
		[18] = "Atomic ops support",
		[19] = "Raw multicast support",
		[20] = "Address vector port checking support",
		[21] = "UD multicast support",
		[24] = "Demand paging support",
		[25] = "Router support",
		[30] = "IBoE support",
		[32] = "Unicast loopback support",
		[34] = "FCS header control",
		[38] = "Wake On LAN support",
		[40] = "UDP RSS support",
		[41] = "Unicast VEP steering support",
		[42] = "Multicast VEP steering support",
		[44] = "Cross-channel (sync_qp) operations support",
		[48] = "Counters support",
		[59] = "Port management change event support",
		[60] = "eSwitch support",
		[61] = "64 byte EQE support",
		[62] = "64 byte CQE support",
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}

static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
{
	static const char * const fname[] = {
		[0] = "RSS support",
		[1] = "RSS Toeplitz Hash Function support",
		[2] = "RSS XOR Hash Function support",
		[3] = "Device managed flow steering support",
		[4] = "FSM (MAC anti-spoofing) support",
		[5] = "VST (control vlan insertion/stripping) support",
		[6] = "Dynamic QP updates support",
		[7] = "Loopback source checks support",
		[8] = "Device managed flow steering IPoIB support",
		[9] = "ETS configuration support",
		[10] = "ETH backplane autoneg report",
		[11] = "Ethernet Flow control statistics support",
		[12] = "Recoverable error events support",
		[13] = "Time stamping support",
		[14] = "Report driver version to FW support"
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}

int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err;

#define MOD_STAT_CFG_IN_SIZE		0x100

#define MOD_STAT_CFG_PG_SZ_M_OFFSET	0x002
#define MOD_STAT_CFG_PG_SZ_OFFSET	0x003

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	memset(inbox, 0, MOD_STAT_CFG_IN_SIZE);

	MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u8 field, port;
	u32 size;
	int err = 0;

#define QUERY_FUNC_CAP_FLAGS_OFFSET		0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET		0x4
#define QUERY_FUNC_CAP_FMR_OFFSET		0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP	0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP	0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP	0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP	0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP	0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP	0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30

#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x54
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x58
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x60
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x68

#define QUERY_FUNC_CAP_FMR_FLAG			0x80
#define QUERY_FUNC_CAP_FLAG_RDMA		0x40
#define QUERY_FUNC_CAP_FLAG_ETH			0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS		0x10

/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
#define QUERY_FUNC_CAP_FLAGS0_OFFSET		0x8
#define QUERY_FUNC_CAP_FLAGS1_OFFSET		0xc
#define QUERY_FUNC_CAP_COUNTER_INDEX_OFFSET	0xd

#define QUERY_FUNC_CAP_QP0_TUNNEL		0x10
#define QUERY_FUNC_CAP_QP0_PROXY		0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL		0x18
#define QUERY_FUNC_CAP_QP1_PROXY		0x1c

#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC	0x40
#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN	0x80
#define QUERY_FUNC_CAP_PROPS_DEF_COUNTER	0x20

#define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80

	if (vhcr->op_modifier == 1) {
		port = vhcr->in_modifier; /* phys-port = logical-port */
		MLX4_PUT(outbox->buf, port, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);

		field = 0;
		/* ensure that phy_wqe_gid bit is not set */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS0_OFFSET);

		/* ensure force vlan and force mac bits are not set
		 * and that default counter bit is set
		 */
		field = QUERY_FUNC_CAP_PROPS_DEF_COUNTER; /* def counter */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);

		/* There is always default counter legal or sink counter */
		field = mlx4_get_default_counter_index(dev, slave, vhcr->in_modifier);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_COUNTER_INDEX_OFFSET);

		/* size is now the QP number */
		size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);
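
		/*
		 * Illustrative arithmetic (not in the original source):
		 * each function owns eight special QPs per port, so with
		 * base_tunnel_sqpn = B, slave 2 and port 1 this yields
		 * QP0 tunnel = B + 8*2 + 1 - 1 = B + 16; the QP1 tunnel
		 * below is two higher (B + 18), and the proxy QPs follow
		 * the same layout from base_proxy_sqpn.
		 */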

		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);

		size = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_PROXY);

		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
	} else if (vhcr->op_modifier == 0) {
		/* enable rdma and ethernet interfaces, and new quota locations */
		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
			 QUERY_FUNC_CAP_FLAG_QUOTAS);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);

		field = dev->caps.num_ports;
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);

		size = dev->caps.function_caps; /* set PF behaviours */
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);

		field = 0; /* protected FMR support not available as yet */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
		size = dev->caps.num_qps;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
		size = dev->caps.num_srqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
		size = dev->caps.num_cqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);

		size = dev->caps.num_eqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);

		size = dev->caps.reserved_eqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
		size = dev->caps.num_mpts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
		size = dev->caps.num_mtts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);

		size = dev->caps.num_mgms + dev->caps.num_amgms;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
	} else
		err = -EINVAL;

	return err;
}

int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
			struct mlx4_func_cap *func_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field, op_modifier;
	u32 size;
	int err = 0, quotas = 0;

	op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier,
			   MLX4_CMD_QUERY_FUNC_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	if (!op_modifier) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
		if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
			mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}

		func_cap->flags = field;
		quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);

		MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
		func_cap->num_ports = field;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
		func_cap->pf_context_behaviour = size;
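
		/*
		 * Illustrative note (not in the original source): each
		 * quota word carries the count in its low 24 bits, hence
		 * the "& 0xFFFFFF" masks below; e.g. a raw word of
		 * 0x01000080 yields a quota of 128.
		 */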

		if (quotas) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
			func_cap->mcg_quota = size & 0xFFFFFF;
		} else {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
			func_cap->mcg_quota = size & 0xFFFFFF;
		}
		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
		func_cap->max_eq = size & 0xFFFFFF;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		func_cap->reserved_eq = size & 0xFFFFFF;

		goto out;
	}

	/* logical port query */
	if (gen_or_port > dev->caps.num_ports) {
		err = -EINVAL;
		goto out;
	}

	if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
		if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
			mlx4_err(dev, "VLAN is enforced on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}

		if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
			mlx4_err(dev, "Force mac is enabled on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
		if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
			mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	}

	MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
	func_cap->physical_port = field;
	if (func_cap->physical_port != gen_or_port) {
		err = -EINVAL;
		goto out;
	}

	MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
	if (field & QUERY_FUNC_CAP_PROPS_DEF_COUNTER) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_COUNTER_INDEX_OFFSET);
		func_cap->def_counter_index = field;
	} else {
		func_cap->def_counter_index = MLX4_SINK_COUNTER_INDEX;
	}

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
	func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
	func_cap->qp0_proxy_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
	func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
	func_cap->qp1_proxy_qpn = size & 0xFFFFFF;

	/* All other resources are allocated by the master, but we still report
	 * 'num' and 'reserved' capabilities as follows:
	 * - num remains the maximum resource index
	 * - 'num - reserved' is the total available objects of a resource, but
	 *   resource indices may be less than 'reserved'
	 * TODO: set per-resource quotas */

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}

int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32, flags, ext_flags;
	u16 size;
	u16 stat_rate;
	int err;
	int i;

#define QUERY_DEV_CAP_OUT_SIZE			0x100
#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET		0x11
#define QUERY_DEV_CAP_RSVD_QP_OFFSET		0x12
#define QUERY_DEV_CAP_MAX_QP_OFFSET		0x13
#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET		0x14
#define QUERY_DEV_CAP_MAX_SRQ_OFFSET		0x15
#define QUERY_DEV_CAP_RSVD_EEC_OFFSET		0x16
#define QUERY_DEV_CAP_MAX_EEC_OFFSET		0x17
#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET		0x19
#define QUERY_DEV_CAP_RSVD_CQ_OFFSET		0x1a
#define QUERY_DEV_CAP_MAX_CQ_OFFSET		0x1b
#define QUERY_DEV_CAP_MAX_MPT_OFFSET		0x1d
#define QUERY_DEV_CAP_RSVD_EQ_OFFSET		0x1e
#define QUERY_DEV_CAP_MAX_EQ_OFFSET		0x1f
#define QUERY_DEV_CAP_RSVD_MTT_OFFSET		0x20
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
#define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
#define QUERY_DEV_CAP_RSS_OFFSET		0x2e
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET		0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET		0x37
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET		0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET	0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET		0x40
#define QUERY_DEV_CAP_SYNC_QP_OFFSET		0x42
#define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
#define QUERY_DEV_CAP_PAGE_SZ_OFFSET		0x4b
#define QUERY_DEV_CAP_BF_OFFSET			0x4c
#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET	0x4d
#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET	0x4e
#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET	0x4f
#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET		0x51
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET	0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET		0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET	0x56
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET		0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET		0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET		0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
#define QUERY_DEV_CAP_MAX_BASIC_COUNTERS_OFFSET	0x68
#define QUERY_DEV_CAP_MAX_EXTENDED_COUNTERS_OFFSET	0x6c
#define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET	0x70
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET	0x70
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET	0x74
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET	0x86
#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET	0x88
#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET	0x8a
#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET	0x8c
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
#define QUERY_DEV_CAP_ETS_CFG_OFFSET		0x9c
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
	dev_cap->reserved_qps = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
	dev_cap->max_qps = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
	dev_cap->reserved_srqs = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
	dev_cap->max_srqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
	dev_cap->max_cq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
	dev_cap->reserved_cqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
	dev_cap->max_cqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
	dev_cap->max_mpts = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
	dev_cap->reserved_eqs = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
	dev_cap->max_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
	dev_cap->reserved_mtts = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
	dev_cap->max_mrw_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
	dev_cap->reserved_mrws = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
	dev_cap->max_mtt_seg = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
	field &= 0x1f;
	if (!field)
		dev_cap->max_gso_sz = 0;
	else
		dev_cap->max_gso_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
	field &= 0xf;
	if (field) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
		dev_cap->max_rss_tbl_sz = 1 << field;
	} else
		dev_cap->max_rss_tbl_sz = 0;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
	dev_cap->max_rdma_global = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
	dev_cap->local_ca_ack_delay = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
	dev_cap->num_ports = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
	dev_cap->max_msg_sz = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
	dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
	dev_cap->fs_max_num_qp_per_entry = field;
	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
	dev_cap->stat_rate_support = stat_rate;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
	dev_cap->flags = flags | (u64)ext_flags << 32;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_SYNC_QP_OFFSET);
	dev_cap->sync_qp = field & 0x10;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
	dev_cap->reserved_uars = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
	dev_cap->min_page_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
	if (field & 0x80) {
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
		dev_cap->bf_reg_size = 1 << (field & 0x1f);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
			field = 3;
		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
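		/*
		 * Illustrative arithmetic (not in the original source):
		 * with 4 KB pages and 512-byte BlueFlame registers at
		 * most 8 registers fit in a page, so a larger reported
		 * log2 count is clamped to 3 (1 << 3 = 8 regs/page).
		 */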
671 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
672 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
674 dev_cap->bf_reg_size = 0;
675 mlx4_dbg(dev, "BlueFlame not available\n");

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
	dev_cap->max_sq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
	dev_cap->max_sq_desc_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
	dev_cap->max_qp_per_mcg = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
	dev_cap->reserved_mgms = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
	dev_cap->max_mcgs = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
	dev_cap->reserved_pds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
	dev_cap->max_pds = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
	dev_cap->reserved_xrcds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
	dev_cap->max_xrcds = 1 << (field & 0x1f);

	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
	dev_cap->rdmarc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
	dev_cap->qpc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
	dev_cap->aux_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
	dev_cap->altc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
	dev_cap->eqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
	dev_cap->cqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
	dev_cap->srq_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
	dev_cap->cmpt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
	dev_cap->mtt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
	dev_cap->dmpt_entry_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
	dev_cap->max_srq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
	dev_cap->max_qp_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
	dev_cap->resize_srq = field & 1;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
	dev_cap->max_rq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
	dev_cap->max_rq_desc_sz = size;

	MLX4_GET(dev_cap->bmme_flags, outbox,
		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	MLX4_GET(dev_cap->reserved_lkey, outbox,
		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETS_CFG_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
	if (field32 & (1 << 7))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
	if (field32 & (1 << 8))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW;
	if (field32 & (1 << 13))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;

	MLX4_GET(dev_cap->max_icm_sz, outbox,
		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		MLX4_GET(dev_cap->max_basic_counters, outbox,
			 QUERY_DEV_CAP_MAX_BASIC_COUNTERS_OFFSET);
	/* FW reports 256, but the real value is 255 */
	dev_cap->max_basic_counters = min_t(u32, dev_cap->max_basic_counters, 255);
	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS_EXT)
		MLX4_GET(dev_cap->max_extended_counters, outbox,
			 QUERY_DEV_CAP_MAX_EXTENDED_COUNTERS_OFFSET);

	MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	if (field32 & (1 << 16))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
	if (field32 & (1 << 19))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_LB_SRC_CHK;
	if (field32 & (1 << 20))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
	if (field32 & (1 << 26))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		for (i = 1; i <= dev_cap->num_ports; ++i) {
			MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
			dev_cap->max_vl[i] = field >> 4;
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
			dev_cap->ib_mtu[i] = field >> 4;
			dev_cap->max_port_width[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
			dev_cap->max_gids[i] = 1 << (field & 0xf);
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
			dev_cap->max_pkeys[i] = 1 << (field & 0xf);
		}
	} else {
#define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
#define QUERY_PORT_MTU_OFFSET			0x01
#define QUERY_PORT_ETH_MTU_OFFSET		0x02
#define QUERY_PORT_WIDTH_OFFSET			0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
#define QUERY_PORT_MAX_VL_OFFSET		0x0b
#define QUERY_PORT_MAC_OFFSET			0x10
#define QUERY_PORT_TRANS_VENDOR_OFFSET		0x18
#define QUERY_PORT_WAVELENGTH_OFFSET		0x1c
#define QUERY_PORT_TRANS_CODE_OFFSET		0x20

		for (i = 1; i <= dev_cap->num_ports; ++i) {
			err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
					   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
			if (err)
				goto out;

			MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
			dev_cap->supported_port_types[i] = field & 3;
			dev_cap->suggested_type[i] = (field >> 3) & 1;
			dev_cap->default_sense[i] = (field >> 4) & 1;
			MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
			dev_cap->ib_mtu[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
			dev_cap->max_port_width[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
			dev_cap->max_gids[i] = 1 << (field >> 4);
			dev_cap->max_pkeys[i] = 1 << (field & 0xf);
			MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
			dev_cap->max_vl[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
			dev_cap->log_max_macs[i] = field & 0xf;
			dev_cap->log_max_vlans[i] = field >> 4;
			MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
			MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
			MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
			dev_cap->trans_type[i] = field32 >> 24;
			dev_cap->vendor_oui[i] = field32 & 0xffffff;
			MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
			MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
		}
	}
822 mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
823 dev_cap->bmme_flags, dev_cap->reserved_lkey);

	/*
	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
	 * we can't use any EQs whose doorbell falls on that page,
	 * even if the EQ itself isn't reserved.
	 */
	dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
				    dev_cap->reserved_eqs);
833 mlx4_dbg(dev, "Max ICM size %lld MB\n",
834 (unsigned long long) dev_cap->max_icm_sz >> 20);
835 mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
836 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
837 mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
838 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
839 mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
840 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
841 mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
842 dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
843 mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
844 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
845 mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
846 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
847 mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
848 dev_cap->max_pds, dev_cap->reserved_mgms);
849 mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
850 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
851 mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
852 dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
853 dev_cap->max_port_width[1]);
854 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
855 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
856 mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
857 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
858 mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
859 mlx4_dbg(dev, "Max basic counters: %d\n", dev_cap->max_basic_counters);
860 mlx4_dbg(dev, "Max extended counters: %d\n", dev_cap->max_extended_counters);
861 mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);

	dump_dev_cap_flags(dev, dev_cap->flags);
	dump_dev_cap_flags2(dev, dev_cap->flags2);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	u64 flags;
	int err = 0;
	u8 field;

	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* add port mng change event capability unconditionally to slaves */
	MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
	MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);

	/* For guests, report BlueFlame disabled */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);

	/* turn off device-managed steering capability if not enabled */
	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(field, outbox->buf,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
		field &= 0x7f;
		MLX4_PUT(outbox->buf, field,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	}

	return 0;
}

int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 def_mac;
	u8 port_type;
	u16 short_field;
	int err;
	int admin_link_state;

#define MLX4_VF_PORT_NO_LINK_SENSE_MASK	0xE0
#define MLX4_PORT_LINK_UP_MASK		0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET	0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET	0x0e

	err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);

	if (!err && dev->caps.function != slave) {
		/* set slave default_mac address to be zero MAC */
		def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);

		/* get port type - currently only eth is enabled */
		MLX4_GET(port_type, outbox->buf,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		/* No link sensing allowed */
		port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
		/* set port type to currently operating port type */
		port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);

		admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
		if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
			port_type |= MLX4_PORT_LINK_UP_MASK;
		else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
			port_type &= ~MLX4_PORT_LINK_UP_MASK;

		MLX4_PUT(outbox->buf, port_type,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
			short_field = mlx4_get_slave_num_gids(dev, slave);
		else
			short_field = 1; /* slave max gids */
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_GID_OFFSET);

		short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	}

	return err;
}

int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
				    int *gid_tbl_len, int *pkey_tbl_len)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
	*gid_tbl_len = field;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	*pkey_tbl_len = field;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);

int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;
	int i;
	int err = 0;
	int ts = 0, tc = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
	pages = mailbox->buf;

	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
		if (lg < MLX4_ICM_PAGE_SHIFT) {
			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
				  MLX4_ICM_PAGE_SIZE,
				  (unsigned long long) mlx4_icm_addr(&iter),
				  mlx4_icm_size(&iter));
			err = -EINVAL;
			goto out;
		}

		for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
			if (virt != -1) {
				pages[nent * 2] = cpu_to_be64(virt);
				virt += 1 << lg;
			}

			pages[nent * 2 + 1] =
				cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
					    (lg - MLX4_ICM_PAGE_SHIFT));
			ts += 1 << (lg - 10);
			++tc;
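
			/*
			 * Illustrative encoding example (not in the
			 * original source): a 256 KB chunk aligned to its
			 * size gives lg = 18, so the entry's low bits
			 * carry lg - MLX4_ICM_PAGE_SHIFT = 18 - 12 = 6
			 * (64 ICM pages), and ts grows by
			 * 1 << (18 - 10) = 256 KB.
			 */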

			if (++nent == MLX4_MAILBOX_SIZE / 16) {
				err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
					       MLX4_CMD_TIME_CLASS_B,
					       MLX4_CMD_NATIVE);
				if (err)
					goto out;
				nent = 0;
			}
		}
	}

	if (nent)
		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	switch (op) {
	case MLX4_CMD_MAP_FA:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM_AUX:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
			 tc, ts, (unsigned long long) virt - (ts << 10));
		break;
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
}

int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_RUN_FW(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

int mlx4_QUERY_FW(struct mlx4_dev *dev)
{
	struct mlx4_fw *fw = &mlx4_priv(dev)->fw;
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err = 0;
	u64 fw_ver;
	u16 cmd_if_rev;
	u8 lg;

#define QUERY_FW_OUT_SIZE		0x100
#define QUERY_FW_VER_OFFSET		0x00
#define QUERY_FW_PPF_ID			0x09
#define QUERY_FW_CMD_IF_REV_OFFSET	0x0a
#define QUERY_FW_MAX_CMD_OFFSET		0x0f
#define QUERY_FW_ERR_START_OFFSET	0x30
#define QUERY_FW_ERR_SIZE_OFFSET	0x38
#define QUERY_FW_ERR_BAR_OFFSET		0x3c

#define QUERY_FW_SIZE_OFFSET		0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET	0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET	0x28

#define QUERY_FW_COMM_BASE_OFFSET	0x40
#define QUERY_FW_COMM_BAR_OFFSET	0x48

#define QUERY_FW_CLOCK_OFFSET		0x50
#define QUERY_FW_CLOCK_BAR		0x58

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
	 */
	dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
		((fw_ver & 0xffff0000ull) >> 16) |
		((fw_ver & 0x0000ffffull) << 16);
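
	/*
	 * Illustrative example (not in the original source): a raw
	 * fw_ver of 0x0002_0942_0007 (major 2, subminor 0x942,
	 * minor 7) becomes 0x0002_0007_0942, i.e. version 2.7.2370.
	 */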

	MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
	dev->caps.function = lg;

	if (mlx4_is_slave(dev))
		goto out;

	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
		mlx4_err(dev, "Installed FW has unsupported command interface revision %d.\n",
			 cmd_if_rev);
		mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
			 (int) (dev->caps.fw_ver >> 32),
			 (int) (dev->caps.fw_ver >> 16) & 0xffff,
			 (int) dev->caps.fw_ver & 0xffff);
		mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
			 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
		err = -ENODEV;
		goto out;
	}

	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
		dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;

	MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	cmd->max_cmds = 1 << lg;

	mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
		 (int) (dev->caps.fw_ver >> 32),
		 (int) (dev->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->caps.fw_ver & 0xffff,
		 cmd_if_rev, cmd->max_cmds);

	MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
	MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
	MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET);
	fw->catas_bar = (fw->catas_bar >> 6) * 2;

	mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
		 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);

	MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
	MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
	MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
	fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;

	MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
	MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET);
	fw->comm_bar = (fw->comm_bar >> 6) * 2;
	mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
		 fw->comm_bar, (unsigned long long)fw->comm_base);
	mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);

	MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
	MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR);
	fw->clock_bar = (fw->clock_bar >> 6) * 2;
	mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
		 fw->clock_bar, (unsigned long long)fw->clock_offset);

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	fw->fw_pages =
		ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
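
	/*
	 * Illustrative example (not in the original source): with 16 KB
	 * system pages and 4 KB ICM pages, 13 ICM pages round up to
	 * ALIGN(13, 4) >> (14 - 12) = 16 >> 2 = 4 system pages.
	 */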
1217 mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
1218 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);
1221 mlx4_free_cmd_mailbox(dev, mailbox);

int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	u8 *outbuf;
	int err;

	outbuf = outbox->buf;
	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* for slaves, set pci PPF ID to invalid and zero out everything
	 * else except FW version */
	outbuf[0] = outbuf[1] = 0;
	memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
	outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;

	return 0;
}

static void get_board_id(void *vsd, char *board_id, char *vsdstr)
{
	int i;

#define VSD_OFFSET_SIG1		0x00
#define VSD_OFFSET_SIG2		0xde
#define VSD_OFFSET_MLX_BOARD_ID	0xd0
#define VSD_OFFSET_TS_BOARD_ID	0x20
#define VSD_LEN			0xd0

#define VSD_SIGNATURE_TOPSPIN	0x5ad

	memset(vsdstr, 0, MLX4_VSD_LEN);

	for (i = 0; i < VSD_LEN / 4; i++)
		((u32 *)vsdstr)[i] =
			swab32(*(u32 *)(vsd + i * 4));

	memset(board_id, 0, MLX4_BOARD_ID_LEN);

	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
	} else {
		/*
		 * The board ID is a string but the firmware byte
		 * swaps each 4-byte word before passing it back to
		 * us.  Therefore we need to swab it before printing.
		 */
		for (i = 0; i < 4; ++i)
			((u32 *) board_id)[i] =
				swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
	}
}

int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err;

#define QUERY_ADAPTER_OUT_SIZE			0x100
#define QUERY_ADAPTER_INTA_PIN_OFFSET		0x10
#define QUERY_ADAPTER_VSD_OFFSET		0x20
#define QUERY_ADAPTER_VSD_VENDOR_ID_OFFSET	0x1e

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);

	adapter->vsd_vendor_id = be16_to_cpup((u16 *)outbox +
				QUERY_ADAPTER_VSD_VENDOR_ID_OFFSET / 2);

	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
		     adapter->board_id, adapter->vsd);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *inbox;
	u32 mw_enable;
	int err;

#define INIT_HCA_IN_SIZE		 0x200
#define INIT_HCA_DRV_NAME_FOR_FW_MAX_SIZE 64
#define INIT_HCA_VERSION_OFFSET		 0x000
#define INIT_HCA_VERSION		 2
#define INIT_HCA_CACHELINE_SZ_OFFSET	 0x0e
#define INIT_HCA_FLAGS_OFFSET		 0x014
#define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
#define INIT_HCA_QPC_OFFSET		 0x020
#define INIT_HCA_QPC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x10)
#define INIT_HCA_LOG_QP_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x17)
#define INIT_HCA_SRQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x28)
#define INIT_HCA_LOG_SRQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x2f)
#define INIT_HCA_CQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x30)
#define INIT_HCA_LOG_CQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x37)
#define INIT_HCA_EQE_CQE_OFFSETS	 (INIT_HCA_QPC_OFFSET + 0x38)
#define INIT_HCA_ALTC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x40)
#define INIT_HCA_AUXC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x50)
#define INIT_HCA_EQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x60)
#define INIT_HCA_LOG_EQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x67)
#define INIT_HCA_RDMARC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x70)
#define INIT_HCA_LOG_RD_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET		 0x0c0
#define INIT_HCA_MC_BASE_OFFSET		 (INIT_HCA_MCAST_OFFSET + 0x00)
#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x12)
#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x16)
#define INIT_HCA_UC_STEERING_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x18)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN	0x6
#define INIT_HCA_DRIVER_VERSION_OFFSET	 0x140
#define INIT_HCA_FS_PARAM_OFFSET	 0x1d0
#define INIT_HCA_FS_BASE_OFFSET		 (INIT_HCA_FS_PARAM_OFFSET + 0x00)
#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x12)
#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
#define INIT_HCA_FS_ETH_BITS_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x21)
#define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
#define INIT_HCA_FS_IB_BITS_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x25)
#define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x26)
#define INIT_HCA_TPT_OFFSET		 0x0f0
#define INIT_HCA_DMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x00)
#define INIT_HCA_TPT_MW_OFFSET		 (INIT_HCA_TPT_OFFSET + 0x08)
#define INIT_HCA_TPT_MW_ENABLE		 (1 << 31)
#define INIT_HCA_LOG_MPT_SZ_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x0b)
#define INIT_HCA_MTT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x10)
#define INIT_HCA_CMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x18)
#define INIT_HCA_UAR_OFFSET		 0x120
#define INIT_HCA_LOG_UAR_SZ_OFFSET	 (INIT_HCA_UAR_OFFSET + 0x0a)
#define INIT_HCA_UAR_PAGE_SZ_OFFSET	 (INIT_HCA_UAR_OFFSET + 0x0b)

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	memset(inbox, 0, INIT_HCA_IN_SIZE);

	*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;

	*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
		((ilog2(cache_line_size()) - 4) << 5) | (1 << 4);
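
	/*
	 * Illustrative arithmetic (not in the original source): for a
	 * 64-byte cache line, ilog2(64) = 6, so the byte written above
	 * is ((6 - 4) << 5) | (1 << 4) = 0x50.
	 */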

#if defined(__LITTLE_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
#else
#error Host endianness not defined
#endif
	/* Check port for UD address vector: */
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);

	/* Enable IPoIB checksumming if we can: */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);

	/* Enable QoS support if module parameter set */
	if (enable_qos)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);

	/* Enable fast drop performance optimization */
	if (dev->caps.fast_drop)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 7);

	/* enable counters */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
		dev->caps.eqe_size   = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size   = 32;
		dev->caps.eqe_factor = 0;
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
		dev->caps.cqe_size   = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
	} else {
		dev->caps.cqe_size   = 32;
	}

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
		*(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW) {
		strncpy((u8 *)mailbox->buf + INIT_HCA_DRIVER_VERSION_OFFSET,
			DRV_NAME_FOR_FW,
			INIT_HCA_DRV_NAME_FOR_FW_MAX_SIZE - 1);
		mlx4_dbg(dev, "Reporting Driver Version to FW: %s\n",
			 (u8 *)mailbox->buf + INIT_HCA_DRIVER_VERSION_OFFSET);
	}

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET);
	MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET);
	MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET);
	MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
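
	/*
	 * Illustrative note (not in the original source): each context
	 * table is described to firmware as an ICM virtual base address
	 * plus a log2 entry count; e.g. log_num_qps = 17 advertises room
	 * for 128K QP contexts starting at qpc_base.
	 */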

	/* steering attributes */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
			cpu_to_be32(1 <<
				    INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);

		MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
		/* Enable Ethernet flow steering
		 * with udp unicast and tcp unicast
		 */
		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
			 INIT_HCA_FS_ETH_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
		/* Enable IPoIB flow steering
		 * with udp unicast and tcp unicast
		 */
		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
			 INIT_HCA_FS_IB_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
	} else {
		MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_hash_sz,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
		if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
			MLX4_PUT(inbox, (u8) (1 << 3),
				 INIT_HCA_UC_STEERING_OFFSET);
	}

	/* TPT attributes */

	MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET);
	mw_enable = param->mw_enable ? INIT_HCA_TPT_MW_ENABLE : 0;
	MLX4_PUT(inbox, mw_enable, INIT_HCA_TPT_MW_OFFSET);
	MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
	MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
		       MLX4_CMD_NATIVE);

	if (err)
		mlx4_err(dev, "INIT_HCA returns %d\n", err);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_QUERY_HCA(struct mlx4_dev *dev,
		   struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *outbox;
	u32 dword_field;
	u32 mw_enable;
	int err;
	u8 byte_field;

#define QUERY_HCA_GLOBAL_CAPS_OFFSET	0x04
#define QUERY_HCA_CORE_CLOCK_OFFSET	0x0c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
			   MLX4_CMD_QUERY_HCA,
			   MLX4_CMD_TIME_CLASS_B,
			   !mlx4_is_slave(dev));
	if (err)
		goto out;

	MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
	MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
	MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
	MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
	MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
	MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
	MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
	MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);

	MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
	if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
		param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
	} else {
		MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
		if (byte_field & 0x8)
			param->steering_mode = MLX4_STEERING_MODE_B0;
		else
			param->steering_mode = MLX4_STEERING_MODE_A0;
	}
	/* steering attributes */
	if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
	} else {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_hash_sz, outbox,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
	}

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
	if (byte_field & 0x20) /* 64-bytes eqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
	if (byte_field & 0x40) /* 64-bytes cqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;

	/* TPT attributes */

	MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_GET(mw_enable, outbox, INIT_HCA_TPT_MW_OFFSET);
	param->mw_enable = (mw_enable & INIT_HCA_TPT_MW_ENABLE) ==
			   INIT_HCA_TPT_MW_ENABLE;
	MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
	MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
 * and real QP0 are active, so that the paravirtualized QP0 is ready
 * to operate */
static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	/* irrelevant if not infiniband */
	if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
	    priv->mfunc.master.qp0_state[port].qp0_active)
		return 1;
	return 0;
}

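/* INIT_PORT and CLOSE_PORT are reference counted across functions: the
 * physical command reaches the firmware only for the first INIT_PORT
 * and the last CLOSE_PORT on a given port; intermediate calls just
 * update the per-slave init_port_mask bookkeeping. */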
int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = vhcr->in_modifier;
	int err;

	if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		/* Enable port only if it was previously disabled */
		if (!priv->mfunc.master.init_port_ref[port]) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	} else {
		if (slave == mlx4_master_func_num(dev)) {
			if (check_qp0_state(dev, slave, port) &&
			    !priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0,
					       MLX4_CMD_INIT_PORT,
					       MLX4_CMD_TIME_CLASS_A,
					       MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.qp0_state[port].port_active = 1;
				priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	}
	++priv->mfunc.master.init_port_ref[port];
	return 0;
}

int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err;
	u32 flags;
	u16 field;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
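		/* Firmware that predates the new port commands (command
		 * interface rev < 3) expects INIT_PORT to carry the port
		 * attributes in a mailbox with the layout defined below;
		 * newer firmware takes the command with no mailbox (see
		 * the else branch further down). */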
#define INIT_PORT_IN_SIZE		256
#define INIT_PORT_FLAGS_OFFSET		0x00
#define INIT_PORT_FLAG_SIG		(1 << 18)
#define INIT_PORT_FLAG_NG		(1 << 17)
#define INIT_PORT_FLAG_G0		(1 << 16)
#define INIT_PORT_VL_SHIFT		4
#define INIT_PORT_PORT_WIDTH_SHIFT	8
#define INIT_PORT_MTU_OFFSET		0x04
#define INIT_PORT_MAX_GID_OFFSET	0x06
#define INIT_PORT_MAX_PKEY_OFFSET	0x0a
#define INIT_PORT_GUID0_OFFSET		0x10
#define INIT_PORT_NODE_GUID_OFFSET	0x18
#define INIT_PORT_SI_GUID_OFFSET	0x20

		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		memset(inbox, 0, INIT_PORT_IN_SIZE);

		flags = 0;
		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
		flags |= (dev->caps.port_width_cap[port] & 0xf) <<
			 INIT_PORT_PORT_WIDTH_SHIFT;
		MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);

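		/* ib_mtu_cap holds the IB MTU enum value (1 = 256 bytes,
		 * 2 = 512, ...), so shifting 128 left by it converts the
		 * enum into the MTU in bytes. */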
		field = 128 << dev->caps.ib_mtu_cap[port];
		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
		field = dev->caps.gid_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
		field = dev->caps.pkey_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);

		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

		mlx4_free_cmd_mailbox(dev, mailbox);
	} else
		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);

int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = vhcr->in_modifier;
	int err;

	if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
	      (1 << port)))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		if (priv->mfunc.master.init_port_ref[port] == 1) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
				       1000, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	} else {
		/* infiniband port */
		if (slave == mlx4_master_func_num(dev)) {
			if (!priv->mfunc.master.qp0_state[port].qp0_active &&
			    priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0,
					       MLX4_CMD_CLOSE_PORT,
					       1000, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
				priv->mfunc.master.qp0_state[port].port_active = 0;
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	}
	--priv->mfunc.master.init_port_ref[port];
	return 0;
}

int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
			MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);

int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
			MLX4_CMD_NATIVE);
}

int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
	int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
			       MLX4_CMD_SET_ICM_SIZE,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (ret)
		return ret;

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	return 0;
}

int mlx4_NOP(struct mlx4_dev *dev)
{
	/* Input modifier of 0x1f means "finish as soon as possible." */
	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_NATIVE);
}

int mlx4_query_diag_counters(struct mlx4_dev *dev, int array_length,
			     u8 op_modifier, u32 in_offset[],
			     u32 counter_out[])
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int ret;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	ret = mlx4_cmd_box(dev, 0, mailbox->dma, 0, op_modifier,
			   MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (ret)
		goto out;

	for (i = 0; i < array_length; i++) {
		if (in_offset[i] > MLX4_MAILBOX_SIZE) {
			ret = -EINVAL;
			goto out;
		}

		MLX4_GET(counter_out[i], outbox, in_offset[i]);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_query_diag_counters);

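/* MOD_STAT_CFG is not paravirtualized for slaves; the wrapper simply
 * accepts the command and reports success without touching hardware. */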
int mlx4_MOD_STAT_CFG_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	return 0;
}

#define MLX4_WOL_SETUP_MODE (5 << 28)
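/* Wake-on-LAN state lives behind MOD_STAT_CFG: setup mode 5 in the
 * upper bits of the input modifier selects the WoL parameters, the
 * port goes in bits 8-15, and the opcode modifier distinguishes a
 * read (0x3, mlx4_wol_read) from a write (0x1, mlx4_wol_write). */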
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
			    MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_read);

int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);

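/* The firmware posts "operation required" events when it needs the
 * driver to perform an action on its behalf (currently only joining or
 * leaving a multicast group). This work handler pulls each pending
 * request with GET_OP_REQ, performs it, and acknowledges the result
 * back to the firmware using the request's token. */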
enum {
	ADD_TO_MCG = 0x26,
};

void mlx4_opreq_action(struct work_struct *work)
{
	struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
					      opreq_task);
	struct mlx4_dev *dev = &priv->dev;
	int num_tasks = atomic_read(&priv->opreq_count);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 *outbox;
	u32 modifier, num_qps;
	u16 token, type, type_m;
	int err, i, rem_mcg, prot;
	struct mlx4_qp qp;

#define GET_OP_REQ_MODIFIER_OFFSET	0x08
#define GET_OP_REQ_TOKEN_OFFSET		0x14
#define GET_OP_REQ_TYPE_OFFSET		0x1a
#define GET_OP_REQ_DATA_OFFSET		0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
		return;
	}
	outbox = mailbox->buf;

	while (num_tasks) {
		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
				   MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to retrieve required operation: %d\n",
				 err);
			goto out;
		}
		MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
		MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
		MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
		type_m = type >> 12;
		type &= 0xfff;

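		/* The request type word encodes a 4-bit modifier in its
		 * high nibble (type_m); the low 12 bits select the
		 * operation handled by the switch below. */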
		switch (type) {
		case ADD_TO_MCG:
			if (dev->caps.steering_mode ==
			    MLX4_STEERING_MODE_DEVICE_MANAGED) {
				mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
				err = EPERM;
				break;
			}
			mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
						  GET_OP_REQ_DATA_OFFSET);
			num_qps = be32_to_cpu(mgm->members_count) &
				  MGM_QPN_MASK;
			rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
			prot = ((u8 *)(&mgm->members_count))[0] >> 6;

			for (i = 0; i < num_qps; i++) {
				qp.qpn = be32_to_cpu(mgm->qp[i]);
				if (rem_mcg)
					err = mlx4_multicast_detach(dev, &qp,
								    mgm->gid,
								    prot, 0);
				else
					err = mlx4_multicast_attach(dev, &qp,
								    mgm->gid,
								    mgm->gid[5],
								    0, prot,
								    NULL);
				if (err)
					break;
			}
			break;
		default:
			mlx4_warn(dev, "Bad type for required operation\n");
			err = EINVAL;
			break;
		}
		err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16),
			       1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to acknowledge required request: %d\n",
				 err);
			goto out;
		}
		memset(outbox, 0, 0xffc);
		num_tasks = atomic_dec_return(&priv->opreq_count);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
}