2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/etherdevice.h>
36 #include <linux/mlx4/cmd.h>
37 #include <linux/module.h>
38 #include <linux/cache.h>
44 MLX4_COMMAND_INTERFACE_MIN_REV = 2,
45 MLX4_COMMAND_INTERFACE_MAX_REV = 3,
46 MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS = 3,
49 extern void __buggy_use_of_MLX4_GET(void);
50 extern void __buggy_use_of_MLX4_PUT(void);
52 static bool enable_qos;
53 module_param(enable_qos, bool, 0444);
54 MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
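/*
 * Sketch of usage (assuming the standard mlx4_core module name): QoS can be
 * turned on at load time with "modprobe mlx4_core enable_qos=1", and the
 * read-only 0444 permission exposes the current value under
 * /sys/module/mlx4_core/parameters/enable_qos.
 */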
56 #define MLX4_GET(dest, source, offset) \
58 void *__p = (char *) (source) + (offset); \
59 switch (sizeof (dest)) { \
60 case 1: (dest) = *(u8 *) __p; break; \
61 case 2: (dest) = be16_to_cpup(__p); break; \
62 case 4: (dest) = be32_to_cpup(__p); break; \
63 case 8: (dest) = be64_to_cpup(__p); break; \
64 default: __buggy_use_of_MLX4_GET(); \
68 #define MLX4_PUT(dest, source, offset) \
70 void *__d = ((char *) (dest) + (offset)); \
71 switch (sizeof(source)) { \
72 case 1: *(u8 *) __d = (source); break; \
73 case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
74 case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
75 case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
76 default: __buggy_use_of_MLX4_PUT(); \
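/*
 * Illustrative use of the mailbox accessors above: the sizeof() switch picks
 * the byte-swap width, so the C type of the local variable controls how many
 * big-endian bytes are read or written, e.g.
 *
 *	u16 val;
 *	MLX4_GET(val, mailbox->buf, 0x02);
 *	MLX4_PUT(mailbox->buf, val, 0x02);
 */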
80 static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
82 static const char *fname[] = {
83 [ 0] = "RC transport",
84 [ 1] = "UC transport",
85 [ 2] = "UD transport",
86 [ 3] = "XRC transport",
87 [ 4] = "reliable multicast",
88 [ 5] = "FCoIB support",
90 [ 7] = "IPoIB checksum offload",
91 [ 8] = "P_Key violation counter",
92 [ 9] = "Q_Key violation counter",
95 [15] = "Big LSO headers",
98 [18] = "Atomic ops support",
99 [19] = "Raw multicast support",
100 [20] = "Address vector port checking support",
101 [21] = "UD multicast support",
102 [24] = "Demand paging support",
103 [25] = "Router support",
104 [30] = "IBoE support",
105 [32] = "Unicast loopback support",
106 [34] = "FCS header control",
107 [38] = "Wake On LAN support",
108 [40] = "UDP RSS support",
109 [41] = "Unicast VEP steering support",
110 [42] = "Multicast VEP steering support",
111 [44] = "Cross-channel (sync_qp) operations support",
112 [48] = "Counters support",
113 [59] = "Port management change event support",
114 [60] = "eSwitch support",
115 [61] = "64 byte EQE support",
116 [62] = "64 byte CQE support",
120 mlx4_dbg(dev, "DEV_CAP flags:\n");
121 for (i = 0; i < ARRAY_SIZE(fname); ++i)
122 if (fname[i] && (flags & (1LL << i)))
123 mlx4_dbg(dev, " %s\n", fname[i]);
126 static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
128 static const char * const fname[] = {
130 [1] = "RSS Toeplitz Hash Function support",
131 [2] = "RSS XOR Hash Function support",
132 [3] = "Device manage flow steering support",
133 [4] = "FSM (MAC unti-spoofing) support",
134 [5] = "VST (control vlan insertion/stripping) support",
135 [6] = "Dynamic QP updates support",
136 [7] = "Loopback source checks support",
137 [8] = "Device managed flow steering IPoIB support",
138 [9] = "ETS configuration support",
139 [10] = "ETH backplane autoneg report",
140 [11] = "Ethernet Flow control statistics support",
141 [12] = "Recoverable error events support",
142 [13] = "Time stamping support",
143 [14] = "Report driver version to FW support"
147 for (i = 0; i < ARRAY_SIZE(fname); ++i)
148 if (fname[i] && (flags & (1LL << i)))
149 mlx4_dbg(dev, " %s\n", fname[i]);
152 int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
154 struct mlx4_cmd_mailbox *mailbox;
158 #define MOD_STAT_CFG_IN_SIZE 0x100
160 #define MOD_STAT_CFG_PG_SZ_M_OFFSET 0x002
161 #define MOD_STAT_CFG_PG_SZ_OFFSET 0x003
163 mailbox = mlx4_alloc_cmd_mailbox(dev);
165 return PTR_ERR(mailbox);
166 inbox = mailbox->buf;
168 memset(inbox, 0, MOD_STAT_CFG_IN_SIZE);
170 MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
171 MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
173 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
174 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
176 mlx4_free_cmd_mailbox(dev, mailbox);
180 int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
181 struct mlx4_vhcr *vhcr,
182 struct mlx4_cmd_mailbox *inbox,
183 struct mlx4_cmd_mailbox *outbox,
184 struct mlx4_cmd_info *cmd)
186 struct mlx4_priv *priv = mlx4_priv(dev);
191 #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
192 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
193 #define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
194 #define QUERY_FUNC_CAP_FMR_OFFSET 0x8
195 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP 0x10
196 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP 0x14
197 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP 0x18
198 #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP 0x20
199 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP 0x24
200 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28
201 #define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
202 #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
204 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50
205 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54
206 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x58
207 #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x60
208 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64
209 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68
211 #define QUERY_FUNC_CAP_FMR_FLAG 0x80
212 #define QUERY_FUNC_CAP_FLAG_RDMA 0x40
213 #define QUERY_FUNC_CAP_FLAG_ETH 0x80
214 #define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10
216 /* when opcode modifier = 1 */
217 #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
218 #define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8
219 #define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc
220 #define QUERY_FUNC_CAP_COUNTER_INDEX_OFFSET 0xd
222 #define QUERY_FUNC_CAP_QP0_TUNNEL 0x10
223 #define QUERY_FUNC_CAP_QP0_PROXY 0x14
224 #define QUERY_FUNC_CAP_QP1_TUNNEL 0x18
225 #define QUERY_FUNC_CAP_QP1_PROXY 0x1c
227 #define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC 0x40
228 #define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN 0x80
229 #define QUERY_FUNC_CAP_PROPS_DEF_COUNTER 0x20
231 #define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80
233 if (vhcr->op_modifier == 1) {
234 port = vhcr->in_modifier; /* phys-port = logical-port */
235 MLX4_PUT(outbox->buf, port, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
238 /* ensure that phy_wqe_gid bit is not set */
239 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS0_OFFSET);
241 /* ensure force vlan and force mac bits are not set
242 * and that default counter bit is set
244 field = QUERY_FUNC_CAP_PROPS_DEF_COUNTER; /* def counter */
245 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
247 /* There is always a default counter: either a legal one or the sink counter */
248 field = mlx4_get_default_counter_index(dev, slave, vhcr->in_modifier);
249 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_COUNTER_INDEX_OFFSET);
251 /* size is now the QP number */
252 size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
253 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);
256 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);
258 size = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;
259 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_PROXY);
262 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
264 } else if (vhcr->op_modifier == 0) {
265 /* enable rdma and ethernet interfaces, and new quota locations */
266 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
267 QUERY_FUNC_CAP_FLAG_QUOTAS);
268 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
270 field = dev->caps.num_ports;
271 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
273 size = dev->caps.function_caps; /* set PF behaviours */
274 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
276 field = 0; /* protected FMR support not available as yet */
277 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);
279 size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
280 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
281 size = dev->caps.num_qps;
282 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
284 size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
285 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
286 size = dev->caps.num_srqs;
287 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
289 size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
290 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
291 size = dev->caps.num_cqs;
292 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
294 size = dev->caps.num_eqs;
295 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
297 size = dev->caps.reserved_eqs;
298 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
300 size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
301 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
302 size = dev->caps.num_mpts;
303 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
305 size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
306 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
307 size = dev->caps.num_mtts;
308 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
310 size = dev->caps.num_mgms + dev->caps.num_amgms;
311 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
312 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
320 int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
321 struct mlx4_func_cap *func_cap)
323 struct mlx4_cmd_mailbox *mailbox;
325 u8 field, op_modifier;
327 int err = 0, quotas = 0;
329 op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
331 mailbox = mlx4_alloc_cmd_mailbox(dev);
333 return PTR_ERR(mailbox);
335 err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier,
336 MLX4_CMD_QUERY_FUNC_CAP,
337 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
341 outbox = mailbox->buf;
344 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
345 if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
346 mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
347 err = -EPROTONOSUPPORT;
350 func_cap->flags = field;
351 quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);
353 MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
354 func_cap->num_ports = field;
356 MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
357 func_cap->pf_context_behaviour = size;
360 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
361 func_cap->qp_quota = size & 0xFFFFFF;
363 MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
364 func_cap->srq_quota = size & 0xFFFFFF;
366 MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
367 func_cap->cq_quota = size & 0xFFFFFF;
369 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
370 func_cap->mpt_quota = size & 0xFFFFFF;
372 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
373 func_cap->mtt_quota = size & 0xFFFFFF;
375 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
376 func_cap->mcg_quota = size & 0xFFFFFF;
379 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
380 func_cap->qp_quota = size & 0xFFFFFF;
382 MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
383 func_cap->srq_quota = size & 0xFFFFFF;
385 MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
386 func_cap->cq_quota = size & 0xFFFFFF;
388 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
389 func_cap->mpt_quota = size & 0xFFFFFF;
391 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
392 func_cap->mtt_quota = size & 0xFFFFFF;
394 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
395 func_cap->mcg_quota = size & 0xFFFFFF;
397 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
398 func_cap->max_eq = size & 0xFFFFFF;
400 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
401 func_cap->reserved_eq = size & 0xFFFFFF;
406 /* logical port query */
407 if (gen_or_port > dev->caps.num_ports) {
412 if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
413 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
414 if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
415 mlx4_err(dev, "VLAN is enforced on this port\n");
416 err = -EPROTONOSUPPORT;
420 if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
421 mlx4_err(dev, "Force mac is enabled on this port\n");
422 err = -EPROTONOSUPPORT;
425 } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
426 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
427 if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
428 mlx4_err(dev, "phy_wqe_gid is "
429 "enforced on this ib port\n");
430 err = -EPROTONOSUPPORT;
435 MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
436 func_cap->physical_port = field;
437 if (func_cap->physical_port != gen_or_port) {
442 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
443 if (field & QUERY_FUNC_CAP_PROPS_DEF_COUNTER) {
444 MLX4_GET(field, outbox, QUERY_FUNC_CAP_COUNTER_INDEX_OFFSET);
445 func_cap->def_counter_index = field;
447 func_cap->def_counter_index = MLX4_SINK_COUNTER_INDEX;
450 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
451 func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;
453 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
454 func_cap->qp0_proxy_qpn = size & 0xFFFFFF;
456 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
457 func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;
459 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
460 func_cap->qp1_proxy_qpn = size & 0xFFFFFF;
462 /* All other resources are allocated by the master, but we still report
463 * 'num' and 'reserved' capabilities as follows:
464 * - num remains the maximum resource index
465 * - 'num - reserved' is the total number of available objects of a resource, but
466 * resource indices may be less than 'reserved'
467 * TODO: set per-resource quotas */
470 mlx4_free_cmd_mailbox(dev, mailbox);
475 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
477 struct mlx4_cmd_mailbox *mailbox;
480 u32 field32, flags, ext_flags;
486 #define QUERY_DEV_CAP_OUT_SIZE 0x100
487 #define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10
488 #define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11
489 #define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12
490 #define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13
491 #define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14
492 #define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15
493 #define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16
494 #define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17
495 #define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19
496 #define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a
497 #define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b
498 #define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d
499 #define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e
500 #define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f
501 #define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20
502 #define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21
503 #define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22
504 #define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23
505 #define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27
506 #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29
507 #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b
508 #define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d
509 #define QUERY_DEV_CAP_RSS_OFFSET 0x2e
510 #define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f
511 #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33
512 #define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
513 #define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36
514 #define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37
515 #define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38
516 #define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
517 #define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
518 #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e
519 #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
520 #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
521 #define QUERY_DEV_CAP_SYNC_QP_OFFSET 0x42
522 #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
523 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
524 #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
525 #define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b
526 #define QUERY_DEV_CAP_BF_OFFSET 0x4c
527 #define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d
528 #define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e
529 #define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f
530 #define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51
531 #define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52
532 #define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55
533 #define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56
534 #define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61
535 #define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62
536 #define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
537 #define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64
538 #define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65
539 #define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66
540 #define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
541 #define QUERY_DEV_CAP_MAX_BASIC_COUNTERS_OFFSET 0x68
542 #define QUERY_DEV_CAP_MAX_EXTENDED_COUNTERS_OFFSET 0x6c
543 #define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET 0x70
544 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
545 #define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70
546 #define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74
547 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
548 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
549 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
550 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
551 #define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86
552 #define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88
553 #define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a
554 #define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c
555 #define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e
556 #define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90
557 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
558 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
559 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
560 #define QUERY_DEV_CAP_ETS_CFG_OFFSET 0x9c
561 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
564 mailbox = mlx4_alloc_cmd_mailbox(dev);
566 return PTR_ERR(mailbox);
567 outbox = mailbox->buf;
569 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
570 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
574 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
575 dev_cap->reserved_qps = 1 << (field & 0xf);
576 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
577 dev_cap->max_qps = 1 << (field & 0x1f);
578 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
579 dev_cap->reserved_srqs = 1 << (field >> 4);
580 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
581 dev_cap->max_srqs = 1 << (field & 0x1f);
582 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
583 dev_cap->max_cq_sz = 1 << field;
584 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
585 dev_cap->reserved_cqs = 1 << (field & 0xf);
586 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
587 dev_cap->max_cqs = 1 << (field & 0x1f);
588 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
589 dev_cap->max_mpts = 1 << (field & 0x3f);
590 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
591 dev_cap->reserved_eqs = field & 0xf;
592 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
593 dev_cap->max_eqs = 1 << (field & 0xf);
594 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
595 dev_cap->reserved_mtts = 1 << (field >> 4);
596 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
597 dev_cap->max_mrw_sz = 1 << field;
598 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
599 dev_cap->reserved_mrws = 1 << (field & 0xf);
600 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
601 dev_cap->max_mtt_seg = 1 << (field & 0x3f);
602 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
603 dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
604 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
605 dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
606 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
609 dev_cap->max_gso_sz = 0;
611 dev_cap->max_gso_sz = 1 << field;
613 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
615 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
617 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
620 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
621 dev_cap->max_rss_tbl_sz = 1 << field;
623 dev_cap->max_rss_tbl_sz = 0;
624 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
625 dev_cap->max_rdma_global = 1 << (field & 0x3f);
626 MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
627 dev_cap->local_ca_ack_delay = field & 0x1f;
628 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
629 dev_cap->num_ports = field & 0xf;
630 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
631 dev_cap->max_msg_sz = 1 << (field & 0x1f);
632 MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET);
634 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN;
635 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
637 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
638 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
640 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
641 dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
642 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
643 dev_cap->fs_max_num_qp_per_entry = field;
644 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
645 dev_cap->stat_rate_support = stat_rate;
646 MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
648 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
649 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
650 MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
651 dev_cap->flags = flags | (u64)ext_flags << 32;
652 MLX4_GET(field, outbox, QUERY_DEV_CAP_SYNC_QP_OFFSET);
653 dev_cap->sync_qp = field & 0x10;
654 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
655 dev_cap->reserved_uars = field >> 4;
656 MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
657 dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
658 MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
659 dev_cap->min_page_sz = 1 << field;
661 MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
663 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
664 dev_cap->bf_reg_size = 1 << (field & 0x1f);
665 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
666 if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
668 dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
669 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
670 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
672 dev_cap->bf_reg_size = 0;
673 mlx4_dbg(dev, "BlueFlame not available\n");
676 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
677 dev_cap->max_sq_sg = field;
678 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
679 dev_cap->max_sq_desc_sz = size;
681 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
682 dev_cap->max_qp_per_mcg = 1 << field;
683 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
684 dev_cap->reserved_mgms = field & 0xf;
685 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
686 dev_cap->max_mcgs = 1 << field;
687 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
688 dev_cap->reserved_pds = field >> 4;
689 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
690 dev_cap->max_pds = 1 << (field & 0x3f);
691 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
692 dev_cap->reserved_xrcds = field >> 4;
693 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
694 dev_cap->max_xrcds = 1 << (field & 0x1f);
696 MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
697 dev_cap->rdmarc_entry_sz = size;
698 MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
699 dev_cap->qpc_entry_sz = size;
700 MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
701 dev_cap->aux_entry_sz = size;
702 MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
703 dev_cap->altc_entry_sz = size;
704 MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
705 dev_cap->eqc_entry_sz = size;
706 MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
707 dev_cap->cqc_entry_sz = size;
708 MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
709 dev_cap->srq_entry_sz = size;
710 MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
711 dev_cap->cmpt_entry_sz = size;
712 MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
713 dev_cap->mtt_entry_sz = size;
714 MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
715 dev_cap->dmpt_entry_sz = size;
717 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
718 dev_cap->max_srq_sz = 1 << field;
719 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
720 dev_cap->max_qp_sz = 1 << field;
721 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
722 dev_cap->resize_srq = field & 1;
723 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
724 dev_cap->max_rq_sg = field;
725 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
726 dev_cap->max_rq_desc_sz = size;
728 MLX4_GET(dev_cap->bmme_flags, outbox,
729 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
730 MLX4_GET(dev_cap->reserved_lkey, outbox,
731 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
732 MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETS_CFG_OFFSET);
733 if (field32 & (1 << 0))
734 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
735 if (field32 & (1 << 7))
736 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
737 if (field32 & (1 << 8))
738 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW;
739 if (field32 & (1 << 13))
740 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
742 MLX4_GET(dev_cap->max_icm_sz, outbox,
743 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
744 if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
745 MLX4_GET(dev_cap->max_basic_counters, outbox,
746 QUERY_DEV_CAP_MAX_BASIC_COUNTERS_OFFSET);
747 /* FW reports 256, but the real value is 255 */
748 dev_cap->max_basic_counters = min_t(u32, dev_cap->max_basic_counters, 255);
749 if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS_EXT)
750 MLX4_GET(dev_cap->max_extended_counters, outbox,
751 QUERY_DEV_CAP_MAX_EXTENDED_COUNTERS_OFFSET);
753 MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
754 if (field32 & (1 << 16))
755 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
756 if (field32 & (1 << 19))
757 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_LB_SRC_CHK;
758 if (field32 & (1 << 20))
759 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
760 if (field32 & (1 << 26))
761 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
763 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
764 for (i = 1; i <= dev_cap->num_ports; ++i) {
765 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
766 dev_cap->max_vl[i] = field >> 4;
767 MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
768 dev_cap->ib_mtu[i] = field >> 4;
769 dev_cap->max_port_width[i] = field & 0xf;
770 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
771 dev_cap->max_gids[i] = 1 << (field & 0xf);
772 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
773 dev_cap->max_pkeys[i] = 1 << (field & 0xf);
776 #define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00
777 #define QUERY_PORT_MTU_OFFSET 0x01
778 #define QUERY_PORT_ETH_MTU_OFFSET 0x02
779 #define QUERY_PORT_WIDTH_OFFSET 0x06
780 #define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07
781 #define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
782 #define QUERY_PORT_MAX_VL_OFFSET 0x0b
783 #define QUERY_PORT_MAC_OFFSET 0x10
784 #define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18
785 #define QUERY_PORT_WAVELENGTH_OFFSET 0x1c
786 #define QUERY_PORT_TRANS_CODE_OFFSET 0x20
788 for (i = 1; i <= dev_cap->num_ports; ++i) {
789 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
790 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
794 MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
795 dev_cap->supported_port_types[i] = field & 3;
796 dev_cap->suggested_type[i] = (field >> 3) & 1;
797 dev_cap->default_sense[i] = (field >> 4) & 1;
798 MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
799 dev_cap->ib_mtu[i] = field & 0xf;
800 MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
801 dev_cap->max_port_width[i] = field & 0xf;
802 MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
803 dev_cap->max_gids[i] = 1 << (field >> 4);
804 dev_cap->max_pkeys[i] = 1 << (field & 0xf);
805 MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
806 dev_cap->max_vl[i] = field & 0xf;
807 MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
808 dev_cap->log_max_macs[i] = field & 0xf;
809 dev_cap->log_max_vlans[i] = field >> 4;
810 MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
811 MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
812 MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
813 dev_cap->trans_type[i] = field32 >> 24;
814 dev_cap->vendor_oui[i] = field32 & 0xffffff;
815 MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
816 MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
820 mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
821 dev_cap->bmme_flags, dev_cap->reserved_lkey);
824 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
825 * we can't use any EQs whose doorbell falls on that page,
826 * even if the EQ itself isn't reserved.
828 dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
829 dev_cap->reserved_eqs);
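/*
 * For example (illustrative numbers): with 8 reserved UARs, EQ doorbells
 * 0..31 fall on reserved UAR pages, so at least 32 EQs are treated as
 * reserved even if the firmware reported fewer.
 */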
831 mlx4_dbg(dev, "Max ICM size %lld MB\n",
832 (unsigned long long) dev_cap->max_icm_sz >> 20);
833 mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
834 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
835 mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
836 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
837 mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
838 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
839 mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
840 dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
841 mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
842 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
843 mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
844 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
845 mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
846 dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
847 mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
848 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
849 mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
850 dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
851 dev_cap->max_port_width[1]);
852 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
853 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
854 mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
855 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
856 mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
857 mlx4_dbg(dev, "Max basic counters: %d\n", dev_cap->max_basic_counters);
858 mlx4_dbg(dev, "Max extended counters: %d\n", dev_cap->max_extended_counters);
859 mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
861 dump_dev_cap_flags(dev, dev_cap->flags);
862 dump_dev_cap_flags2(dev, dev_cap->flags2);
865 mlx4_free_cmd_mailbox(dev, mailbox);
869 int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
870 struct mlx4_vhcr *vhcr,
871 struct mlx4_cmd_mailbox *inbox,
872 struct mlx4_cmd_mailbox *outbox,
873 struct mlx4_cmd_info *cmd)
879 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
880 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
884 /* add port mng change event capability unconditionally to slaves */
885 MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
886 flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
887 MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
889 /* For guests, report BlueFlame disabled */
890 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
892 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
894 /* turn off device-managed steering capability if not enabled */
895 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
896 MLX4_GET(field, outbox->buf,
897 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
899 MLX4_PUT(outbox->buf, field,
900 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
905 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
906 struct mlx4_vhcr *vhcr,
907 struct mlx4_cmd_mailbox *inbox,
908 struct mlx4_cmd_mailbox *outbox,
909 struct mlx4_cmd_info *cmd)
911 struct mlx4_priv *priv = mlx4_priv(dev);
916 int admin_link_state;
918 #define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0
919 #define MLX4_PORT_LINK_UP_MASK 0x80
920 #define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c
921 #define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e
923 err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
924 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
927 if (!err && dev->caps.function != slave) {
928 /* report the vport MAC configured for this VF as the port default MAC */
929 def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
930 MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
932 /* get port type - currently only eth is enabled */
933 MLX4_GET(port_type, outbox->buf,
934 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
936 /* No link sensing allowed */
937 port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
938 /* set port type to currently operating port type */
939 port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);
941 admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
942 if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
943 port_type |= MLX4_PORT_LINK_UP_MASK;
944 else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
945 port_type &= ~MLX4_PORT_LINK_UP_MASK;
947 MLX4_PUT(outbox->buf, port_type,
948 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
950 if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
951 short_field = mlx4_get_slave_num_gids(dev, slave);
953 short_field = 1; /* slave max gids */
954 MLX4_PUT(outbox->buf, short_field,
955 QUERY_PORT_CUR_MAX_GID_OFFSET);
957 short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
958 MLX4_PUT(outbox->buf, short_field,
959 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
965 int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
966 int *gid_tbl_len, int *pkey_tbl_len)
968 struct mlx4_cmd_mailbox *mailbox;
973 mailbox = mlx4_alloc_cmd_mailbox(dev);
975 return PTR_ERR(mailbox);
977 err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
978 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
983 outbox = mailbox->buf;
985 MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
986 *gid_tbl_len = field;
988 MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
989 *pkey_tbl_len = field;
992 mlx4_free_cmd_mailbox(dev, mailbox);
995 EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
997 int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
999 struct mlx4_cmd_mailbox *mailbox;
1000 struct mlx4_icm_iter iter;
1008 mailbox = mlx4_alloc_cmd_mailbox(dev);
1009 if (IS_ERR(mailbox))
1010 return PTR_ERR(mailbox);
1011 memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
1012 pages = mailbox->buf;
1014 for (mlx4_icm_first(icm, &iter);
1015 !mlx4_icm_last(&iter);
1016 mlx4_icm_next(&iter)) {
1018 * We have to pass pages that are aligned to their
1019 * size, so find the least significant 1 in the
1020 * address or size and use that as our log2 size.
1022 lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
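/*
 * Worked example (illustrative values): addr 0x60000 and size 0x40000 give
 * ffs(0x60000) - 1 = 17, i.e. 128 KB pages, the largest power of two that
 * divides both the chunk address and the chunk size.
 */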
1023 if (lg < MLX4_ICM_PAGE_SHIFT) {
1024 mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
1026 (unsigned long long) mlx4_icm_addr(&iter),
1027 mlx4_icm_size(&iter));
1032 for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
1034 pages[nent * 2] = cpu_to_be64(virt);
1038 pages[nent * 2 + 1] =
1039 cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
1040 (lg - MLX4_ICM_PAGE_SHIFT));
1041 ts += 1 << (lg - 10);
1044 if (++nent == MLX4_MAILBOX_SIZE / 16) {
1045 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
1046 MLX4_CMD_TIME_CLASS_B,
1056 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
1057 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1062 case MLX4_CMD_MAP_FA:
1063 mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
1065 case MLX4_CMD_MAP_ICM_AUX:
1066 mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
1068 case MLX4_CMD_MAP_ICM:
1069 mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
1070 tc, ts, (unsigned long long) virt - (ts << 10));
1075 mlx4_free_cmd_mailbox(dev, mailbox);
1079 int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
1081 return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
1084 int mlx4_UNMAP_FA(struct mlx4_dev *dev)
1086 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
1087 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1091 int mlx4_RUN_FW(struct mlx4_dev *dev)
1093 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
1094 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1097 int mlx4_QUERY_FW(struct mlx4_dev *dev)
1099 struct mlx4_fw *fw = &mlx4_priv(dev)->fw;
1100 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
1101 struct mlx4_cmd_mailbox *mailbox;
1108 #define QUERY_FW_OUT_SIZE 0x100
1109 #define QUERY_FW_VER_OFFSET 0x00
1110 #define QUERY_FW_PPF_ID 0x09
1111 #define QUERY_FW_CMD_IF_REV_OFFSET 0x0a
1112 #define QUERY_FW_MAX_CMD_OFFSET 0x0f
1113 #define QUERY_FW_ERR_START_OFFSET 0x30
1114 #define QUERY_FW_ERR_SIZE_OFFSET 0x38
1115 #define QUERY_FW_ERR_BAR_OFFSET 0x3c
1117 #define QUERY_FW_SIZE_OFFSET 0x00
1118 #define QUERY_FW_CLR_INT_BASE_OFFSET 0x20
1119 #define QUERY_FW_CLR_INT_BAR_OFFSET 0x28
1121 #define QUERY_FW_COMM_BASE_OFFSET 0x40
1122 #define QUERY_FW_COMM_BAR_OFFSET 0x48
1124 #define QUERY_FW_CLOCK_OFFSET 0x50
1125 #define QUERY_FW_CLOCK_BAR 0x58
1127 mailbox = mlx4_alloc_cmd_mailbox(dev);
1128 if (IS_ERR(mailbox))
1129 return PTR_ERR(mailbox);
1130 outbox = mailbox->buf;
1132 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
1133 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1137 MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
1139 * FW subminor version is at more significant bits than minor
1140 * version, so swap here.
1142 dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
1143 ((fw_ver & 0xffff0000ull) >> 16) |
1144 ((fw_ver & 0x0000ffffull) << 16);
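/*
 * E.g. (illustrative value) a raw fw_ver of 0x0002_02bc_0009 holds
 * major 2, subminor 0x2bc, minor 9; after the swap it reads
 * 0x0002_0009_02bc and is printed below as 2.9.700.
 */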
1146 MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
1147 dev->caps.function = lg;
1149 if (mlx4_is_slave(dev))
1153 MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
1154 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
1155 cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
1156 mlx4_err(dev, "Installed FW has unsupported "
1157 "command interface revision %d.\n",
1159 mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
1160 (int) (dev->caps.fw_ver >> 32),
1161 (int) (dev->caps.fw_ver >> 16) & 0xffff,
1162 (int) dev->caps.fw_ver & 0xffff);
1163 mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
1164 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
1169 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
1170 dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;
1172 MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
1173 cmd->max_cmds = 1 << lg;
1175 mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
1176 (int) (dev->caps.fw_ver >> 32),
1177 (int) (dev->caps.fw_ver >> 16) & 0xffff,
1178 (int) dev->caps.fw_ver & 0xffff,
1179 cmd_if_rev, cmd->max_cmds);
1181 MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
1182 MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
1183 MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET);
1184 fw->catas_bar = (fw->catas_bar >> 6) * 2;
1186 mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
1187 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);
1189 MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
1190 MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
1191 MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
1192 fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
1194 MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
1195 MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET);
1196 fw->comm_bar = (fw->comm_bar >> 6) * 2;
1197 mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
1198 fw->comm_bar, (unsigned long long)fw->comm_base);
1199 mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
1201 MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
1202 MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR);
1203 fw->clock_bar = (fw->clock_bar >> 6) * 2;
1204 mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
1205 fw->clock_bar, (unsigned long long)fw->clock_offset);
1208 * Round up number of system pages needed in case
1209 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
1212 ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
1213 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
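/*
 * For example (illustrative sizes): with 4 KB ICM pages and 64 KB system
 * pages, 17 ICM pages align up to 32 and shift down to 2 system pages.
 */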
1215 mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
1216 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);
1219 mlx4_free_cmd_mailbox(dev, mailbox);
1223 int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
1224 struct mlx4_vhcr *vhcr,
1225 struct mlx4_cmd_mailbox *inbox,
1226 struct mlx4_cmd_mailbox *outbox,
1227 struct mlx4_cmd_info *cmd)
1232 outbuf = outbox->buf;
1233 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
1234 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1238 /* for slaves, set pci PPF ID to invalid and zero out everything
1239 * else except FW version */
1240 outbuf[0] = outbuf[1] = 0;
1241 memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
1242 outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;
1247 static void get_board_id(void *vsd, char *board_id, char *vsdstr)
1251 #define VSD_OFFSET_SIG1 0x00
1252 #define VSD_OFFSET_SIG2 0xde
1253 #define VSD_OFFSET_MLX_BOARD_ID 0xd0
1254 #define VSD_OFFSET_TS_BOARD_ID 0x20
1255 #define VSD_LEN 0xd0
1257 #define VSD_SIGNATURE_TOPSPIN 0x5ad
1259 memset(vsdstr, 0, MLX4_VSD_LEN);
1261 for (i = 0; i < VSD_LEN / 4; i++)
1262 ((u32 *)vsdstr)[i] =
1263 swab32(*(u32 *)(vsd + i * 4));
1265 memset(board_id, 0, MLX4_BOARD_ID_LEN);
1267 if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
1268 be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
1269 strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
1272 * The board ID is a string but the firmware byte
1273 * swaps each 4-byte word before passing it back to
1274 * us. Therefore we need to swab it before printing.
1276 for (i = 0; i < 4; ++i)
1277 ((u32 *) board_id)[i] =
1278 swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
1282 int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
1284 struct mlx4_cmd_mailbox *mailbox;
1288 #define QUERY_ADAPTER_OUT_SIZE 0x100
1289 #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
1290 #define QUERY_ADAPTER_VSD_OFFSET 0x20
1291 #define QUERY_ADAPTER_VSD_VENDOR_ID_OFFSET 0x1e
1293 mailbox = mlx4_alloc_cmd_mailbox(dev);
1294 if (IS_ERR(mailbox))
1295 return PTR_ERR(mailbox);
1296 outbox = mailbox->buf;
1298 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
1299 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1303 MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
1305 adapter->vsd_vendor_id = be16_to_cpup((u16 *)outbox +
1306 QUERY_ADAPTER_VSD_VENDOR_ID_OFFSET / 2);
1308 get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
1309 adapter->board_id, adapter->vsd);
1312 mlx4_free_cmd_mailbox(dev, mailbox);
1316 int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1318 struct mlx4_cmd_mailbox *mailbox;
1323 #define INIT_HCA_IN_SIZE 0x200
1324 #define INIT_HCA_DRV_NAME_FOR_FW_MAX_SIZE 64
1325 #define INIT_HCA_VERSION_OFFSET 0x000
1326 #define INIT_HCA_VERSION 2
1327 #define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
1328 #define INIT_HCA_FLAGS_OFFSET 0x014
1329 #define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
1330 #define INIT_HCA_QPC_OFFSET 0x020
1331 #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
1332 #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
1333 #define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28)
1334 #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f)
1335 #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30)
1336 #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37)
1337 #define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38)
1338 #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40)
1339 #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
1340 #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
1341 #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67)
1342 #define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70)
1343 #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
1344 #define INIT_HCA_MCAST_OFFSET 0x0c0
1345 #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
1346 #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
1347 #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
1348 #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
1349 #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
1350 #define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
1351 #define INIT_HCA_DRIVER_VERSION_OFFSET 0x140
1352 #define INIT_HCA_FS_PARAM_OFFSET 0x1d0
1353 #define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
1354 #define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
1355 #define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
1356 #define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
1357 #define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
1358 #define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25)
1359 #define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
1360 #define INIT_HCA_TPT_OFFSET 0x0f0
1361 #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
1362 #define INIT_HCA_TPT_MW_OFFSET (INIT_HCA_TPT_OFFSET + 0x08)
1363 #define INIT_HCA_TPT_MW_ENABLE (1 << 31)
1364 #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
1365 #define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10)
1366 #define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18)
1367 #define INIT_HCA_UAR_OFFSET 0x120
1368 #define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a)
1369 #define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b)
1371 mailbox = mlx4_alloc_cmd_mailbox(dev);
1372 if (IS_ERR(mailbox))
1373 return PTR_ERR(mailbox);
1374 inbox = mailbox->buf;
1376 memset(inbox, 0, INIT_HCA_IN_SIZE);
1378 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
1380 *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
1381 ((ilog2(cache_line_size()) - 4) << 5) | (1 << 4);
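/*
 * E.g. a 64-byte cache line gives ilog2(64) - 4 = 2, so the byte written
 * above is (2 << 5) | (1 << 4) = 0x50.
 */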
1383 #if defined(__LITTLE_ENDIAN)
1384 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
1385 #elif defined(__BIG_ENDIAN)
1386 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
1388 #error Host endianness not defined
1390 /* Check port for UD address vector: */
1391 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
1393 /* Enable IPoIB checksumming if we can: */
1394 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
1395 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
1397 /* Enable QoS support if module parameter set */
1399 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
1401 /* Enable fast drop performance optimization */
1402 if (dev->caps.fast_drop)
1403 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 7);
1405 /* enable counters */
1406 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
1407 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
1409 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
1410 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
1411 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
1412 dev->caps.eqe_size = 64;
1413 dev->caps.eqe_factor = 1;
1415 dev->caps.eqe_size = 32;
1416 dev->caps.eqe_factor = 0;
1419 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
1420 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
1421 dev->caps.cqe_size = 64;
1422 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
1424 dev->caps.cqe_size = 32;
1427 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
1428 *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);
1430 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW) {
1431 strncpy((u8 *)mailbox->buf + INIT_HCA_DRIVER_VERSION_OFFSET,
1433 INIT_HCA_DRV_NAME_FOR_FW_MAX_SIZE - 1);
1434 mlx4_dbg(dev, "Reporting Driver Version to FW: %s\n",
1435 (u8 *)mailbox->buf + INIT_HCA_DRIVER_VERSION_OFFSET);
1438 /* QPC/EEC/CQC/EQC/RDMARC attributes */
1440 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
1441 MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET);
1442 MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET);
1443 MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
1444 MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET);
1445 MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET);
1446 MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET);
1447 MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET);
1448 MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET);
1449 MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET);
1450 MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
1451 MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
1453 /* steering attributes */
1454 if (dev->caps.steering_mode ==
1455 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1456 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
1458 INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);
1460 MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
1461 MLX4_PUT(inbox, param->log_mc_entry_sz,
1462 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1463 MLX4_PUT(inbox, param->log_mc_table_sz,
1464 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1465 /* Enable Ethernet flow steering
1466 * with udp unicast and tcp unicast
1468 MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
1469 INIT_HCA_FS_ETH_BITS_OFFSET);
1470 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1471 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
1472 /* Enable IPoIB flow steering
1473 * with udp unicast and tcp unicast
1475 MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
1476 INIT_HCA_FS_IB_BITS_OFFSET);
1477 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1478 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
1480 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
1481 MLX4_PUT(inbox, param->log_mc_entry_sz,
1482 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1483 MLX4_PUT(inbox, param->log_mc_hash_sz,
1484 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1485 MLX4_PUT(inbox, param->log_mc_table_sz,
1486 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1487 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
1488 MLX4_PUT(inbox, (u8) (1 << 3),
1489 INIT_HCA_UC_STEERING_OFFSET);
1492 /* TPT attributes */
1494 MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET);
1495 mw_enable = param->mw_enable ? INIT_HCA_TPT_MW_ENABLE : 0;
1496 MLX4_PUT(inbox, mw_enable, INIT_HCA_TPT_MW_OFFSET);
1497 MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
1498 MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
1499 MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET);
1501 /* UAR attributes */
1503 MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
1504 MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
1506 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
1510 mlx4_err(dev, "INIT_HCA returns %d\n", err);
1512 mlx4_free_cmd_mailbox(dev, mailbox);
1516 int mlx4_QUERY_HCA(struct mlx4_dev *dev,
1517 struct mlx4_init_hca_param *param)
1519 struct mlx4_cmd_mailbox *mailbox;
1526 #define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
1527 #define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c
1529 mailbox = mlx4_alloc_cmd_mailbox(dev);
1530 if (IS_ERR(mailbox))
1531 return PTR_ERR(mailbox);
1532 outbox = mailbox->buf;
1534 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
1536 MLX4_CMD_TIME_CLASS_B,
1537 !mlx4_is_slave(dev));
1541 MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
1542 MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
1544 /* QPC/EEC/CQC/EQC/RDMARC attributes */
1546 MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
1547 MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
1548 MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
1549 MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
1550 MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
1551 MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
1552 MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
1553 MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
1554 MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
1555 MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
1556 MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
1557 MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
1559 MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
1560 if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
1561 param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
1563 MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
1564 if (byte_field & 0x8)
1565 param->steering_mode = MLX4_STEERING_MODE_B0;
1567 param->steering_mode = MLX4_STEERING_MODE_A0;
1569 /* steering attributes */
1570 if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
1571 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
1572 MLX4_GET(param->log_mc_entry_sz, outbox,
1573 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1574 MLX4_GET(param->log_mc_table_sz, outbox,
1575 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1577 MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
1578 MLX4_GET(param->log_mc_entry_sz, outbox,
1579 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1580 MLX4_GET(param->log_mc_hash_sz, outbox,
1581 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1582 MLX4_GET(param->log_mc_table_sz, outbox,
1583 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1586 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
1587 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
1588 if (byte_field & 0x20) /* 64-bytes eqe enabled */
1589 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
1590 if (byte_field & 0x40) /* 64-bytes cqe enabled */
1591 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
1593 /* TPT attributes */
1595 MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
1596 MLX4_GET(mw_enable, outbox, INIT_HCA_TPT_MW_OFFSET);
1597 param->mw_enable = (mw_enable & INIT_HCA_TPT_MW_ENABLE) ==
1598 INIT_HCA_TPT_MW_ENABLE;
1599 MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
1600 MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
1601 MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
1603 /* UAR attributes */
1605 MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
1606 MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
1609 mlx4_free_cmd_mailbox(dev, mailbox);
1614 /* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
1615 * and real QP0 are active, so that the paravirtualized QP0 is ready
1617 static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
1619 struct mlx4_priv *priv = mlx4_priv(dev);
1620 /* irrelevant if not infiniband */
1621 if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
1622 priv->mfunc.master.qp0_state[port].qp0_active)
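/* INIT_PORT command wrapper, run on the master on behalf of a slave.
 * Port bring-up is reference counted across functions: for Ethernet
 * ports the command is forwarded to firmware only when the first
 * function enables the port; for InfiniBand ports only the master
 * issues it, and only once both proxy and real QP0 are active
 * (see check_qp0_state() above).
 */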
1627 int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
1628 struct mlx4_vhcr *vhcr,
1629 struct mlx4_cmd_mailbox *inbox,
1630 struct mlx4_cmd_mailbox *outbox,
1631 struct mlx4_cmd_info *cmd)
1633 struct mlx4_priv *priv = mlx4_priv(dev);
1634 int port = vhcr->in_modifier;
1637 if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
1640 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
1641 /* Enable port only if it was previously disabled */
1642 if (!priv->mfunc.master.init_port_ref[port]) {
1643 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
1644 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1648 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
1650 if (slave == mlx4_master_func_num(dev)) {
1651 if (check_qp0_state(dev, slave, port) &&
1652 !priv->mfunc.master.qp0_state[port].port_active) {
1653 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
1654 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1657 priv->mfunc.master.qp0_state[port].port_active = 1;
1658 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
1661 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
1663 ++priv->mfunc.master.init_port_ref[port];
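/* Bring up a physical port.  With old-style firmware
 * (MLX4_FLAG_OLD_PORT_CMDS) the port capabilities (VL cap, port width,
 * IB MTU, GID and PKEY table lengths) are passed in a mailbox; newer
 * firmware takes no parameters and the command is issued wrapped so
 * that slave functions go through mlx4_INIT_PORT_wrapper() above.
 *
 * Illustrative caller sketch only (real callers do more error handling):
 *
 *	err = mlx4_INIT_PORT(dev, port);
 *	if (err)
 *		goto err_out;
 *	...
 *	mlx4_CLOSE_PORT(dev, port);
 */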
1667 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
1669 struct mlx4_cmd_mailbox *mailbox;
1675 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
1676 #define INIT_PORT_IN_SIZE 256
1677 #define INIT_PORT_FLAGS_OFFSET 0x00
1678 #define INIT_PORT_FLAG_SIG (1 << 18)
1679 #define INIT_PORT_FLAG_NG (1 << 17)
1680 #define INIT_PORT_FLAG_G0 (1 << 16)
1681 #define INIT_PORT_VL_SHIFT 4
1682 #define INIT_PORT_PORT_WIDTH_SHIFT 8
1683 #define INIT_PORT_MTU_OFFSET 0x04
1684 #define INIT_PORT_MAX_GID_OFFSET 0x06
1685 #define INIT_PORT_MAX_PKEY_OFFSET 0x0a
1686 #define INIT_PORT_GUID0_OFFSET 0x10
1687 #define INIT_PORT_NODE_GUID_OFFSET 0x18
1688 #define INIT_PORT_SI_GUID_OFFSET 0x20
1690 mailbox = mlx4_alloc_cmd_mailbox(dev);
1691 if (IS_ERR(mailbox))
1692 return PTR_ERR(mailbox);
1693 inbox = mailbox->buf;
1695 memset(inbox, 0, INIT_PORT_IN_SIZE);
1698 flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
1699 flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
1700 MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
1702 field = 128 << dev->caps.ib_mtu_cap[port];
1703 MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
1704 field = dev->caps.gid_table_len[port];
1705 MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
1706 field = dev->caps.pkey_table_len[port];
1707 MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
1709 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
1710 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1712 mlx4_free_cmd_mailbox(dev, mailbox);
1714 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
1715 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
1719 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
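/* CLOSE_PORT command wrapper, the mirror of mlx4_INIT_PORT_wrapper():
 * the per-port reference count is decremented and CLOSE_PORT is only
 * sent to firmware when the last function releases an Ethernet port,
 * or, for InfiniBand, when the master closes a port whose QP0 is no
 * longer active.
 */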
1721 int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
1722 struct mlx4_vhcr *vhcr,
1723 struct mlx4_cmd_mailbox *inbox,
1724 struct mlx4_cmd_mailbox *outbox,
1725 struct mlx4_cmd_info *cmd)
1727 struct mlx4_priv *priv = mlx4_priv(dev);
1728 int port = vhcr->in_modifier;
1731 if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
1735 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
1736 if (priv->mfunc.master.init_port_ref[port] == 1) {
1737 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
1738 1000, MLX4_CMD_NATIVE);
1742 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
1744 /* infiniband port */
1745 if (slave == mlx4_master_func_num(dev)) {
1746 if (!priv->mfunc.master.qp0_state[port].qp0_active &&
1747 priv->mfunc.master.qp0_state[port].port_active) {
1748 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
1749 1000, MLX4_CMD_NATIVE);
1752 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
1753 priv->mfunc.master.qp0_state[port].port_active = 0;
1756 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
1758 --priv->mfunc.master.init_port_ref[port];
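/* Shut down a physical port previously brought up with mlx4_INIT_PORT(). */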
1762 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
1764 return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
1767 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
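/* Stop the HCA.  The panic argument is passed through as the CLOSE_HCA
 * opcode modifier, requesting a "panic close" when non-zero.
 */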
1769 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
1771 return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
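/* Tell firmware how much ICM the driver intends to use.  The requested
 * size is passed as the immediate input parameter; firmware returns the
 * number of required ICM auxiliary pages (in MLX4_ICM_PAGE_SIZE units),
 * which is rounded up to whole system pages below.
 */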
1775 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
1777 int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
1778 MLX4_CMD_SET_ICM_SIZE,
1779 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1784 * Round up number of system pages needed in case
1785 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
1787 *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
1788 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
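/* Post a NOP command.  Used elsewhere in the driver as a lightweight way
 * to exercise the command interface (e.g. when testing interrupt
 * delivery).
 */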
1793 int mlx4_NOP(struct mlx4_dev *dev)
1795 /* Input modifier of 0x1f means "finish as soon as possible." */
1796 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
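/* Read diagnostic counters through the DIAG_RPRT command.  For each of
 * the array_length mailbox offsets in in_offset[], the 32-bit counter at
 * that offset is copied into the matching slot of counter_out[].
 *
 * Illustrative sketch only -- the offsets here are hypothetical; real
 * offsets are defined by the device/firmware documentation:
 *
 *	u32 off[2] = { 0x10, 0x14 };
 *	u32 val[2];
 *	int err = mlx4_query_diag_counters(dev, 2, 0, off, val);
 */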
1799 int mlx4_query_diag_counters(struct mlx4_dev *dev, int array_length,
1800 u8 op_modifier, u32 in_offset[],
1803 struct mlx4_cmd_mailbox *mailbox;
1808 mailbox = mlx4_alloc_cmd_mailbox(dev);
1809 if (IS_ERR(mailbox))
1810 return PTR_ERR(mailbox);
1811 outbox = mailbox->buf;
1813 ret = mlx4_cmd_box(dev, 0, mailbox->dma, 0, op_modifier,
1814 MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A,
1819 for (i = 0; i < array_length; i++) {
1820 if (in_offset[i] > MLX4_MAILBOX_SIZE) {
1825 MLX4_GET(counter_out[i], outbox, in_offset[i]);
1829 mlx4_free_cmd_mailbox(dev, mailbox);
1832 EXPORT_SYMBOL_GPL(mlx4_query_diag_counters);
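/* MOD_STAT_CFG command wrapper for commands issued by slave functions. */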
1834 int mlx4_MOD_STAT_CFG_wrapper(struct mlx4_dev *dev, int slave,
1835 struct mlx4_vhcr *vhcr,
1836 struct mlx4_cmd_mailbox *inbox,
1837 struct mlx4_cmd_mailbox *outbox,
1838 struct mlx4_cmd_info *cmd)
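/* Wake-on-LAN configuration is accessed through MOD_STAT_CFG with setup
 * mode 5: opcode modifier 0x3 reads the current configuration back as
 * the immediate output parameter, opcode modifier 0x1 programs a new one.
 */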
1843 #define MLX4_WOL_SETUP_MODE (5 << 28)
1844 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
1846 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
1848 return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
1849 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
1852 EXPORT_SYMBOL_GPL(mlx4_wol_read);
1854 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
1856 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
1858 return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
1859 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1861 EXPORT_SYMBOL_GPL(mlx4_wol_write);
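/* Work handler for firmware "operation request" events.  Each pending
 * request is fetched with GET_OP_REQ; the only type handled here is an
 * MCG add/remove request, served by attaching or detaching the listed
 * QPs.  The request is then acknowledged by issuing GET_OP_REQ again
 * with opcode modifier 1 and the request token in the input modifier.
 */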
1868 void mlx4_opreq_action(struct work_struct *work)
1870 struct mlx4_priv *priv = container_of(work, struct mlx4_priv, opreq_task);
1871 struct mlx4_dev *dev = &priv->dev;
1872 int num_tasks = atomic_read(&priv->opreq_count);
1873 struct mlx4_cmd_mailbox *mailbox;
1874 struct mlx4_mgm *mgm;
1887 #define GET_OP_REQ_MODIFIER_OFFSET 0x08
1888 #define GET_OP_REQ_TOKEN_OFFSET 0x14
1889 #define GET_OP_REQ_TYPE_OFFSET 0x1a
1890 #define GET_OP_REQ_DATA_OFFSET 0x20
1892 mailbox = mlx4_alloc_cmd_mailbox(dev);
1893 if (IS_ERR(mailbox)) {
1894 mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
1897 outbox = mailbox->buf;
1900 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
1901 MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
1904 mlx4_err(dev, "Failed to retrieve required operation: %d\n", err);
1907 MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
1908 MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
1909 MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
1910 type_m = type >> 12;
1915 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
1916 mlx4_warn(dev, "ADD MCG operation is not supported in "
1917 "DEVICE_MANAGED steerign mode\n");
1921 mgm = (struct mlx4_mgm *) ((u8 *) (outbox) + GET_OP_REQ_DATA_OFFSET);
1922 num_qps = be32_to_cpu(mgm->members_count) & MGM_QPN_MASK;
1923 rem_mcg = ((u8 *) (&mgm->members_count))[0] & 1;
1924 prot = ((u8 *) (&mgm->members_count))[0] >> 6;
1926 for (i = 0; i < num_qps; i++) {
1927 qp.qpn = be32_to_cpu(mgm->qp[i]);
1929 err = mlx4_multicast_detach(dev, &qp, mgm->gid, prot, 0);
1931 err = mlx4_multicast_attach(dev, &qp, mgm->gid, mgm->gid[5], 0, prot, NULL);
1937 mlx4_warn(dev, "Bad type for required operation\n");
1941 err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16), 1,
1942 MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
1945 mlx4_err(dev, "Failed to acknowledge required operation: %d\n", err);
1948 memset(outbox, 0, 0xffc);
1949 num_tasks = atomic_dec_return(&priv->opreq_count);
1953 mlx4_free_cmd_mailbox(dev, mailbox);