2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/mlx4/cmd.h>
36 #include <linux/module.h>
42 MLX4_COMMAND_INTERFACE_MIN_REV = 2,
43 MLX4_COMMAND_INTERFACE_MAX_REV = 3,
44 MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS = 3,
47 extern void __buggy_use_of_MLX4_GET(void);
48 extern void __buggy_use_of_MLX4_PUT(void);
50 static bool enable_qos;
51 module_param(enable_qos, bool, 0444);
52 MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
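/*
 * MLX4_GET and MLX4_PUT below copy one field between a firmware command
 * mailbox and a CPU-order variable at a given byte offset, converting
 * to/from the big-endian layout the device expects.  The access width is
 * taken from sizeof() the variable; an unsupported size leaves a call to
 * the never-defined __buggy_use_of_MLX4_GET/PUT in the object file, so
 * the mistake shows up as a link-time error.
 */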
54 #define MLX4_GET(dest, source, offset) \
56 void *__p = (char *) (source) + (offset); \
57 switch (sizeof (dest)) { \
58 case 1: (dest) = *(u8 *) __p; break; \
59 case 2: (dest) = be16_to_cpup(__p); break; \
60 case 4: (dest) = be32_to_cpup(__p); break; \
61 case 8: (dest) = be64_to_cpup(__p); break; \
62 default: __buggy_use_of_MLX4_GET(); \
66 #define MLX4_PUT(dest, source, offset) \
68 void *__d = ((char *) (dest) + (offset)); \
69 switch (sizeof(source)) { \
70 case 1: *(u8 *) __d = (source); break; \
71 case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
72 case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
73 case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
74 default: __buggy_use_of_MLX4_PUT(); \
78 static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
80 static const char *fname[] = {
81 [ 0] = "RC transport",
82 [ 1] = "UC transport",
83 [ 2] = "UD transport",
84 [ 3] = "XRC transport",
85 [ 4] = "reliable multicast",
86 [ 5] = "FCoIB support",
88 [ 7] = "IPoIB checksum offload",
89 [ 8] = "P_Key violation counter",
90 [ 9] = "Q_Key violation counter",
93 [15] = "Big LSO headers",
96 [18] = "Atomic ops support",
97 [19] = "Raw multicast support",
98 [20] = "Address vector port checking support",
99 [21] = "UD multicast support",
100 [24] = "Demand paging support",
101 [25] = "Router support",
102 [30] = "IBoE support",
103 [32] = "Unicast loopback support",
104 [34] = "FCS header control",
105 [38] = "Wake On LAN support",
106 [40] = "UDP RSS support",
107 [41] = "Unicast VEP steering support",
108 [42] = "Multicast VEP steering support",
109 [48] = "Counters support",
110 [59] = "Port management change event support",
111 [60] = "eSwitch support",
112 [61] = "64 byte EQE support",
113 [62] = "64 byte CQE support",
117 mlx4_dbg(dev, "DEV_CAP flags:\n");
118 for (i = 0; i < ARRAY_SIZE(fname); ++i)
119 if (fname[i] && (flags & (1LL << i)))
120 mlx4_dbg(dev, " %s\n", fname[i]);
123 static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
125 static const char * const fname[] = {
127 [1] = "RSS Toeplitz Hash Function support",
128 [2] = "RSS XOR Hash Function support",
129 [3] = "Device manage flow steering support"
133 for (i = 0; i < ARRAY_SIZE(fname); ++i)
134 if (fname[i] && (flags & (1LL << i)))
135 mlx4_dbg(dev, " %s\n", fname[i]);
138 int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
140 struct mlx4_cmd_mailbox *mailbox;
144 #define MOD_STAT_CFG_IN_SIZE 0x100
146 #define MOD_STAT_CFG_PG_SZ_M_OFFSET 0x002
147 #define MOD_STAT_CFG_PG_SZ_OFFSET 0x003
149 mailbox = mlx4_alloc_cmd_mailbox(dev);
151 return PTR_ERR(mailbox);
152 inbox = mailbox->buf;
154 memset(inbox, 0, MOD_STAT_CFG_IN_SIZE);
156 MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
157 MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
159 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
160 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
162 mlx4_free_cmd_mailbox(dev, mailbox);
166 int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
167 struct mlx4_vhcr *vhcr,
168 struct mlx4_cmd_mailbox *inbox,
169 struct mlx4_cmd_mailbox *outbox,
170 struct mlx4_cmd_info *cmd)
172 struct mlx4_priv *priv = mlx4_priv(dev);
177 #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
178 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
179 #define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
180 #define QUERY_FUNC_CAP_FMR_OFFSET 0x8
181 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10
182 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14
183 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18
184 #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x20
185 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x24
186 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x28
187 #define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
188 #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
190 #define QUERY_FUNC_CAP_FMR_FLAG 0x80
191 #define QUERY_FUNC_CAP_FLAG_RDMA 0x40
192 #define QUERY_FUNC_CAP_FLAG_ETH 0x80
194 /* when opcode modifier = 1 */
195 #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
196 #define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET 0x8
197 #define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc
199 #define QUERY_FUNC_CAP_QP0_TUNNEL 0x10
200 #define QUERY_FUNC_CAP_QP0_PROXY 0x14
201 #define QUERY_FUNC_CAP_QP1_TUNNEL 0x18
202 #define QUERY_FUNC_CAP_QP1_PROXY 0x1c
204 #define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC 0x40
205 #define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN 0x80
207 #define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80
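/*
 * Two query flavours, selected by the opcode modifier: op_modifier == 0
 * returns general per-function capabilities (flags, resource quotas, EQ
 * limits), while op_modifier == 1 returns per-port properties for the
 * port in vhcr->in_modifier (forced MAC/VLAN bits, proxy/tunnel QPNs).
 */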
209 if (vhcr->op_modifier == 1) {
211 /* ensure force vlan and force mac bits are not set */
212 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
213 /* ensure that phy_wqe_gid bit is not set */
214 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
216 field = vhcr->in_modifier; /* phys-port = logical-port */
217 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
219 /* size is now the QP number */
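/* Each slave presumably owns an 8-QP block starting at
 * base_tunnel_sqpn + 8 * slave (and likewise for the proxy range);
 * the 1-based port number in 'field' selects the entry within it. */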
220 size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1;
221 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);
224 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);
226 size = dev->phys_caps.base_proxy_sqpn + 8 * slave + field - 1;
227 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_PROXY);
230 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
232 } else if (vhcr->op_modifier == 0) {
233 /* enable rdma and ethernet interfaces */
234 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA);
235 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
237 field = dev->caps.num_ports;
238 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
240 size = dev->caps.function_caps; /* set PF behaviours */
241 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
243 field = 0; /* protected FMR support not available as yet */
244 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);
246 size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
247 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
249 size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
250 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
252 size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
253 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
255 size = dev->caps.num_eqs;
256 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
258 size = dev->caps.reserved_eqs;
259 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
261 size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
262 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
264 size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
265 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
267 size = dev->caps.num_mgms + dev->caps.num_amgms;
268 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
276 int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
277 struct mlx4_func_cap *func_cap)
279 struct mlx4_cmd_mailbox *mailbox;
281 u8 field, op_modifier;
285 op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
287 mailbox = mlx4_alloc_cmd_mailbox(dev);
289 return PTR_ERR(mailbox);
291 err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier,
292 MLX4_CMD_QUERY_FUNC_CAP,
293 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
297 outbox = mailbox->buf;
300 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
301 if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
302 mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
303 err = -EPROTONOSUPPORT;
306 func_cap->flags = field;
308 MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
309 func_cap->num_ports = field;
311 MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
312 func_cap->pf_context_behaviour = size;
314 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
315 func_cap->qp_quota = size & 0xFFFFFF;
317 MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
318 func_cap->srq_quota = size & 0xFFFFFF;
320 MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
321 func_cap->cq_quota = size & 0xFFFFFF;
323 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
324 func_cap->max_eq = size & 0xFFFFFF;
326 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
327 func_cap->reserved_eq = size & 0xFFFFFF;
329 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
330 func_cap->mpt_quota = size & 0xFFFFFF;
332 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
333 func_cap->mtt_quota = size & 0xFFFFFF;
335 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
336 func_cap->mcg_quota = size & 0xFFFFFF;
340 /* logical port query */
341 if (gen_or_port > dev->caps.num_ports) {
346 if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
347 MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
348 if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
349 mlx4_err(dev, "VLAN is enforced on this port\n");
350 err = -EPROTONOSUPPORT;
354 if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
355 mlx4_err(dev, "Force MAC is enabled on this port\n");

356 err = -EPROTONOSUPPORT;
359 } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
360 MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
361 if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
362 mlx4_err(dev, "phy_wqe_gid is enforced on this IB port\n");
364 err = -EPROTONOSUPPORT;
369 MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
370 func_cap->physical_port = field;
371 if (func_cap->physical_port != gen_or_port) {
376 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
377 func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;
379 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
380 func_cap->qp0_proxy_qpn = size & 0xFFFFFF;
382 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
383 func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;
385 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
386 func_cap->qp1_proxy_qpn = size & 0xFFFFFF;
388 /* All other resources are allocated by the master, but we still report
389 * 'num' and 'reserved' capabilities as follows:
390 * - num remains the maximum resource index
391 * - 'num - reserved' is the total available objects of a resource, but
392 * resource indices may be less than 'reserved'
393 * TODO: set per-resource quotas */
396 mlx4_free_cmd_mailbox(dev, mailbox);
401 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
403 struct mlx4_cmd_mailbox *mailbox;
406 u32 field32, flags, ext_flags;
412 #define QUERY_DEV_CAP_OUT_SIZE 0x100
413 #define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10
414 #define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11
415 #define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12
416 #define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13
417 #define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14
418 #define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15
419 #define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16
420 #define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17
421 #define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19
422 #define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a
423 #define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b
424 #define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d
425 #define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e
426 #define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f
427 #define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20
428 #define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21
429 #define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22
430 #define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23
431 #define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27
432 #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29
433 #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b
434 #define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d
435 #define QUERY_DEV_CAP_RSS_OFFSET 0x2e
436 #define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f
437 #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33
438 #define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
439 #define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36
440 #define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37
441 #define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38
442 #define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
443 #define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
444 #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e
445 #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
446 #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
447 #define QUERY_DEV_CAP_SYNC_QP_OFFSET 0x42
448 #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
449 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
450 #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
451 #define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b
452 #define QUERY_DEV_CAP_BF_OFFSET 0x4c
453 #define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d
454 #define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e
455 #define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f
456 #define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51
457 #define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52
458 #define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55
459 #define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56
460 #define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61
461 #define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62
462 #define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
463 #define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64
464 #define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65
465 #define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66
466 #define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
467 #define QUERY_DEV_CAP_MAX_BASIC_COUNTERS_OFFSET 0x68
468 #define QUERY_DEV_CAP_MAX_EXTENDED_COUNTERS_OFFSET 0x6c
469 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
470 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
471 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
472 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
473 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
474 #define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86
475 #define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88
476 #define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a
477 #define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c
478 #define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e
479 #define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90
480 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
481 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
482 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
483 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
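/* All offsets above are byte offsets into the QUERY_DEV_CAP output
 * mailbox (QUERY_DEV_CAP_OUT_SIZE bytes); fields are read with
 * MLX4_GET, which handles the big-endian conversion. */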
486 mailbox = mlx4_alloc_cmd_mailbox(dev);
488 return PTR_ERR(mailbox);
489 outbox = mailbox->buf;
491 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
492 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
496 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
497 dev_cap->reserved_qps = 1 << (field & 0xf);
498 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
499 dev_cap->max_qps = 1 << (field & 0x1f);
500 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
501 dev_cap->reserved_srqs = 1 << (field >> 4);
502 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
503 dev_cap->max_srqs = 1 << (field & 0x1f);
504 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
505 dev_cap->max_cq_sz = 1 << field;
506 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
507 dev_cap->reserved_cqs = 1 << (field & 0xf);
508 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
509 dev_cap->max_cqs = 1 << (field & 0x1f);
510 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
511 dev_cap->max_mpts = 1 << (field & 0x3f);
512 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
513 dev_cap->reserved_eqs = field & 0xf;
514 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
515 dev_cap->max_eqs = 1 << (field & 0xf);
516 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
517 dev_cap->reserved_mtts = 1 << (field >> 4);
518 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
519 dev_cap->max_mrw_sz = 1 << field;
520 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
521 dev_cap->reserved_mrws = 1 << (field & 0xf);
522 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
523 dev_cap->max_mtt_seg = 1 << (field & 0x3f);
524 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
525 dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
526 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
527 dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
528 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
531 dev_cap->max_gso_sz = 0;
533 dev_cap->max_gso_sz = 1 << field;
535 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
537 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
539 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
542 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
543 dev_cap->max_rss_tbl_sz = 1 << field;
545 dev_cap->max_rss_tbl_sz = 0;
546 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
547 dev_cap->max_rdma_global = 1 << (field & 0x3f);
548 MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
549 dev_cap->local_ca_ack_delay = field & 0x1f;
550 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
551 dev_cap->num_ports = field & 0xf;
552 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
553 dev_cap->max_msg_sz = 1 << (field & 0x1f);
554 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
556 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
557 dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
558 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
559 dev_cap->fs_max_num_qp_per_entry = field;
560 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
561 dev_cap->stat_rate_support = stat_rate;
562 MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
563 dev_cap->timestamp_support = field & 0x80;
564 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
565 MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
566 dev_cap->flags = flags | (u64)ext_flags << 32;
567 MLX4_GET(field, outbox, QUERY_DEV_CAP_SYNC_QP_OFFSET);
568 dev_cap->sync_qp = field & 0x10;
569 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
570 dev_cap->reserved_uars = field >> 4;
571 MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
572 dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
573 MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
574 dev_cap->min_page_sz = 1 << field;
576 MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
578 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
579 dev_cap->bf_reg_size = 1 << (field & 0x1f);
580 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
581 if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
583 dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
584 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
585 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
587 dev_cap->bf_reg_size = 0;
588 mlx4_dbg(dev, "BlueFlame not available\n");
591 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
592 dev_cap->max_sq_sg = field;
593 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
594 dev_cap->max_sq_desc_sz = size;
596 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
597 dev_cap->max_qp_per_mcg = 1 << field;
598 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
599 dev_cap->reserved_mgms = field & 0xf;
600 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
601 dev_cap->max_mcgs = 1 << field;
602 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
603 dev_cap->reserved_pds = field >> 4;
604 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
605 dev_cap->max_pds = 1 << (field & 0x3f);
606 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
607 dev_cap->reserved_xrcds = field >> 4;
608 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
609 dev_cap->max_xrcds = 1 << (field & 0x1f);
611 MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
612 dev_cap->rdmarc_entry_sz = size;
613 MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
614 dev_cap->qpc_entry_sz = size;
615 MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
616 dev_cap->aux_entry_sz = size;
617 MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
618 dev_cap->altc_entry_sz = size;
619 MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
620 dev_cap->eqc_entry_sz = size;
621 MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
622 dev_cap->cqc_entry_sz = size;
623 MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
624 dev_cap->srq_entry_sz = size;
625 MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
626 dev_cap->cmpt_entry_sz = size;
627 MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
628 dev_cap->mtt_entry_sz = size;
629 MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
630 dev_cap->dmpt_entry_sz = size;
632 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
633 dev_cap->max_srq_sz = 1 << field;
634 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
635 dev_cap->max_qp_sz = 1 << field;
636 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
637 dev_cap->resize_srq = field & 1;
638 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
639 dev_cap->max_rq_sg = field;
640 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
641 dev_cap->max_rq_desc_sz = size;
643 MLX4_GET(dev_cap->bmme_flags, outbox,
644 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
645 MLX4_GET(dev_cap->reserved_lkey, outbox,
646 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
647 MLX4_GET(dev_cap->max_icm_sz, outbox,
648 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
649 if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
650 MLX4_GET(dev_cap->max_basic_counters, outbox,
651 QUERY_DEV_CAP_MAX_BASIC_COUNTERS_OFFSET);
652 /* FW reports 256, but the real value is 255 */
653 dev_cap->max_basic_counters = min_t(u32, dev_cap->max_basic_counters, 255);
654 if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS_EXT)
655 MLX4_GET(dev_cap->max_extended_counters, outbox,
656 QUERY_DEV_CAP_MAX_EXTENDED_COUNTERS_OFFSET);
658 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
659 for (i = 1; i <= dev_cap->num_ports; ++i) {
660 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
661 dev_cap->max_vl[i] = field >> 4;
662 MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
663 dev_cap->ib_mtu[i] = field >> 4;
664 dev_cap->max_port_width[i] = field & 0xf;
665 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
666 dev_cap->max_gids[i] = 1 << (field & 0xf);
667 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
668 dev_cap->max_pkeys[i] = 1 << (field & 0xf);
671 #define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00
672 #define QUERY_PORT_MTU_OFFSET 0x01
673 #define QUERY_PORT_ETH_MTU_OFFSET 0x02
674 #define QUERY_PORT_WIDTH_OFFSET 0x06
675 #define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07
676 #define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
677 #define QUERY_PORT_MAX_VL_OFFSET 0x0b
678 #define QUERY_PORT_MAC_OFFSET 0x10
679 #define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18
680 #define QUERY_PORT_WAVELENGTH_OFFSET 0x1c
681 #define QUERY_PORT_TRANS_CODE_OFFSET 0x20
683 for (i = 1; i <= dev_cap->num_ports; ++i) {
684 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
685 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
689 MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
690 dev_cap->supported_port_types[i] = field & 3;
691 dev_cap->suggested_type[i] = (field >> 3) & 1;
692 dev_cap->default_sense[i] = (field >> 4) & 1;
693 MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
694 dev_cap->ib_mtu[i] = field & 0xf;
695 MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
696 dev_cap->max_port_width[i] = field & 0xf;
697 MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
698 dev_cap->max_gids[i] = 1 << (field >> 4);
699 dev_cap->max_pkeys[i] = 1 << (field & 0xf);
700 MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
701 dev_cap->max_vl[i] = field & 0xf;
702 MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
703 dev_cap->log_max_macs[i] = field & 0xf;
704 dev_cap->log_max_vlans[i] = field >> 4;
705 MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
706 MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
707 MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
708 dev_cap->trans_type[i] = field32 >> 24;
709 dev_cap->vendor_oui[i] = field32 & 0xffffff;
710 MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
711 MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
715 mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
716 dev_cap->bmme_flags, dev_cap->reserved_lkey);
719 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
720 * we can't use any EQs whose doorbell falls on that page,
721 * even if the EQ itself isn't reserved.
723 dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
724 dev_cap->reserved_eqs);
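/* Example: with 8 reserved UARs, the first 8 * 4 = 32 EQ doorbells sit
 * on reserved UAR pages, so at least 32 EQs must be treated as reserved. */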
726 mlx4_dbg(dev, "Max ICM size %lld MB\n",
727 (unsigned long long) dev_cap->max_icm_sz >> 20);
728 mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
729 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
730 mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
731 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
732 mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
733 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
734 mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
735 dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
736 mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
737 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
738 mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
739 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
740 mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
741 dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
742 mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
743 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
744 mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
745 dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
746 dev_cap->max_port_width[1]);
747 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
748 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
749 mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
750 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
751 mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
752 mlx4_dbg(dev, "Max basic counters: %d\n", dev_cap->max_basic_counters);
753 mlx4_dbg(dev, "Max extended counters: %d\n", dev_cap->max_extended_counters);
754 mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
756 dump_dev_cap_flags(dev, dev_cap->flags);
757 dump_dev_cap_flags2(dev, dev_cap->flags2);
760 mlx4_free_cmd_mailbox(dev, mailbox);
764 int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
765 struct mlx4_vhcr *vhcr,
766 struct mlx4_cmd_mailbox *inbox,
767 struct mlx4_cmd_mailbox *outbox,
768 struct mlx4_cmd_info *cmd)
774 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
775 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
779 /* always report the port management change event capability to slaves */
780 MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
781 flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
782 MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
784 /* For guests, report BlueFlame disabled */
785 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
787 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
792 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
793 struct mlx4_vhcr *vhcr,
794 struct mlx4_cmd_mailbox *inbox,
795 struct mlx4_cmd_mailbox *outbox,
796 struct mlx4_cmd_info *cmd)
798 struct mlx4_priv *priv = mlx4_priv(dev);
804 #define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0
805 #define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c
806 #define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e
808 err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
809 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
812 if (!err && dev->caps.function != slave) {
813 /* set slave default_mac address */
814 MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
815 def_mac += slave << 8;
816 /* if a MAC is configured in the DB, use it */
817 if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac)
818 def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
819 MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
821 /* get port type - currently only eth is enabled */
822 MLX4_GET(port_type, outbox->buf,
823 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
825 /* No link sensing allowed */
826 port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
827 /* set port type to currently operating port type */
828 port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);
830 MLX4_PUT(outbox->buf, port_type,
831 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
833 if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
834 short_field = mlx4_get_slave_num_gids(dev, slave);
836 short_field = 1; /* slave max gids */
837 MLX4_PUT(outbox->buf, short_field,
838 QUERY_PORT_CUR_MAX_GID_OFFSET);
840 short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
841 MLX4_PUT(outbox->buf, short_field,
842 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
848 int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
849 int *gid_tbl_len, int *pkey_tbl_len)
851 struct mlx4_cmd_mailbox *mailbox;
856 mailbox = mlx4_alloc_cmd_mailbox(dev);
858 return PTR_ERR(mailbox);
860 err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
861 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
866 outbox = mailbox->buf;
868 MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
869 *gid_tbl_len = field;
871 MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
872 *pkey_tbl_len = field;
875 mlx4_free_cmd_mailbox(dev, mailbox);
878 EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
880 int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
882 struct mlx4_cmd_mailbox *mailbox;
883 struct mlx4_icm_iter iter;
891 mailbox = mlx4_alloc_cmd_mailbox(dev);
893 return PTR_ERR(mailbox);
894 memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
895 pages = mailbox->buf;
897 for (mlx4_icm_first(icm, &iter);
898 !mlx4_icm_last(&iter);
899 mlx4_icm_next(&iter)) {
901 * We have to pass pages that are aligned to their
902 * size, so find the least significant 1 in the
903 * address or size and use that as our log2 size.
905 lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
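/* e.g. a chunk at 0x3000 of size 0x2000: ffs(0x3000 | 0x2000) - 1 = 12,
 * so it is mapped as two naturally aligned 4 KB (2^12) pages. */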
906 if (lg < MLX4_ICM_PAGE_SHIFT) {
907 mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
909 (unsigned long long) mlx4_icm_addr(&iter),
910 mlx4_icm_size(&iter));
915 for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
917 pages[nent * 2] = cpu_to_be64(virt);
921 pages[nent * 2 + 1] =
922 cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
923 (lg - MLX4_ICM_PAGE_SHIFT));
924 ts += 1 << (lg - 10);
927 if (++nent == MLX4_MAILBOX_SIZE / 16) {
928 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
929 MLX4_CMD_TIME_CLASS_B,
939 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
940 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
945 case MLX4_CMD_MAP_FA:
946 mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
948 case MLX4_CMD_MAP_ICM_AUX:
949 mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
951 case MLX4_CMD_MAP_ICM:
952 mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
953 tc, ts, (unsigned long long) virt - (ts << 10));
958 mlx4_free_cmd_mailbox(dev, mailbox);
962 int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
964 return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
967 int mlx4_UNMAP_FA(struct mlx4_dev *dev)
969 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
970 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
974 int mlx4_RUN_FW(struct mlx4_dev *dev)
976 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
977 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
980 int mlx4_QUERY_FW(struct mlx4_dev *dev)
982 struct mlx4_fw *fw = &mlx4_priv(dev)->fw;
983 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
984 struct mlx4_cmd_mailbox *mailbox;
991 #define QUERY_FW_OUT_SIZE 0x100
992 #define QUERY_FW_VER_OFFSET 0x00
993 #define QUERY_FW_PPF_ID 0x09
994 #define QUERY_FW_CMD_IF_REV_OFFSET 0x0a
995 #define QUERY_FW_MAX_CMD_OFFSET 0x0f
996 #define QUERY_FW_ERR_START_OFFSET 0x30
997 #define QUERY_FW_ERR_SIZE_OFFSET 0x38
998 #define QUERY_FW_ERR_BAR_OFFSET 0x3c
1000 #define QUERY_FW_SIZE_OFFSET 0x00
1001 #define QUERY_FW_CLR_INT_BASE_OFFSET 0x20
1002 #define QUERY_FW_CLR_INT_BAR_OFFSET 0x28
1004 #define QUERY_FW_COMM_BASE_OFFSET 0x40
1005 #define QUERY_FW_COMM_BAR_OFFSET 0x48
1007 #define QUERY_FW_CLOCK_OFFSET 0x50
1008 #define QUERY_FW_CLOCK_BAR 0x58
1010 mailbox = mlx4_alloc_cmd_mailbox(dev);
1011 if (IS_ERR(mailbox))
1012 return PTR_ERR(mailbox);
1013 outbox = mailbox->buf;
1015 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
1016 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1020 MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
1022 * FW subminor version is at more significant bits than minor
1023 * version, so swap here.
1025 dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
1026 ((fw_ver & 0xffff0000ull) >> 16) |
1027 ((fw_ver & 0x0000ffffull) << 16);
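/* e.g. raw fw_ver 0x0002_0100_0009 (major 2, subminor 0x100, minor 9)
 * becomes 0x0002_0009_0100 and is later printed as "2.9.256". */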
1029 MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
1030 dev->caps.function = lg;
1032 if (mlx4_is_slave(dev))
1036 MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
1037 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
1038 cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
1039 mlx4_err(dev, "Installed FW has unsupported command interface revision %d.\n",
1042 mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
1043 (int) (dev->caps.fw_ver >> 32),
1044 (int) (dev->caps.fw_ver >> 16) & 0xffff,
1045 (int) dev->caps.fw_ver & 0xffff);
1046 mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
1047 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
1052 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
1053 dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;
1055 MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
1056 cmd->max_cmds = 1 << lg;
1058 mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
1059 (int) (dev->caps.fw_ver >> 32),
1060 (int) (dev->caps.fw_ver >> 16) & 0xffff,
1061 (int) dev->caps.fw_ver & 0xffff,
1062 cmd_if_rev, cmd->max_cmds);
1064 MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
1065 MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
1066 MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET);
1067 fw->catas_bar = (fw->catas_bar >> 6) * 2;
1069 mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
1070 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);
1072 MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
1073 MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
1074 MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
1075 fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
1077 MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
1078 MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET);
1079 fw->comm_bar = (fw->comm_bar >> 6) * 2;
1080 mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
1081 fw->comm_bar, fw->comm_base);
1082 mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
1084 MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
1085 MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR);
1086 fw->clock_bar = (fw->clock_bar >> 6) * 2;
1087 mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
1088 fw->clock_bar, fw->clock_offset);
1091 * Round up number of system pages needed in case
1092 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
1095 ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
1096 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
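/* e.g. with 64 KB system pages and 4 KB ICM pages, 33 ICM pages are
 * aligned up to 48 and become 48 >> 4 = 3 system pages. */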
1098 mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
1099 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);
1102 mlx4_free_cmd_mailbox(dev, mailbox);
1106 int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
1107 struct mlx4_vhcr *vhcr,
1108 struct mlx4_cmd_mailbox *inbox,
1109 struct mlx4_cmd_mailbox *outbox,
1110 struct mlx4_cmd_info *cmd)
1115 outbuf = outbox->buf;
1116 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
1117 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1121 /* for slaves, set the PCI PPF ID to invalid and zero out everything
1122 * else except the FW version */
1123 outbuf[0] = outbuf[1] = 0;
1124 memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
1125 outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;
1130 static void get_board_id(void *vsd, char *board_id)
1134 #define VSD_OFFSET_SIG1 0x00
1135 #define VSD_OFFSET_SIG2 0xde
1136 #define VSD_OFFSET_MLX_BOARD_ID 0xd0
1137 #define VSD_OFFSET_TS_BOARD_ID 0x20
1139 #define VSD_SIGNATURE_TOPSPIN 0x5ad
1141 memset(board_id, 0, MLX4_BOARD_ID_LEN);
1143 if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
1144 be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
1145 strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
1148 * The board ID is a string but the firmware byte
1149 * swaps each 4-byte word before passing it back to
1150 * us. Therefore we need to swab it before printing.
1152 for (i = 0; i < 4; ++i)
1153 ((u32 *) board_id)[i] =
1154 swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
1158 int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
1160 struct mlx4_cmd_mailbox *mailbox;
1164 #define QUERY_ADAPTER_OUT_SIZE 0x100
1165 #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
1166 #define QUERY_ADAPTER_VSD_OFFSET 0x20
1168 mailbox = mlx4_alloc_cmd_mailbox(dev);
1169 if (IS_ERR(mailbox))
1170 return PTR_ERR(mailbox);
1171 outbox = mailbox->buf;
1173 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
1174 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1178 MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
1180 get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
1184 mlx4_free_cmd_mailbox(dev, mailbox);
1188 int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1190 struct mlx4_cmd_mailbox *mailbox;
1194 #define INIT_HCA_IN_SIZE 0x200
1195 #define INIT_HCA_VERSION_OFFSET 0x000
1196 #define INIT_HCA_VERSION 2
1197 #define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
1198 #define INIT_HCA_FLAGS_OFFSET 0x014
1199 #define INIT_HCA_QPC_OFFSET 0x020
1200 #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
1201 #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
1202 #define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28)
1203 #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f)
1204 #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30)
1205 #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37)
1206 #define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38)
1207 #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40)
1208 #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
1209 #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
1210 #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67)
1211 #define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70)
1212 #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
1213 #define INIT_HCA_MCAST_OFFSET 0x0c0
1214 #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
1215 #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
1216 #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
1217 #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
1218 #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
1219 #define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
1220 #define INIT_HCA_FS_PARAM_OFFSET 0x1d0
1221 #define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
1222 #define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
1223 #define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
1224 #define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
1225 #define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
1226 #define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25)
1227 #define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
1228 #define INIT_HCA_TPT_OFFSET 0x0f0
1229 #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
1230 #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
1231 #define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10)
1232 #define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18)
1233 #define INIT_HCA_UAR_OFFSET 0x120
1234 #define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a)
1235 #define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b)
1237 mailbox = mlx4_alloc_cmd_mailbox(dev);
1238 if (IS_ERR(mailbox))
1239 return PTR_ERR(mailbox);
1240 inbox = mailbox->buf;
1242 memset(inbox, 0, INIT_HCA_IN_SIZE);
1244 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
1246 *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
1247 ((ilog2(CACHE_LINE_SIZE) - 4) << 5) | (1 << 4);
1249 #if defined(__LITTLE_ENDIAN)
1250 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
1251 #elif defined(__BIG_ENDIAN)
1252 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
1254 #error Host endianness not defined
1256 /* Check port for UD address vector: */
1257 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
1259 /* Enable IPoIB checksumming if we can: */
1260 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
1261 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
1263 /* Enable QoS support if module parameter set */
1265 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
1267 /* Enable fast drop performance optimization */
1268 if (dev->caps.fast_drop)
1269 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 7);
1271 /* enable counters */
1272 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
1273 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
1275 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
1276 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
1277 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
1278 dev->caps.eqe_size = 64;
1279 dev->caps.eqe_factor = 1;
1281 dev->caps.eqe_size = 32;
1282 dev->caps.eqe_factor = 0;
1285 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
1286 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
1287 dev->caps.cqe_size = 64;
1288 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
1290 dev->caps.cqe_size = 32;
1293 /* QPC/EEC/CQC/EQC/RDMARC attributes */
1295 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
1296 MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET);
1297 MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET);
1298 MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
1299 MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET);
1300 MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET);
1301 MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET);
1302 MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET);
1303 MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET);
1304 MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET);
1305 MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
1306 MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
1308 /* steering attributes */
1309 if (dev->caps.steering_mode ==
1310 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1311 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
1313 INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);
1315 MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
1316 MLX4_PUT(inbox, param->log_mc_entry_sz,
1317 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1318 MLX4_PUT(inbox, param->log_mc_table_sz,
1319 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1320 /* Enable Ethernet flow steering
1321 * with udp unicast and tcp unicast
1323 MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
1324 INIT_HCA_FS_ETH_BITS_OFFSET);
1325 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1326 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
1327 /* Enable IPoIB flow steering
1328 * with udp unicast and tcp unicast
1330 MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
1331 INIT_HCA_FS_IB_BITS_OFFSET);
1332 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1333 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
1335 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
1336 MLX4_PUT(inbox, param->log_mc_entry_sz,
1337 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1338 MLX4_PUT(inbox, param->log_mc_hash_sz,
1339 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1340 MLX4_PUT(inbox, param->log_mc_table_sz,
1341 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1342 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0) {
1343 MLX4_PUT(inbox, (u8) (1 << 3),
1344 INIT_HCA_UC_STEERING_OFFSET);
1348 /* TPT attributes */
1350 MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET);
1351 MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
1352 MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
1353 MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET);
1355 /* UAR attributes */
1357 MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
1358 MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
1360 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
1364 mlx4_err(dev, "INIT_HCA returns %d\n", err);
1366 mlx4_free_cmd_mailbox(dev, mailbox);
1370 int mlx4_QUERY_HCA(struct mlx4_dev *dev,
1371 struct mlx4_init_hca_param *param)
1373 struct mlx4_cmd_mailbox *mailbox;
1379 #define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
1380 #define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c
1382 mailbox = mlx4_alloc_cmd_mailbox(dev);
1383 if (IS_ERR(mailbox))
1384 return PTR_ERR(mailbox);
1385 outbox = mailbox->buf;
1387 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
1389 MLX4_CMD_TIME_CLASS_B,
1390 !mlx4_is_slave(dev));
1394 MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
1395 MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
1397 /* QPC/EEC/CQC/EQC/RDMARC attributes */
1399 MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
1400 MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
1401 MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
1402 MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
1403 MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
1404 MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
1405 MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
1406 MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
1407 MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
1408 MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
1409 MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
1410 MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
1412 MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
1413 if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
1414 param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
1416 MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
1417 if (byte_field & 0x8) {
1418 param->steering_mode = MLX4_STEERING_MODE_B0;
1421 param->steering_mode = MLX4_STEERING_MODE_A0;
1424 if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
1425 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
1426 MLX4_GET(param->log_mc_entry_sz, outbox,
1427 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1428 MLX4_GET(param->log_mc_table_sz, outbox,
1429 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1431 MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
1432 MLX4_GET(param->log_mc_entry_sz, outbox,
1433 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1434 MLX4_GET(param->log_mc_hash_sz, outbox,
1435 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1436 MLX4_GET(param->log_mc_table_sz, outbox,
1437 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1440 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
1441 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
1442 if (byte_field & 0x20) /* 64-bytes eqe enabled */
1443 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
1444 if (byte_field & 0x40) /* 64-bytes cqe enabled */
1445 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
1447 /* TPT attributes */
1449 MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
1450 MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
1451 MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
1452 MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
1454 /* UAR attributes */
1456 MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
1457 MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
1460 mlx4_free_cmd_mailbox(dev, mailbox);
1465 /* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
1466 * and real QP0 are active, so that the paravirtualized QP0 is ready
1468 static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
1470 struct mlx4_priv *priv = mlx4_priv(dev);
1471 /* irrelevant if not infiniband */
1472 if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
1473 priv->mfunc.master.qp0_state[port].qp0_active)
1478 int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
1479 struct mlx4_vhcr *vhcr,
1480 struct mlx4_cmd_mailbox *inbox,
1481 struct mlx4_cmd_mailbox *outbox,
1482 struct mlx4_cmd_info *cmd)
1484 struct mlx4_priv *priv = mlx4_priv(dev);
1485 int port = vhcr->in_modifier;
1488 if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
1491 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
1492 /* Enable port only if it was previously disabled */
1493 if (!priv->mfunc.master.init_port_ref[port]) {
1494 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
1495 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1499 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
1501 if (slave == mlx4_master_func_num(dev)) {
1502 if (check_qp0_state(dev, slave, port) &&
1503 !priv->mfunc.master.qp0_state[port].port_active) {
1504 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
1505 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1508 priv->mfunc.master.qp0_state[port].port_active = 1;
1509 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
1512 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
1514 ++priv->mfunc.master.init_port_ref[port];
1518 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
1520 struct mlx4_cmd_mailbox *mailbox;
1526 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
1527 #define INIT_PORT_IN_SIZE 256
1528 #define INIT_PORT_FLAGS_OFFSET 0x00
1529 #define INIT_PORT_FLAG_SIG (1 << 18)
1530 #define INIT_PORT_FLAG_NG (1 << 17)
1531 #define INIT_PORT_FLAG_G0 (1 << 16)
1532 #define INIT_PORT_VL_SHIFT 4
1533 #define INIT_PORT_PORT_WIDTH_SHIFT 8
1534 #define INIT_PORT_MTU_OFFSET 0x04
1535 #define INIT_PORT_MAX_GID_OFFSET 0x06
1536 #define INIT_PORT_MAX_PKEY_OFFSET 0x0a
1537 #define INIT_PORT_GUID0_OFFSET 0x10
1538 #define INIT_PORT_NODE_GUID_OFFSET 0x18
1539 #define INIT_PORT_SI_GUID_OFFSET 0x20
1541 mailbox = mlx4_alloc_cmd_mailbox(dev);
1542 if (IS_ERR(mailbox))
1543 return PTR_ERR(mailbox);
1544 inbox = mailbox->buf;
1546 memset(inbox, 0, INIT_PORT_IN_SIZE);
1549 flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
1550 flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
1551 MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
1553 field = 128 << dev->caps.ib_mtu_cap[port];
1554 MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
1555 field = dev->caps.gid_table_len[port];
1556 MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
1557 field = dev->caps.pkey_table_len[port];
1558 MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
1560 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
1561 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1563 mlx4_free_cmd_mailbox(dev, mailbox);
1565 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
1566 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
1570 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
1572 int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
1573 struct mlx4_vhcr *vhcr,
1574 struct mlx4_cmd_mailbox *inbox,
1575 struct mlx4_cmd_mailbox *outbox,
1576 struct mlx4_cmd_info *cmd)
1578 struct mlx4_priv *priv = mlx4_priv(dev);
1579 int port = vhcr->in_modifier;
1582 if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
1586 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
1587 if (priv->mfunc.master.init_port_ref[port] == 1) {
1588 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
1589 1000, MLX4_CMD_NATIVE);
1593 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
1595 /* infiniband port */
1596 if (slave == mlx4_master_func_num(dev)) {
1597 if (!priv->mfunc.master.qp0_state[port].qp0_active &&
1598 priv->mfunc.master.qp0_state[port].port_active) {
1599 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
1600 1000, MLX4_CMD_NATIVE);
1603 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
1604 priv->mfunc.master.qp0_state[port].port_active = 0;
1607 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
1609 --priv->mfunc.master.init_port_ref[port];
1613 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
1615 return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
1618 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
1620 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
1622 return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
1626 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
1628 int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
1629 MLX4_CMD_SET_ICM_SIZE,
1630 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1635 * Round up number of system pages needed in case
1636 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
1638 *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
1639 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
1644 int mlx4_NOP(struct mlx4_dev *dev)
1646 /* Input modifier of 0x1f means "finish as soon as possible." */
1647 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1650 int mlx4_query_diag_counters(struct mlx4_dev *dev, int array_length,
1651 u8 op_modifier, u32 in_offset[],
1654 struct mlx4_cmd_mailbox *mailbox;
1659 mailbox = mlx4_alloc_cmd_mailbox(dev);
1660 if (IS_ERR(mailbox))
1661 return PTR_ERR(mailbox);
1662 outbox = mailbox->buf;
1664 ret = mlx4_cmd_box(dev, 0, mailbox->dma, 0, op_modifier,
1665 MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A,
1670 for (i = 0; i < array_length; i++) {
1671 if (in_offset[i] > MLX4_MAILBOX_SIZE) {
1676 MLX4_GET(counter_out[i], outbox, in_offset[i]);
1680 mlx4_free_cmd_mailbox(dev, mailbox);
1683 EXPORT_SYMBOL_GPL(mlx4_query_diag_counters);
1685 #define MLX4_WOL_SETUP_MODE (5 << 28)
1686 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
1688 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
1690 return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
1691 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
1694 EXPORT_SYMBOL_GPL(mlx4_wol_read);
1696 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
1698 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
1700 return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
1701 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1703 EXPORT_SYMBOL_GPL(mlx4_wol_write);
1710 void mlx4_opreq_action(struct work_struct *work)
1712 struct mlx4_priv *priv = container_of(work, struct mlx4_priv, opreq_task);
1713 struct mlx4_dev *dev = &priv->dev;
1714 int num_tasks = atomic_read(&priv->opreq_count);
1715 struct mlx4_cmd_mailbox *mailbox;
1716 struct mlx4_mgm *mgm;
1729 #define GET_OP_REQ_MODIFIER_OFFSET 0x08
1730 #define GET_OP_REQ_TOKEN_OFFSET 0x14
1731 #define GET_OP_REQ_TYPE_OFFSET 0x1a
1732 #define GET_OP_REQ_DATA_OFFSET 0x20
1734 mailbox = mlx4_alloc_cmd_mailbox(dev);
1735 if (IS_ERR(mailbox)) {
1736 mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
1739 outbox = mailbox->buf;
1742 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
1743 MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
1746 mlx4_err(dev, "Failed to retrieve required operation: %d\n", err);
1749 MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
1750 MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
1751 MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
1752 type_m = type >> 12;
1757 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
1758 mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
1763 mgm = (struct mlx4_mgm *) ((u8 *) (outbox) + GET_OP_REQ_DATA_OFFSET);
1764 num_qps = be32_to_cpu(mgm->members_count) & MGM_QPN_MASK;
1765 rem_mcg = ((u8 *) (&mgm->members_count))[0] & 1;
1766 prot = ((u8 *) (&mgm->members_count))[0] >> 6;
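/* members_count packs several fields into its most significant byte:
 * bits 30-31 hold the protocol, bit 24 presumably selects detach vs.
 * attach (rem_mcg), and the low bits give the number of QPs. */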
1768 for (i = 0; i < num_qps; i++) {
1769 qp.qpn = be32_to_cpu(mgm->qp[i]);
1771 err = mlx4_multicast_detach(dev, &qp, mgm->gid, prot, 0);
1773 err = mlx4_multicast_attach(dev, &qp, mgm->gid, mgm->gid[5], 0, prot, NULL);
1779 mlx4_warn(dev, "Bad type for required operation\n");
1783 err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16), 1,
1784 MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
1787 mlx4_err(dev, "Failed to acknowledge required request: %d\n", err);
1790 memset(outbox, 0, 0xffc);
1791 num_tasks = atomic_dec_return(&priv->opreq_count);
1795 mlx4_free_cmd_mailbox(dev, mailbox);