/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/hardirq.h>
#include <linux/module.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4.h"
#include "icm.h"

#define MLX4_CQ_STATUS_OK          ( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW    ( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_CQ_FLAG_CC            ( 1 << 18)
#define MLX4_CQ_FLAG_OI            ( 1 << 17)
#define MLX4_CQ_STATE_ARMED        ( 9 <<  8)
#define MLX4_CQ_STATE_ARMED_SOL    ( 6 <<  8)
#define MLX4_EQ_STATE_FIRED        (10 <<  8)
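
/*
 * Dispatch a completion event to the CQ's completion handler.  The CQ is
 * looked up in the radix tree under the read lock and pinned with a
 * reference count so it cannot be freed while the handler runs.
 */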
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
        struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
        struct mlx4_cq *cq;

        read_lock(&cq_table->cq_table_lock);

        cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
                               cqn & (dev->caps.num_cqs - 1));
        if (cq)
                atomic_inc(&cq->refcount);

        read_unlock(&cq_table->cq_table_lock);

        if (!cq) {
                mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
                return;
        }

        ++cq->arm_sn;

        cq->comp(cq);

        if (atomic_dec_and_test(&cq->refcount))
                complete(&cq->free);
}
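
/*
 * Dispatch an asynchronous event (e.g. a CQ error) to the CQ's event
 * handler, using the same lookup-and-pin scheme as the completion path.
 */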
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
{
        struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
        struct mlx4_cq *cq;

        read_lock(&cq_table->cq_table_lock);

        cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
        if (cq)
                atomic_inc(&cq->refcount);

        read_unlock(&cq_table->cq_table_lock);

        if (!cq) {
                mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
                return;
        }

        cq->event(cq, event_type);

        if (atomic_dec_and_test(&cq->refcount))
                complete(&cq->free);
}
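
/*
 * Firmware command wrappers: SW2HW_CQ hands a software-owned CQ context to
 * the hardware, MODIFY_CQ updates fields of it (which fields is selected by
 * the opmod value), and HW2SW_CQ takes ownership back before the CQ is freed.
 */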
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int cq_num)
{
        return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
                        MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
                        MLX4_CMD_WRAPPED);
}

static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int cq_num, u32 opmod)
{
        return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
                        MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int cq_num)
{
        return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
                            cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
                            MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
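
/*
 * Tune event moderation for a CQ: "count" caps the number of completions
 * and "period" the time that may elapse before the CQ generates an event,
 * whichever is reached first.
 */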
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
                   u16 count, u16 period)
{
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_cq_context *cq_context;
        int err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        cq_context = mailbox->buf;
        memset(cq_context, 0, sizeof *cq_context);

        cq_context->cq_max_count = cpu_to_be16(count);
        cq_context->cq_period    = cpu_to_be16(period);

        err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);

        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_modify);
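
/*
 * Point the hardware at a resized CQ buffer: MODIFY_CQ (opmod 0) updates the
 * log size, page size and MTT base address in the CQ context.
 */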
int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
                   int entries, struct mlx4_mtt *mtt)
{
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_cq_context *cq_context;
        u64 mtt_addr;
        int err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        cq_context = mailbox->buf;
        memset(cq_context, 0, sizeof *cq_context);

        cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
        cq_context->log_page_size   = mtt->page_shift - 12;
        mtt_addr = mlx4_mtt_addr(dev, mtt);
        cq_context->mtt_base_addr_h = mtt_addr >> 32;
        cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

        err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);

        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);
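
/*
 * Set the "overrun ignore" (OI) flag so the hardware keeps writing CQEs even
 * if the consumer does not keep up, instead of reporting a CQ overflow.
 */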
int mlx4_cq_ignore_overrun(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_cq_context *cq_context;
        int err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        cq_context = mailbox->buf;
        memset(cq_context, 0, sizeof *cq_context);

        cq_context->flags |= cpu_to_be32(MLX4_CQ_FLAG_OI);

        err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 3);

        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_ignore_overrun);
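
/*
 * Low-level CQN allocation: reserve a CQ number from the bitmap and make sure
 * the corresponding ICM entries (CQ context table and cMPT table) are mapped.
 */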
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cq_table *cq_table = &priv->cq_table;
        int err;

        *cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
        if (*cqn == -1)
                return -ENOMEM;

        err = mlx4_table_get(dev, &cq_table->table, *cqn);
        if (err)
                goto err_out;

        err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
        if (err)
                goto err_put;
        return 0;

err_put:
        mlx4_table_put(dev, &cq_table->table, *cqn);

err_out:
        mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR);
        return err;
}

static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
        u64 out_param;
        int err;

        if (mlx4_is_mfunc(dev)) {
                err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
                                   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
                                   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
                if (err)
                        return err;

                *cqn = get_param_l(&out_param);
                return 0;
        }
        return __mlx4_cq_alloc_icm(dev, cqn);
}

void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cq_table *cq_table = &priv->cq_table;

        mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
        mlx4_table_put(dev, &cq_table->table, cqn);
        mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR);
}

static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
        u64 in_param = 0;
        int err;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&in_param, cqn);
                err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
                               MLX4_CMD_FREE_RES,
                               MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
                if (err)
                        mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
        } else
                __mlx4_cq_free_icm(dev, cqn);
}
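
/*
 * Create a CQ: reserve a CQN, insert the CQ in the radix tree so events can
 * be dispatched to it, build the CQ context (size, UAR page, completion EQ,
 * MTT base address, doorbell record) and hand it to the HW with SW2HW_CQ.
 */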
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
                  struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
                  struct mlx4_cq *cq, unsigned vector, int collapsed,
                  int timestamp_en)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cq_table *cq_table = &priv->cq_table;
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_cq_context *cq_context;
        u64 mtt_addr;
        int err;

        if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
                return -EINVAL;

        cq->vector = vector;

        err = mlx4_cq_alloc_icm(dev, &cq->cqn);
        if (err)
                return err;

        spin_lock_irq(&cq_table->lock);
        err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
        spin_unlock_irq(&cq_table->lock);
        if (err)
                goto err_icm;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_radix;
        }

        cq_context = mailbox->buf;
        memset(cq_context, 0, sizeof *cq_context);

        cq_context->flags = cpu_to_be32(!!collapsed << 18);
        if (timestamp_en)
                cq_context->flags |= cpu_to_be32(1 << 19);

        cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
        cq_context->comp_eqn        = priv->eq_table.eq[vector].eqn;
        cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

        mtt_addr = mlx4_mtt_addr(dev, mtt);
        cq_context->mtt_base_addr_h = mtt_addr >> 32;
        cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
        cq_context->db_rec_addr     = cpu_to_be64(db_rec);

        err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
        mlx4_free_cmd_mailbox(dev, mailbox);
        if (err)
                goto err_radix;

        cq->cons_index = 0;
        cq->arm_sn     = 1;
        cq->uar        = uar;
        atomic_set(&cq->refcount, 1);
        init_completion(&cq->free);

        cq->eqn = priv->eq_table.eq[cq->vector].eqn;
        cq->irq = priv->eq_table.eq[cq->vector].irq;

        return 0;

err_radix:
        spin_lock_irq(&cq_table->lock);
        radix_tree_delete(&cq_table->tree, cq->cqn);
        spin_unlock_irq(&cq_table->lock);

err_icm:
        mlx4_cq_free_icm(dev, cq->cqn);

        return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_alloc);
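
/*
 * Destroy a CQ: reclaim it from the HW with HW2SW_CQ, remove it from the
 * radix tree, wait for any handler still holding a reference to drop it,
 * then release the CQN and its ICM entries.
 */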
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cq_table *cq_table = &priv->cq_table;
        int err;

        err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
        if (err)
                mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

        synchronize_irq(priv->eq_table.eq[cq->vector].irq);

        spin_lock_irq(&cq_table->lock);
        radix_tree_delete(&cq_table->tree, cq->cqn);
        spin_unlock_irq(&cq_table->lock);

        if (atomic_dec_and_test(&cq->refcount))
                complete(&cq->free);
        wait_for_completion(&cq->free);

        mlx4_cq_free_icm(dev, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);
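
/*
 * Set up the per-device CQ table: locks, radix tree and, on the master only,
 * the CQN bitmap (slave functions obtain CQNs through the master).
 */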
int mlx4_init_cq_table(struct mlx4_dev *dev)
{
        struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
        int err;

        spin_lock_init(&cq_table->lock);
        rwlock_init(&cq_table->cq_table_lock);
        INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
        if (mlx4_is_slave(dev))
                return 0;

        err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
                               dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
        if (err)
                return err;

        return 0;
}

void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
        if (mlx4_is_slave(dev))
                return;
        /* Nothing to do to clean up radix_tree */
        mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}