/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_IRQNAME_SIZE	= 32
};

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD)		    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)	    | \
			       (1ull << MLX4_EVENT_TYPE_FLR_EVENT))
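
/*
 * Illustrative note: each MLX4_EVENT_TYPE_* value is a small integer,
 * so the mask above is just a 64-bit word with one bit set per event
 * type the async EQ should receive.  MAP_EQ takes this word as its
 * input parameter; subscribing an EQ to command-interface completions
 * only would look like:
 *
 *	mlx4_MAP_EQ(dev, 1ull << MLX4_EVENT_TYPE_CMD, 0, eqn);
 */
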
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
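
/*
 * Illustrative note: the doorbell word is big-endian; its low 24 bits
 * carry the consumer index (which therefore wraps modulo 2^24) and
 * bit 31 is the "request notification" flag that re-arms the EQ.
 */
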
static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;

	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}
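
/*
 * Illustrative note on the ownership test: nent is a power of two, so
 * (cons_index & nent) toggles every time the consumer index wraps
 * around the queue.  An EQE is handed to software only when its owner
 * bit matches that pass parity, which lets hardware and software share
 * the ring without ever having to reset the owner byte after
 * consumption.  E.g. with nent = 0x20, cons_index = 0x21 looks at slot
 * 1 with pass parity 1, so that slot's owner bit must read 1 before
 * the entry is consumed.
 */
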
static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
{
	struct mlx4_eqe *eqe =
		&slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
	return (!!(eqe->owner & 0x80) ^
		!!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
		eqe : NULL;
}

void mlx4_gen_slave_eqe(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
	struct mlx4_eqe *eqe;
	u8 slave;
	int i;

	for (eqe = next_slave_event_eqe(slave_eq); eqe;
	      eqe = next_slave_event_eqe(slave_eq)) {
		slave = eqe->slave_id;

		/* All active slaves need to receive the event */
		if (slave == ALL_SLAVES) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (i != dev->caps.function &&
				    master->slave_state[i].active)
					if (mlx4_GEN_EQE(dev, i, eqe))
						mlx4_warn(dev, "Failed to generate event for slave %d\n",
							  i);
			}
		} else {
			if (mlx4_GEN_EQE(dev, slave, eqe))
				mlx4_warn(dev, "Failed to generate event for slave %d\n",
					  slave);
		}
		++slave_eq->cons;
	}
}

static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
	struct mlx4_eqe *s_eqe =
		&slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];

	if ((!!(s_eqe->owner & 0x80)) ^
	    (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
		mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
			  slave);
		return;
	}

	memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
	s_eqe->slave_id = slave;
	/* ensure all information is written before setting the ownership bit */
	wmb();
	s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
	++slave_eq->prod;

	queue_work(priv->mfunc.master.comm_wq,
		   &priv->mfunc.master.slave_event_work);
}
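
/*
 * Illustrative note: the memcpy above deliberately copies
 * sizeof(struct mlx4_eqe) - 1 bytes because the owner byte is the very
 * last byte of the EQE.  The payload and slave_id are written first,
 * wmb() orders those stores, and only then is the owner byte flipped,
 * which is the moment the entry becomes visible to the consumer in
 * mlx4_gen_slave_eqe().
 */
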
static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
			     struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave =
		&priv->mfunc.master.slave_state[slave];

	if (!s_slave->active) {
		/*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
		return;
	}

	slave_event(dev, slave, eqe);
}

void mlx4_master_handle_slave_flr(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_flr_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int i;
	int err;

	mlx4_dbg(dev, "mlx4_handle_slave_flr\n");

	for (i = 0 ; i < dev->num_slaves; i++) {

		if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
			mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
				 i);

			mlx4_delete_all_resources_for_slave(dev, i);
			/* return the slave to running mode */
			spin_lock(&priv->mfunc.master.slave_state_lock);
			slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
			slave_state[i].is_slave_going_down = 0;
			spin_unlock(&priv->mfunc.master.slave_state_lock);
			/* notify the FW */
			err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (err)
				mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
					  i);
		}
	}
}

static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_eqe *eqe;
	int cqn;
	int eqes_found = 0;
	int set_ci = 0;
	int port;
	int slave = 0;
	int ret;
	u32 flr_slave;
	u8 update_slave_state;
	int i;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_dbg(dev, "event %d arrived\n", eqe->type);
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the QP */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_QP,
						be32_to_cpu(eqe->event.qp.qpn)
						& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
				      0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
				  __func__);
			/* fall through */
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the SRQ */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_SRQ,
						be32_to_cpu(eqe->event.srq.srqn)
						& 0xffffff,
						&slave);
				if (ret && ret != -ENOENT) {
					mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						  eqe->type, eqe->subtype,
						  eq->eqn, eq->cons_index, ret);
					break;
				}
				mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
					  __func__, slave,
					  be32_to_cpu(eqe->event.srq.srqn),
					  eqe->type, eqe->subtype);

				if (!ret && slave != dev->caps.function) {
					mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
						  __func__, eqe->type,
						  eqe->subtype, slave);
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
				       0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MLX4_EVENT_TYPE_PORT_CHANGE:
			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
				mlx4_dispatch_event(dev,
						    MLX4_DEV_EVENT_PORT_DOWN,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
				if (mlx4_is_master(dev))
					/* change the state of all slaves'
					 * ports to down */
					for (i = 0; i < dev->num_slaves; i++) {
						mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
							 __func__, i, port);
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
			} else {
				mlx4_dispatch_event(dev,
						    MLX4_DEV_EVENT_PORT_UP,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 0;

				if (mlx4_is_master(dev)) {
					for (i = 0; i < dev->num_slaves; i++) {
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
				}
			}
			break;

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			if (mlx4_is_master(dev)) {
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_CQ,
						be32_to_cpu(eqe->event.cq_err.cqn)
						& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_cq_event(dev,
				      be32_to_cpu(eqe->event.cq_err.cqn)
				      & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_COMM_CHANNEL:
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Received comm channel event for non master device\n");
				break;
			}
			memcpy(&priv->mfunc.master.comm_arm_bit_vector,
			       eqe->event.comm_channel_arm.bit_vec,
			       sizeof eqe->event.comm_channel_arm.bit_vec);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.comm_work);
			break;

		case MLX4_EVENT_TYPE_FLR_EVENT:
			flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Non-master function received FLR event\n");
				break;
			}

			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);

			if (flr_slave > dev->num_slaves) {
				mlx4_warn(dev,
					  "Got FLR for unknown function: %d\n",
					  flr_slave);
				update_slave_state = 0;
			} else
				update_slave_state = 1;

			spin_lock(&priv->mfunc.master.slave_state_lock);
			if (update_slave_state) {
				priv->mfunc.master.slave_state[flr_slave].active = false;
				priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
				priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
			}
			spin_unlock(&priv->mfunc.master.slave_state_lock);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.slave_flr_event_work);
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
				  eqe->type, eqe->subtype, eq->eqn,
				  eq->cons_index, eqe->owner, eq->nent,
				  eqe->slave_id,
				  !!(eqe->owner & 0x80) ^
				  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	return eqes_found;
}
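
/*
 * Illustrative note: with MLX4_NUM_SPARE_EQE = 0x80, the loop above
 * refreshes the consumer index after at most 128 consumed entries, so
 * the hardware always sees at least the 128 spare slots as free and
 * never reports a spurious EQ overflow, even during a long event burst.
 */
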
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq *eq = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}
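
/*
 * Illustrative note: under legacy INTx the interrupt line is shared, so
 * mlx4_interrupt() must ack via the clear register, poll every EQ, and
 * return IRQ_NONE (via IRQ_RETVAL) when no EQE was found so the kernel
 * can attribute the interrupt to another device.  With MSI-X each EQ
 * has its own vector and private handler data, so the handler services
 * exactly one EQ and can unconditionally claim the interrupt.
 */
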
int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq =
		&priv->mfunc.master.slave_state[slave].event_eq;
	u32 in_modifier = vhcr->in_modifier;
	u32 eqn = in_modifier & 0x1FF;
	u64 in_param = vhcr->in_param;
	int err = 0;

	if (slave == dev->caps.function)
		err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
			       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			       MLX4_CMD_NATIVE);
	if (!err) {
		if (in_modifier >> 31) {
			/* unmap */
			event_eq->event_type &= ~in_param;
		} else {
			event_eq->eqn = eqn;
			event_eq->event_type = in_param;
		}
	}
	return err;
}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
			int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}
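
/*
 * Illustrative note: MAP_EQ packs its operation into the in_modifier:
 * bits 0-8 select the EQ number and bit 31 selects map (0) or unmap
 * (1), while the 64-bit input parameter carries the event mask itself.
 * This is the encoding that mlx4_MAP_EQ_wrapper() above decodes on
 * behalf of slave functions.
 */
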
static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma | dev->caps.function, eq_num, 0,
			MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd_box(dev, dev->caps.function, mailbox->dma, eq_num,
			    0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_WRAPPED);
}

static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
	 */
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
		dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
}
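
/*
 * Illustrative example: with 16 completion vectors, 1 async EQ,
 * 24 reserved EQs and an empty pool, the highest EQ index in use is
 * 16 + 1 + 24 = 41, so the formula yields 41/4 - 24/4 + 1 =
 * 10 - 6 + 1 = 5 UAR pages to map.
 */
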
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(pci_resource_start(dev->pdev, 2) +
				((eq->eqn / 4) << PAGE_SHIFT),
				PAGE_SIZE);
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}
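
/*
 * Illustrative note: the UAR page for (eqn / 4) is mapped lazily on
 * first use, and within that page the four EQ doorbells live at
 * offsets 0x800, 0x808, 0x810 and 0x818, hence 0x800 + 8 * (eqn % 4).
 * E.g. EQN 26 with 24 reserved EQs uses uar_map[0] at offset 0x810.
 */
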
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev   = dev;
	eq->nent  = roundup_pow_of_two(max(nent, 2));
	npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK   |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}
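
/*
 * Illustrative note: nent is rounded up to a power of two because both
 * the slot lookup in get_eqe() (entry & (nent - 1)) and the pass-parity
 * test in next_eqe_sw() (cons_index & nent) rely on power-of-two
 * arithmetic; a request for e.g. 24 entries therefore allocates 32.
 */
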
static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return;

	err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	if (0) {
		mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				pr_cont("[%02x] ", i * 4);
			pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				pr_cont("\n");
		}
	}

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
				  eq->page_list[i].buf,
				  eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, vec;

	if (eq_table->have_irq)
		free_irq(dev->pdev->irq, dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		if (eq_table->eq[i].have_irq) {
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
			eq_table->eq[i].have_irq = 0;
		}

	for (i = 0; i < dev->caps.comp_pool; i++) {
		/*
		 * Free any IRQs still assigned from the pool.  All bits
		 * should already be 0, but we need to validate.
		 */
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			/* no locking needed at teardown */
			vec = dev->caps.num_comp_vectors + 1 + i;
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
		}
	}

	kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
		return -ENOMEM;
	}

	return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
				    sizeof *priv->eq_table.eq, GFP_KERNEL);
	if (!priv->eq_table.eq)
		return -ENOMEM;

	return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
	kfree(mlx4_priv(dev)->eq_table.eq);
}

int mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
					 sizeof *priv->eq_table.uar_map,
					 GFP_KERNEL);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_map_clr_int(dev);
		if (err)
			goto err_out_bitmap;

		priv->eq_table.clr_mask =
			swab32(1 << (priv->eq_table.inta_pin & 31));
		priv->eq_table.clr_int  = priv->clr_base +
			(priv->eq_table.inta_pin < 32 ? 4 : 0);
	}

	priv->eq_table.irq_names =
		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
					     dev->caps.comp_pool),
			GFP_KERNEL);
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_bitmap;
	}

	for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
			     &priv->eq_table.eq[dev->caps.num_comp_vectors]);
	if (err)
		goto err_out_comp;

	/* if the pool of additional completion vectors is empty,
	 * this loop will not run */
	for (i = dev->caps.num_comp_vectors + 1;
	      i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {

		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	if (dev->flags & MLX4_FLAG_MSI_X) {
		const char *eq_name;

		for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
			if (i < dev->caps.num_comp_vectors) {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-comp-%d@pci:%s", i,
					 pci_name(dev->pdev));
			} else {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-async@pci:%s",
					 pci_name(dev->pdev));
			}

			eq_name = priv->eq_table.irq_names +
				  i * MLX4_IRQNAME_SIZE;
			err = request_irq(priv->eq_table.eq[i].irq,
					  mlx4_msi_x_interrupt, 0, eq_name,
					  priv->eq_table.eq + i);
			if (err)
				goto err_out_async;

			priv->eq_table.eq[i].have_irq = 1;
		}
	} else {
		snprintf(priv->eq_table.irq_names,
			 MLX4_IRQNAME_SIZE,
			 DRV_NAME "@pci:%s",
			 pci_name(dev->pdev));
		err = request_irq(dev->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, priv->eq_table.irq_names, dev);
		if (err)
			goto err_out_async;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

	return 0;

err_out_async:
	mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);

err_out_comp:
	i = dev->caps.num_comp_vectors - 1;

err_out_unmap:
	while (i >= 0) {
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
		--i;
	}
	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);
	mlx4_free_irqs(dev);

err_out_bitmap:
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	kfree(priv->eq_table.uar_map);

	return err;
}
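
/*
 * Illustrative note: irq_names is one flat buffer carved into fixed
 * 32-byte (MLX4_IRQNAME_SIZE) slots, one per vector, so the name for
 * vector i lives at irq_names + i * MLX4_IRQNAME_SIZE.  request_irq()
 * keeps the pointer it is handed, which is why the buffer must outlive
 * the IRQs and is only freed in mlx4_free_irqs().
 */
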
void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);

	mlx4_free_irqs(dev);

	for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		if (priv->eq_table.uar_map[i])
			iounmap(priv->eq_table.uar_map[i]);

	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

	kfree(priv->eq_table.uar_map);
}

/* A test that verifies that we can accept interrupts on all
 * the irq vectors of the device.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupts(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int err;

	err = mlx4_NOP(dev);
	/* When not in MSI_X, there is only one irq to check */
	if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
		return err;

	/* A loop over all completion vectors: for each vector we check
	 * whether it works by mapping command completions to that vector
	 * and performing a NOP command
	 */
	for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
		/* Temporarily use polling for command completions */
		mlx4_cmd_use_polling(dev);

		/* Map the new eq to handle all asynchronous events */
		err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
				  priv->eq_table.eq[i].eqn);
		if (err) {
			mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
			mlx4_cmd_use_events(dev);
			break;
		}

		/* Go back to using events */
		mlx4_cmd_use_events(dev);
		err = mlx4_NOP(dev);
	}

	/* Return to default */
	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);
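
/*
 * Illustrative note: the loop above works because a NOP completion in
 * event mode is delivered as an MLX4_EVENT_TYPE_CMD event on whichever
 * EQ the async event mask is currently mapped to (see the
 * mlx4_cmd_event() dispatch in mlx4_eq_int()).  Remapping the mask to
 * completion vector i and then issuing a NOP therefore exercises
 * exactly that MSI-X vector; a broken vector shows up as a command
 * timeout in err.
 */
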
int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int vec = 0, err = 0, i;

	spin_lock(&priv->msix_ctl.pool_lock);
	for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
		if (~priv->msix_ctl.pool_bm & 1ULL << i) {
			priv->msix_ctl.pool_bm |= 1ULL << i;
			vec = dev->caps.num_comp_vectors + 1 + i;
			snprintf(priv->eq_table.irq_names +
					vec * MLX4_IRQNAME_SIZE,
					MLX4_IRQNAME_SIZE, "%s", name);
			err = request_irq(priv->eq_table.eq[vec].irq,
					  mlx4_msi_x_interrupt, 0,
					  &priv->eq_table.irq_names[vec << 5],
					  priv->eq_table.eq + vec);
			if (err) {
				/* zero out the bit by flipping it */
				priv->msix_ctl.pool_bm ^= 1ULL << i;
				vec = 0;
				continue;
				/* we don't want to break here */
			}
			eq_set_ci(&priv->eq_table.eq[vec], 1);
		}
	}
	spin_unlock(&priv->msix_ctl.pool_lock);

	if (vec) {
		*vector = vec;
	} else {
		*vector = 0;
		err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
	}
	return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);
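
/*
 * Illustrative usage (hypothetical caller): a consumer that wants a
 * dedicated completion vector might do
 *
 *	int vec;
 *
 *	err = mlx4_assign_eq(dev, "my-driver-0", &vec);
 *	if (err)
 *		... fall back to one of the num_comp_vectors legacy
 *		    vectors (-ENOSPC means the pool is exhausted) ...
 *	...
 *	mlx4_release_eq(dev, vec);
 *
 * Note that vec << 5 in the request_irq() call above is simply
 * vec * MLX4_IRQNAME_SIZE (32).
 */
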
void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	/* pool vectors start right after the legacy vectors */
	int i = vec - dev->caps.num_comp_vectors - 1;

	if (likely(i >= 0)) {
		/* sanity check: make sure we're not freeing IRQs
		 * belonging to a legacy EQ */
		spin_lock(&priv->msix_ctl.pool_lock);
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
			priv->msix_ctl.pool_bm &= ~(1ULL << i);
		}
		spin_unlock(&priv->msix_ctl.pool_lock);
	}
}
EXPORT_SYMBOL(mlx4_release_eq);