/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/export.h>
38 #include <linux/pci.h>
39 #include <linux/errno.h>
#include <linux/mlx4/cmd.h>
#include <linux/semaphore.h>

#include <asm/io.h>

#include "mlx4.h"
#include "fw.h"
49 #define CMD_POLL_TOKEN 0xffff
50 #define INBOX_MASK 0xffffffffffffff00ULL
52 #define CMD_CHAN_VER 1
53 #define CMD_CHAN_IF_REV 1
enum {
	/* command completed successfully: */
	CMD_STAT_OK		= 0x00,
58 /* Internal error (such as a bus error) occurred while processing command: */
59 CMD_STAT_INTERNAL_ERR = 0x01,
60 /* Operation/command not supported or opcode modifier not supported: */
61 CMD_STAT_BAD_OP = 0x02,
62 /* Parameter not supported or parameter out of range: */
63 CMD_STAT_BAD_PARAM = 0x03,
64 /* System not enabled or bad system state: */
65 CMD_STAT_BAD_SYS_STATE = 0x04,
	/* Attempt to access reserved or unallocated resource: */
67 CMD_STAT_BAD_RESOURCE = 0x05,
68 /* Requested resource is currently executing a command, or is otherwise busy: */
69 CMD_STAT_RESOURCE_BUSY = 0x06,
70 /* Required capability exceeds device limits: */
71 CMD_STAT_EXCEED_LIM = 0x08,
72 /* Resource is not in the appropriate state or ownership: */
73 CMD_STAT_BAD_RES_STATE = 0x09,
74 /* Index out of range: */
75 CMD_STAT_BAD_INDEX = 0x0a,
76 /* FW image corrupted: */
77 CMD_STAT_BAD_NVMEM = 0x0b,
78 /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
79 CMD_STAT_ICM_ERROR = 0x0c,
80 /* Attempt to modify a QP/EE which is not in the presumed state: */
81 CMD_STAT_BAD_QP_STATE = 0x10,
82 /* Bad segment parameters (Address/Size): */
83 CMD_STAT_BAD_SEG_PARAM = 0x20,
	/* Memory Region has Memory Windows bound to it: */
85 CMD_STAT_REG_BOUND = 0x21,
86 /* HCA local attached memory not present: */
87 CMD_STAT_LAM_NOT_PRE = 0x22,
88 /* Bad management packet (silently discarded): */
89 CMD_STAT_BAD_PKT = 0x30,
90 /* More outstanding CQEs in CQ than new CQ size: */
91 CMD_STAT_BAD_SIZE = 0x40,
92 /* Multi Function device support required: */
	/* Multi Function device support required: */
	CMD_STAT_MULTI_FUNC_REQ	= 0x50,
};

enum {
97 HCR_IN_PARAM_OFFSET = 0x00,
98 HCR_IN_MODIFIER_OFFSET = 0x08,
99 HCR_OUT_PARAM_OFFSET = 0x0c,
100 HCR_TOKEN_OFFSET = 0x14,
101 HCR_STATUS_OFFSET = 0x18,
	HCR_OPMOD_SHIFT		= 12,
	HCR_T_BIT		= 21,
	HCR_E_BIT		= 22,
	HCR_GO_BIT		= 23
};

enum {
	GO_BIT_TIMEOUT_MSECS	= 10000
};
struct mlx4_cmd_context {
	struct completion	done;
	int			result;
	int			next;
	u64			out_param;
	u16			token;
	u8			fw_status;
};
122 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
123 struct mlx4_vhcr_cmd *in_vhcr);
static int mlx4_status_to_errno(u8 status)
{
	static const int trans_table[] = {
128 [CMD_STAT_INTERNAL_ERR] = -EIO,
129 [CMD_STAT_BAD_OP] = -EPERM,
130 [CMD_STAT_BAD_PARAM] = -EINVAL,
131 [CMD_STAT_BAD_SYS_STATE] = -ENXIO,
132 [CMD_STAT_BAD_RESOURCE] = -EBADF,
133 [CMD_STAT_RESOURCE_BUSY] = -EBUSY,
134 [CMD_STAT_EXCEED_LIM] = -ENOMEM,
135 [CMD_STAT_BAD_RES_STATE] = -EBADF,
136 [CMD_STAT_BAD_INDEX] = -EBADF,
137 [CMD_STAT_BAD_NVMEM] = -EFAULT,
138 [CMD_STAT_ICM_ERROR] = -ENFILE,
139 [CMD_STAT_BAD_QP_STATE] = -EINVAL,
140 [CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
141 [CMD_STAT_REG_BOUND] = -EBUSY,
142 [CMD_STAT_LAM_NOT_PRE] = -EAGAIN,
143 [CMD_STAT_BAD_PKT] = -EINVAL,
144 [CMD_STAT_BAD_SIZE] = -ENOMEM,
		[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
	};
148 if (status >= ARRAY_SIZE(trans_table) ||
	    (status != CMD_STAT_OK && trans_table[status] == 0))
		return -EIO;

	return trans_table[status];
}
static u8 mlx4_errno_to_status(int errno)
{
	switch (errno) {
	case -EPERM:	return CMD_STAT_BAD_OP;
	case -EINVAL:	return CMD_STAT_BAD_PARAM;
	case -ENXIO:	return CMD_STAT_BAD_SYS_STATE;
	case -EBUSY:	return CMD_STAT_RESOURCE_BUSY;
	case -ENOMEM:	return CMD_STAT_EXCEED_LIM;
	case -ENFILE:	return CMD_STAT_ICM_ERROR;
	default:	return CMD_STAT_INTERNAL_ERR;
	}
}
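/*
 * The VF-to-PF communication channel uses a toggle-bit protocol: bit 31
 * of the comm register flips with every posted command, and a command is
 * still outstanding as long as the toggle read back from slave_read
 * differs from the locally tracked comm_toggle.
 */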
static int comm_pending(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 status = readl(&priv->mfunc.comm->slave_read);

	return (swab32(status) >> 31) != priv->cmd.comm_toggle;
}
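/*
 * Post a command on the communication channel: flip our toggle and write
 * param, cmd and the new toggle as a single big-endian word, so the
 * master observes the payload and the toggle flip together.
 */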
static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 val;

	priv->cmd.comm_toggle ^= 1;
	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
	__raw_writel((__force u32) cpu_to_be32(val),
		     &priv->mfunc.comm->slave_write);
	mmiowb();
}
195 static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
196 unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = 0;
	int ret_from_pending = 0;
	unsigned long end;
203 /* First, verify that the master reports correct status */
204 if (comm_pending(dev)) {
		mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
			  priv->cmd.comm_toggle, cmd);
		return -EAGAIN;
	}

212 down(&priv->cmd.poll_sem);
213 mlx4_comm_cmd_post(dev, cmd, param);
215 end = msecs_to_jiffies(timeout) + jiffies;
	while (comm_pending(dev) && time_before(jiffies, end))
		cond_resched();

218 ret_from_pending = comm_pending(dev);
219 if (ret_from_pending) {
		/* check if the slave is trying to boot in the middle of
		 * FLR process. The only non-zero result in the RESET
		 * command is MLX4_DELAY_RESET_SLAVE.
		 */
		if (cmd == MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "Got slave FLRed from Communication channel (ret:0x%x)\n",
				  ret_from_pending);
226 err = MLX4_DELAY_RESET_SLAVE;
		} else {
			mlx4_warn(dev, "Communication channel timed out\n");
			err = -ETIMEDOUT;
		}
	}

	up(&priv->cmd.poll_sem);
	return err;
}
237 static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
238 u16 param, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	unsigned long end;
	int err = 0;
245 down(&cmd->event_sem);
247 spin_lock(&cmd->context_lock);
248 BUG_ON(cmd->free_head < 0);
249 context = &cmd->context[cmd->free_head];
250 context->token += cmd->token_mask + 1;
251 cmd->free_head = context->next;
252 spin_unlock(&cmd->context_lock);
254 init_completion(&context->done);
256 mlx4_comm_cmd_post(dev, op, param);
258 if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		err = -EBUSY;
		goto out;
	}

264 err = context->result;
265 if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 op, context->fw_status);
		goto out;
	}
	/*
	 * wait for the comm channel to become ready; this is necessary to
	 * prevent a race when switching between event and polling mode
	 */
276 end = msecs_to_jiffies(timeout) + jiffies;
	while (comm_pending(dev) && time_before(jiffies, end))
		cond_resched();

out:
280 spin_lock(&cmd->context_lock);
281 context->next = cmd->free_head;
282 cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}
289 int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
290 unsigned long timeout)
{
	if (mlx4_priv(dev)->cmd.use_events)
293 return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
	return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
}
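/*
 * HCR ownership is tracked by the 'go' bit (set by software when posting,
 * cleared by firmware on completion) together with a toggle bit that
 * distinguishes consecutive commands; cmd_pending() reports whether
 * firmware still owns the HCR.
 */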
297 static int cmd_pending(struct mlx4_dev *dev)
{
	u32 status;

	if (pci_channel_offline(dev->pdev))
		return -EIO;

304 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
306 return (status & swab32(1 << HCR_GO_BIT)) ||
307 (mlx4_priv(dev)->cmd.toggle ==
		!!(status & swab32(1 << HCR_T_BIT)));
}
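/*
 * Post a command to the HCR. The six parameter words must be written as
 * individual 32-bit MMIO writes (see the comment in the body), and the
 * final write sets the 'go' bit together with the opcode to hand the
 * HCR over to firmware.
 */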
311 static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
312 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	u32 __iomem *hcr = cmd->hcr;
	int ret = -EIO;
	unsigned long end;

320 mutex_lock(&cmd->hcr_mutex);
322 if (pci_channel_offline(dev->pdev)) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		goto out;
	}

	end = jiffies;
	if (event)
		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

335 while (cmd_pending(dev)) {
336 if (pci_channel_offline(dev->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			goto out;
		}

		if (time_after_eq(jiffies, end)) {
			mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
			goto out;
		}

		cond_resched();
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
358 __raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0);
359 __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1);
360 __raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2);
361 __raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3);
362 __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
363 __raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5);
	/* __raw_writel may not order writes. */
	wmb();

	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)		|
					       (cmd->toggle << HCR_T_BIT)	|
					       (event ? (1 << HCR_E_BIT) : 0)	|
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op), hcr + 6);

	/*
	 * Make sure that our HCR writes don't get mixed in with
	 * writes from another CPU starting a FW command.
	 */
	mmiowb();

	cmd->toggle = cmd->toggle ^ 1;

	ret = 0;

out:
	mutex_unlock(&cmd->hcr_mutex);
	return ret;
}
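/*
 * Slave (VF) command path: the command is marshalled into the virtual
 * HCR in the slave's memory and either processed directly (when this
 * function is in fact the master) or handed to the master over the comm
 * channel via MLX4_COMM_CMD_VHCR_POST.
 */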
389 static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
390 int out_is_imm, u32 in_modifier, u8 op_modifier,
391 u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
	int ret;

397 down(&priv->cmd.slave_sem);
398 vhcr->in_param = cpu_to_be64(in_param);
399 vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
400 vhcr->in_modifier = cpu_to_be32(in_modifier);
401 vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
402 vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
404 vhcr->flags = !!(priv->cmd.use_events) << 6;
405 if (mlx4_is_master(dev)) {
406 ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
		if (!ret) {
			if (out_is_imm) {
				if (out_param)
					*out_param =
						be64_to_cpu(vhcr->out_param);
				else {
					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
						 op);
					vhcr->status = CMD_STAT_BAD_PARAM;
				}
			}
			ret = mlx4_status_to_errno(vhcr->status);
		}
	} else {
422 ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
423 MLX4_COMM_TIME + timeout);
		if (!ret) {
			if (out_is_imm) {
				if (out_param)
					*out_param =
						be64_to_cpu(vhcr->out_param);
				else {
					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
						 op);
					vhcr->status = CMD_STAT_BAD_PARAM;
				}
			}
			ret = mlx4_status_to_errno(vhcr->status);
		} else
			mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n",
				 op);
	}

	up(&priv->cmd.slave_sem);
	return ret;
}
445 static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
446 int out_is_imm, u32 in_modifier, u8 op_modifier,
447 u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	void __iomem *hcr = priv->cmd.hcr;
	int err = 0;
	unsigned long end;
	u32 stat;

455 down(&priv->cmd.poll_sem);
457 if (pci_channel_offline(dev->pdev)) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		err = -EIO;
		goto out;
	}

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
	if (err)
		goto out;

471 end = msecs_to_jiffies(timeout) + jiffies;
472 while (cmd_pending(dev) && time_before(jiffies, end)) {
473 if (pci_channel_offline(dev->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			err = -EIO;
			goto out;
		}

		cond_resched();
	}

	if (cmd_pending(dev)) {
		err = -ETIMEDOUT;
		goto out;
	}

	if (out_is_imm)
		*out_param =
492 (u64) be32_to_cpu((__force __be32)
493 __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
494 (u64) be32_to_cpu((__force __be32)
495 __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
496 stat = be32_to_cpu((__force __be32)
497 __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
498 err = mlx4_status_to_errno(stat);
	if (err)
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 op, stat);

out:
	up(&priv->cmd.poll_sem);
	return err;
}
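/*
 * Completion handler called from the command event queue: look up the
 * outstanding context by token, record the firmware status and the
 * immediate output, and wake up the waiter.
 */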
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
510 struct mlx4_priv *priv = mlx4_priv(dev);
511 struct mlx4_cmd_context *context =
512 &priv->cmd.context[token & priv->cmd.token_mask];
514 /* previously timed out command completing at long last */
	if (token != context->token)
		return;

518 context->fw_status = status;
519 context->result = mlx4_status_to_errno(status);
520 context->out_param = out_param;
	complete(&context->done);
}
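/*
 * Event-driven command execution: take a free context from the list,
 * post the command with the context's token, and sleep on the completion
 * until mlx4_cmd_event() reports the result or the timeout expires.
 */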
525 static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
526 int out_is_imm, u32 in_modifier, u8 op_modifier,
527 u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	int err = 0;

533 down(&cmd->event_sem);
535 spin_lock(&cmd->context_lock);
536 BUG_ON(cmd->free_head < 0);
537 context = &cmd->context[cmd->free_head];
538 context->token += cmd->token_mask + 1;
539 cmd->free_head = context->next;
540 spin_unlock(&cmd->context_lock);
542 init_completion(&context->done);
544 mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
545 in_modifier, op_modifier, op, context->token, 1);
547 if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		err = -EBUSY;
		goto out;
	}

553 err = context->result;
	if (err) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 op, context->fw_status);
		goto out;
	}

	if (out_is_imm)
		*out_param = context->out_param;

out:
564 spin_lock(&cmd->context_lock);
565 context->next = cmd->free_head;
566 cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}
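/*
 * Top-level command dispatcher: native commands on a non-multifunction
 * device, or on the master, go straight to the HCR (by events or polling
 * depending on the current mode); everything else takes the wrapped
 * slave path through the comm channel.
 */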
573 int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
574 int out_is_imm, u32 in_modifier, u8 op_modifier,
575 u16 op, unsigned long timeout, int native)
{
	if (pci_channel_offline(dev->pdev))
		return -EIO;

580 if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
581 if (mlx4_priv(dev)->cmd.use_events)
582 return mlx4_cmd_wait(dev, in_param, out_param,
583 out_is_imm, in_modifier,
584 op_modifier, op, timeout);
586 return mlx4_cmd_poll(dev, in_param, out_param,
587 out_is_imm, in_modifier,
588 op_modifier, op, timeout);
	return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
			      in_modifier, op_modifier, op, timeout);
}
593 EXPORT_SYMBOL_GPL(__mlx4_cmd);
596 static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
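/*
 * DMA a buffer between master and slave address spaces using the
 * ACCESS_MEM firmware command. Both addresses must be 4K-aligned and the
 * size must be a multiple of 256 bytes, per the parameter check below.
 */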
602 static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
603 int slave, u64 slave_addr,
604 int size, int is_read)
{
	u64 in_param;
	u64 out_param;

	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
	    (slave & ~0x7f) | (size & 0xff)) {
		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
			 slave_addr, master_addr, slave, size);
		return -EINVAL;
	}

	if (is_read) {
		in_param = (u64) slave | slave_addr;
		out_param = (u64) dev->caps.function | master_addr;
	} else {
		in_param = (u64) dev->caps.function | master_addr;
		out_param = (u64) slave | slave_addr;
	}

	return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
			    MLX4_CMD_ACCESS_MEM,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
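/*
 * Generic DMA wrapper: rebuild the slave's command with master-side
 * mailbox addresses (optionally encoding the slave id in the low byte of
 * in_param) and execute it natively on the slave's behalf.
 */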
630 int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
631 struct mlx4_vhcr *vhcr,
632 struct mlx4_cmd_mailbox *inbox,
633 struct mlx4_cmd_mailbox *outbox,
634 struct mlx4_cmd_info *cmd)
{
	u64 in_param;
	u64 out_param;
	int err;

	in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
641 out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
642 if (cmd->encode_slave_id) {
		in_param &= 0xffffffffffffff00ll;
		in_param |= slave;
	}

647 err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
648 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
649 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (cmd->out_is_imm)
		vhcr->out_param = out_param;

	return err;
}
657 static struct mlx4_cmd_info cmd_info[] = {
659 .opcode = MLX4_CMD_QUERY_FW,
663 .encode_slave_id = false,
665 .wrapper = mlx4_QUERY_FW_wrapper
668 .opcode = MLX4_CMD_QUERY_HCA,
672 .encode_slave_id = false,
677 .opcode = MLX4_CMD_QUERY_DEV_CAP,
681 .encode_slave_id = false,
683 .wrapper = mlx4_QUERY_DEV_CAP_wrapper
686 .opcode = MLX4_CMD_QUERY_FUNC_CAP,
690 .encode_slave_id = false,
692 .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
695 .opcode = MLX4_CMD_QUERY_ADAPTER,
699 .encode_slave_id = false,
704 .opcode = MLX4_CMD_INIT_PORT,
708 .encode_slave_id = false,
710 .wrapper = mlx4_INIT_PORT_wrapper
713 .opcode = MLX4_CMD_CLOSE_PORT,
717 .encode_slave_id = false,
719 .wrapper = mlx4_CLOSE_PORT_wrapper
722 .opcode = MLX4_CMD_QUERY_PORT,
726 .encode_slave_id = false,
728 .wrapper = mlx4_QUERY_PORT_wrapper
731 .opcode = MLX4_CMD_SET_PORT,
735 .encode_slave_id = false,
737 .wrapper = mlx4_SET_PORT_wrapper
740 .opcode = MLX4_CMD_MAP_EQ,
744 .encode_slave_id = false,
746 .wrapper = mlx4_MAP_EQ_wrapper
749 .opcode = MLX4_CMD_SW2HW_EQ,
753 .encode_slave_id = true,
755 .wrapper = mlx4_SW2HW_EQ_wrapper
758 .opcode = MLX4_CMD_HW_HEALTH_CHECK,
762 .encode_slave_id = false,
767 .opcode = MLX4_CMD_NOP,
771 .encode_slave_id = false,
776 .opcode = MLX4_CMD_ALLOC_RES,
780 .encode_slave_id = false,
782 .wrapper = mlx4_ALLOC_RES_wrapper
785 .opcode = MLX4_CMD_FREE_RES,
789 .encode_slave_id = false,
791 .wrapper = mlx4_FREE_RES_wrapper
794 .opcode = MLX4_CMD_SW2HW_MPT,
798 .encode_slave_id = true,
800 .wrapper = mlx4_SW2HW_MPT_wrapper
803 .opcode = MLX4_CMD_QUERY_MPT,
807 .encode_slave_id = false,
809 .wrapper = mlx4_QUERY_MPT_wrapper
812 .opcode = MLX4_CMD_HW2SW_MPT,
816 .encode_slave_id = false,
818 .wrapper = mlx4_HW2SW_MPT_wrapper
821 .opcode = MLX4_CMD_READ_MTT,
825 .encode_slave_id = false,
830 .opcode = MLX4_CMD_WRITE_MTT,
834 .encode_slave_id = false,
836 .wrapper = mlx4_WRITE_MTT_wrapper
839 .opcode = MLX4_CMD_SYNC_TPT,
843 .encode_slave_id = false,
848 .opcode = MLX4_CMD_HW2SW_EQ,
852 .encode_slave_id = true,
854 .wrapper = mlx4_HW2SW_EQ_wrapper
857 .opcode = MLX4_CMD_QUERY_EQ,
861 .encode_slave_id = true,
863 .wrapper = mlx4_QUERY_EQ_wrapper
866 .opcode = MLX4_CMD_SW2HW_CQ,
870 .encode_slave_id = true,
872 .wrapper = mlx4_SW2HW_CQ_wrapper
875 .opcode = MLX4_CMD_HW2SW_CQ,
879 .encode_slave_id = false,
881 .wrapper = mlx4_HW2SW_CQ_wrapper
884 .opcode = MLX4_CMD_QUERY_CQ,
888 .encode_slave_id = false,
890 .wrapper = mlx4_QUERY_CQ_wrapper
893 .opcode = MLX4_CMD_MODIFY_CQ,
897 .encode_slave_id = false,
899 .wrapper = mlx4_MODIFY_CQ_wrapper
902 .opcode = MLX4_CMD_SW2HW_SRQ,
906 .encode_slave_id = true,
908 .wrapper = mlx4_SW2HW_SRQ_wrapper
911 .opcode = MLX4_CMD_HW2SW_SRQ,
915 .encode_slave_id = false,
917 .wrapper = mlx4_HW2SW_SRQ_wrapper
920 .opcode = MLX4_CMD_QUERY_SRQ,
924 .encode_slave_id = false,
926 .wrapper = mlx4_QUERY_SRQ_wrapper
929 .opcode = MLX4_CMD_ARM_SRQ,
933 .encode_slave_id = false,
935 .wrapper = mlx4_ARM_SRQ_wrapper
938 .opcode = MLX4_CMD_RST2INIT_QP,
942 .encode_slave_id = true,
944 .wrapper = mlx4_RST2INIT_QP_wrapper
947 .opcode = MLX4_CMD_INIT2INIT_QP,
951 .encode_slave_id = false,
953 .wrapper = mlx4_GEN_QP_wrapper
956 .opcode = MLX4_CMD_INIT2RTR_QP,
960 .encode_slave_id = false,
962 .wrapper = mlx4_INIT2RTR_QP_wrapper
965 .opcode = MLX4_CMD_RTR2RTS_QP,
969 .encode_slave_id = false,
971 .wrapper = mlx4_GEN_QP_wrapper
974 .opcode = MLX4_CMD_RTS2RTS_QP,
978 .encode_slave_id = false,
980 .wrapper = mlx4_GEN_QP_wrapper
983 .opcode = MLX4_CMD_SQERR2RTS_QP,
987 .encode_slave_id = false,
989 .wrapper = mlx4_GEN_QP_wrapper
992 .opcode = MLX4_CMD_2ERR_QP,
996 .encode_slave_id = false,
998 .wrapper = mlx4_GEN_QP_wrapper
1001 .opcode = MLX4_CMD_RTS2SQD_QP,
1003 .has_outbox = false,
1004 .out_is_imm = false,
1005 .encode_slave_id = false,
1007 .wrapper = mlx4_GEN_QP_wrapper
1010 .opcode = MLX4_CMD_SQD2SQD_QP,
1012 .has_outbox = false,
1013 .out_is_imm = false,
1014 .encode_slave_id = false,
1016 .wrapper = mlx4_GEN_QP_wrapper
1019 .opcode = MLX4_CMD_SQD2RTS_QP,
1021 .has_outbox = false,
1022 .out_is_imm = false,
1023 .encode_slave_id = false,
1025 .wrapper = mlx4_GEN_QP_wrapper
1028 .opcode = MLX4_CMD_2RST_QP,
1030 .has_outbox = false,
1031 .out_is_imm = false,
1032 .encode_slave_id = false,
1034 .wrapper = mlx4_2RST_QP_wrapper
1037 .opcode = MLX4_CMD_QUERY_QP,
1040 .out_is_imm = false,
1041 .encode_slave_id = false,
1043 .wrapper = mlx4_GEN_QP_wrapper
1046 .opcode = MLX4_CMD_SUSPEND_QP,
1048 .has_outbox = false,
1049 .out_is_imm = false,
1050 .encode_slave_id = false,
1052 .wrapper = mlx4_GEN_QP_wrapper
1055 .opcode = MLX4_CMD_UNSUSPEND_QP,
1057 .has_outbox = false,
1058 .out_is_imm = false,
1059 .encode_slave_id = false,
1061 .wrapper = mlx4_GEN_QP_wrapper
1064 .opcode = MLX4_CMD_QUERY_IF_STAT,
1067 .out_is_imm = false,
1068 .encode_slave_id = false,
1070 .wrapper = mlx4_QUERY_IF_STAT_wrapper
1072 /* Native multicast commands are not available for guests */
1074 .opcode = MLX4_CMD_QP_ATTACH,
1076 .has_outbox = false,
1077 .out_is_imm = false,
1078 .encode_slave_id = false,
1080 .wrapper = mlx4_QP_ATTACH_wrapper
1083 .opcode = MLX4_CMD_PROMISC,
1085 .has_outbox = false,
1086 .out_is_imm = false,
1087 .encode_slave_id = false,
1089 .wrapper = mlx4_PROMISC_wrapper
1091 /* Ethernet specific commands */
1093 .opcode = MLX4_CMD_SET_VLAN_FLTR,
1095 .has_outbox = false,
1096 .out_is_imm = false,
1097 .encode_slave_id = false,
1099 .wrapper = mlx4_SET_VLAN_FLTR_wrapper
1102 .opcode = MLX4_CMD_SET_MCAST_FLTR,
1104 .has_outbox = false,
1105 .out_is_imm = false,
1106 .encode_slave_id = false,
1108 .wrapper = mlx4_SET_MCAST_FLTR_wrapper
1111 .opcode = MLX4_CMD_DUMP_ETH_STATS,
1114 .out_is_imm = false,
1115 .encode_slave_id = false,
1117 .wrapper = mlx4_DUMP_ETH_STATS_wrapper
1120 .opcode = MLX4_CMD_INFORM_FLR_DONE,
1122 .has_outbox = false,
1123 .out_is_imm = false,
1124 .encode_slave_id = false,
1128 /* flow steering commands */
1130 .opcode = MLX4_QP_FLOW_STEERING_ATTACH,
1132 .has_outbox = false,
1134 .encode_slave_id = false,
1136 .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
1139 .opcode = MLX4_QP_FLOW_STEERING_DETACH,
1141 .has_outbox = false,
1142 .out_is_imm = false,
1143 .encode_slave_id = false,
		.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
	},
};
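/*
 * Execute one command on behalf of a slave: DMA the slave's vhcr into
 * the master, look up the opcode in cmd_info[], run the verifier and the
 * wrapper (or pass the command straight through to firmware), then DMA
 * the outbox and resulting status back to the slave.
 */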
1149 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1150 struct mlx4_vhcr_cmd *in_vhcr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
1153 struct mlx4_cmd_info *cmd = NULL;
1154 struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
1155 struct mlx4_vhcr *vhcr;
1156 struct mlx4_cmd_mailbox *inbox = NULL;
	struct mlx4_cmd_mailbox *outbox = NULL;
	u64 in_param;
	u64 out_param;
	int ret = 0;
	int i;
	int err = 0;

1164 /* Create sw representation of Virtual HCR */
	vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
	if (!vhcr)
		return -ENOMEM;

1169 /* DMA in the vHCR */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1172 priv->mfunc.master.slave_state[slave].vhcr_dma,
1173 ALIGN(sizeof(struct mlx4_vhcr_cmd),
1174 MLX4_ACCESS_MEM_ALIGN), 1);
		if (ret) {
			mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
				 __func__, ret);
			kfree(vhcr);
			return ret;
		}
	}
1183 /* Fill SW VHCR fields */
1184 vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
1185 vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
1186 vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
1187 vhcr->token = be16_to_cpu(vhcr_cmd->token);
1188 vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
1189 vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
1190 vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
1192 /* Lookup command */
1193 for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
		if (vhcr->op == cmd_info[i].opcode) {
			cmd = &cmd_info[i];
			break;
		}
	}
	if (!cmd) {
		mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
			 vhcr->op, slave);
		vhcr_cmd->status = CMD_STAT_BAD_PARAM;
		goto out_status;
	}

1207 if (cmd->has_inbox) {
1208 vhcr->in_param &= INBOX_MASK;
1209 inbox = mlx4_alloc_cmd_mailbox(dev);
1210 if (IS_ERR(inbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			inbox = NULL;
			goto out_status;
		}

1216 if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
				    vhcr->in_param,
				    MLX4_MAILBOX_SIZE, 1)) {
1219 mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
1220 __func__, cmd->opcode);
			vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
			goto out_status;
		}
	}

1226 /* Apply permission and bound checks if applicable */
1227 if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
		mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
			  vhcr->op, slave, vhcr->in_modifier);
		vhcr_cmd->status = CMD_STAT_BAD_OP;
		goto out_status;
	}
1235 /* Allocate outbox */
1236 if (cmd->has_outbox) {
1237 outbox = mlx4_alloc_cmd_mailbox(dev);
1238 if (IS_ERR(outbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			outbox = NULL;
			goto out_status;
		}
	}

1245 /* Execute the command! */
	if (cmd->wrapper) {
		err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
				   cmd);
		if (cmd->out_is_imm)
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
	} else {
		in_param = cmd->has_inbox ? (u64) inbox->dma :
			vhcr->in_param;
		out_param = cmd->has_outbox ? (u64) outbox->dma :
			vhcr->out_param;
1256 err = __mlx4_cmd(dev, in_param, &out_param,
1257 cmd->out_is_imm, vhcr->in_modifier,
1258 vhcr->op_modifier, vhcr->op,
				 MLX4_CMD_TIME_CLASS_A,
				 MLX4_CMD_NATIVE);

1262 if (cmd->out_is_imm) {
1263 vhcr->out_param = out_param;
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
		}
	}

	if (err) {
		mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
			  vhcr->op, slave, vhcr->errno, err);
1272 vhcr_cmd->status = mlx4_errno_to_status(err);
		goto out_status;
	}

out_status:
	/* Write outbox if command completed successfully */
1278 if (cmd->has_outbox && !vhcr_cmd->status) {
1279 ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
				      vhcr->out_param,
				      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
		if (ret) {
			/* If we failed to write back the outbox after the
			 * command was successfully executed, we must fail this
			 * slave, as it is now in an undefined state
			 */
			mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
			goto out;
		}
	}

1292 /* DMA back vhcr result */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1295 priv->mfunc.master.slave_state[slave].vhcr_dma,
1296 ALIGN(sizeof(struct mlx4_vhcr),
1297 MLX4_ACCESS_MEM_ALIGN),
				      MLX4_CMD_WRAPPED);
		if (ret)
			mlx4_err(dev, "%s:Failed writing vhcr result\n",
				 __func__);
1302 else if (vhcr->e_bit &&
1303 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
1304 mlx4_warn(dev, "Failed to generate command completion "
1305 "eqe for slave %d\n", slave);
		vhcr->status = vhcr_cmd->status;
	}

out:
	kfree(vhcr);
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return ret;
}
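/*
 * Handle a single comm channel command from a slave. The 64-bit VHCR DMA
 * address is delivered 16 bits at a time by the VHCR0..VHCR_EN commands,
 * and VHCR_POST triggers actual command processing; any protocol
 * violation resets the slave.
 */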
1315 static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1316 u16 param, u8 toggle)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	u32 reply;
	u8 is_going_down = 0;
	int i;

1324 slave_state[slave].comm_toggle ^= 1;
1325 reply = (u32) slave_state[slave].comm_toggle << 31;
1326 if (toggle != slave_state[slave].comm_toggle) {
		mlx4_warn(dev, "Incorrect toggle %d from slave %d - *** MASTER STATE COMPROMISED ***\n",
			  toggle, slave);
		goto reset_slave;
	}
1331 if (cmd == MLX4_COMM_CMD_RESET) {
1332 mlx4_warn(dev, "Received reset from slave:%d\n", slave);
1333 slave_state[slave].active = false;
1334 for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
1335 slave_state[slave].event_eq[i].eqn = -1;
			slave_state[slave].event_eq[i].token = 0;
		}
		/* check if we are in the middle of FLR process,
		 * if so return "retry" status to the slave
		 */
1340 if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
1341 goto inform_slave_state;
1343 /* write the version in the event field */
		reply |= mlx4_comm_get_version();

		goto reset_slave;
	}

	/* command from slave in the middle of FLR */
	if (cmd != MLX4_COMM_CMD_RESET &&
	    MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
		mlx4_warn(dev, "slave:%d is trying to run cmd(0x%x) in the middle of FLR\n",
			  slave, cmd);
		return;
	}

	switch (cmd) {
1357 case MLX4_COMM_CMD_VHCR0:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
			goto reset_slave;
		slave_state[slave].vhcr_dma = ((u64) param) << 48;
		priv->mfunc.master.slave_state[slave].cookie = 0;
		mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
		break;
1364 case MLX4_COMM_CMD_VHCR1:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= ((u64) param) << 32;
		break;
1369 case MLX4_COMM_CMD_VHCR2:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= ((u64) param) << 16;
		break;
1374 case MLX4_COMM_CMD_VHCR_EN:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= param;
		slave_state[slave].active = true;
		break;
1380 case MLX4_COMM_CMD_VHCR_POST:
1381 if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
		    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
			goto reset_slave;
1384 down(&priv->cmd.slave_sem);
1385 if (mlx4_master_process_vhcr(dev, slave, NULL)) {
1386 mlx4_err(dev, "Failed processing vhcr for slave:%d,"
1387 " resetting slave.\n", slave);
			up(&priv->cmd.slave_sem);
			goto reset_slave;
		}
		up(&priv->cmd.slave_sem);
		break;
	default:
		mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
		goto reset_slave;
	}
1397 spin_lock(&priv->mfunc.master.slave_state_lock);
1398 if (!slave_state[slave].is_slave_going_down)
		slave_state[slave].last_cmd = cmd;
	else
		is_going_down = 1;
1402 spin_unlock(&priv->mfunc.master.slave_state_lock);
1403 if (is_going_down) {
		mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
			  cmd, slave);
		return;
	}
	__raw_writel((__force u32) cpu_to_be32(reply),
		     &priv->mfunc.comm[slave].slave_read);
	wmb();

	return;

reset_slave:
	/* cleanup any slave resources */
1417 mlx4_delete_all_resources_for_slave(dev, slave);
1418 spin_lock(&priv->mfunc.master.slave_state_lock);
1419 if (!slave_state[slave].is_slave_going_down)
1420 slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
1421 spin_unlock(&priv->mfunc.master.slave_state_lock);
	/* with slave in the middle of FLR, no need to clean resources again */
inform_slave_state:
1424 memset(&slave_state[slave].event_eq, 0,
1425 sizeof(struct mlx4_slave_event_eq_info));
1426 __raw_writel((__force u32) cpu_to_be32(reply),
		     &priv->mfunc.comm[slave].slave_read);
	wmb();
}
1431 /* master command processing */
1432 void mlx4_master_comm_channel(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work,
			     struct mlx4_mfunc_master_ctx,
			     comm_work);
1438 struct mlx4_mfunc *mfunc =
1439 container_of(master, struct mlx4_mfunc, master);
1440 struct mlx4_priv *priv =
1441 container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	__be32 *bit_vec;
	u32 comm_cmd;
	u32 vec;
	int i, j, slave;
	int toggle;
	int served = 0;
	int reported = 0;
	u32 slt;

1452 bit_vec = master->comm_arm_bit_vector;
1453 for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
1454 vec = be32_to_cpu(bit_vec[i]);
1455 for (j = 0; j < 32; j++) {
			if (!(vec & (1 << j)))
				continue;
			++reported;
1459 slave = (i * 32) + j;
1460 comm_cmd = swab32(readl(
1461 &mfunc->comm[slave].slave_write));
			slt = swab32(readl(&mfunc->comm[slave].slave_read))
				>> 31;
1464 toggle = comm_cmd >> 31;
1465 if (toggle != slt) {
				if (master->slave_state[slave].comm_toggle
				    != slt) {
					printk(KERN_INFO "slave %d out of sync. read toggle %d, state toggle %d. Resyncing.\n",
					       slave, slt,
					       master->slave_state[slave].comm_toggle);
					master->slave_state[slave].comm_toggle =
						slt;
				}
1475 mlx4_master_do_cmd(dev, slave,
1476 comm_cmd >> 16 & 0xff,
						   comm_cmd & 0xffff, toggle);
				++served;
			}
		}
	}

	if (reported && reported != served)
		mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
			  reported, served);

1488 if (mlx4_ARM_COMM_CHANNEL(dev))
		mlx4_warn(dev, "Failed to arm comm channel events\n");
}
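/*
 * Synchronize the comm channel toggles on startup: wait up to 5 seconds
 * for the read and write toggles to agree and adopt that value;
 * otherwise reset both sides to zero to recover the channel.
 */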
1492 static int sync_toggles(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int wr_toggle;
	int rd_toggle;
	unsigned long end;

1499 wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
1500 end = jiffies + msecs_to_jiffies(5000);
1502 while (time_before(jiffies, end)) {
1503 rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
1504 if (rd_toggle == wr_toggle) {
			priv->cmd.comm_toggle = rd_toggle;
			return 0;
		}

		cond_resched();
	}

	/*
	 * we could reach here if for example the previous VM using this
	 * function misbehaved and left the channel with unsynced state. We
	 * should fix this here and give this VM a chance to use a properly
	 * synced channel
	 */
	mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
1519 __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
1520 __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
	priv->cmd.comm_toggle = 0;

	return 0;
}
1526 int mlx4_multi_func_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
1529 struct mlx4_slave_state *s_state;
1530 int i, j, err, port;
1532 priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
					     &priv->mfunc.vhcr_dma,
					     GFP_KERNEL);
	if (!priv->mfunc.vhcr) {
		mlx4_err(dev, "Couldn't allocate vhcr.\n");
		return -ENOMEM;
	}

1540 if (mlx4_is_master(dev))
		priv->mfunc.comm =
			ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
1543 priv->fw.comm_base, MLX4_COMM_PAGESIZE);
	else
		priv->mfunc.comm =
			ioremap(pci_resource_start(dev->pdev, 2) +
1547 MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
1548 if (!priv->mfunc.comm) {
		mlx4_err(dev, "Couldn't map communication vector.\n");
		goto err_vhcr;
	}

1553 if (mlx4_is_master(dev)) {
1554 priv->mfunc.master.slave_state =
1555 kzalloc(dev->num_slaves *
1556 sizeof(struct mlx4_slave_state), GFP_KERNEL);
		if (!priv->mfunc.master.slave_state)
			goto err_comm;

1560 for (i = 0; i < dev->num_slaves; ++i) {
1561 s_state = &priv->mfunc.master.slave_state[i];
1562 s_state->last_cmd = MLX4_COMM_CMD_RESET;
1563 for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
1564 s_state->event_eq[j].eqn = -1;
1565 __raw_writel((__force u32) 0,
1566 &priv->mfunc.comm[i].slave_write);
1567 __raw_writel((__force u32) 0,
1568 &priv->mfunc.comm[i].slave_read);
1570 for (port = 1; port <= MLX4_MAX_PORTS; port++) {
1571 s_state->vlan_filter[port] =
1572 kzalloc(sizeof(struct mlx4_vlan_fltr),
1574 if (!s_state->vlan_filter[port]) {
					if (--port)
						kfree(s_state->vlan_filter[port]);
					goto err_slaves;
				}
1579 INIT_LIST_HEAD(&s_state->mcast_filters[port]);
			spin_lock_init(&s_state->lock);
		}

1584 memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
1585 priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
1586 INIT_WORK(&priv->mfunc.master.comm_work,
1587 mlx4_master_comm_channel);
1588 INIT_WORK(&priv->mfunc.master.slave_event_work,
1589 mlx4_gen_slave_eqe);
1590 INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
1591 mlx4_master_handle_slave_flr);
1592 spin_lock_init(&priv->mfunc.master.slave_state_lock);
1593 priv->mfunc.master.comm_wq =
1594 create_singlethread_workqueue("mlx4_comm");
		if (!priv->mfunc.master.comm_wq)
			goto err_slaves;

		if (mlx4_init_resource_tracker(dev))
			goto err_thread;

1601 sema_init(&priv->cmd.slave_sem, 1);
1602 err = mlx4_ARM_COMM_CHANNEL(dev);
		if (err) {
			mlx4_err(dev, "Failed to arm comm channel eq: %x\n",
				 err);
			goto err_resource;
		}
	} else {
1610 err = sync_toggles(dev);
		if (err) {
			mlx4_err(dev, "Couldn't sync toggles\n");
			goto err_comm;
		}

		sema_init(&priv->cmd.slave_sem, 1);
	}

	return 0;

err_resource:
1621 mlx4_free_resource_tracker(dev, RES_TR_FREE_ALL);
err_thread:
	flush_workqueue(priv->mfunc.master.comm_wq);
1624 destroy_workqueue(priv->mfunc.master.comm_wq);
err_slaves:
	while (--i) {
		for (port = 1; port <= MLX4_MAX_PORTS; port++)
			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
	}
	kfree(priv->mfunc.master.slave_state);

err_comm:
1632 iounmap(priv->mfunc.comm);
err_vhcr:
	dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
			  priv->mfunc.vhcr,
			  priv->mfunc.vhcr_dma);
	priv->mfunc.vhcr = NULL;
	return -ENOMEM;
}
1641 int mlx4_cmd_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

1645 mutex_init(&priv->cmd.hcr_mutex);
1646 sema_init(&priv->cmd.poll_sem, 1);
1647 priv->cmd.use_events = 0;
1648 priv->cmd.toggle = 1;
1650 priv->cmd.hcr = NULL;
1651 priv->mfunc.vhcr = NULL;
1653 if (!mlx4_is_slave(dev)) {
1654 priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
1655 MLX4_HCR_BASE, MLX4_HCR_SIZE);
1656 if (!priv->cmd.hcr) {
			mlx4_err(dev, "Couldn't map command register.\n");
			return -ENOMEM;
		}
	}

1662 priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
					 MLX4_MAILBOX_SIZE,
					 MLX4_MAILBOX_SIZE, 0);
	if (!priv->cmd.pool)
		goto err_hcr;

	return 0;

err_hcr:
1671 if (!mlx4_is_slave(dev))
		iounmap(priv->cmd.hcr);
	return -ENOMEM;
}
1676 void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, port;

1681 if (mlx4_is_master(dev)) {
1682 flush_workqueue(priv->mfunc.master.comm_wq);
1683 destroy_workqueue(priv->mfunc.master.comm_wq);
1684 for (i = 0; i < dev->num_slaves; i++) {
1685 for (port = 1; port <= MLX4_MAX_PORTS; port++)
				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
		}
		kfree(priv->mfunc.master.slave_state);
	}

1691 iounmap(priv->mfunc.comm);
1692 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
1693 priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
	priv->mfunc.vhcr = NULL;
}
1697 void mlx4_cmd_cleanup(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

1701 pci_pool_destroy(priv->cmd.pool);
1703 if (!mlx4_is_slave(dev))
		iounmap(priv->cmd.hcr);
}
1708 * Switch to using events to issue FW commands (can only be called
1709 * after event queue for command events has been initialized).
1711 int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

1717 priv->cmd.context = kmalloc(priv->cmd.max_cmds *
1718 sizeof (struct mlx4_cmd_context),
	if (!priv->cmd.context)
		return -ENOMEM;

1723 for (i = 0; i < priv->cmd.max_cmds; ++i) {
1724 priv->cmd.context[i].token = i;
		priv->cmd.context[i].next = i + 1;
	}

1728 priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
1729 priv->cmd.free_head = 0;
1731 sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
1732 spin_lock_init(&priv->cmd.context_lock);
1734 for (priv->cmd.token_mask = 1;
1735 priv->cmd.token_mask < priv->cmd.max_cmds;
	     priv->cmd.token_mask <<= 1)
		; /* nothing */
1738 --priv->cmd.token_mask;
1740 down(&priv->cmd.poll_sem);
	priv->cmd.use_events = 1;

	return 0;
}
1747 * Switch back to polling (used when shutting down the device)
1749 void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

1754 priv->cmd.use_events = 0;
1756 for (i = 0; i < priv->cmd.max_cmds; ++i)
1757 down(&priv->cmd.event_sem);
1759 kfree(priv->cmd.context);
	up(&priv->cmd.poll_sem);
}
1764 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;

1768 mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
				      &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	return mailbox;
}
1781 EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
1783 void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
1784 struct mlx4_cmd_mailbox *mailbox)
{
	if (!mailbox)
		return;

	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
1792 EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
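/* channel interface revision in the upper byte, version in the lower byte */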
u32 mlx4_comm_get_version(void)
{
	return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
}