/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <asm-generic/kmap_types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"
enum {
	NUM_LONG_LISTS	  = 2,
	NUM_MED_LISTS	  = 64,
	LONG_LIST_SIZE	  = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
				MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE	  = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};
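/*
 * LONG_LIST_SIZE appears to be sized so one cached message can carry the
 * page pointers for 2GB of memory (2GB / PAGE_SIZE entries of 8 bytes
 * each) plus the 16 inline bytes, with one data block of slack;
 * MED_LIST_SIZE covers the 16 inline bytes plus a single data block.
 */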
enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};
enum {
	MLX5_CMD_STAT_OK			= 0x0,
	MLX5_CMD_STAT_INT_ERR			= 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
	MLX5_CMD_STAT_RES_BUSY			= 0x6,
	MLX5_CMD_STAT_LIM_ERR			= 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
	MLX5_CMD_STAT_IX_ERR			= 0xa,
	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
};
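/*
 * The command status codes above are returned by the HCA in the output
 * mailbox header; cmd_status_str() and mlx5_cmd_status_to_err() at the
 * bottom of this file translate them to strings and errno values.
 */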
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in		= in;
	ent->out	= out;
	ent->callback	= cbk;
	ent->context	= context;
	ent->cmd	= cmd;
	ent->page_queue	= page_queue;

	return ent;
}
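/*
 * Note: a non-NULL callback selects GFP_ATOMIC above, presumably because
 * asynchronous commands may be issued from contexts that cannot sleep.
 */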
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	token = cmd->token++ % 255 + 1;
	spin_unlock(&cmd->token_lock);

	return token;
}
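/*
 * Tokens cycle through 1..255; 0 is never handed out, so a zeroed
 * descriptor or mailbox block can never carry a valid-looking token. The
 * token is stamped into the descriptor and into every mailbox block of a
 * command, letting the signature checks catch stale or mismatched blocks.
 */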
static int alloc_ent(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}
static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
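/*
 * The entry allocator is a plain bitmap: a set bit in cmd->bitmask means
 * the corresponding command queue slot is free. alloc_ent() claims the
 * first free slot under alloc_lock and free_ent() releases it. The last
 * slot (index cmd->max_reg_cmds) is never part of the bitmask; it is
 * reserved for page commands (see cmd_work_handler() below).
 */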
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}
static u8 xor8_buf(void *buf, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= ptr[i];

	return sum;
}
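/*
 * XOR-8 signature scheme: a signature byte is written as the bitwise
 * complement of the XOR of all other covered bytes, so XOR-ing the whole
 * covered region, signature included, yields 0xff for an intact buffer.
 * Worked example: for bytes {0x12, 0x34}, 0x12 ^ 0x34 = 0x26, the
 * signature is ~0x26 = 0xd9, and 0x12 ^ 0x34 ^ 0xd9 = 0xff.
 */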
static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}
static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token)
{
	block->token = token;
	block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2);
	block->sig = ~xor8_buf(block, sizeof(*block) - 1);
}
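/*
 * Each mailbox block thus carries two signatures: ctrl_sig covers the
 * control trailer after the data area (excluding the two signature bytes
 * themselves) and sig covers the entire block except its own byte.
 */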
static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token)
{
	struct mlx5_cmd_mailbox *next = msg->next;

	while (next) {
		calc_block_sig(next->buf, token);
		next = next->next;
	}
}
static void set_signature(struct mlx5_cmd_work_ent *ent)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token);
	calc_chain_sig(ent->out, ent->token);
}
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}
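/*
 * Ownership handshake: cmd_work_handler() sets CMD_OWNER_HW in status_own
 * before ringing the doorbell and firmware clears the bit on completion.
 * The loop above grants firmware one extra second beyond the nominal
 * command timeout before declaring -ETIMEDOUT.
 */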
static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}
static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int err;
	u8 sig;

	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	while (next) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}
static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}
const char *mlx5_command_str(int command)
{
	switch (command) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
		return "QUERY_HCA_CAP";

	case MLX5_CMD_OP_SET_HCA_CAP:
		return "SET_HCA_CAP";

	case MLX5_CMD_OP_QUERY_ADAPTER:
		return "QUERY_ADAPTER";

	case MLX5_CMD_OP_INIT_HCA:
		return "INIT_HCA";

	case MLX5_CMD_OP_TEARDOWN_HCA:
		return "TEARDOWN_HCA";

	case MLX5_CMD_OP_QUERY_PAGES:
		return "QUERY_PAGES";

	case MLX5_CMD_OP_MANAGE_PAGES:
		return "MANAGE_PAGES";

	case MLX5_CMD_OP_CREATE_MKEY:
		return "CREATE_MKEY";

	case MLX5_CMD_OP_QUERY_MKEY:
		return "QUERY_MKEY";

	case MLX5_CMD_OP_DESTROY_MKEY:
		return "DESTROY_MKEY";

	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
		return "QUERY_SPECIAL_CONTEXTS";

	case MLX5_CMD_OP_CREATE_EQ:
		return "CREATE_EQ";

	case MLX5_CMD_OP_DESTROY_EQ:
		return "DESTROY_EQ";

	case MLX5_CMD_OP_QUERY_EQ:
		return "QUERY_EQ";

	case MLX5_CMD_OP_CREATE_CQ:
		return "CREATE_CQ";

	case MLX5_CMD_OP_DESTROY_CQ:
		return "DESTROY_CQ";

	case MLX5_CMD_OP_QUERY_CQ:
		return "QUERY_CQ";

	case MLX5_CMD_OP_MODIFY_CQ:
		return "MODIFY_CQ";

	case MLX5_CMD_OP_CREATE_QP:
		return "CREATE_QP";

	case MLX5_CMD_OP_DESTROY_QP:
		return "DESTROY_QP";

	case MLX5_CMD_OP_RST2INIT_QP:
		return "RST2INIT_QP";

	case MLX5_CMD_OP_INIT2RTR_QP:
		return "INIT2RTR_QP";

	case MLX5_CMD_OP_RTR2RTS_QP:
		return "RTR2RTS_QP";

	case MLX5_CMD_OP_RTS2RTS_QP:
		return "RTS2RTS_QP";

	case MLX5_CMD_OP_SQERR2RTS_QP:
		return "SQERR2RTS_QP";

	case MLX5_CMD_OP_2ERR_QP:
		return "2ERR_QP";

	case MLX5_CMD_OP_RTS2SQD_QP:
		return "RTS2SQD_QP";

	case MLX5_CMD_OP_SQD2RTS_QP:
		return "SQD2RTS_QP";

	case MLX5_CMD_OP_2RST_QP:
		return "2RST_QP";

	case MLX5_CMD_OP_QUERY_QP:
		return "QUERY_QP";

	case MLX5_CMD_OP_CONF_SQP:
		return "CONF_SQP";

	case MLX5_CMD_OP_MAD_IFC:
		return "MAD_IFC";

	case MLX5_CMD_OP_INIT2INIT_QP:
		return "INIT2INIT_QP";

	case MLX5_CMD_OP_SUSPEND_QP:
		return "SUSPEND_QP";

	case MLX5_CMD_OP_UNSUSPEND_QP:
		return "UNSUSPEND_QP";

	case MLX5_CMD_OP_SQD2SQD_QP:
		return "SQD2SQD_QP";

	case MLX5_CMD_OP_ALLOC_QP_COUNTER_SET:
		return "ALLOC_QP_COUNTER_SET";

	case MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET:
		return "DEALLOC_QP_COUNTER_SET";

	case MLX5_CMD_OP_QUERY_QP_COUNTER_SET:
		return "QUERY_QP_COUNTER_SET";

	case MLX5_CMD_OP_CREATE_PSV:
		return "CREATE_PSV";

	case MLX5_CMD_OP_DESTROY_PSV:
		return "DESTROY_PSV";

	case MLX5_CMD_OP_QUERY_PSV:
		return "QUERY_PSV";

	case MLX5_CMD_OP_QUERY_SIG_RULE_TABLE:
		return "QUERY_SIG_RULE_TABLE";

	case MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE:
		return "QUERY_BLOCK_SIZE_TABLE";

	case MLX5_CMD_OP_CREATE_SRQ:
		return "CREATE_SRQ";

	case MLX5_CMD_OP_DESTROY_SRQ:
		return "DESTROY_SRQ";

	case MLX5_CMD_OP_QUERY_SRQ:
		return "QUERY_SRQ";

	case MLX5_CMD_OP_ARM_RQ:
		return "ARM_RQ";

	case MLX5_CMD_OP_RESIZE_SRQ:
		return "RESIZE_SRQ";

	case MLX5_CMD_OP_ALLOC_PD:
		return "ALLOC_PD";

	case MLX5_CMD_OP_DEALLOC_PD:
		return "DEALLOC_PD";

	case MLX5_CMD_OP_ALLOC_UAR:
		return "ALLOC_UAR";

	case MLX5_CMD_OP_DEALLOC_UAR:
		return "DEALLOC_UAR";

	case MLX5_CMD_OP_ATTACH_TO_MCG:
		return "ATTACH_TO_MCG";

	case MLX5_CMD_OP_DETACH_FROM_MCG:
		return "DETACH_FROM_MCG";

	case MLX5_CMD_OP_ALLOC_XRCD:
		return "ALLOC_XRCD";

	case MLX5_CMD_OP_DEALLOC_XRCD:
		return "DEALLOC_XRCD";

	case MLX5_CMD_OP_ACCESS_REG:
		return "ACCESS_REG";

	default: return "unknown command opcode";
	}
}
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	struct mlx5_cmd_mailbox *next = msg->next;
	int data_only;
	int offset = 0;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	while (next && offset < msg->len) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		ent->idx = alloc_ent(cmd);
		if (ent->idx < 0) {
			mlx5_core_err(dev, "failed to allocate command entry\n");
			up(sem);
			return;
		}
	} else {
		ent->idx = cmd->max_reg_cmds;
	}

	ent->token = alloc_token(cmd);
	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	if (!cmd->checksum_disabled)
		set_signature(ent);
	dump_command(dev, ent, 1);
	ktime_get_ts(&ent->ts1);

	/* ring doorbell after the descriptor is valid */
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mlx5_core_dbg(dev, "write 0x%x to command doorbell\n", 1 << ent->idx);
	if (cmd->mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}
}
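/*
 * Submission path in short: claim a regular slot (or the reserved page
 * command slot), build the descriptor, pass ownership to hardware and ring
 * the doorbell. In polling mode the handler then spins on the ownership
 * bit and invokes mlx5_cmd_comp_handler() itself; in event mode the
 * completion arrives through the command EQ instead.
 */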
static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

	return be16_to_cpu(hdr->opcode);
}
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
		err = ent->ret;
	} else {
		if (!wait_for_completion_timeout(&ent->done, timeout))
			err = -ETIMEDOUT;
		else
			err = ent->ret;
	}
	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err,
		      deliv_status_to_str(ent->status), ent->status);

	return err;
}
/*  Notes:
 *	1. Callback functions may not sleep
 *	2. Page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	ktime_t t1, t2, delta;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, callback, context, page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	if (!callback)
		init_completion(&ent->done);

	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (!callback) {
		err = wait_func(dev, ent);
		if (err == -ETIMEDOUT)
			goto out;

		t1 = timespec_to_ktime(ent->ts1);
		t2 = timespec_to_ktime(ent->ts2);
		delta = ktime_sub(t2, t1);
		ds = ktime_to_ns(delta);
		op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
		if (op < ARRAY_SIZE(cmd->stats)) {
			stats = &cmd->stats[op];
			spin_lock(&stats->lock);
			stats->sum += ds;
			++stats->n;
			spin_unlock(&stats->lock);
		}
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
				   "fw exec time for %s is %lld nsec\n",
				   mlx5_command_str(op), ds);
		*status = ent->status;
		free_cmd(ent);
	}

	return err;

out_free:
	free_cmd(ent);
out:
	return err;
}
static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}
static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};
static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}
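/*
 * Message layout: the first 16 bytes of a command travel inline in the
 * descriptor (msg->first.data); anything beyond that is carried in a
 * chain of DMA mailbox blocks holding MLX5_CMD_DATA_BLOCK_SIZE bytes each.
 */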
static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		if (xor8_buf(block, sizeof(*block)) != 0xff)
			return -EINVAL;

		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}
static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
				      &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
	mailbox->next = NULL;

	return mailbox;
}
static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}
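/*
 * Note the loop above builds the mailbox chain in reverse: every new box
 * is pushed at the head and its block->next points at the previous head,
 * so the box allocated last becomes block 0 and block_num ascends when
 * walking the chain from msg->next.
 */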
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}
static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;

	ptr = kzalloc(count, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	if (copy_from_user(ptr, buf, count)) {
		kfree(ptr);
		return -EFAULT;
	}

	dbg->in_msg = ptr;
	dbg->inlen = count;
	*pos = count;

	return count;
}
static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int copy;

	if (*pos)
		return 0;

	if (!dbg->out_msg)
		return -ENOMEM;

	copy = min_t(int, count, dbg->outlen);
	if (copy_to_user(buf, dbg->out_msg, copy))
		return -EFAULT;

	*pos += copy;

	return copy;
}
static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};
static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen[8];
	int err;

	if (*pos)
		return 0;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	if (copy_to_user(buf, &outlen, err))
		return -EFAULT;

	*pos += err;

	return err;
}
static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8];
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	outlen_str[7] = 0;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err < 0)
		return err;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}
static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};
static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}
static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}
static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return -ENOMEM;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	clean_debug_files(dev);
	return err;
}
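/*
 * Rough usage of the debugfs files above (a sketch inferred from the
 * handlers, not a documented ABI): write raw command bytes to "cmd/in",
 * write the expected output length to "cmd/out_len", write "go" to
 * "cmd/run" to execute the command, then read the result bytes from
 * "cmd/out" and the completion status from "cmd/status".
 */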
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);

	cmd->mode = CMD_MODE_EVENTS;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);
	cmd->mode = CMD_MODE_POLLING;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}
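/*
 * Both mode switch helpers above quiesce the interface the same way:
 * taking all max_reg_cmds slots of cmd->sem plus cmd->pages_sem guarantees
 * no command is in flight while cmd->mode changes, and the workqueue flush
 * catches work items that were queued but not yet started.
 */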
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;

	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ktime_get_ts(&ent->ts2);
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				ent->status = ent->lay->status_own >> 1;
				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}
			free_ent(cmd, ent->idx);
			if (ent->callback) {
				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);
static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, typeof(*msg), list);
			/* For cached lists, we must explicitly state what is
			 * the real size
			 */
			msg->len = in_size;
			list_del(&msg->list);
		}
		spin_unlock(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, in_size);

	return msg;
}
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	if (msg->cache) {
		spin_lock(&msg->cache->lock);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock(&msg->cache->lock);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}
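/*
 * Messages carved from the two size class caches keep a back pointer in
 * msg->cache and are simply returned to their list here; only messages
 * allocated outside the cache are actually torn down.
 */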
static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	int err;
	u8 status = 0;

	pages_queue = is_manage_pages(in);

	inb = alloc_msg(dev, in_size);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, NULL, NULL, pages_queue, &status);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	mlx5_free_cmd_msg(dev, outb);

out_in:
	free_msg(dev, inb);
	return err;
}
EXPORT_SYMBOL(mlx5_cmd_exec);
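/*
 * Typical caller pattern (an illustrative sketch only; the mailbox type
 * names below are hypothetical stand-ins for whatever command is issued):
 *
 *	struct mlx5_query_adapter_mbox_in in;
 *	struct mlx5_query_adapter_mbox_out out;
 *	int err;
 *
 *	memset(&in, 0, sizeof(in));
 *	memset(&out, 0, sizeof(out));
 *	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER);
 *	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
 *	if (!err)
 *		err = mlx5_cmd_status_to_err(&out.hdr);
 */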
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}
static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}
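/*
 * The cache preallocates whole mailbox chains at init time, presumably so
 * alloc_msg() can hand out a ready made chain on the hot path without
 * touching the DMA pool for every command.
 */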
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	cmd->cmd_buf = (void *)__get_free_pages(GFP_ATOMIC, 0);
	if (!cmd->cmd_buf) {
		err = -ENOMEM;
		goto err_free_pool;
	}

	cmd->dma = dma_map_single(&dev->pdev->dev, cmd->cmd_buf, PAGE_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, cmd->dma)) {
		err = -ENOMEM;
		goto err_free;
	}

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_map;
	}

	if (cmd->log_sz + cmd->log_stride > PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_map;
	}

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
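	/*
	 * One slot of the 1 << log_sz queue is held back from the bitmask:
	 * regular commands share max_reg_cmds slots while the last slot is
	 * dedicated to page (MANAGE_PAGES) commands, serialized by pages_sem.
	 */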
	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_map;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_map;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to create command cache\n");
		goto err_map;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_map:
	dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);

err_free:
	free_pages((unsigned long)cmd->cmd_buf, 0);

err_free_pool:
	pci_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cmd->cmd_buf, 0);
	pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);
static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
{
	if (!hdr->status)
		return 0;

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(hdr->status), hdr->status,
		be32_to_cpu(hdr->syndrome));

	switch (hdr->status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}