/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID	(1ull << 63)
#define MLX4_MAC_MASK	0x7fffffffffffffffULL
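/*
 * The resource tracker records, per slave (VF), every HCA resource the
 * slave owns -- QPs, CQs, SRQs, MPTs, MTTs, EQs, MACs and counters --
 * so that the master can validate FW commands issued on a slave's
 * behalf and reclaim everything if the slave dies or is unloaded.
 */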
struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};

struct res_common {
	struct list_head list;
	u32 res_id;
	int owner;
	int state;
	int from_state;
	int to_state;
	int removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
	enum mlx4_steer_type steer;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

static inline const char *qp_states_str(enum res_qp_states state)
{
	switch (state) {
	case RES_QP_BUSY: return "RES_QP_BUSY";
	case RES_QP_RESERVED: return "RES_QP_RESERVED";
	case RES_QP_MAPPED: return "RES_QP_MAPPED";
	case RES_QP_HW: return "RES_QP_HW";
	default: return "Unknown";
	}
}

struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;
	spinlock_t mcg_spl;
	int local_qpn;
};
enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

static inline const char *srq_states_str(enum res_srq_states state)
{
	switch (state) {
	case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
	case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
	case RES_SRQ_HW: return "RES_SRQ_HW";
	default: return "Unknown";
	}
}

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

static inline const char *counter_states_str(enum res_counter_states state)
{
	switch (state) {
	case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
	case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_counter {
	struct res_common com;
	int port;
};

/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	default: return "Unknown resource type !!!";
	}
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
				GFP_ATOMIC|__GFP_NOWARN);

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		for (i = 0 ; i < dev->num_slaves; i++)
			mlx4_delete_all_resources_for_slave(dev, i);

		kfree(priv->mfunc.master.res_tracker.slave_list);
	}
}
static void update_ud_gid(struct mlx4_dev *dev,
			  struct mlx4_qp_context *qp_ctx, u8 slave)
{
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
		 slave, qp_ctx->pri_path.mgid_index);
}

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, int res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				 res_id);
}
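/*
 * get_res()/put_res() act as a per-resource try-lock: get_res() moves
 * the entry to RES_ANY_BUSY under the tracker spinlock, remembering
 * the previous state in from_state, and fails with -EBUSY if someone
 * else already holds it; put_res() restores the saved state.
 */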
static int get_res(struct mlx4_dev *dev, int slave, int res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENOENT;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;
	mlx4_dbg(dev, "res %s id 0x%x to busy\n",
		 ResourceType(type), r->res_id);

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    int res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, int res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;

	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
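/*
 * add_res_range() creates one tracker entry per id in
 * [base, base + count).  Each entry is keyed by its id in the type's
 * radix tree and also linked on the owning slave's per-type list, so
 * both lookup-by-id and whole-slave cleanup walks stay cheap.
 */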
static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct radix_tree_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = radix_tree_insert(root, base + i, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= 0; --i)
		radix_tree_delete(&tracker->res_tree[type], base + i);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_QP_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	default:
		return -EINVAL;
	}
}
static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = radix_tree_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = radix_tree_lookup(&tracker->res_tree[type], i);
		radix_tree_delete(&tracker->res_tree[type], i);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
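/*
 * State changes that involve a FW command follow a two-phase pattern:
 * *_res_start_move_to() validates the transition and parks the entry
 * in the BUSY state (recording from_state/to_state), the FW command
 * runs without the lock held, and then res_end_move() commits the new
 * state or res_abort_move() rolls back to the old one.
 */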
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
					 r->com.res_id);
				err = -EINVAL;
			}
			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = (struct res_qp *)r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = (struct res_mpt *)r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn);
}
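/*
 * For RES_OP_RESERVE the slave packs its request into in_param: the
 * low 32 bits carry the number of QPs to reserve and the high 32 bits
 * the required alignment.  The base of the allocated range is handed
 * back to the slave through the low 32 bits of out_param.
 */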
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err)
			return err;

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1)
		return -ENOMEM;

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err)
		__mlx4_free_mtt_range(dev, base, order);
	else
		set_param_l(out_param, base);

	return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = __mlx4_mr_reserve(dev);
		if (index == -1)
			break;
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			__mlx4_mr_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mr_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err)
			break;

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err)
			break;

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;
	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			kfree(res);
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		kfree(res);
	}
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = get_param_l(out_param);
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	return 0;
}
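/*
 * The ALLOC_RES/FREE_RES wrappers below are the single entry points
 * for slave allocation requests: vhcr->in_modifier selects the
 * resource type and vhcr->op_modifier the operation (reserve, map
 * ICM, or both), and the per-type helpers above do the real work.
 */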
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err)
		__mlx4_free_mtt_range(dev, base, order);
	return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		__mlx4_mr_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mr_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = get_param_l(out_param);
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	return 0;
}
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}
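/*
 * qp_get_mtt_size() computes how many MTT entries the QP's buffer
 * needs.  A minimal worked example, assuming illustrative values:
 * with log_sq_size = 6 and log_sq_stride = 2 the SQ occupies
 * 1 << (6 + 2 + 4) = 4 KB; with no RQ (srq/rss/xrc set) and
 * page_offset = 0, a 12-bit page_shift (4 KB pages) gives
 * roundup_pow_of_two(4096 >> 12) = 1 MTT entry.
 */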
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
	int sq_size;
	int rq_size;
	int total_mem;
	int total_pages;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}
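/*
 * SW2HW_MPT hands a memory region to the HW.  Unless the MPT is
 * physical (no translation), the referenced MTT range must exist, be
 * owned by the slave and be large enough; on success the MTT's
 * ref_count is raised so it cannot be freed while the MR still points
 * at it.
 */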
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}
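/*
 * RST2INIT pins everything the QP context points at: the MTT range,
 * the send and receive CQs and, if used, the SRQ all get their
 * ref_counts raised before the command is forwarded to FW, and the
 * pointers are cached in the res_qp so 2RST can drop them again.
 */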
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
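/*
 * WRITE_MTT from a slave is executed in software rather than being
 * passed to FW: the wrapper locates an MTT range owned by the slave
 * that contains the target window, converts the big-endian inbox
 * page list in place, and calls __mlx4_write_mtt() directly.
 */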
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
			    we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW) {
		err = -EBUSY;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}

int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW) {
		err = -EBUSY;
		goto ex_put;
	}

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}

int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}

int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;
	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;

	update_ud_gid(dev, qpc, (u8)slave);

	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
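/*
 * Multicast attachments are tracked per QP on rqp->mcg_list so that
 * detach_qp() can undo every attach a dying slave left behind.
 */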
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}

static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer)
{
	struct res_gid *res;
	int err;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
		err = -EEXIST;
	} else {
		memcpy(res->gid, gid, 16);
		res->prot = prot;
		res->steer = steer;
		list_add_tail(&res->list, &rqp->mcg_list);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}

static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer)
{
	struct res_gid *res;
	int err;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer)
		err = -EINVAL;
	else {
		list_del(&res->list);
		kfree(res);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err, err1;
	int qpn;
	struct res_qp *rqp;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = add_mcg_res(dev, slave, rqp, gid, prot, type);
		if (err)
			goto ex_put;

		err = mlx4_qp_attach_common(dev, &qp, gid,
					    block_loopback, prot, type);
		if (err)
			goto ex_rem;
	} else {
		err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
		if (err)
			goto ex_put;
		err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
	}

	put_res(dev, slave, qpn, RES_QP);
	return err;

ex_rem:
	/* ignore error return below, already in error */
	err1 = rem_mcg_res(dev, slave, rqp, gid, prot, type);
ex_put:
	put_res(dev, slave, qpn, RES_QP);

	return err;
}
enum {
	BUSY_MAX_RETRIES = 10
};
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}

static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	int err;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		qp.qpn = rqp->local_qpn;
		err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
					    rgid->steer);
		list_del(&rgid->list);
		kfree(rgid);
	}
}
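/*
 * Before tearing a slave down, every resource it owns must first be
 * forced into the BUSY state so no in-flight command wrapper can
 * still be using it.  move_all_busy() keeps retrying (the "removing"
 * flag marks entries already claimed) and gives up after 5 seconds,
 * printing the stragglers on the final pass.
 */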
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%x is busy\n",
							 ResourceType(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}

static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
			  "for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_QP],
							  qp->com.res_id);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed"
							 " to move slave %d qpn %d to"
							 " reset\n", slave,
							 qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_SRQ],
							  srqn);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed"
							 " to move slave %d srq %d to"
							 " SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_CQ],
							  cqn);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed"
							 " to move slave %d cq %d to"
							 " SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mr_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_MPT],
							  mptn);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mr_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed"
							 " to move slave %d mpt %d to"
							 " SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	LIST_HEAD(tlist);
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_MTT],
							  base);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	LIST_HEAD(tlist);
	int eqn;
	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_EQ],
							  eqn);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {
						cond_resched();
						continue;
					}
					err = mlx4_cmd_box(dev, slave, 0,
							   eqn & 0xff, 0,
							   MLX4_CMD_HW2SW_EQ,
							   MLX4_CMD_TIME_CLASS_A,
							   MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed"
							 " to move slave %d eqs %d to"
							 " SW ownership\n", slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
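/*
 * Teardown order matters: QPs go first (dropping their references on
 * CQs, SRQs and MTTs), then SRQs, then CQs, and MRs before the MTTs
 * they point at, so every ref_count has returned to zero by the time
 * the referenced resource is deleted.
 */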
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	/*VLAN*/
	rem_slave_macs(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}