2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
48 #define MLX4_MAC_VALID (1ull << 63)
49 #define MLX4_MAC_MASK 0x7fffffffffffffffULL
53 struct list_head list;
59 struct list_head list;
73 struct list_head list;
75 enum mlx4_protocol prot;
76 enum mlx4_steer_type steer;
80 RES_QP_BUSY = RES_ANY_BUSY,
82 /* QP number was allocated */
85 /* ICM memory for QP context was mapped */
88 /* QP is in hw ownership */
93 struct res_common com;
98 struct list_head mcg_list;
103 enum res_mtt_states {
104 RES_MTT_BUSY = RES_ANY_BUSY,
108 static inline const char *mtt_states_str(enum res_mtt_states state)
111 case RES_MTT_BUSY: return "RES_MTT_BUSY";
112 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
113 default: return "Unknown";
118 struct res_common com;
123 enum res_mpt_states {
124 RES_MPT_BUSY = RES_ANY_BUSY,
131 struct res_common com;
137 RES_EQ_BUSY = RES_ANY_BUSY,
143 struct res_common com;
148 RES_CQ_BUSY = RES_ANY_BUSY,
154 struct res_common com;
159 enum res_srq_states {
160 RES_SRQ_BUSY = RES_ANY_BUSY,
166 struct res_common com;
172 enum res_counter_states {
173 RES_COUNTER_BUSY = RES_ANY_BUSY,
174 RES_COUNTER_ALLOCATED,
178 struct res_common com;
183 static const char *ResourceType(enum mlx4_resource rt)
186 case RES_QP: return "RES_QP";
187 case RES_CQ: return "RES_CQ";
188 case RES_SRQ: return "RES_SRQ";
189 case RES_MPT: return "RES_MPT";
190 case RES_MTT: return "RES_MTT";
191 case RES_MAC: return "RES_MAC";
192 case RES_EQ: return "RES_EQ";
193 case RES_COUNTER: return "RES_COUNTER";
194 default: return "Unknown resource type !!!";
/*
 * Set up the master's per-slave resource tracker: one slave_list entry
 * (per-type resource lists plus a mutex) for each slave, and one radix
 * tree per resource type, protected by the tracker spinlock.
 * NOTE(review): this listing has gaps in the original numbering; the
 * error paths (e.g. the -ENOMEM return after kzalloc) are not visible.
 */
198 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
200 struct mlx4_priv *priv = mlx4_priv(dev);
204 priv->mfunc.master.res_tracker.slave_list =
205 kzalloc(dev->num_slaves * sizeof(struct slave_list),
207 if (!priv->mfunc.master.res_tracker.slave_list)
210 for (i = 0 ; i < dev->num_slaves; i++) {
211 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
212 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
213 slave_list[i].res_list[t]);
214 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
/* radix trees use GFP_ATOMIC: lookups/inserts happen under a spinlock */
217 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
219 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
220 INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
221 GFP_ATOMIC|__GFP_NOWARN);
223 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
/*
 * Tear down the resource tracker: release every slave's remaining
 * tracked resources, then free the slave_list array.  (Lines hidden by
 * the numbering gaps presumably NULL the pointer afterwards -- TODO
 * confirm.)
 */
227 void mlx4_free_resource_tracker(struct mlx4_dev *dev)
229 struct mlx4_priv *priv = mlx4_priv(dev);
232 if (priv->mfunc.master.res_tracker.slave_list) {
233 for (i = 0 ; i < dev->num_slaves; i++)
234 mlx4_delete_all_resources_for_slave(dev, i);
236 kfree(priv->mfunc.master.res_tracker.slave_list);
/*
 * For UD QPs, overwrite the GID index in the QP context with a value
 * derived from the slave id (0x80 | slave) so a guest cannot transmit
 * with another function's GID.  Transport type is read from bits 16-23
 * of qpc->flags.
 */
240 static void update_ud_gid(struct mlx4_dev *dev,
241 struct mlx4_qp_context *qp_ctx, u8 slave)
243 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
245 if (MLX4_QP_ST_UD == ts)
246 qp_ctx->pri_path.mgid_index = 0x80 | slave;
248 mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
249 slave, qp_ctx->pri_path.mgid_index);
252 static int mpt_mask(struct mlx4_dev *dev)
254 return dev->caps.num_mpts - 1;
257 static void *find_res(struct mlx4_dev *dev, int res_id,
258 enum mlx4_resource type)
260 struct mlx4_priv *priv = mlx4_priv(dev);
262 return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
/*
 * Claim a tracked resource for use: verify it exists, is owned by
 * @slave and is not already busy, then mark it RES_ANY_BUSY (saving
 * the previous state in from_state so put_res() can restore it) and
 * return it through @res.  Pair with put_res().
 * NOTE(review): numbering gaps hide the error returns for the
 * not-found / busy / wrong-owner branches -- TODO confirm codes.
 */
266 static int get_res(struct mlx4_dev *dev, int slave, int res_id,
267 enum mlx4_resource type,
270 struct res_common *r;
273 spin_lock_irq(mlx4_tlock(dev));
274 r = find_res(dev, res_id, type);
280 if (r->state == RES_ANY_BUSY) {
285 if (r->owner != slave) {
290 r->from_state = r->state;
291 r->state = RES_ANY_BUSY;
292 mlx4_dbg(dev, "res %s id 0x%x to busy\n",
293 ResourceType(type), r->res_id);
/* @res is an out-pointer of caller-specific type; stored as res_common */
296 *((struct res_common **)res) = r;
299 spin_unlock_irq(mlx4_tlock(dev));
/*
 * Report which slave owns resource @res_id of @type via *slave.
 * NOTE(review): this takes plain spin_lock() on mlx4_tlock() while the
 * rest of the file uses spin_lock_irq() on the same lock -- verify the
 * callers cannot race with the irq-disabling paths, or switch to the
 * irq-save variant.
 */
303 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
304 enum mlx4_resource type,
305 int res_id, int *slave)
308 struct res_common *r;
314 spin_lock(mlx4_tlock(dev))
316 r = find_res(dev, id, type);
321 spin_unlock(mlx4_tlock(dev));
326 static void put_res(struct mlx4_dev *dev, int slave, int res_id,
327 enum mlx4_resource type)
329 struct res_common *r;
331 spin_lock_irq(mlx4_tlock(dev));
332 r = find_res(dev, res_id, type);
334 r->state = r->from_state;
335 spin_unlock_irq(mlx4_tlock(dev));
338 static struct res_common *alloc_qp_tr(int id)
342 ret = kzalloc(sizeof *ret, GFP_KERNEL);
346 ret->com.res_id = id;
347 ret->com.state = RES_QP_RESERVED;
349 INIT_LIST_HEAD(&ret->mcg_list);
350 spin_lock_init(&ret->mcg_spl);
355 static struct res_common *alloc_mtt_tr(int id, int order)
359 ret = kzalloc(sizeof *ret, GFP_KERNEL);
363 ret->com.res_id = id;
365 ret->com.state = RES_MTT_ALLOCATED;
366 atomic_set(&ret->ref_count, 0);
371 static struct res_common *alloc_mpt_tr(int id, int key)
375 ret = kzalloc(sizeof *ret, GFP_KERNEL);
379 ret->com.res_id = id;
380 ret->com.state = RES_MPT_RESERVED;
386 static struct res_common *alloc_eq_tr(int id)
390 ret = kzalloc(sizeof *ret, GFP_KERNEL);
394 ret->com.res_id = id;
395 ret->com.state = RES_EQ_RESERVED;
400 static struct res_common *alloc_cq_tr(int id)
404 ret = kzalloc(sizeof *ret, GFP_KERNEL);
408 ret->com.res_id = id;
409 ret->com.state = RES_CQ_ALLOCATED;
410 atomic_set(&ret->ref_count, 0);
415 static struct res_common *alloc_srq_tr(int id)
419 ret = kzalloc(sizeof *ret, GFP_KERNEL);
423 ret->com.res_id = id;
424 ret->com.state = RES_SRQ_ALLOCATED;
425 atomic_set(&ret->ref_count, 0);
430 static struct res_common *alloc_counter_tr(int id)
432 struct res_counter *ret;
434 ret = kzalloc(sizeof *ret, GFP_KERNEL);
438 ret->com.res_id = id;
439 ret->com.state = RES_COUNTER_ALLOCATED;
444 static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
447 struct res_common *ret;
451 ret = alloc_qp_tr(id);
454 ret = alloc_mpt_tr(id, extra);
457 ret = alloc_mtt_tr(id, extra);
460 ret = alloc_eq_tr(id);
463 ret = alloc_cq_tr(id);
466 ret = alloc_srq_tr(id);
469 printk(KERN_ERR "implementation missing\n");
472 ret = alloc_counter_tr(id);
/*
 * Track @count new resources with ids [base, base+count) of @type for
 * @slave: allocate an entry per id, then under the tracker lock insert
 * each into the type's radix tree and append it to the slave's list.
 * On a duplicate id or radix_tree_insert() failure, already-inserted
 * entries are rolled back.
 * NOTE(review): the rollback loop "for (--i; i >= base; --i)" compares
 * the 0-based loop counter against the absolute base id and deletes
 * absolute key i -- this looks like an index-space mix-up; compare
 * with upstream before relying on the error path.
 * NOTE(review): numbering gaps hide the allocation-failure cleanup and
 * the final kfree(res_arr)/return.
 */
484 static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
485 enum mlx4_resource type, int extra)
489 struct mlx4_priv *priv = mlx4_priv(dev);
490 struct res_common **res_arr;
491 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
492 struct radix_tree_root *root = &tracker->res_tree[type];
494 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
498 for (i = 0; i < count; ++i) {
499 res_arr[i] = alloc_tr(base + i, type, slave, extra);
501 for (--i; i >= 0; --i)
509 spin_lock_irq(mlx4_tlock(dev));
510 for (i = 0; i < count; ++i) {
511 if (find_res(dev, base + i, type)) {
515 err = radix_tree_insert(root, base + i, res_arr[i]);
518 list_add_tail(&res_arr[i]->list,
519 &tracker->slave_list[slave].res_list[type]);
521 spin_unlock_irq(mlx4_tlock(dev));
527 for (--i; i >= base; --i)
528 radix_tree_delete(&tracker->res_tree[type], i);
530 spin_unlock_irq(mlx4_tlock(dev));
532 for (i = 0; i < count; ++i)
540 static int remove_qp_ok(struct res_qp *res)
542 if (res->com.state == RES_QP_BUSY)
544 else if (res->com.state != RES_QP_RESERVED)
/*
 * An MTT range may be removed only when idle: not mid-transition, not
 * referenced by any MPT/QP/CQ/SRQ/EQ (ref_count), in the ALLOCATED
 * state, and released with the same order it was registered with.
 * NOTE(review): numbering gaps hide the return statements and part of
 * the printk argument list (presumably __func__/__LINE__).
 */
550 static int remove_mtt_ok(struct res_mtt *res, int order)
552 if (res->com.state == RES_MTT_BUSY ||
553 atomic_read(&res->ref_count)) {
554 printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
556 mtt_states_str(res->com.state),
557 atomic_read(&res->ref_count));
559 } else if (res->com.state != RES_MTT_ALLOCATED)
561 else if (res->order != order)
567 static int remove_mpt_ok(struct res_mpt *res)
569 if (res->com.state == RES_MPT_BUSY)
571 else if (res->com.state != RES_MPT_RESERVED)
577 static int remove_eq_ok(struct res_eq *res)
579 if (res->com.state == RES_MPT_BUSY)
581 else if (res->com.state != RES_MPT_RESERVED)
587 static int remove_counter_ok(struct res_counter *res)
589 if (res->com.state == RES_COUNTER_BUSY)
591 else if (res->com.state != RES_COUNTER_ALLOCATED)
597 static int remove_cq_ok(struct res_cq *res)
599 if (res->com.state == RES_CQ_BUSY)
601 else if (res->com.state != RES_CQ_ALLOCATED)
607 static int remove_srq_ok(struct res_srq *res)
609 if (res->com.state == RES_SRQ_BUSY)
611 else if (res->com.state != RES_SRQ_ALLOCATED)
/*
 * Dispatch to the per-type "is it safe to remove?" predicate; @extra
 * is only meaningful for MTT (the order).  (The switch/case framing
 * and default branch are hidden by the numbering gaps.)
 */
617 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
621 return remove_qp_ok((struct res_qp *)res);
623 return remove_cq_ok((struct res_cq *)res);
625 return remove_srq_ok((struct res_srq *)res);
627 return remove_mpt_ok((struct res_mpt *)res);
629 return remove_mtt_ok((struct res_mtt *)res, extra);
633 return remove_eq_ok((struct res_eq *)res);
635 return remove_counter_ok((struct res_counter *)res);
/*
 * Untrack resources [base, base+count) of @type for @slave.  Two
 * passes under the tracker lock: first verify every id exists, belongs
 * to @slave and passes remove_ok(); only then delete each from the
 * radix tree (and, per the hidden lines, from the slave list, freeing
 * the entries).  All-or-nothing so a partial failure removes nothing.
 * NOTE(review): numbering gaps hide the error assignments/goto and the
 * list_del/kfree in the second loop -- TODO confirm.
 */
641 static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
642 enum mlx4_resource type, int extra)
646 struct mlx4_priv *priv = mlx4_priv(dev);
647 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
648 struct res_common *r;
650 spin_lock_irq(mlx4_tlock(dev));
651 for (i = base; i < base + count; ++i) {
652 r = radix_tree_lookup(&tracker->res_tree[type], i);
657 if (r->owner != slave) {
661 err = remove_ok(r, type, extra);
666 for (i = base; i < base + count; ++i) {
667 r = radix_tree_lookup(&tracker->res_tree[type], i);
668 radix_tree_delete(&tracker->res_tree[type], i);
675 spin_unlock_irq(mlx4_tlock(dev));
/*
 * Begin a QP state transition in the tracker: validate that the QP
 * exists, belongs to @slave, and that moving from its current state to
 * @state is legal (RESERVED<->MAPPED<->HW, with @alloc distinguishing
 * reserve-then-map from free paths), then mark it RES_QP_BUSY with
 * from_state/to_state recorded.  The move is committed by
 * res_end_move() or rolled back by res_abort_move().
 * NOTE(review): numbering gaps hide the switch framing, several case
 * labels and the error returns.
 */
680 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
681 enum res_qp_states state, struct res_qp **qp,
684 struct mlx4_priv *priv = mlx4_priv(dev);
685 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
689 spin_lock_irq(mlx4_tlock(dev));
690 r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
693 else if (r->com.owner != slave)
698 mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
699 __func__, r->com.res_id);
703 case RES_QP_RESERVED:
704 if (r->com.state == RES_QP_MAPPED && !alloc)
707 mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
712 if ((r->com.state == RES_QP_RESERVED && alloc) ||
713 r->com.state == RES_QP_HW)
716 mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
724 if (r->com.state != RES_QP_MAPPED)
732 r->com.from_state = r->com.state;
733 r->com.to_state = state;
734 r->com.state = RES_QP_BUSY;
736 *qp = (struct res_qp *)r;
740 spin_unlock_irq(mlx4_tlock(dev));
/*
 * Begin an MPT state transition: validate ownership and that the move
 * RESERVED<->MAPPED<->HW is legal, then mark the entry RES_MPT_BUSY
 * with from_state/to_state recorded; commit via res_end_move() or roll
 * back via res_abort_move().  (Switch framing, case labels and error
 * returns are hidden by the numbering gaps.)
 */
745 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
746 enum res_mpt_states state, struct res_mpt **mpt)
748 struct mlx4_priv *priv = mlx4_priv(dev);
749 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
753 spin_lock_irq(mlx4_tlock(dev));
754 r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
757 else if (r->com.owner != slave)
765 case RES_MPT_RESERVED:
766 if (r->com.state != RES_MPT_MAPPED)
771 if (r->com.state != RES_MPT_RESERVED &&
772 r->com.state != RES_MPT_HW)
777 if (r->com.state != RES_MPT_MAPPED)
785 r->com.from_state = r->com.state;
786 r->com.to_state = state;
787 r->com.state = RES_MPT_BUSY;
789 *mpt = (struct res_mpt *)r;
793 spin_unlock_irq(mlx4_tlock(dev));
/*
 * Begin an EQ state transition (RESERVED<->HW): validate ownership and
 * legality, then mark the entry RES_EQ_BUSY with from/to states
 * recorded; commit via res_end_move() or roll back via
 * res_abort_move().  (Switch framing, *eq assignment and error returns
 * are hidden by the numbering gaps.)
 */
798 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
799 enum res_eq_states state, struct res_eq **eq)
801 struct mlx4_priv *priv = mlx4_priv(dev);
802 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
806 spin_lock_irq(mlx4_tlock(dev));
807 r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
810 else if (r->com.owner != slave)
818 case RES_EQ_RESERVED:
819 if (r->com.state != RES_EQ_HW)
824 if (r->com.state != RES_EQ_RESERVED)
833 r->com.from_state = r->com.state;
834 r->com.to_state = state;
835 r->com.state = RES_EQ_BUSY;
841 spin_unlock_irq(mlx4_tlock(dev));
/*
 * Begin a CQ state transition (ALLOCATED<->HW): validate ownership and
 * legality -- a CQ leaving HW must also have no remaining references
 * (QPs pointing at it) -- then mark the entry RES_CQ_BUSY with from/to
 * states recorded; commit via res_end_move() or roll back via
 * res_abort_move().  (Switch framing, *cq assignment and error returns
 * are hidden by the numbering gaps.)
 */
846 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
847 enum res_cq_states state, struct res_cq **cq)
849 struct mlx4_priv *priv = mlx4_priv(dev);
850 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
854 spin_lock_irq(mlx4_tlock(dev));
855 r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
858 else if (r->com.owner != slave)
866 case RES_CQ_ALLOCATED:
867 if (r->com.state != RES_CQ_HW)
869 else if (atomic_read(&r->ref_count))
876 if (r->com.state != RES_CQ_ALLOCATED)
887 r->com.from_state = r->com.state;
888 r->com.to_state = state;
889 r->com.state = RES_CQ_BUSY;
895 spin_unlock_irq(mlx4_tlock(dev));
/*
 * Begin an SRQ state transition (ALLOCATED<->HW): validate ownership
 * and legality -- an SRQ leaving HW must also be unreferenced -- then
 * mark the entry RES_SRQ_BUSY with from/to states recorded; commit via
 * res_end_move() or roll back via res_abort_move().
 * NOTE(review): the @state parameter is declared enum res_cq_states
 * but is used with RES_SRQ_* values -- almost certainly a copy-paste
 * from cq_res_start_move_to(); harmless numerically but should be
 * enum res_srq_states.  Switch framing and error returns are hidden by
 * the numbering gaps.
 */
900 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
901 enum res_cq_states state, struct res_srq **srq)
903 struct mlx4_priv *priv = mlx4_priv(dev);
904 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
908 spin_lock_irq(mlx4_tlock(dev));
909 r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
912 else if (r->com.owner != slave)
920 case RES_SRQ_ALLOCATED:
921 if (r->com.state != RES_SRQ_HW)
923 else if (atomic_read(&r->ref_count))
928 if (r->com.state != RES_SRQ_ALLOCATED)
937 r->com.from_state = r->com.state;
938 r->com.to_state = state;
939 r->com.state = RES_SRQ_BUSY;
945 spin_unlock_irq(mlx4_tlock(dev));
950 static void res_abort_move(struct mlx4_dev *dev, int slave,
951 enum mlx4_resource type, int id)
953 struct mlx4_priv *priv = mlx4_priv(dev);
954 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
955 struct res_common *r;
957 spin_lock_irq(mlx4_tlock(dev));
958 r = radix_tree_lookup(&tracker->res_tree[type], id);
959 if (r && (r->owner == slave))
960 r->state = r->from_state;
961 spin_unlock_irq(mlx4_tlock(dev));
964 static void res_end_move(struct mlx4_dev *dev, int slave,
965 enum mlx4_resource type, int id)
967 struct mlx4_priv *priv = mlx4_priv(dev);
968 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
969 struct res_common *r;
971 spin_lock_irq(mlx4_tlock(dev));
972 r = radix_tree_lookup(&tracker->res_tree[type], id);
973 if (r && (r->owner == slave))
974 r->state = r->to_state;
975 spin_unlock_irq(mlx4_tlock(dev));
/*
 * valid_reserved() - whether @qpn falls in the device's reserved QP
 * range (such QPs are tracked on first ICM map rather than at reserve
 * time; see qp_alloc_res()/qp_free_res()).  The slave argument is
 * currently unused.  Restored the braces lost in this listing.
 */
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn);
}
/*
 * ALLOC_RES handler for QPs.  RES_OP_RESERVE: reserve a qpn range
 * (count/align packed in in_param) and track it; on tracking failure
 * the range is released again.  RES_OP_MAP_ICM: move the QP to
 * RES_QP_MAPPED and allocate its ICM -- reserved ("special") QPs are
 * first added to the tracker here, and their ICM is owned by the
 * master so it is not allocated per-slave.
 * NOTE(review): numbering gaps hide the switch framing, error returns
 * and the fall-through between the valid_reserved() branches.
 */
983 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
984 u64 in_param, u64 *out_param)
994 count = get_param_l(&in_param);
995 align = get_param_h(&in_param);
996 err = __mlx4_qp_reserve_range(dev, count, align, &base);
1000 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1002 __mlx4_qp_release_range(dev, base, count);
1005 set_param_l(out_param, base);
1007 case RES_OP_MAP_ICM:
1008 qpn = get_param_l(&in_param) & 0x7fffff;
1009 if (valid_reserved(dev, slave, qpn)) {
1010 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1015 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1020 if (!valid_reserved(dev, slave, qpn)) {
1021 err = __mlx4_qp_alloc_icm(dev, qpn);
1023 res_abort_move(dev, slave, RES_QP, qpn);
1028 res_end_move(dev, slave, RES_QP, qpn);
/*
 * ALLOC_RES handler for MTT ranges: only RES_OP_RESERVE_AND_MAP is
 * supported.  Allocates a 2^order range, tracks it (order stored as
 * "extra"), and frees the range again if tracking fails; the base is
 * returned in out_param.  (Error returns hidden by numbering gaps.)
 */
1038 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1039 u64 in_param, u64 *out_param)
1045 if (op != RES_OP_RESERVE_AND_MAP)
1048 order = get_param_l(&in_param);
1049 base = __mlx4_alloc_mtt_range(dev, order);
1053 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1055 __mlx4_free_mtt_range(dev, base, order);
1057 set_param_l(out_param, base);
/*
 * ALLOC_RES handler for MPTs.  RES_OP_RESERVE: reserve an MR index,
 * track it under id = index & mpt_mask() with the full index stored as
 * "extra" (the key); release on tracking failure.  RES_OP_MAP_ICM:
 * move the MPT to RES_MPT_MAPPED and allocate its ICM, aborting the
 * move on failure.  (Switch framing and error returns hidden by
 * numbering gaps.)
 */
1062 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1063 u64 in_param, u64 *out_param)
1068 struct res_mpt *mpt;
1071 case RES_OP_RESERVE:
1072 index = __mlx4_mr_reserve(dev);
1075 id = index & mpt_mask(dev);
1077 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1079 __mlx4_mr_release(dev, index);
1082 set_param_l(out_param, index);
1084 case RES_OP_MAP_ICM:
1085 index = get_param_l(&in_param);
1086 id = index & mpt_mask(dev);
1087 err = mr_res_start_move_to(dev, slave, id,
1088 RES_MPT_MAPPED, &mpt);
1092 err = __mlx4_mr_alloc_icm(dev, mpt->key);
1094 res_abort_move(dev, slave, RES_MPT, id);
1098 res_end_move(dev, slave, RES_MPT, id);
/*
 * ALLOC_RES handler for CQs (RES_OP_RESERVE_AND_MAP): allocate the CQ
 * ICM, track the cqn, and free the ICM again if tracking fails; the
 * cqn is returned in out_param.  (Other ops/default branch hidden by
 * numbering gaps.)
 */
1104 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1105 u64 in_param, u64 *out_param)
1111 case RES_OP_RESERVE_AND_MAP:
1112 err = __mlx4_cq_alloc_icm(dev, &cqn);
1116 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1118 __mlx4_cq_free_icm(dev, cqn);
1122 set_param_l(out_param, cqn);
/*
 * ALLOC_RES handler for SRQs (RES_OP_RESERVE_AND_MAP): allocate the
 * SRQ ICM, track the srqn, and free the ICM again if tracking fails;
 * the srqn is returned in out_param.  (Other ops/default branch hidden
 * by numbering gaps.)
 */
1132 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1133 u64 in_param, u64 *out_param)
1139 case RES_OP_RESERVE_AND_MAP:
1140 err = __mlx4_srq_alloc_icm(dev, &srqn);
1144 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1146 __mlx4_srq_free_icm(dev, srqn);
1150 set_param_l(out_param, srqn);
/*
 * Record that @slave registered @mac on @port, so rem_slave_macs() can
 * unregister it if the slave dies.  MACs live on a plain list (not the
 * radix trees) since they are u64-keyed.
 * NOTE(review): numbering gaps hide the -ENOMEM return and the
 * res->mac assignment -- TODO confirm.
 */
1160 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1162 struct mlx4_priv *priv = mlx4_priv(dev);
1163 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1164 struct mac_res *res;
1166 res = kzalloc(sizeof *res, GFP_KERNEL);
1170 res->port = (u8) port;
1171 list_add_tail(&res->list,
1172 &tracker->slave_list[slave].res_list[RES_MAC]);
/*
 * Remove the (mac, port) record from @slave's MAC list, if present.
 * Uses the _safe iterator because the matching entry is deleted (and,
 * per the hidden lines, presumably freed) inside the loop.
 */
1176 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1179 struct mlx4_priv *priv = mlx4_priv(dev);
1180 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1181 struct list_head *mac_list =
1182 &tracker->slave_list[slave].res_list[RES_MAC];
1183 struct mac_res *res, *tmp;
1185 list_for_each_entry_safe(res, tmp, mac_list, list) {
1186 if (res->mac == mac && res->port == (u8) port) {
1187 list_del(&res->list);
/*
 * Cleanup path: unregister every MAC @slave still has registered and
 * drop the tracking records (freed in lines hidden by the numbering
 * gaps).  Called when a slave's resources are torn down.
 */
1194 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1196 struct mlx4_priv *priv = mlx4_priv(dev);
1197 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1198 struct list_head *mac_list =
1199 &tracker->slave_list[slave].res_list[RES_MAC];
1200 struct mac_res *res, *tmp;
1202 list_for_each_entry_safe(res, tmp, mac_list, list) {
1203 list_del(&res->list);
1204 __mlx4_unregister_mac(dev, res->port, res->mac);
/*
 * ALLOC_RES handler for MACs (RES_OP_RESERVE_AND_MAP only): register
 * the MAC on the port carried in out_param, report the returned index
 * back through out_param, and record the registration against @slave;
 * unregister again if recording fails.  (The mac extraction from
 * in_param and the error returns are hidden by the numbering gaps.)
 */
1209 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1210 u64 in_param, u64 *out_param)
1216 if (op != RES_OP_RESERVE_AND_MAP)
1219 port = get_param_l(out_param);
1222 err = __mlx4_register_mac(dev, port, mac);
1224 set_param_l(out_param, err);
1229 err = mac_add_to_slave(dev, slave, mac, port);
1231 __mlx4_unregister_mac(dev, port, mac);
/*
 * ALLOC_RES handler for VLANs.  The body is hidden by the numbering
 * gaps; presumably a trivial success stub (VLANs are not tracked
 * here) -- TODO confirm.
 */
1236 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1237 u64 in_param, u64 *out_param)
/*
 * Paravirtualized ALLOC_RES command: dispatch on the resource type in
 * vhcr->in_modifier to the per-type allocator, passing the opcode
 * modifier as both op and alop.  Results flow back through
 * vhcr->out_param.  (Case labels and the default/return are hidden by
 * the numbering gaps.)
 */
1242 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1243 struct mlx4_vhcr *vhcr,
1244 struct mlx4_cmd_mailbox *inbox,
1245 struct mlx4_cmd_mailbox *outbox,
1246 struct mlx4_cmd_info *cmd)
1249 int alop = vhcr->op_modifier;
1251 switch (vhcr->in_modifier) {
1253 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1254 vhcr->in_param, &vhcr->out_param);
1258 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1259 vhcr->in_param, &vhcr->out_param);
1263 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1264 vhcr->in_param, &vhcr->out_param);
1268 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1269 vhcr->in_param, &vhcr->out_param);
1273 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1274 vhcr->in_param, &vhcr->out_param);
1278 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1279 vhcr->in_param, &vhcr->out_param);
1283 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1284 vhcr->in_param, &vhcr->out_param);
/*
 * FREE_RES handler for QPs.  RES_OP_RESERVE: untrack the qpn range,
 * then release it.  RES_OP_MAP_ICM: move the QP back to
 * RES_QP_RESERVED and free its ICM (skipped for reserved/special QPs,
 * whose tracker entry is removed instead, mirroring qp_alloc_res()).
 * (Switch framing and error returns hidden by the numbering gaps.)
 */
1295 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1304 case RES_OP_RESERVE:
1305 base = get_param_l(&in_param) & 0x7fffff;
1306 count = get_param_h(&in_param);
1307 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
1310 __mlx4_qp_release_range(dev, base, count);
1312 case RES_OP_MAP_ICM:
1313 qpn = get_param_l(&in_param) & 0x7fffff;
1314 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
1319 if (!valid_reserved(dev, slave, qpn))
1320 __mlx4_qp_free_icm(dev, qpn);
1322 res_end_move(dev, slave, RES_QP, qpn);
1324 if (valid_reserved(dev, slave, qpn))
1325 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
/*
 * FREE_RES handler for MTT ranges (RES_OP_RESERVE_AND_MAP only):
 * untrack the range (rem_res_range() verifies the order matches and
 * the range is unreferenced), then free it back to the allocator.
 */
1334 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1335 u64 in_param, u64 *out_param)
1341 if (op != RES_OP_RESERVE_AND_MAP)
1344 base = get_param_l(&in_param);
1345 order = get_param_h(&in_param);
1346 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
1348 __mlx4_free_mtt_range(dev, base, order);
/*
 * FREE_RES handler for MPTs.  RES_OP_RESERVE: look up the entry (via
 * get_res/put_res, presumably to recover the full key -- the hidden
 * lines likely read mpt->key), untrack it, and release the MR index.
 * RES_OP_MAP_ICM: move back to RES_MPT_RESERVED and free the ICM.
 * (Switch framing and error returns hidden by the numbering gaps.)
 */
1352 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1358 struct res_mpt *mpt;
1361 case RES_OP_RESERVE:
1362 index = get_param_l(&in_param);
1363 id = index & mpt_mask(dev);
1364 err = get_res(dev, slave, id, RES_MPT, &mpt);
1368 put_res(dev, slave, id, RES_MPT);
1370 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
1373 __mlx4_mr_release(dev, index);
1375 case RES_OP_MAP_ICM:
1376 index = get_param_l(&in_param);
1377 id = index & mpt_mask(dev);
1378 err = mr_res_start_move_to(dev, slave, id,
1379 RES_MPT_RESERVED, &mpt);
1383 __mlx4_mr_free_icm(dev, mpt->key);
1384 res_end_move(dev, slave, RES_MPT, id);
/*
 * FREE_RES handler for CQs (RES_OP_RESERVE_AND_MAP): untrack the cqn
 * (fails while referenced or still in HW), then free its ICM.
 * (Other ops/default branch hidden by the numbering gaps.)
 */
1394 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1395 u64 in_param, u64 *out_param)
1401 case RES_OP_RESERVE_AND_MAP:
1402 cqn = get_param_l(&in_param);
1403 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1407 __mlx4_cq_free_icm(dev, cqn);
/*
 * FREE_RES handler for SRQs (RES_OP_RESERVE_AND_MAP): untrack the srqn
 * (fails while referenced or still in HW), then free its ICM.
 * (Other ops/default branch hidden by the numbering gaps.)
 */
1418 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1419 u64 in_param, u64 *out_param)
1425 case RES_OP_RESERVE_AND_MAP:
1426 srqn = get_param_l(&in_param);
1427 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1431 __mlx4_srq_free_icm(dev, srqn);
/*
 * FREE_RES handler for MACs (RES_OP_RESERVE_AND_MAP): drop the
 * slave's tracking record for (in_param, port) and unregister the MAC.
 * The port is carried in out_param, mirroring mac_alloc_res().
 */
1442 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1443 u64 in_param, u64 *out_param)
1449 case RES_OP_RESERVE_AND_MAP:
1450 port = get_param_l(out_param);
1451 mac_del_from_slave(dev, slave, in_param, port);
1452 __mlx4_unregister_mac(dev, port, in_param);
/*
 * FREE_RES handler for VLANs.  Body hidden by the numbering gaps;
 * presumably a trivial success stub matching vlan_alloc_res() -- TODO
 * confirm.
 */
1463 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1464 u64 in_param, u64 *out_param)
/*
 * Paravirtualized FREE_RES command: dispatch on the resource type in
 * vhcr->in_modifier to the per-type free routine, mirroring
 * mlx4_ALLOC_RES_wrapper().  (Case labels, some argument lines and the
 * default/return are hidden by the numbering gaps.)
 */
1469 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1470 struct mlx4_vhcr *vhcr,
1471 struct mlx4_cmd_mailbox *inbox,
1472 struct mlx4_cmd_mailbox *outbox,
1473 struct mlx4_cmd_info *cmd)
1476 int alop = vhcr->op_modifier;
1478 switch (vhcr->in_modifier) {
1480 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1485 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1486 vhcr->in_param, &vhcr->out_param);
1490 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1495 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1496 vhcr->in_param, &vhcr->out_param);
1500 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1501 vhcr->in_param, &vhcr->out_param);
1505 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1506 vhcr->in_param, &vhcr->out_param);
1510 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1511 vhcr->in_param, &vhcr->out_param);
1520 /* ugly but other choices are uglier */
1521 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1523 return (be32_to_cpu(mpt->flags) >> 9) & 1;
1526 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
1528 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
1531 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1533 return be32_to_cpu(mpt->mtt_sz);
1536 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
1538 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1541 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
1543 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
/*
 * Compute how many MTT entries a QP context implies: SQ size plus RQ
 * size (the RQ contributes nothing for SRQ/RSS/XRC QPs), adjusted by
 * the WQE page offset and rounded up to whole pages of 2^page_shift.
 * NOTE(review): numbering gaps hide the sq_size/rq_size/total_mem
 * declarations and the tail of the return expression (presumably
 * ">> page_shift") -- TODO confirm.
 */
1546 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1548 int page_shift = (qpc->log_page_size & 0x3f) + 12;
1549 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
1550 int log_sq_sride = qpc->sq_size_stride & 7;
1551 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
1552 int log_rq_stride = qpc->rq_size_stride & 7;
1553 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
1554 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
1555 int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
1560 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
1562 sq_size = 1 << (log_sq_size + log_sq_sride + 4);
1563 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
1564 total_mem = sq_size + rq_size;
1566 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1572 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1573 int size, struct res_mtt *mtt)
1575 int res_start = mtt->com.res_id;
1576 int res_size = (1 << mtt->order);
1578 if (start < res_start || start + size > res_start + res_size)
/*
 * SW2HW_MPT wrapper: validate that the slave owns the MPT and (for
 * non-physical MRs) that the MTT range referenced by the mailbox lies
 * within an MTT range the slave owns, then forward the command to HW.
 * On success the MTT refcount is bumped so it cannot be freed while
 * the MR is in HW, and the MPT moves to RES_MPT_HW.  Error labels
 * unwind via put_res/res_abort_move.  (Several lines, including the
 * mpt->mtt assignment and "if (!phys)" guards, are hidden by the
 * numbering gaps.)
 */
1583 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1584 struct mlx4_vhcr *vhcr,
1585 struct mlx4_cmd_mailbox *inbox,
1586 struct mlx4_cmd_mailbox *outbox,
1587 struct mlx4_cmd_info *cmd)
1590 int index = vhcr->in_modifier;
1591 struct res_mtt *mtt;
1592 struct res_mpt *mpt;
1593 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
1597 id = index & mpt_mask(dev);
1598 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
1602 phys = mr_phys_mpt(inbox->buf);
1604 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1608 err = check_mtt_range(dev, slave, mtt_base,
1609 mr_get_mtt_size(inbox->buf), mtt);
1616 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1621 atomic_inc(&mtt->ref_count);
1622 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1625 res_end_move(dev, slave, RES_MPT, id);
1630 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1632 res_abort_move(dev, slave, RES_MPT, id);
/*
 * HW2SW_MPT wrapper: move the slave's MPT from HW back to MAPPED,
 * forward the command, and drop the reference taken on the MR's MTT at
 * SW2HW time (guarded, per the hidden lines, by a check that an MTT
 * was attached).  Aborts the move if the firmware command fails.
 */
1637 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1638 struct mlx4_vhcr *vhcr,
1639 struct mlx4_cmd_mailbox *inbox,
1640 struct mlx4_cmd_mailbox *outbox,
1641 struct mlx4_cmd_info *cmd)
1644 int index = vhcr->in_modifier;
1645 struct res_mpt *mpt;
1648 id = index & mpt_mask(dev);
1649 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
1653 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1658 atomic_dec(&mpt->mtt->ref_count);
1660 res_end_move(dev, slave, RES_MPT, id);
1664 res_abort_move(dev, slave, RES_MPT, id);
/*
 * QUERY_MPT wrapper: allow the query only if the slave owns the MPT
 * and it is currently in HW (from_state check, since get_res() marks
 * it busy); then forward to firmware and release the resource.
 * (The -EBUSY/goto on the state check is hidden by the numbering
 * gaps.)
 */
1669 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
1670 struct mlx4_vhcr *vhcr,
1671 struct mlx4_cmd_mailbox *inbox,
1672 struct mlx4_cmd_mailbox *outbox,
1673 struct mlx4_cmd_info *cmd)
1676 int index = vhcr->in_modifier;
1677 struct res_mpt *mpt;
1680 id = index & mpt_mask(dev);
1681 err = get_res(dev, slave, id, RES_MPT, &mpt);
1685 if (mpt->com.from_state != RES_MPT_HW) {
1690 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1693 put_res(dev, slave, id, RES_MPT);
1697 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
1699 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
1702 static int qp_get_scqn(struct mlx4_qp_context *qpc)
1704 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
1707 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
1709 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
/*
 * RST2INIT_QP wrapper: before forwarding the transition to firmware,
 * claim every resource the QP context references -- its MTT range
 * (with bounds check), receive CQ, send CQ (if distinct from rcq) and
 * optional SRQ -- move the QP to RES_QP_HW, and on success bump each
 * referenced resource's refcount so it cannot be freed while the QP is
 * in HW.  The tail labels unwind the claims in reverse order on error.
 * NOTE(review): numbering gaps hide several guards (e.g. scqn != rcqn,
 * use_srq) and the error-label names -- the visible put_res() pairs
 * show the unwind order only approximately.
 */
1712 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
1713 struct mlx4_vhcr *vhcr,
1714 struct mlx4_cmd_mailbox *inbox,
1715 struct mlx4_cmd_mailbox *outbox,
1716 struct mlx4_cmd_info *cmd)
1719 int qpn = vhcr->in_modifier & 0x7fffff;
1720 struct res_mtt *mtt;
1722 struct mlx4_qp_context *qpc = inbox->buf + 8;
1723 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
1724 int mtt_size = qp_get_mtt_size(qpc);
1727 int rcqn = qp_get_rcqn(qpc);
1728 int scqn = qp_get_scqn(qpc);
1729 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
1730 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
1731 struct res_srq *srq;
1732 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
1734 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
1737 qp->local_qpn = local_qpn;
1739 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1743 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
1747 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
1752 err = get_res(dev, slave, scqn, RES_CQ, &scq);
1759 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
1764 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1767 atomic_inc(&mtt->ref_count);
1769 atomic_inc(&rcq->ref_count);
1771 atomic_inc(&scq->ref_count);
1775 put_res(dev, slave, scqn, RES_CQ);
1778 atomic_inc(&srq->ref_count);
1779 put_res(dev, slave, srqn, RES_SRQ);
1782 put_res(dev, slave, rcqn, RES_CQ);
1783 put_res(dev, slave, mtt_base, RES_MTT);
1784 res_end_move(dev, slave, RES_QP, qpn);
1790 put_res(dev, slave, srqn, RES_SRQ);
1793 put_res(dev, slave, scqn, RES_CQ);
1795 put_res(dev, slave, rcqn, RES_CQ);
1797 put_res(dev, slave, mtt_base, RES_MTT);
1799 res_abort_move(dev, slave, RES_QP, qpn);
1804 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
1806 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
1809 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
1811 int log_eq_size = eqc->log_eq_size & 0x1f;
1812 int page_shift = (eqc->log_page_size & 0x3f) + 12;
1814 if (log_eq_size + 5 < page_shift)
1817 return 1 << (log_eq_size + 5 - page_shift);
1820 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
1822 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
1825 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
1827 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
1828 int page_shift = (cqc->log_page_size & 0x3f) + 12;
1830 if (log_cq_size + 5 < page_shift)
1833 return 1 << (log_cq_size + 5 - page_shift);
/*
 * SW2HW_EQ wrapper: EQ ids are namespaced per slave as
 * (slave << 8) | eqn since slaves number their EQs independently.
 * Track the EQ, move it RESERVED->HW, validate the context's MTT range
 * against the slave's MTT holdings, forward to firmware, then pin the
 * MTT (ref_count) and commit.  The error tail unwinds: put MTT, abort
 * the move, untrack.  (The eq->mtt assignment and some gotos are
 * hidden by the numbering gaps.)
 */
1836 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
1837 struct mlx4_vhcr *vhcr,
1838 struct mlx4_cmd_mailbox *inbox,
1839 struct mlx4_cmd_mailbox *outbox,
1840 struct mlx4_cmd_info *cmd)
1843 int eqn = vhcr->in_modifier;
1844 int res_id = (slave << 8) | eqn;
1845 struct mlx4_eq_context *eqc = inbox->buf;
1846 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
1847 int mtt_size = eq_get_mtt_size(eqc);
1849 struct res_mtt *mtt;
1851 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
1854 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
1858 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1862 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
1866 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1870 atomic_inc(&mtt->ref_count);
1872 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1873 res_end_move(dev, slave, RES_EQ, res_id);
1877 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1879 res_abort_move(dev, slave, RES_EQ, res_id);
1881 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
/*
 * Find which of @slave's MTT ranges contains [start, start+len) and
 * claim it (mark RES_MTT_BUSY with from_state saved, like get_res());
 * the match is returned through *res and later released with
 * put_res().  Walks the slave's RES_MTT list under the tracker lock.
 * (The *res assignment, break and the not-found return are hidden by
 * the numbering gaps.)
 */
1885 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
1886 int len, struct res_mtt **res)
1888 struct mlx4_priv *priv = mlx4_priv(dev);
1889 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1890 struct res_mtt *mtt;
1893 spin_lock_irq(mlx4_tlock(dev));
1894 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
1896 if (!check_mtt_range(dev, slave, start, len, mtt)) {
1898 mtt->com.from_state = mtt->com.state;
1899 mtt->com.state = RES_MTT_BUSY;
1904 spin_unlock_irq(mlx4_tlock(dev));
/* Paravirtualized WRITE_MTT: verify the slave owns the target MTT
 * range, then perform the write in software via __mlx4_write_mtt
 * rather than forwarding the command.  The inbox layout is:
 * page_list[0] = start index, page_list[2..] = page addresses.
 * NOTE(review): error checks after get_containing_mtt and the final
 * return are missing from this extraction. */
1909 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
1910 struct mlx4_vhcr *vhcr,
1911 struct mlx4_cmd_mailbox *inbox,
1912 struct mlx4_cmd_mailbox *outbox,
1913 struct mlx4_cmd_info *cmd)
1915 struct mlx4_mtt mtt;
1916 __be64 *page_list = inbox->buf;
1917 u64 *pg_list = (u64 *)page_list; /* same buffer, rewritten in host order below */
1919 struct res_mtt *rmtt = NULL;
1920 int start = be64_to_cpu(page_list[0]);
1921 int npages = vhcr->in_modifier;
1924 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
1928 /* Call the SW implementation of write_mtt:
1929 * - Prepare a dummy mtt struct
1930 * - Translate inbox contents to simple addresses in host endianess */
1931 mtt.offset = 0; /* TBD this is broken but I don't handle it since
1932 we don't really use it */
1935 for (i = 0; i < npages; ++i)
1936 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL); /* ~1ULL: presumably clears the "present" flag bit — confirm against __mlx4_write_mtt */
1938 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
1939 ((u64 *)page_list + 2));
1942 put_res(dev, slave, rmtt->com.res_id, RES_MTT); /* release BUSY taken by get_containing_mtt */
/* Paravirtualized HW2SW_EQ: move the slave's EQ back to the reserved
 * state, forward the command to firmware, then drop the MTT reference
 * taken at SW2HW time and remove the EQ from the tracker.  The error
 * path aborts the state move.  NOTE(review): `err` checks / labels are
 * missing from this extraction. */
1947 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
1948 struct mlx4_vhcr *vhcr,
1949 struct mlx4_cmd_mailbox *inbox,
1950 struct mlx4_cmd_mailbox *outbox,
1951 struct mlx4_cmd_info *cmd)
1953 int eqn = vhcr->in_modifier;
1954 int res_id = eqn | (slave << 8); /* same encoding as SW2HW_EQ */
1958 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
1962 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL); /* just pin it busy */
1966 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1970 atomic_dec(&eq->mtt->ref_count);
1971 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
1972 res_end_move(dev, slave, RES_EQ, res_id);
1973 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
1978 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
1980 res_abort_move(dev, slave, RES_EQ, res_id);
/* Generate an event on a slave's event EQ.  Looks up the EQ the slave
 * registered for this event type, verifies it is in HW state, copies
 * the 28-byte EQE payload into a mailbox and issues MLX4_CMD_GEN_EQE.
 * Serialized per-slave by gen_eqe_mutex.  Returns early (success) when
 * the slave has no EQ registered for this event type.
 * NOTE(review): several declarations, error checks and labels are
 * missing from this extraction. */
1985 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
1987 struct mlx4_priv *priv = mlx4_priv(dev);
1988 struct mlx4_slave_event_eq_info *event_eq;
1989 struct mlx4_cmd_mailbox *mailbox;
1990 u32 in_modifier = 0;
1995 if (!priv->mfunc.master.slave_state)
1998 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2000 /* Create the event only if the slave is registered */
2001 if (event_eq->eqn < 0)
2004 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2005 res_id = (slave << 8) | event_eq->eqn;
2006 err = get_res(dev, slave, res_id, RES_EQ, &req);
2010 if (req->com.from_state != RES_EQ_HW) { /* EQ not armed in HW; cannot post */
2015 mailbox = mlx4_alloc_cmd_mailbox(dev);
2016 if (IS_ERR(mailbox)) {
2017 err = PTR_ERR(mailbox);
2021 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2023 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2026 memcpy(mailbox->buf, (u8 *) eqe, 28); /* 28: EQE payload size without owner byte */
2028 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2030 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2031 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2034 put_res(dev, slave, res_id, RES_EQ);
2035 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2036 mlx4_free_cmd_mailbox(dev, mailbox);
2040 put_res(dev, slave, res_id, RES_EQ);
2043 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
/* Paravirtualized QUERY_EQ: pin the slave's EQ, require it to be in
 * HW state, forward the query to firmware, then release the resource.
 * NOTE(review): declarations and `err` checks are missing from this
 * extraction. */
2047 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2048 struct mlx4_vhcr *vhcr,
2049 struct mlx4_cmd_mailbox *inbox,
2050 struct mlx4_cmd_mailbox *outbox,
2051 struct mlx4_cmd_info *cmd)
2053 int eqn = vhcr->in_modifier;
2054 int res_id = eqn | (slave << 8);
2058 err = get_res(dev, slave, res_id, RES_EQ, &eq);
2062 if (eq->com.from_state != RES_EQ_HW) {
2067 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2070 put_res(dev, slave, res_id, RES_EQ);
/* Paravirtualized SW2HW_CQ: move the slave's CQ toward RES_CQ_HW, pin
 * and range-check the MTT named in the CQ context, forward to firmware,
 * and on success bump the MTT refcount; on failure unwind the move.
 * NOTE(review): `err` checks / labels between steps are missing from
 * this extraction. */
2074 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2075 struct mlx4_vhcr *vhcr,
2076 struct mlx4_cmd_mailbox *inbox,
2077 struct mlx4_cmd_mailbox *outbox,
2078 struct mlx4_cmd_info *cmd)
2081 int cqn = vhcr->in_modifier;
2082 struct mlx4_cq_context *cqc = inbox->buf;
2083 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2085 struct res_mtt *mtt;
2087 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2090 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2093 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2096 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2099 atomic_inc(&mtt->ref_count); /* CQ now references this MTT */
2101 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2102 res_end_move(dev, slave, RES_CQ, cqn);
2106 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2108 res_abort_move(dev, slave, RES_CQ, cqn);
/* Paravirtualized HW2SW_CQ: move the CQ back to allocated state,
 * forward to firmware, and drop the MTT reference taken at SW2HW time.
 * NOTE(review): `err` checks are missing from this extraction. */
2112 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2113 struct mlx4_vhcr *vhcr,
2114 struct mlx4_cmd_mailbox *inbox,
2115 struct mlx4_cmd_mailbox *outbox,
2116 struct mlx4_cmd_info *cmd)
2119 int cqn = vhcr->in_modifier;
2122 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2125 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2128 atomic_dec(&cq->mtt->ref_count);
2129 res_end_move(dev, slave, RES_CQ, cqn);
2133 res_abort_move(dev, slave, RES_CQ, cqn)
/* Paravirtualized QUERY_CQ: pin the CQ, require HW state, forward the
 * query, release.  NOTE(review): declarations and `err` checks are
 * missing from this extraction. */
2137 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2138 struct mlx4_vhcr *vhcr,
2139 struct mlx4_cmd_mailbox *inbox,
2140 struct mlx4_cmd_mailbox *outbox,
2141 struct mlx4_cmd_info *cmd)
2143 int cqn = vhcr->in_modifier;
2147 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2151 if (cq->com.from_state != RES_CQ_HW)
2154 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2156 put_res(dev, slave, cqn, RES_CQ);
/* CQ resize path of MODIFY_CQ: validate that the CQ's current MTT is
 * the one the tracker knows about, pin and range-check the new MTT
 * from the inbox context, forward the command, and on success move the
 * reference count from the old MTT to the new one.
 * NOTE(review): the trailing parameter of the signature (the res_cq
 * argument) and several `err` checks / labels are missing from this
 * extraction. */
2161 static int handle_resize(struct mlx4_dev *dev, int slave,
2162 struct mlx4_vhcr *vhcr,
2163 struct mlx4_cmd_mailbox *inbox,
2164 struct mlx4_cmd_mailbox *outbox,
2165 struct mlx4_cmd_info *cmd,
2169 struct res_mtt *orig_mtt;
2170 struct res_mtt *mtt;
2171 struct mlx4_cq_context *cqc = inbox->buf;
2172 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2174 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2178 if (orig_mtt != cq->mtt) { /* tracker and CQ disagree — refuse */
2183 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2187 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2190 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2193 atomic_dec(&orig_mtt->ref_count); /* old MTT no longer referenced by this CQ */
2194 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2195 atomic_inc(&mtt->ref_count); /* new MTT takes over */
2197 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2201 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2203 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
/* Paravirtualized MODIFY_CQ: op_modifier 0 is a resize (delegated to
 * handle_resize, which also swaps MTT references); anything else is
 * forwarded directly to firmware.  CQ must be in HW state.
 * NOTE(review): declarations, `err` checks and the goto after
 * handle_resize are missing from this extraction. */
2209 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2210 struct mlx4_vhcr *vhcr,
2211 struct mlx4_cmd_mailbox *inbox,
2212 struct mlx4_cmd_mailbox *outbox,
2213 struct mlx4_cmd_info *cmd)
2215 int cqn = vhcr->in_modifier;
2219 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2223 if (cq->com.from_state != RES_CQ_HW)
2226 if (vhcr->op_modifier == 0) {
2227 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2231 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2233 put_res(dev, slave, cqn, RES_CQ);
/* Number of MTT entries needed to map an SRQ buffer: entry count times
 * WQE stride (stride is 1 << (log_rq_stride + 4)), divided by the
 * context's page size.  NOTE(review): braces and the small-SRQ branch
 * are absent from this extraction — code left untouched. */
2238 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2240 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2241 int log_rq_stride = srqc->logstride & 7;
2242 int page_shift = (srqc->log_page_size & 0x3f) + 12; /* 12: log2 of 4KB base page */
2244 if (log_srq_size + log_rq_stride + 4 < page_shift)
2247 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
/* Paravirtualized SW2HW_SRQ: sanity-check that the SRQ number in the
 * context matches the command modifier, move the SRQ toward RES_SRQ_HW,
 * pin/range-check the MTT, forward to firmware, then take an MTT
 * reference; failure unwinds the move.  NOTE(review): `err` checks /
 * labels are missing from this extraction. */
2250 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2251 struct mlx4_vhcr *vhcr,
2252 struct mlx4_cmd_mailbox *inbox,
2253 struct mlx4_cmd_mailbox *outbox,
2254 struct mlx4_cmd_info *cmd)
2257 int srqn = vhcr->in_modifier;
2258 struct res_mtt *mtt;
2259 struct res_srq *srq;
2260 struct mlx4_srq_context *srqc = inbox->buf;
2261 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2263 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff)) /* context/modifier mismatch */
2266 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2269 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2272 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2277 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2281 atomic_inc(&mtt->ref_count); /* SRQ now references this MTT */
2283 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2284 res_end_move(dev, slave, RES_SRQ, srqn);
2288 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2290 res_abort_move(dev, slave, RES_SRQ, srqn);
/* Paravirtualized HW2SW_SRQ: move the SRQ back to allocated state,
 * forward to firmware, then drop the MTT reference and (when present)
 * the CQ reference taken when the SRQ was created.
 * NOTE(review): `err` checks and the `if (srq->cq)` guard line are
 * missing from this extraction. */
2295 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2296 struct mlx4_vhcr *vhcr,
2297 struct mlx4_cmd_mailbox *inbox,
2298 struct mlx4_cmd_mailbox *outbox,
2299 struct mlx4_cmd_info *cmd)
2302 int srqn = vhcr->in_modifier;
2303 struct res_srq *srq;
2305 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2308 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2311 atomic_dec(&srq->mtt->ref_count);
2313 atomic_dec(&srq->cq->ref_count);
2314 res_end_move(dev, slave, RES_SRQ, srqn);
2319 res_abort_move(dev, slave, RES_SRQ, srqn);
/* Paravirtualized QUERY_SRQ: pin the SRQ, require HW state, forward
 * the query, release.  NOTE(review): `err` checks are missing from
 * this extraction. */
2324 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2325 struct mlx4_vhcr *vhcr,
2326 struct mlx4_cmd_mailbox *inbox,
2327 struct mlx4_cmd_mailbox *outbox,
2328 struct mlx4_cmd_info *cmd)
2331 int srqn = vhcr->in_modifier;
2332 struct res_srq *srq;
2334 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2337 if (srq->com.from_state != RES_SRQ_HW) {
2341 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2343 put_res(dev, slave, srqn, RES_SRQ);
/* Paravirtualized ARM_SRQ: identical pattern to QUERY_SRQ — pin,
 * require HW state, forward, release.  NOTE(review): `err` checks are
 * missing from this extraction. */
2347 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2348 struct mlx4_vhcr *vhcr,
2349 struct mlx4_cmd_mailbox *inbox,
2350 struct mlx4_cmd_mailbox *outbox,
2351 struct mlx4_cmd_info *cmd)
2354 int srqn = vhcr->in_modifier;
2355 struct res_srq *srq;
2357 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2361 if (srq->com.from_state != RES_SRQ_HW) {
2366 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2368 put_res(dev, slave, srqn, RES_SRQ);
/* Generic QP command wrapper: pin the slave's QP (low 23 bits of the
 * modifier), require RES_QP_HW, forward to firmware, release.  Used as
 * the common path for the QP state-transition commands.
 * NOTE(review): declarations and `err` checks are missing from this
 * extraction. */
2372 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2373 struct mlx4_vhcr *vhcr,
2374 struct mlx4_cmd_mailbox *inbox,
2375 struct mlx4_cmd_mailbox *outbox,
2376 struct mlx4_cmd_info *cmd)
2379 int qpn = vhcr->in_modifier & 0x7fffff; /* QPN field is 23 bits */
2382 err = get_res(dev, slave, qpn, RES_QP, &qp);
2385 if (qp->com.from_state != RES_QP_HW) {
2390 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2392 put_res(dev, slave, qpn, RES_QP);
/* INIT2RTR wrapper: patch the UD GID in the QP context (inbox buffer
 * holds the context at offset 8) for this slave, then delegate to the
 * generic QP wrapper. */
2396 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2397 struct mlx4_vhcr *vhcr,
2398 struct mlx4_cmd_mailbox *inbox,
2399 struct mlx4_cmd_mailbox *outbox,
2400 struct mlx4_cmd_info *cmd)
2402 struct mlx4_qp_context *qpc = inbox->buf + 8; /* context starts after 8-byte header */
2404 update_ud_gid(dev, qpc, (u8)slave);
2406 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
/* 2RST (any-state-to-reset) wrapper: move the QP back to MAPPED state,
 * forward the command, then drop the references the QP held on its
 * MTT, receive CQ, send CQ and (when present) SRQ; failure aborts the
 * move.  NOTE(review): `err` checks and the `if (qp->srq)` guard line
 * are missing from this extraction. */
2409 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2410 struct mlx4_vhcr *vhcr,
2411 struct mlx4_cmd_mailbox *inbox,
2412 struct mlx4_cmd_mailbox *outbox,
2413 struct mlx4_cmd_info *cmd)
2416 int qpn = vhcr->in_modifier & 0x7fffff;
2419 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2422 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2426 atomic_dec(&qp->mtt->ref_count);
2427 atomic_dec(&qp->rcq->ref_count);
2428 atomic_dec(&qp->scq->ref_count);
2430 atomic_dec(&qp->srq->ref_count);
2431 res_end_move(dev, slave, RES_QP, qpn);
2435 res_abort_move(dev, slave, RES_QP, qpn);
/* Linear search of a QP's multicast-GID list for a 16-byte GID match.
 * Caller must hold rqp->mcg_spl.  NOTE(review): the `return res;` /
 * `return NULL;` lines are absent from this extraction. */
2440 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2441 struct res_qp *rqp, u8 *gid)
2443 struct res_gid *res;
2445 list_for_each_entry(res, &rqp->mcg_list, list) {
2446 if (!memcmp(res->gid, gid, 16))
/* Record, under mcg_spl, that this QP joined a multicast GID (with its
 * protocol and steering type) so it can be detached at slave cleanup.
 * Duplicate GIDs are rejected.  NOTE(review): the allocation-failure
 * check, the prot/steer field assignments and the kfree on the
 * duplicate path are absent from this extraction. */
2452 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2453 u8 *gid, enum mlx4_protocol prot,
2454 enum mlx4_steer_type steer)
2456 struct res_gid *res;
2459 res = kzalloc(sizeof *res, GFP_KERNEL);
2463 spin_lock_irq(&rqp->mcg_spl);
2464 if (find_gid(dev, slave, rqp, gid)) { /* already attached */
2468 memcpy(res->gid, gid, 16);
2471 list_add_tail(&res->list, &rqp->mcg_list);
2474 spin_unlock_irq(&rqp->mcg_spl);
/* Remove a previously recorded multicast attachment.  The entry must
 * match GID, protocol AND steering type; otherwise the removal is
 * refused.  NOTE(review): the error assignment and kfree are absent
 * from this extraction. */
2479 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2480 u8 *gid, enum mlx4_protocol prot,
2481 enum mlx4_steer_type steer)
2483 struct res_gid *res;
2486 spin_lock_irq(&rqp->mcg_spl);
2487 res = find_gid(dev, slave, rqp, gid);
2488 if (!res || res->prot != prot || res->steer != steer)
2491 list_del(&res->list);
2495 spin_unlock_irq(&rqp->mcg_spl);
/* Paravirtualized multicast attach/detach (op_modifier selects which).
 * Attach: record the GID in the tracker first, then perform the real
 * attach; if the attach fails the record is rolled back.  Detach:
 * remove the record, then perform the real detach.  The dummy mlx4_qp
 * only carries the QPN for the common attach/detach helpers.
 * NOTE(review): `err` checks, the qp.qpn assignment and the
 * attach/detach branch structure lines are missing from this
 * extraction. */
2500 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2501 struct mlx4_vhcr *vhcr,
2502 struct mlx4_cmd_mailbox *inbox,
2503 struct mlx4_cmd_mailbox *outbox,
2504 struct mlx4_cmd_info *cmd)
2506 struct mlx4_qp qp; /* dummy for calling attach/detach */
2507 u8 *gid = inbox->buf;
2508 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
2512 int attach = vhcr->op_modifier;
2513 int block_loopback = vhcr->in_modifier >> 31;
2514 u8 steer_type_mask = 2;
2515 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1; /* bit 1 of gid[7] selects steer type */
2517 qpn = vhcr->in_modifier & 0xffffff;
2518 err = get_res(dev, slave, qpn, RES_QP, &rqp);
2524 err = add_mcg_res(dev, slave, rqp, gid, prot, type);
2528 err = mlx4_qp_attach_common(dev, &qp, gid,
2529 block_loopback, prot, type);
2533 err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
2536 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
2539 put_res(dev, slave, qpn, RES_QP);
2543 /* ignore error return below, already in error */
2544 (void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
2546 put_res(dev, slave, qpn, RES_QP);
2552 BUSY_MAX_RETRIES = 10
/* Paravirtualized QUERY_IF_STAT: verify the slave owns the counter
 * index (low 16 bits of the modifier) before forwarding the query.
 * NOTE(review): declarations and the `err` check after get_res are
 * missing from this extraction. */
2555 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
2556 struct mlx4_vhcr *vhcr,
2557 struct mlx4_cmd_mailbox *inbox,
2558 struct mlx4_cmd_mailbox *outbox,
2559 struct mlx4_cmd_info *cmd)
2562 int index = vhcr->in_modifier & 0xffff;
2564 err = get_res(dev, slave, index, RES_COUNTER, NULL);
2568 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2569 put_res(dev, slave, index, RES_COUNTER);
/* Detach a dying slave's QP from every multicast group it recorded in
 * mcg_list (best effort — detach errors are ignored) and free the
 * records.  NOTE(review): the kfree(rgid) line is absent from this
 * extraction. */
2573 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
2575 struct res_gid *rgid;
2576 struct res_gid *tmp;
2577 struct mlx4_qp qp; /* dummy for calling attach/detach */
2579 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
2580 qp.qpn = rqp->local_qpn;
2581 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
2583 list_del(&rgid->list);
/* One pass over a slave's resources of the given type: force every
 * non-busy entry into RES_ANY_BUSY (remembering the prior state in
 * from_state) so cleanup can proceed; count/print entries that are
 * already busy (held by an in-flight command).  Returns the busy count.
 * NOTE(review): the busy counter increment and return are absent from
 * this extraction. */
2588 static int _move_all_busy(struct mlx4_dev *dev, int slave,
2589 enum mlx4_resource type, int print)
2591 struct mlx4_priv *priv = mlx4_priv(dev);
2592 struct mlx4_resource_tracker *tracker =
2593 &priv->mfunc.master.res_tracker;
2594 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
2595 struct res_common *r;
2596 struct res_common *tmp;
2600 spin_lock_irq(mlx4_tlock(dev));
2601 list_for_each_entry_safe(r, tmp, rlist, list) {
2602 if (r->owner == slave) {
2604 if (r->state == RES_ANY_BUSY) { /* someone else holds it right now */
2607 "%s id 0x%x is busy\n",
2612 r->from_state = r->state;
2613 r->state = RES_ANY_BUSY;
2619 spin_unlock_irq(mlx4_tlock(dev));
/* Repeatedly attempt _move_all_busy until no resources are busy or a
 * 5-second deadline expires; the final attempt prints the stragglers.
 * NOTE(review): the begin=jiffies init, loop structure and return are
 * absent from this extraction. */
2624 static int move_all_busy(struct mlx4_dev *dev, int slave,
2625 enum mlx4_resource type)
2627 unsigned long begin;
2632 busy = _move_all_busy(dev, slave, type, 0); /* quiet passes first */
2633 if (time_after(jiffies, begin + 5 * HZ))
2640 busy = _move_all_busy(dev, slave, type, 1); /* final pass: report stuck entries */
/* Tear down all QPs owned by a dying slave.  After forcing them busy,
 * walk each QP backwards through its lifecycle states (HW -> MAPPED ->
 * RESERVED -> gone): issue 2RST to firmware for HW QPs, free ICM for
 * mapped QPs, and finally delete the tracker entry.  Multicast
 * attachments are detached first.  NOTE(review): the switch statement,
 * several state-case lines, kfree and loop-closing lines are absent
 * from this extraction. */
2644 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
2646 struct mlx4_priv *priv = mlx4_priv(dev);
2647 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2648 struct list_head *qp_list =
2649 &tracker->slave_list[slave].res_list[RES_QP];
2657 err = move_all_busy(dev, slave, RES_QP);
2659 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
2660 "for slave %d\n", slave);
2662 spin_lock_irq(mlx4_tlock(dev));
2663 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
2664 spin_unlock_irq(mlx4_tlock(dev)); /* drop lock while issuing commands */
2665 if (qp->com.owner == slave) {
2666 qpn = qp->com.res_id;
2667 detach_qp(dev, slave, qp);
2668 state = qp->com.from_state;
2669 while (state != 0) { /* unwind state machine down to 0 (= freed) */
2671 case RES_QP_RESERVED:
2672 spin_lock_irq(mlx4_tlock(dev));
2673 radix_tree_delete(&tracker->res_tree[RES_QP],
2675 list_del(&qp->com.list);
2676 spin_unlock_irq(mlx4_tlock(dev));
2681 if (!valid_reserved(dev, slave, qpn))
2682 __mlx4_qp_free_icm(dev, qpn);
2683 state = RES_QP_RESERVED;
2687 err = mlx4_cmd(dev, in_param,
2690 MLX4_CMD_TIME_CLASS_A,
2693 mlx4_dbg(dev, "rem_slave_qps: failed"
2694 " to move slave %d qpn %d to"
2697 atomic_dec(&qp->rcq->ref_count);
2698 atomic_dec(&qp->scq->ref_count);
2699 atomic_dec(&qp->mtt->ref_count);
2701 atomic_dec(&qp->srq->ref_count);
2702 state = RES_QP_MAPPED;
2709 spin_lock_irq(mlx4_tlock(dev));
2711 spin_unlock_irq(mlx4_tlock(dev));
/* Tear down all SRQs owned by a dying slave: force busy, then unwind
 * each SRQ HW -> ALLOCATED (HW2SW_SRQ to firmware, drop MTT/CQ refs)
 * -> freed (release ICM, delete tracker entry).
 * NOTE(review): switch lines, kfree and loop-closing lines are absent
 * from this extraction. */
2714 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
2716 struct mlx4_priv *priv = mlx4_priv(dev);
2717 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2718 struct list_head *srq_list =
2719 &tracker->slave_list[slave].res_list[RES_SRQ];
2720 struct res_srq *srq;
2721 struct res_srq *tmp;
2728 err = move_all_busy(dev, slave, RES_SRQ);
2730 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
2731 "busy for slave %d\n", slave);
2733 spin_lock_irq(mlx4_tlock(dev));
2734 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
2735 spin_unlock_irq(mlx4_tlock(dev)); /* drop lock while issuing commands */
2736 if (srq->com.owner == slave) {
2737 srqn = srq->com.res_id;
2738 state = srq->com.from_state;
2739 while (state != 0) {
2741 case RES_SRQ_ALLOCATED:
2742 __mlx4_srq_free_icm(dev, srqn);
2743 spin_lock_irq(mlx4_tlock(dev));
2744 radix_tree_delete(&tracker->res_tree[RES_SRQ],
2746 list_del(&srq->com.list);
2747 spin_unlock_irq(mlx4_tlock(dev));
2754 err = mlx4_cmd(dev, in_param, srqn, 1,
2756 MLX4_CMD_TIME_CLASS_A,
2759 mlx4_dbg(dev, "rem_slave_srqs: failed"
2760 " to move slave %d srq %d to"
2764 atomic_dec(&srq->mtt->ref_count);
2766 atomic_dec(&srq->cq->ref_count);
2767 state = RES_SRQ_ALLOCATED;
2775 spin_lock_irq(mlx4_tlock(dev));
2777 spin_unlock_irq(mlx4_tlock(dev));
/* Tear down all CQs owned by a dying slave: force busy, then unwind
 * each CQ HW -> ALLOCATED (HW2SW_CQ to firmware, drop MTT ref) ->
 * freed.  CQs still referenced by QPs/SRQs (ref_count != 0) are
 * skipped this round — their owners are cleaned first.
 * NOTE(review): switch lines, kfree and loop-closing lines are absent
 * from this extraction. */
2780 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
2782 struct mlx4_priv *priv = mlx4_priv(dev);
2783 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2784 struct list_head *cq_list =
2785 &tracker->slave_list[slave].res_list[RES_CQ];
2794 err = move_all_busy(dev, slave, RES_CQ);
2796 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
2797 "busy for slave %d\n", slave);
2799 spin_lock_irq(mlx4_tlock(dev));
2800 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
2801 spin_unlock_irq(mlx4_tlock(dev));
2802 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) { /* skip CQs still in use */
2803 cqn = cq->com.res_id;
2804 state = cq->com.from_state;
2805 while (state != 0) {
2807 case RES_CQ_ALLOCATED:
2808 __mlx4_cq_free_icm(dev, cqn);
2809 spin_lock_irq(mlx4_tlock(dev));
2810 radix_tree_delete(&tracker->res_tree[RES_CQ],
2812 list_del(&cq->com.list);
2813 spin_unlock_irq(mlx4_tlock(dev));
2820 err = mlx4_cmd(dev, in_param, cqn, 1,
2822 MLX4_CMD_TIME_CLASS_A,
2825 mlx4_dbg(dev, "rem_slave_cqs: failed"
2826 " to move slave %d cq %d to"
2829 atomic_dec(&cq->mtt->ref_count);
2830 state = RES_CQ_ALLOCATED;
2838 spin_lock_irq(mlx4_tlock(dev));
2840 spin_unlock_irq(mlx4_tlock(dev));
/* Tear down all memory regions (MPTs) owned by a dying slave: unwind
 * HW -> MAPPED (HW2SW_MPT to firmware, drop MTT ref) -> RESERVED
 * (free ICM) -> freed (release the MR key, delete tracker entry).
 * NOTE(review): switch lines, kfree and loop-closing lines are absent
 * from this extraction. */
2843 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
2845 struct mlx4_priv *priv = mlx4_priv(dev);
2846 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2847 struct list_head *mpt_list =
2848 &tracker->slave_list[slave].res_list[RES_MPT];
2849 struct res_mpt *mpt;
2850 struct res_mpt *tmp;
2857 err = move_all_busy(dev, slave, RES_MPT);
2859 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
2860 "busy for slave %d\n", slave);
2862 spin_lock_irq(mlx4_tlock(dev));
2863 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
2864 spin_unlock_irq(mlx4_tlock(dev));
2865 if (mpt->com.owner == slave) {
2866 mptn = mpt->com.res_id;
2867 state = mpt->com.from_state;
2868 while (state != 0) {
2870 case RES_MPT_RESERVED:
2871 __mlx4_mr_release(dev, mpt->key);
2872 spin_lock_irq(mlx4_tlock(dev));
2873 radix_tree_delete(&tracker->res_tree[RES_MPT],
2875 list_del(&mpt->com.list);
2876 spin_unlock_irq(mlx4_tlock(dev));
2881 case RES_MPT_MAPPED:
2882 __mlx4_mr_free_icm(dev, mpt->key);
2883 state = RES_MPT_RESERVED;
2888 err = mlx4_cmd(dev, in_param, mptn, 0,
2890 MLX4_CMD_TIME_CLASS_A,
2893 mlx4_dbg(dev, "rem_slave_mrs: failed"
2894 " to move slave %d mpt %d to"
2898 atomic_dec(&mpt->mtt->ref_count);
2899 state = RES_MPT_MAPPED;
2906 spin_lock_irq(mlx4_tlock(dev));
2908 spin_unlock_irq(mlx4_tlock(dev));
/* Tear down all MTT ranges owned by a dying slave: free each allocated
 * range back to the device and delete its tracker entry.  Runs after
 * the QP/CQ/SRQ/MR teardowns that dropped their MTT references.
 * NOTE(review): switch lines, the mtt order argument to
 * __mlx4_free_mtt_range, kfree and loop-closing lines are absent from
 * this extraction. */
2911 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
2913 struct mlx4_priv *priv = mlx4_priv(dev);
2914 struct mlx4_resource_tracker *tracker =
2915 &priv->mfunc.master.res_tracker;
2916 struct list_head *mtt_list =
2917 &tracker->slave_list[slave].res_list[RES_MTT];
2918 struct res_mtt *mtt;
2919 struct res_mtt *tmp;
2925 err = move_all_busy(dev, slave, RES_MTT);
2927 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
2928 "busy for slave %d\n", slave);
2930 spin_lock_irq(mlx4_tlock(dev));
2931 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
2932 spin_unlock_irq(mlx4_tlock(dev));
2933 if (mtt->com.owner == slave) {
2934 base = mtt->com.res_id; /* res_id of an MTT is its base index */
2935 state = mtt->com.from_state;
2936 while (state != 0) {
2938 case RES_MTT_ALLOCATED:
2939 __mlx4_free_mtt_range(dev, base,
2941 spin_lock_irq(mlx4_tlock(dev));
2942 radix_tree_delete(&tracker->res_tree[RES_MTT],
2944 list_del(&mtt->com.list);
2945 spin_unlock_irq(mlx4_tlock(dev));
2955 spin_lock_irq(mlx4_tlock(dev));
2957 spin_unlock_irq(mlx4_tlock(dev));
/* Tear down all EQs owned by a dying slave: unwind HW -> RESERVED
 * (HW2SW_EQ via mlx4_cmd_box with a mailbox for the returned context,
 * then drop the MTT ref) -> freed (delete tracker entry).
 * NOTE(review): switch lines, the cmd opcode arguments, kfree and
 * loop-closing lines are absent from this extraction. */
2960 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
2962 struct mlx4_priv *priv = mlx4_priv(dev);
2963 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2964 struct list_head *eq_list =
2965 &tracker->slave_list[slave].res_list[RES_EQ];
2972 struct mlx4_cmd_mailbox *mailbox;
2974 err = move_all_busy(dev, slave, RES_EQ);
2976 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
2977 "busy for slave %d\n", slave);
2979 spin_lock_irq(mlx4_tlock(dev));
2980 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
2981 spin_unlock_irq(mlx4_tlock(dev));
2982 if (eq->com.owner == slave) {
2983 eqn = eq->com.res_id;
2984 state = eq->com.from_state;
2985 while (state != 0) {
2987 case RES_EQ_RESERVED:
2988 spin_lock_irq(mlx4_tlock(dev));
2989 radix_tree_delete(&tracker->res_tree[RES_EQ],
2991 list_del(&eq->com.list);
2992 spin_unlock_irq(mlx4_tlock(dev));
2998 mailbox = mlx4_alloc_cmd_mailbox(dev);
2999 if (IS_ERR(mailbox)) {
3003 err = mlx4_cmd_box(dev, slave, 0,
3006 MLX4_CMD_TIME_CLASS_A,
3008 mlx4_dbg(dev, "rem_slave_eqs: failed"
3009 " to move slave %d eqs %d to"
3010 " SW ownership\n", slave, eqn);
3011 mlx4_free_cmd_mailbox(dev, mailbox);
3013 atomic_dec(&eq->mtt->ref_count);
3014 state = RES_EQ_RESERVED;
3023 spin_lock_irq(mlx4_tlock(dev));
3025 spin_unlock_irq(mlx4_tlock(dev));
3028 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3030 struct mlx4_priv *priv = mlx4_priv(dev);
3032 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3034 rem_slave_macs(dev, slave);
3035 rem_slave_qps(dev, slave);
3036 rem_slave_srqs(dev, slave);
3037 rem_slave_cqs(dev, slave);
3038 rem_slave_mrs(dev, slave);
3039 rem_slave_eqs(dev, slave);
3040 rem_slave_mtts(dev, slave);
3041 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);