/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

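/*
 * The resource tracker records which HCA resources (QPs, CQs, SRQs, MPTs,
 * MTTs, EQs, counters, XRC domains, flow-steering rules, MACs) are owned
 * by which slave function, so the master (PF) can validate slave commands
 * against ownership and reclaim everything a slave holds when that slave
 * resets or shuts down.
 */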
#define MLX4_MAC_VALID          (1ull << 63)

struct mac_res {
        struct list_head list;
        u64 mac;
        u8 port;
};

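/*
 * Common header embedded in every tracked resource: the rb_node keys the
 * per-type tree by res_id, 'owner' is the owning slave, and the state
 * fields implement the busy/move protocol used while a resource is in
 * transition between states.
 */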
struct res_common {
        struct list_head        list;
        struct rb_node          node;
        u64                     res_id;
        int                     owner;
        int                     state;
        int                     from_state;
        int                     to_state;
        int                     removing;
};

enum {
        RES_ANY_BUSY = 1
};

struct res_gid {
        struct list_head        list;
        u8                      gid[16];
        enum mlx4_protocol      prot;
        enum mlx4_steer_type    steer;
        u64                     reg_id;
};

enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

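/*
 * A tracked QP keeps pointers to the MTT, CQs and SRQ it uses; their
 * ref_counts keep those resources from being freed while the QP still
 * references them.
 */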
struct res_qp {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *rcq;
        struct res_cq          *scq;
        struct res_srq         *srq;
        struct list_head        mcg_list;
        spinlock_t              mcg_spl;
        int                     local_qpn;
        atomic_t                ref_count;
};

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common       com;
        int                     order;
        atomic_t                ref_count;
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common       com;
        struct res_mtt         *mtt;
        int                     key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common       com;
        struct res_mtt         *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common       com;
        struct res_mtt         *mtt;
        atomic_t                ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *cq;
        atomic_t                ref_count;
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common       com;
        int                     port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common       com;
        int                     port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common       com;
        int                     qpn;
};

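/*
 * Per-type rb-tree helpers.  Both are called with mlx4_tlock(dev) held by
 * the caller.
 */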
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = container_of(node, struct res_common,
                                                      node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put new node */
        while (*new) {
                struct res_common *this = container_of(*new, struct res_common,
                                                       node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}

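/* QP state transitions a slave may request through the modify-QP commands */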
enum qp_transition {
        QP_TRANS_INIT2RTR,
        QP_TRANS_RTR2RTS,
        QP_TRANS_RTS2RTS,
        QP_TRANS_SQERR2RTS,
        QP_TRANS_SQD2SQD,
        QP_TRANS_SQD2RTS
};

/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";
        }
}

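/*
 * Allocate the per-slave resource lists and reset the per-type rb-trees.
 * Called by the master during multi-function (SR-IOV) initialization.
 */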
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;
        int t;

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY)
                        for (i = 0; i < dev->num_slaves; i++)
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}

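/*
 * Replace the pkey index a slave placed in a QP context mailbox with the
 * physical index from the master's virtual-to-physical pkey table.
 */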
static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

        if (MLX4_QP_ST_UD == ts)
                qp_ctx->pri_path.mgid_index = 0x80 | slave;

        if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
                        qp_ctx->pri_path.mgid_index = slave & 0x7F;
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
                        qp_ctx->alt_path.mgid_index = slave & 0x7F;
        }
}

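/*
 * Enforce the master's per-VF vport configuration on a QP context the
 * slave is about to install: force the VST vlan index and default QoS,
 * and, when spoof checking is on, pin the QP to the MAC index assigned
 * to the VF.
 */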
static int update_vport_qp_param(struct mlx4_dev *dev,
                                 struct mlx4_cmd_mailbox *inbox,
                                 u8 slave)
{
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
        int port;

        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
                if (MLX4_QP_ST_RC == qp_type)
                        return -EINVAL;

                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl = (1 << 6) | (1 << 2); /* set cv bit and hide_cqe_vlan bit */
                qpc->pri_path.feup |= 1 << 3; /* set fvl bit */
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
                mlx4_dbg(dev, "qp %d  port %d Q 0x%x set vlan to %d vidx %d feup %x fl %x\n",
                         be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
                         (int)(qpc->pri_path.sched_queue), vp_oper->state.default_vlan,
                         vp_oper->vlan_idx, (int)(qpc->pri_path.feup),
                         (int)(qpc->pri_path.fl));
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= 1 << 5; /* set fsm bit */
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
                mlx4_dbg(dev, "spoof qp %d  port %d feup  0x%x, myLmc 0x%x mindx %d\n",
                         be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
                         (int)qpc->pri_path.feup, (int)qpc->pri_path.grh_mylmc,
                         vp_oper->mac_idx);
        }
        return 0;
}

static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}

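/*
 * get_res() looks up a resource, verifies the caller owns it, and marks it
 * RES_ANY_BUSY so no concurrent command can touch it; put_res() restores
 * the previous state once the command is done with the resource.
 */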
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                   enum mlx4_resource type,
                   void *res)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENOENT;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r)
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

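/*
 * The alloc_*_tr() helpers allocate a tracker entry of the right type and
 * put it in its initial state; alloc_tr() below dispatches on the type.
 */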
static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
        ret->qpn = qpn;
        return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                printk(KERN_ERR "implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id);
                break;
        case RES_XRCD:
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}

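/*
 * Register 'count' consecutive resource IDs starting at 'base' to a slave:
 * allocate the tracker entries outside the lock, then insert them into the
 * per-type rb-tree and the slave's list under mlx4_tlock(dev).
 */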
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        /* i indexes res_arr, so unwind to 0 (not to 'base'), and take the
         * already-inserted entries back off the slave's list before they
         * are freed. */
        for (--i; i >= 0; --i) {
                rb_erase(&res_arr[i]->node, root);
                list_del(&res_arr[i]->list);
        }

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}

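/*
 * The remove_*_ok() helpers check that a resource is in a state from which
 * it may be freed: not busy, not referenced, and back in its initial state.
 */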
static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
            !list_empty(&res->mcg_list)) {
                pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
                       res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
        }

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
                       __func__, __LINE__,
                       mtt_states_str(res->com.state),
                       atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
        if (res->com.state == RES_COUNTER_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_COUNTER_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
        if (res->com.state == RES_XRCD_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_XRCD_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
        if (res->com.state == RES_FS_RULE_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_FS_RULE_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
        if (res->com.state == RES_CQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_CQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
        if (res->com.state == RES_SRQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_SRQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
        switch (type) {
        case RES_QP:
                return remove_qp_ok((struct res_qp *)res);
        case RES_CQ:
                return remove_cq_ok((struct res_cq *)res);
        case RES_SRQ:
                return remove_srq_ok((struct res_srq *)res);
        case RES_MPT:
                return remove_mpt_ok((struct res_mpt *)res);
        case RES_MTT:
                return remove_mtt_ok((struct res_mtt *)res, extra);
        case RES_MAC:
                return -ENOSYS;
        case RES_EQ:
                return remove_eq_ok((struct res_eq *)res);
        case RES_COUNTER:
                return remove_counter_ok((struct res_counter *)res);
        case RES_XRCD:
                return remove_xrcdn_ok((struct res_xrcdn *)res);
        case RES_FS_RULE:
                return remove_fs_rule_ok((struct res_fs_rule *)res);
        default:
                return -EINVAL;
        }
}

static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        u64 i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                if (!r) {
                        err = -ENOENT;
                        goto out;
                }
                if (r->owner != slave) {
                        err = -EPERM;
                        goto out;
                }
                err = remove_ok(r, type, extra);
                if (err)
                        goto out;
        }

        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                rb_erase(&r->node, &tracker->res_tree[type]);
                list_del(&r->list);
                kfree(r);
        }
        err = 0;

out:
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

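/*
 * The *_res_start_move_to() helpers begin a state transition: they
 * validate that the requested move is legal from the current state, then
 * park the resource in its BUSY state with from_state/to_state recorded.
 * res_end_move() commits the transition and res_abort_move() rolls it
 * back.
 */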
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
                                enum res_qp_states state, struct res_qp **qp,
                                int alloc)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_qp *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_QP_BUSY:
                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
                                 __func__, r->com.res_id);
                        err = -EBUSY;
                        break;

                case RES_QP_RESERVED:
                        if (r->com.state == RES_QP_MAPPED && !alloc)
                                break;

                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
                        err = -EINVAL;
                        break;

                case RES_QP_MAPPED:
                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
                            r->com.state == RES_QP_HW)
                                break;
                        else {
                                mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
                                          r->com.res_id);
                                err = -EINVAL;
                        }

                        break;

                case RES_QP_HW:
                        if (r->com.state != RES_QP_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_QP_BUSY;
                        if (qp)
                                *qp = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_mpt_states state, struct res_mpt **mpt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_mpt *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_MPT_BUSY:
                        err = -EINVAL;
                        break;

                case RES_MPT_RESERVED:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;

                case RES_MPT_MAPPED:
                        if (r->com.state != RES_MPT_RESERVED &&
                            r->com.state != RES_MPT_HW)
                                err = -EINVAL;
                        break;

                case RES_MPT_HW:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_MPT_BUSY;
                        if (mpt)
                                *mpt = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_eq_states state, struct res_eq **eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_eq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_EQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_EQ_RESERVED:
                        if (r->com.state != RES_EQ_HW)
                                err = -EINVAL;
                        break;

                case RES_EQ_HW:
                        if (r->com.state != RES_EQ_RESERVED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_EQ_BUSY;
                        if (eq)
                                *eq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
                                enum res_cq_states state, struct res_cq **cq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_cq *r;
        int err;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_CQ_BUSY:
                        err = -EBUSY;
                        break;

                case RES_CQ_ALLOCATED:
                        if (r->com.state != RES_CQ_HW)
                                err = -EINVAL;
                        else if (atomic_read(&r->ref_count))
                                err = -EBUSY;
                        else
                                err = 0;
                        break;

                case RES_CQ_HW:
                        if (r->com.state != RES_CQ_ALLOCATED)
                                err = -EINVAL;
                        else
                                err = 0;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_CQ_BUSY;
                        if (cq)
                                *cq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                 enum res_srq_states state, struct res_srq **srq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_srq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_SRQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_SRQ_ALLOCATED:
                        if (r->com.state != RES_SRQ_HW)
                                err = -EINVAL;
                        else if (atomic_read(&r->ref_count))
                                err = -EBUSY;
                        break;

                case RES_SRQ_HW:
                        if (r->com.state != RES_SRQ_ALLOCATED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_SRQ_BUSY;
                        if (srq)
                                *srq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

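/*
 * Roll back or commit a transition begun by one of the
 * *_res_start_move_to() helpers above.
 */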
static void res_abort_move(struct mlx4_dev *dev, int slave,
                           enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
                         enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->to_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

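/*
 * valid_reserved() checks whether a reserved qpn may legitimately be
 * claimed by this slave (the master, or a guest's own proxy QPs);
 * fw_reserved() identifies qpns owned by firmware, for which the driver
 * never maps ICM.
 */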
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
        return mlx4_is_qp_reserved(dev, qpn) &&
                (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
        return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}

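/*
 * The *_alloc_res() handlers below implement the slave's ALLOC_RES command
 * per resource type.  RES_OP_RESERVE reserves a range of IDs,
 * RES_OP_MAP_ICM maps ICM backing for an already-reserved ID, and
 * RES_OP_RESERVE_AND_MAP does both in one step.
 */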
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err;
        int count;
        int align;
        int base;
        int qpn;

        switch (op) {
        case RES_OP_RESERVE:
                count = get_param_l(&in_param);
                align = get_param_h(&in_param);
                err = __mlx4_qp_reserve_range(dev, count, align, &base);
                if (err)
                        return err;

                err = add_res_range(dev, slave, base, count, RES_QP, 0);
                if (err) {
                        __mlx4_qp_release_range(dev, base, count);
                        return err;
                }
                set_param_l(out_param, base);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                if (valid_reserved(dev, slave, qpn)) {
                        err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
                        if (err)
                                return err;
                }

                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
                                           NULL, 1);
                if (err)
                        return err;

                if (!fw_reserved(dev, qpn)) {
                        err = __mlx4_qp_alloc_icm(dev, qpn);
                        if (err) {
                                res_abort_move(dev, slave, RES_QP, qpn);
                                return err;
                        }
                }

                res_end_move(dev, slave, RES_QP, qpn);
                break;

        default:
                err = -EINVAL;
                break;
        }
        return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int base;
        int order;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        order = get_param_l(&in_param);
        base = __mlx4_alloc_mtt_range(dev, order);
        if (base == -1)
                return -ENOMEM;

        err = add_res_range(dev, slave, base, 1, RES_MTT, order);
        if (err)
                __mlx4_free_mtt_range(dev, base, order);
        else
                set_param_l(out_param, base);

        return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int index;
        int id;
        struct res_mpt *mpt;

        switch (op) {
        case RES_OP_RESERVE:
                index = __mlx4_mpt_reserve(dev);
                if (index == -1)
                        break;
                id = index & mpt_mask(dev);

                err = add_res_range(dev, slave, id, 1, RES_MPT, index);
                if (err) {
                        __mlx4_mpt_release(dev, index);
                        break;
                }
                set_param_l(out_param, index);
                break;
        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_MAPPED, &mpt);
                if (err)
                        return err;

                err = __mlx4_mpt_alloc_icm(dev, mpt->key);
                if (err) {
                        res_abort_move(dev, slave, RES_MPT, id);
                        return err;
                }

                res_end_move(dev, slave, RES_MPT, id);
                break;
        }
        return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int cqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                err = __mlx4_cq_alloc_icm(dev, &cqn);
                if (err)
                        break;

                err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
                if (err) {
                        __mlx4_cq_free_icm(dev, cqn);
                        break;
                }

                set_param_l(out_param, cqn);
                break;

        default:
                err = -EINVAL;
        }

        return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int srqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                err = __mlx4_srq_alloc_icm(dev, &srqn);
                if (err)
                        break;

                err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
                if (err) {
                        __mlx4_srq_free_icm(dev, srqn);
                        break;
                }

                set_param_l(out_param, srqn);
                break;

        default:
                err = -EINVAL;
        }

        return err;
}

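/*
 * MACs are tracked in a simple per-slave list keyed by (mac, port) rather
 * than in the rb-trees.
 */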
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct mac_res *res;

        res = kzalloc(sizeof *res, GFP_KERNEL);
        if (!res)
                return -ENOMEM;
        res->mac = mac;
        res->port = (u8) port;
        list_add_tail(&res->list,
                      &tracker->slave_list[slave].res_list[RES_MAC]);
        return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
                               int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                if (res->mac == mac && res->port == (u8) port) {
                        list_del(&res->list);
                        kfree(res);
                        break;
                }
        }
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                list_del(&res->list);
                __mlx4_unregister_mac(dev, res->port, res->mac);
                kfree(res);
        }
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int port;
        u64 mac;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        port = get_param_l(out_param);
        mac = in_param;

        err = __mlx4_register_mac(dev, port, mac);
        if (err >= 0) {
                set_param_l(out_param, err);
                err = 0;
        }

        if (!err) {
                err = mac_add_to_slave(dev, slave, mac, port);
                if (err)
                        __mlx4_unregister_mac(dev, port, mac);
        }
        return err;
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                          u64 in_param, u64 *out_param)
{
        return 0;
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                             u64 in_param, u64 *out_param)
{
        u32 index;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        err = __mlx4_counter_alloc(dev, &index);
        if (err)
                return err;

        err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
        if (err)
                __mlx4_counter_free(dev, index);
        else
                set_param_l(out_param, index);

        return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                           u64 in_param, u64 *out_param)
{
        u32 xrcdn;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        err = __mlx4_xrcd_alloc(dev, &xrcdn);
        if (err)
                return err;

        err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
        if (err)
                __mlx4_xrcd_free(dev, xrcdn);
        else
                set_param_l(out_param, xrcdn);

        return err;
}

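/*
 * Command wrapper: dispatch a slave's ALLOC_RES command to the per-type
 * handler.  The resource type arrives in in_modifier and the operation in
 * op_modifier.
 */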
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int err;
        int alop = vhcr->op_modifier;

        switch (vhcr->in_modifier) {
        case RES_QP:
                err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MTT:
                err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MPT:
                err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_CQ:
                err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_SRQ:
                err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MAC:
                err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_VLAN:
                err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                     vhcr->in_param, &vhcr->out_param);
                break;

        case RES_COUNTER:
                err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                        vhcr->in_param, &vhcr->out_param);
                break;

        case RES_XRCD:
                err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                      vhcr->in_param, &vhcr->out_param);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                       u64 in_param)
{
        int err;
        int count;
        int base;
        int qpn;

        switch (op) {
        case RES_OP_RESERVE:
                base = get_param_l(&in_param) & 0x7fffff;
                count = get_param_h(&in_param);
                err = rem_res_range(dev, slave, base, count, RES_QP, 0);
                if (err)
                        break;
                __mlx4_qp_release_range(dev, base, count);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
                                           NULL, 0);
                if (err)
                        return err;

                if (!fw_reserved(dev, qpn))
                        __mlx4_qp_free_icm(dev, qpn);

                res_end_move(dev, slave, RES_QP, qpn);

                if (valid_reserved(dev, slave, qpn))
                        err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
                break;
        default:
                err = -EINVAL;
                break;
        }
        return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int base;
        int order;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        base = get_param_l(&in_param);
        order = get_param_h(&in_param);
        err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
        if (!err)
                __mlx4_free_mtt_range(dev, base, order);
        return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param)
{
        int err = -EINVAL;
        int index;
        int id;
        struct res_mpt *mpt;

        switch (op) {
        case RES_OP_RESERVE:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = get_res(dev, slave, id, RES_MPT, &mpt);
                if (err)
                        break;
                index = mpt->key;
                put_res(dev, slave, id, RES_MPT);

                err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
                if (err)
                        break;
                __mlx4_mpt_release(dev, index);
                break;
        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_RESERVED, &mpt);
                if (err)
                        return err;

                __mlx4_mpt_free_icm(dev, mpt->key);
                res_end_move(dev, slave, RES_MPT, id);
                return err;
        default:
                err = -EINVAL;
                break;
        }
        return err;
}

1659 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1660                        u64 in_param, u64 *out_param)
1661 {
1662         int cqn;
1663         int err;
1664
1665         switch (op) {
1666         case RES_OP_RESERVE_AND_MAP:
1667                 cqn = get_param_l(&in_param);
1668                 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1669                 if (err)
1670                         break;
1671
1672                 __mlx4_cq_free_icm(dev, cqn);
1673                 break;
1674
1675         default:
1676                 err = -EINVAL;
1677                 break;
1678         }
1679
1680         return err;
1681 }
1682
1683 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1684                         u64 in_param, u64 *out_param)
1685 {
1686         int srqn;
1687         int err;
1688
1689         switch (op) {
1690         case RES_OP_RESERVE_AND_MAP:
1691                 srqn = get_param_l(&in_param);
1692                 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1693                 if (err)
1694                         break;
1695
1696                 __mlx4_srq_free_icm(dev, srqn);
1697                 break;
1698
1699         default:
1700                 err = -EINVAL;
1701                 break;
1702         }
1703
1704         return err;
1705 }
1706
1707 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1708                             u64 in_param, u64 *out_param)
1709 {
1710         int port;
1711         int err = 0;
1712
1713         switch (op) {
1714         case RES_OP_RESERVE_AND_MAP:
1715                 port = get_param_l(out_param);
1716                 mac_del_from_slave(dev, slave, in_param, port);
1717                 __mlx4_unregister_mac(dev, port, in_param);
1718                 break;
1719         default:
1720                 err = -EINVAL;
1721                 break;
1722         }
1723
1724         return err;
1725
1726 }
1727
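/* VLAN registrations are not tracked per slave here, so this is a no-op. */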
1728 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1729                             u64 in_param, u64 *out_param)
1730 {
1731         return 0;
1732 }
1733
1734 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1735                             u64 in_param, u64 *out_param)
1736 {
1737         int index;
1738         int err;
1739
1740         if (op != RES_OP_RESERVE)
1741                 return -EINVAL;
1742
1743         index = get_param_l(&in_param);
1744         err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1745         if (err)
1746                 return err;
1747
1748         __mlx4_counter_free(dev, index);
1749
1750         return err;
1751 }
1752
1753 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1754                           u64 in_param, u64 *out_param)
1755 {
1756         int xrcdn;
1757         int err;
1758
1759         if (op != RES_OP_RESERVE)
1760                 return -EINVAL;
1761
1762         xrcdn = get_param_l(&in_param);
1763         err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1764         if (err)
1765                 return err;
1766
1767         __mlx4_xrcd_free(dev, xrcdn);
1768
1769         return err;
1770 }
1771
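/*
 * FREE_RES dispatcher: vhcr->in_modifier selects the resource type,
 * vhcr->op_modifier the free operation (reserve vs. ICM-map level).
 */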
1772 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1773                           struct mlx4_vhcr *vhcr,
1774                           struct mlx4_cmd_mailbox *inbox,
1775                           struct mlx4_cmd_mailbox *outbox,
1776                           struct mlx4_cmd_info *cmd)
1777 {
1778         int err = -EINVAL;
1779         int alop = vhcr->op_modifier;
1780
1781         switch (vhcr->in_modifier) {
1782         case RES_QP:
1783                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1784                                   vhcr->in_param);
1785                 break;
1786
1787         case RES_MTT:
1788                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1789                                    vhcr->in_param, &vhcr->out_param);
1790                 break;
1791
1792         case RES_MPT:
1793                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1794                                    vhcr->in_param);
1795                 break;
1796
1797         case RES_CQ:
1798                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1799                                   vhcr->in_param, &vhcr->out_param);
1800                 break;
1801
1802         case RES_SRQ:
1803                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1804                                    vhcr->in_param, &vhcr->out_param);
1805                 break;
1806
1807         case RES_MAC:
1808                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1809                                    vhcr->in_param, &vhcr->out_param);
1810                 break;
1811
1812         case RES_VLAN:
1813                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1814                                    vhcr->in_param, &vhcr->out_param);
1815                 break;
1816
1817         case RES_COUNTER:
1818                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
1819                                        vhcr->in_param, &vhcr->out_param);
1820                 break;
1821
1822         case RES_XRCD:
1823                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
1824                                      vhcr->in_param, &vhcr->out_param);
                break;
1825 
1826         default:
1827                 break;
1828         }
1829         return err;
1830 }
1831
1832 /* ugly but other choices are uglier */
1833 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1834 {
1835         return (be32_to_cpu(mpt->flags) >> 9) & 1;
1836 }
1837
1838 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
1839 {
1840         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
1841 }
1842
1843 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1844 {
1845         return be32_to_cpu(mpt->mtt_sz);
1846 }
1847
1848 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
1849 {
1850         return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
1851 }
1852
1853 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
1854 {
1855         return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
1856 }
1857
1858 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
1859 {
1860         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
1861 }
1862
1863 static int mr_is_region(struct mlx4_mpt_entry *mpt)
1864 {
1865         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
1866 }
1867
1868 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
1869 {
1870         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1871 }
1872
1873 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
1874 {
1875         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
1876 }
1877
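/*
 * Number of MTT pages spanned by a QP's work queues: the SQ occupies
 * 2^(log_sq_size + log_sq_stride + 4) bytes, and the RQ contributes
 * nothing when the QP uses an SRQ, RSS or XRC. For example,
 * log_sq_size = 6 and log_sq_stride = 2 give 64 entries of 64 bytes,
 * i.e. exactly one page at a page_shift of 12 (4KB pages).
 */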
1878 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1879 {
1880         int page_shift = (qpc->log_page_size & 0x3f) + 12;
1881         int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
1882         int log_sq_stride = qpc->sq_size_stride & 7;
1883         int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
1884         int log_rq_stride = qpc->rq_size_stride & 7;
1885         int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
1886         int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
1887         int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
1888         int sq_size;
1889         int rq_size;
1890         int total_pages;
1891         int total_mem;
1892         int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
1893
1894         sq_size = 1 << (log_sq_size + log_sq_stride + 4);
1895         rq_size = (srq | rss | xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
1896         total_mem = sq_size + rq_size;
1897         total_pages =
1898                 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1899                                    page_shift);
1900
1901         return total_pages;
1902 }
1903
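/*
 * An MTT reservation of order n covers entries [res_id, res_id + 2^n);
 * reject any window that is not fully contained in the range the
 * slave actually owns.
 */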
1904 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1905                            int size, struct res_mtt *mtt)
1906 {
1907         int res_start = mtt->com.res_id;
1908         int res_size = (1 << mtt->order);
1909
1910         if (start < res_start || start + size > res_start + res_size)
1911                 return -EPERM;
1912         return 0;
1913 }
1914
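/*
 * SW2HW_MPT: move an MPT into HW ownership on behalf of a slave, after
 * validating that it is a region (no memory windows for VFs), that the
 * PD's slave-id bits match the caller, that FMRs don't enable binding,
 * and, for non-physical MRs, that the referenced MTT range belongs to
 * the slave.
 */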
1915 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1916                            struct mlx4_vhcr *vhcr,
1917                            struct mlx4_cmd_mailbox *inbox,
1918                            struct mlx4_cmd_mailbox *outbox,
1919                            struct mlx4_cmd_info *cmd)
1920 {
1921         int err;
1922         int index = vhcr->in_modifier;
1923         struct res_mtt *mtt;
1924         struct res_mpt *mpt;
1925         int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
1926         int phys;
1927         int id;
1928         u32 pd;
1929         int pd_slave;
1930
1931         id = index & mpt_mask(dev);
1932         err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
1933         if (err)
1934                 return err;
1935
1936         /* Disable memory windows for VFs. */
1937         if (!mr_is_region(inbox->buf)) {
1938                 err = -EPERM;
1939                 goto ex_abort;
1940         }
1941
1942         /* Make sure that the PD bits related to the slave id are zeros. */
1943         pd = mr_get_pd(inbox->buf);
1944         pd_slave = (pd >> 17) & 0x7f;
1945         if (pd_slave != 0 && pd_slave != slave) {
1946                 err = -EPERM;
1947                 goto ex_abort;
1948         }
1949
1950         if (mr_is_fmr(inbox->buf)) {
1951                 /* FMR and Bind Enable are forbidden in slave devices. */
1952                 if (mr_is_bind_enabled(inbox->buf)) {
1953                         err = -EPERM;
1954                         goto ex_abort;
1955                 }
1956                 /* FMR and Memory Windows are also forbidden. */
1957                 if (!mr_is_region(inbox->buf)) {
1958                         err = -EPERM;
1959                         goto ex_abort;
1960                 }
1961         }
1962
1963         phys = mr_phys_mpt(inbox->buf);
1964         if (!phys) {
1965                 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1966                 if (err)
1967                         goto ex_abort;
1968
1969                 err = check_mtt_range(dev, slave, mtt_base,
1970                                       mr_get_mtt_size(inbox->buf), mtt);
1971                 if (err)
1972                         goto ex_put;
1973
1974                 mpt->mtt = mtt;
1975         }
1976
1977         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1978         if (err)
1979                 goto ex_put;
1980
1981         if (!phys) {
1982                 atomic_inc(&mtt->ref_count);
1983                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1984         }
1985
1986         res_end_move(dev, slave, RES_MPT, id);
1987         return 0;
1988
1989 ex_put:
1990         if (!phys)
1991                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1992 ex_abort:
1993         res_abort_move(dev, slave, RES_MPT, id);
1994
1995         return err;
1996 }
1997
1998 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1999                            struct mlx4_vhcr *vhcr,
2000                            struct mlx4_cmd_mailbox *inbox,
2001                            struct mlx4_cmd_mailbox *outbox,
2002                            struct mlx4_cmd_info *cmd)
2003 {
2004         int err;
2005         int index = vhcr->in_modifier;
2006         struct res_mpt *mpt;
2007         int id;
2008
2009         id = index & mpt_mask(dev);
2010         err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2011         if (err)
2012                 return err;
2013
2014         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2015         if (err)
2016                 goto ex_abort;
2017
2018         if (mpt->mtt)
2019                 atomic_dec(&mpt->mtt->ref_count);
2020
2021         res_end_move(dev, slave, RES_MPT, id);
2022         return 0;
2023
2024 ex_abort:
2025         res_abort_move(dev, slave, RES_MPT, id);
2026
2027         return err;
2028 }
2029
2030 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2031                            struct mlx4_vhcr *vhcr,
2032                            struct mlx4_cmd_mailbox *inbox,
2033                            struct mlx4_cmd_mailbox *outbox,
2034                            struct mlx4_cmd_info *cmd)
2035 {
2036         int err;
2037         int index = vhcr->in_modifier;
2038         struct res_mpt *mpt;
2039         int id;
2040
2041         id = index & mpt_mask(dev);
2042         err = get_res(dev, slave, id, RES_MPT, &mpt);
2043         if (err)
2044                 return err;
2045
2046         if (mpt->com.from_state != RES_MPT_HW) {
2047                 err = -EBUSY;
2048                 goto out;
2049         }
2050
2051         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2052
2053 out:
2054         put_res(dev, slave, id, RES_MPT);
2055         return err;
2056 }
2057
2058 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2059 {
2060         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2061 }
2062
2063 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2064 {
2065         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2066 }
2067
2068 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2069 {
2070         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2071 }
2072
2073 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2074                                   struct mlx4_qp_context *context)
2075 {
2076         u32 qpn = vhcr->in_modifier & 0xffffff;
2077         u32 qkey = 0;
2078
2079         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2080                 return;
2081
2082         /* adjust qkey in qp context */
2083         context->qkey = cpu_to_be32(qkey);
2084 }
2085
2086 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2087                              struct mlx4_vhcr *vhcr,
2088                              struct mlx4_cmd_mailbox *inbox,
2089                              struct mlx4_cmd_mailbox *outbox,
2090                              struct mlx4_cmd_info *cmd)
2091 {
2092         int err;
2093         int qpn = vhcr->in_modifier & 0x7fffff;
2094         struct res_mtt *mtt;
2095         struct res_qp *qp;
2096         struct mlx4_qp_context *qpc = inbox->buf + 8;
2097         int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2098         int mtt_size = qp_get_mtt_size(qpc);
2099         struct res_cq *rcq;
2100         struct res_cq *scq;
2101         int rcqn = qp_get_rcqn(qpc);
2102         int scqn = qp_get_scqn(qpc);
2103         u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2104         int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2105         struct res_srq *srq;
2106         int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2107
2108         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2109         if (err)
2110                 return err;
2111         qp->local_qpn = local_qpn;
2112
2113         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2114         if (err)
2115                 goto ex_abort;
2116
2117         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2118         if (err)
2119                 goto ex_put_mtt;
2120
2121         err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2122         if (err)
2123                 goto ex_put_mtt;
2124
2125         if (scqn != rcqn) {
2126                 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2127                 if (err)
2128                         goto ex_put_rcq;
2129         } else {
2130                 scq = rcq;
        }
2131
2132         if (use_srq) {
2133                 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2134                 if (err)
2135                         goto ex_put_scq;
2136         }
2137
2138         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2139         update_pkey_index(dev, slave, inbox);
2140         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2141         if (err)
2142                 goto ex_put_srq;
2143         atomic_inc(&mtt->ref_count);
2144         qp->mtt = mtt;
2145         atomic_inc(&rcq->ref_count);
2146         qp->rcq = rcq;
2147         atomic_inc(&scq->ref_count);
2148         qp->scq = scq;
2149
2150         if (scqn != rcqn)
2151                 put_res(dev, slave, scqn, RES_CQ);
2152
2153         if (use_srq) {
2154                 atomic_inc(&srq->ref_count);
2155                 put_res(dev, slave, srqn, RES_SRQ);
2156                 qp->srq = srq;
2157         }
2158         put_res(dev, slave, rcqn, RES_CQ);
2159         put_res(dev, slave, mtt_base, RES_MTT);
2160         res_end_move(dev, slave, RES_QP, qpn);
2161
2162         return 0;
2163
2164 ex_put_srq:
2165         if (use_srq)
2166                 put_res(dev, slave, srqn, RES_SRQ);
2167 ex_put_scq:
2168         if (scqn != rcqn)
2169                 put_res(dev, slave, scqn, RES_CQ);
2170 ex_put_rcq:
2171         put_res(dev, slave, rcqn, RES_CQ);
2172 ex_put_mtt:
2173         put_res(dev, slave, mtt_base, RES_MTT);
2174 ex_abort:
2175         res_abort_move(dev, slave, RES_QP, qpn);
2176
2177         return err;
2178 }
2179
2180 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2181 {
2182         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2183 }
2184
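/* EQEs are 32 bytes, so an EQ of 2^n entries occupies 2^(n + 5) bytes. */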
2185 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2186 {
2187         int log_eq_size = eqc->log_eq_size & 0x1f;
2188         int page_shift = (eqc->log_page_size & 0x3f) + 12;
2189
2190         if (log_eq_size + 5 < page_shift)
2191                 return 1;
2192
2193         return 1 << (log_eq_size + 5 - page_shift);
2194 }
2195
2196 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2197 {
2198         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2199 }
2200
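/* CQEs are likewise 32 bytes; same page math as eq_get_mtt_size(). */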
2201 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2202 {
2203         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2204         int page_shift = (cqc->log_page_size & 0x3f) + 12;
2205
2206         if (log_cq_size + 5 < page_shift)
2207                 return 1;
2208
2209         return 1 << (log_cq_size + 5 - page_shift);
2210 }
2211
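/*
 * EQs are tracked under a composite id ((slave << 8) | eqn), since EQ
 * numbers are only unique per function.
 */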
2212 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2213                           struct mlx4_vhcr *vhcr,
2214                           struct mlx4_cmd_mailbox *inbox,
2215                           struct mlx4_cmd_mailbox *outbox,
2216                           struct mlx4_cmd_info *cmd)
2217 {
2218         int err;
2219         int eqn = vhcr->in_modifier;
2220         int res_id = (slave << 8) | eqn;
2221         struct mlx4_eq_context *eqc = inbox->buf;
2222         int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2223         int mtt_size = eq_get_mtt_size(eqc);
2224         struct res_eq *eq;
2225         struct res_mtt *mtt;
2226
2227         err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2228         if (err)
2229                 return err;
2230         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2231         if (err)
2232                 goto out_add;
2233
2234         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2235         if (err)
2236                 goto out_move;
2237
2238         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2239         if (err)
2240                 goto out_put;
2241
2242         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2243         if (err)
2244                 goto out_put;
2245
2246         atomic_inc(&mtt->ref_count);
2247         eq->mtt = mtt;
2248         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2249         res_end_move(dev, slave, RES_EQ, res_id);
2250         return 0;
2251
2252 out_put:
2253         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2254 out_move:
2255         res_abort_move(dev, slave, RES_EQ, res_id);
2256 out_add:
2257         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2258         return err;
2259 }
2260
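/*
 * Find the slave-owned MTT reservation containing [start, start + len)
 * and mark it busy under the tracker lock so it cannot be freed while
 * a WRITE_MTT into it is in flight; the caller put_res()es it.
 */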
2261 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2262                               int len, struct res_mtt **res)
2263 {
2264         struct mlx4_priv *priv = mlx4_priv(dev);
2265         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2266         struct res_mtt *mtt;
2267         int err = -EINVAL;
2268
2269         spin_lock_irq(mlx4_tlock(dev));
2270         list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2271                             com.list) {
2272                 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2273                         *res = mtt;
2274                         mtt->com.from_state = mtt->com.state;
2275                         mtt->com.state = RES_MTT_BUSY;
2276                         err = 0;
2277                         break;
2278                 }
2279         }
2280         spin_unlock_irq(mlx4_tlock(dev));
2281
2282         return err;
2283 }
2284
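/*
 * Sanity-check a QP state-transition mailbox coming from a slave: on
 * RC/UC transitions that touch an address path, slaves may only use
 * gid index 0.
 */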
2285 static int verify_qp_parameters(struct mlx4_dev *dev,
2286                                 struct mlx4_cmd_mailbox *inbox,
2287                                 enum qp_transition transition, u8 slave)
2288 {
2289         u32                     qp_type;
2290         struct mlx4_qp_context  *qp_ctx;
2291         enum mlx4_qp_optpar     optpar;
2292
2293         qp_ctx  = inbox->buf + 8;
2294         qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2295         optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
2296
2297         switch (qp_type) {
2298         case MLX4_QP_ST_RC:
2299         case MLX4_QP_ST_UC:
2300                 switch (transition) {
2301                 case QP_TRANS_INIT2RTR:
2302                 case QP_TRANS_RTR2RTS:
2303                 case QP_TRANS_RTS2RTS:
2304                 case QP_TRANS_SQD2SQD:
2305                 case QP_TRANS_SQD2RTS:
2306                         if (slave != mlx4_master_func_num(dev)) {
2307                                 /* slaves have only gid index 0 */
2308                                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
2309                                         if (qp_ctx->pri_path.mgid_index)
2310                                                 return -EINVAL;
2311                                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
2312                                         if (qp_ctx->alt_path.mgid_index)
2313                                                 return -EINVAL;
                        }
2314                         break;
2315                 default:
2316                         break;
2317                 }
2318
2319                 break;
2320         default:
2321                 break;
2322         }
2323
2324         return 0;
2325 }
2326
2327 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2328                            struct mlx4_vhcr *vhcr,
2329                            struct mlx4_cmd_mailbox *inbox,
2330                            struct mlx4_cmd_mailbox *outbox,
2331                            struct mlx4_cmd_info *cmd)
2332 {
2333         struct mlx4_mtt mtt;
2334         __be64 *page_list = inbox->buf;
2335         u64 *pg_list = (u64 *)page_list;
2336         int i;
2337         struct res_mtt *rmtt = NULL;
2338         int start = be64_to_cpu(page_list[0]);
2339         int npages = vhcr->in_modifier;
2340         int err;
2341
2342         err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2343         if (err)
2344                 return err;
2345
2346         /* Call the SW implementation of write_mtt:
2347          * - Prepare a dummy mtt struct
2348          * - Translate inbox contents to simple addresses in host endianness */
2349         mtt.offset = 0;  /* TBD this is broken but I don't handle it since
2350                             we don't really use it */
2351         mtt.order = 0;
2352         mtt.page_shift = 0;
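        /* quadword 0 of the mailbox holds the starting MTT index and
         * quadword 1 is unused here, so the entries proper begin at
         * index 2 */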
2353         for (i = 0; i < npages; ++i)
2354                 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2355
2356         err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2357                                ((u64 *)page_list + 2));
2358
2359         if (rmtt)
2360                 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2361
2362         return err;
2363 }
2364
2365 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2366                           struct mlx4_vhcr *vhcr,
2367                           struct mlx4_cmd_mailbox *inbox,
2368                           struct mlx4_cmd_mailbox *outbox,
2369                           struct mlx4_cmd_info *cmd)
2370 {
2371         int eqn = vhcr->in_modifier;
2372         int res_id = eqn | (slave << 8);
2373         struct res_eq *eq;
2374         int err;
2375
2376         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2377         if (err)
2378                 return err;
2379
2380         err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2381         if (err)
2382                 goto ex_abort;
2383
2384         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2385         if (err)
2386                 goto ex_put;
2387
2388         atomic_dec(&eq->mtt->ref_count);
2389         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2390         res_end_move(dev, slave, RES_EQ, res_id);
2391         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2392
2393         return 0;
2394
2395 ex_put:
2396         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2397 ex_abort:
2398         res_abort_move(dev, slave, RES_EQ, res_id);
2399
2400         return err;
2401 }
2402
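/*
 * Inject an event into a slave's event queue. The EQE is delivered
 * only if the slave registered an EQ for this event type, and the EQ
 * is held busy across the GEN_EQE command so it cannot be torn down
 * underneath us.
 */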
2403 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2404 {
2405         struct mlx4_priv *priv = mlx4_priv(dev);
2406         struct mlx4_slave_event_eq_info *event_eq;
2407         struct mlx4_cmd_mailbox *mailbox;
2408         u32 in_modifier = 0;
2409         int err;
2410         int res_id;
2411         struct res_eq *req;
2412
2413         if (!priv->mfunc.master.slave_state)
2414                 return -EINVAL;
2415
2416         event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2417
2418         /* Create the event only if the slave is registered */
2419         if (event_eq->eqn < 0)
2420                 return 0;
2421
2422         mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2423         res_id = (slave << 8) | event_eq->eqn;
2424         err = get_res(dev, slave, res_id, RES_EQ, &req);
2425         if (err)
2426                 goto unlock;
2427
2428         if (req->com.from_state != RES_EQ_HW) {
2429                 err = -EINVAL;
2430                 goto put;
2431         }
2432
2433         mailbox = mlx4_alloc_cmd_mailbox(dev);
2434         if (IS_ERR(mailbox)) {
2435                 err = PTR_ERR(mailbox);
2436                 goto put;
2437         }
2438
2439         if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2440                 ++event_eq->token;
2441                 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2442         }
2443
2444         memcpy(mailbox->buf, (u8 *) eqe, 28);
2445
2446         in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2447
2448         err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2449                        MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2450                        MLX4_CMD_NATIVE);
2451
2452         put_res(dev, slave, res_id, RES_EQ);
2453         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2454         mlx4_free_cmd_mailbox(dev, mailbox);
2455         return err;
2456
2457 put:
2458         put_res(dev, slave, res_id, RES_EQ);
2459
2460 unlock:
2461         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2462         return err;
2463 }
2464
2465 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2466                           struct mlx4_vhcr *vhcr,
2467                           struct mlx4_cmd_mailbox *inbox,
2468                           struct mlx4_cmd_mailbox *outbox,
2469                           struct mlx4_cmd_info *cmd)
2470 {
2471         int eqn = vhcr->in_modifier;
2472         int res_id = eqn | (slave << 8);
2473         struct res_eq *eq;
2474         int err;
2475
2476         err = get_res(dev, slave, res_id, RES_EQ, &eq);
2477         if (err)
2478                 return err;
2479
2480         if (eq->com.from_state != RES_EQ_HW) {
2481                 err = -EINVAL;
2482                 goto ex_put;
2483         }
2484
2485         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2486
2487 ex_put:
2488         put_res(dev, slave, res_id, RES_EQ);
2489         return err;
2490 }
2491
2492 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2493                           struct mlx4_vhcr *vhcr,
2494                           struct mlx4_cmd_mailbox *inbox,
2495                           struct mlx4_cmd_mailbox *outbox,
2496                           struct mlx4_cmd_info *cmd)
2497 {
2498         int err;
2499         int cqn = vhcr->in_modifier;
2500         struct mlx4_cq_context *cqc = inbox->buf;
2501         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2502         struct res_cq *cq;
2503         struct res_mtt *mtt;
2504
2505         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2506         if (err)
2507                 return err;
2508         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2509         if (err)
2510                 goto out_move;
2511         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2512         if (err)
2513                 goto out_put;
2514         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2515         if (err)
2516                 goto out_put;
2517         atomic_inc(&mtt->ref_count);
2518         cq->mtt = mtt;
2519         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2520         res_end_move(dev, slave, RES_CQ, cqn);
2521         return 0;
2522
2523 out_put:
2524         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2525 out_move:
2526         res_abort_move(dev, slave, RES_CQ, cqn);
2527         return err;
2528 }
2529
2530 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2531                           struct mlx4_vhcr *vhcr,
2532                           struct mlx4_cmd_mailbox *inbox,
2533                           struct mlx4_cmd_mailbox *outbox,
2534                           struct mlx4_cmd_info *cmd)
2535 {
2536         int err;
2537         int cqn = vhcr->in_modifier;
2538         struct res_cq *cq;
2539
2540         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2541         if (err)
2542                 return err;
2543         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2544         if (err)
2545                 goto out_move;
2546         atomic_dec(&cq->mtt->ref_count);
2547         res_end_move(dev, slave, RES_CQ, cqn);
2548         return 0;
2549
2550 out_move:
2551         res_abort_move(dev, slave, RES_CQ, cqn);
2552         return err;
2553 }
2554
2555 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2556                           struct mlx4_vhcr *vhcr,
2557                           struct mlx4_cmd_mailbox *inbox,
2558                           struct mlx4_cmd_mailbox *outbox,
2559                           struct mlx4_cmd_info *cmd)
2560 {
2561         int cqn = vhcr->in_modifier;
2562         struct res_cq *cq;
2563         int err;
2564
2565         err = get_res(dev, slave, cqn, RES_CQ, &cq);
2566         if (err)
2567                 return err;
2568
2569         if (cq->com.from_state != RES_CQ_HW)
2570                 goto ex_put;
2571
2572         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2573 ex_put:
2574         put_res(dev, slave, cqn, RES_CQ);
2575
2576         return err;
2577 }
2578
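/*
 * MODIFY_CQ with op_modifier 0 is a resize: swap the CQ's MTT
 * reference from the original range to the one described in the
 * mailbox, keeping the reference counts balanced.
 */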
2579 static int handle_resize(struct mlx4_dev *dev, int slave,
2580                          struct mlx4_vhcr *vhcr,
2581                          struct mlx4_cmd_mailbox *inbox,
2582                          struct mlx4_cmd_mailbox *outbox,
2583                          struct mlx4_cmd_info *cmd,
2584                          struct res_cq *cq)
2585 {
2586         int err;
2587         struct res_mtt *orig_mtt;
2588         struct res_mtt *mtt;
2589         struct mlx4_cq_context *cqc = inbox->buf;
2590         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2591
2592         err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2593         if (err)
2594                 return err;
2595
2596         if (orig_mtt != cq->mtt) {
2597                 err = -EINVAL;
2598                 goto ex_put;
2599         }
2600
2601         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2602         if (err)
2603                 goto ex_put;
2604
2605         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2606         if (err)
2607                 goto ex_put1;
2608         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2609         if (err)
2610                 goto ex_put1;
2611         atomic_dec(&orig_mtt->ref_count);
2612         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2613         atomic_inc(&mtt->ref_count);
2614         cq->mtt = mtt;
2615         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2616         return 0;
2617
2618 ex_put1:
2619         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2620 ex_put:
2621         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2622
2623         return err;
2625 }
2626
2627 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2628                            struct mlx4_vhcr *vhcr,
2629                            struct mlx4_cmd_mailbox *inbox,
2630                            struct mlx4_cmd_mailbox *outbox,
2631                            struct mlx4_cmd_info *cmd)
2632 {
2633         int cqn = vhcr->in_modifier;
2634         struct res_cq *cq;
2635         int err;
2636
2637         err = get_res(dev, slave, cqn, RES_CQ, &cq);
2638         if (err)
2639                 return err;
2640
2641         if (cq->com.from_state != RES_CQ_HW)
2642                 goto ex_put;
2643
2644         if (vhcr->op_modifier == 0) {
2645                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2646                 goto ex_put;
2647         }
2648
2649         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2650 ex_put:
2651         put_res(dev, slave, cqn, RES_CQ);
2652
2653         return err;
2654 }
2655
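/* SRQ WQEs are 2^(log_rq_stride + 4) bytes each. */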
2656 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2657 {
2658         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2659         int log_rq_stride = srqc->logstride & 7;
2660         int page_shift = (srqc->log_page_size & 0x3f) + 12;
2661
2662         if (log_srq_size + log_rq_stride + 4 < page_shift)
2663                 return 1;
2664
2665         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2666 }
2667
2668 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2669                            struct mlx4_vhcr *vhcr,
2670                            struct mlx4_cmd_mailbox *inbox,
2671                            struct mlx4_cmd_mailbox *outbox,
2672                            struct mlx4_cmd_info *cmd)
2673 {
2674         int err;
2675         int srqn = vhcr->in_modifier;
2676         struct res_mtt *mtt;
2677         struct res_srq *srq;
2678         struct mlx4_srq_context *srqc = inbox->buf;
2679         int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2680
2681         if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2682                 return -EINVAL;
2683
2684         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2685         if (err)
2686                 return err;
2687         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2688         if (err)
2689                 goto ex_abort;
2690         err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2691                               mtt);
2692         if (err)
2693                 goto ex_put_mtt;
2694
2695         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2696         if (err)
2697                 goto ex_put_mtt;
2698
2699         atomic_inc(&mtt->ref_count);
2700         srq->mtt = mtt;
2701         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2702         res_end_move(dev, slave, RES_SRQ, srqn);
2703         return 0;
2704
2705 ex_put_mtt:
2706         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2707 ex_abort:
2708         res_abort_move(dev, slave, RES_SRQ, srqn);
2709
2710         return err;
2711 }
2712
2713 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2714                            struct mlx4_vhcr *vhcr,
2715                            struct mlx4_cmd_mailbox *inbox,
2716                            struct mlx4_cmd_mailbox *outbox,
2717                            struct mlx4_cmd_info *cmd)
2718 {
2719         int err;
2720         int srqn = vhcr->in_modifier;
2721         struct res_srq *srq;
2722
2723         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2724         if (err)
2725                 return err;
2726         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2727         if (err)
2728                 goto ex_abort;
2729         atomic_dec(&srq->mtt->ref_count);
2730         if (srq->cq)
2731                 atomic_dec(&srq->cq->ref_count);
2732         res_end_move(dev, slave, RES_SRQ, srqn);
2733
2734         return 0;
2735
2736 ex_abort:
2737         res_abort_move(dev, slave, RES_SRQ, srqn);
2738
2739         return err;
2740 }
2741
2742 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2743                            struct mlx4_vhcr *vhcr,
2744                            struct mlx4_cmd_mailbox *inbox,
2745                            struct mlx4_cmd_mailbox *outbox,
2746                            struct mlx4_cmd_info *cmd)
2747 {
2748         int err;
2749         int srqn = vhcr->in_modifier;
2750         struct res_srq *srq;
2751
2752         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2753         if (err)
2754                 return err;
2755         if (srq->com.from_state != RES_SRQ_HW) {
2756                 err = -EBUSY;
2757                 goto out;
2758         }
2759         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2760 out:
2761         put_res(dev, slave, srqn, RES_SRQ);
2762         return err;
2763 }
2764
2765 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2766                          struct mlx4_vhcr *vhcr,
2767                          struct mlx4_cmd_mailbox *inbox,
2768                          struct mlx4_cmd_mailbox *outbox,
2769                          struct mlx4_cmd_info *cmd)
2770 {
2771         int err;
2772         int srqn = vhcr->in_modifier;
2773         struct res_srq *srq;
2774
2775         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2776         if (err)
2777                 return err;
2778
2779         if (srq->com.from_state != RES_SRQ_HW) {
2780                 err = -EBUSY;
2781                 goto out;
2782         }
2783
2784         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2785 out:
2786         put_res(dev, slave, srqn, RES_SRQ);
2787         return err;
2788 }
2789
2790 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2791                         struct mlx4_vhcr *vhcr,
2792                         struct mlx4_cmd_mailbox *inbox,
2793                         struct mlx4_cmd_mailbox *outbox,
2794                         struct mlx4_cmd_info *cmd)
2795 {
2796         int err;
2797         int qpn = vhcr->in_modifier & 0x7fffff;
2798         struct res_qp *qp;
2799
2800         err = get_res(dev, slave, qpn, RES_QP, &qp);
2801         if (err)
2802                 return err;
2803         if (qp->com.from_state != RES_QP_HW) {
2804                 err = -EBUSY;
2805                 goto out;
2806         }
2807
2808         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2809 out:
2810         put_res(dev, slave, qpn, RES_QP);
2811         return err;
2812 }
2813
2814 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2815                               struct mlx4_vhcr *vhcr,
2816                               struct mlx4_cmd_mailbox *inbox,
2817                               struct mlx4_cmd_mailbox *outbox,
2818                               struct mlx4_cmd_info *cmd)
2819 {
2820         struct mlx4_qp_context *context = inbox->buf + 8;
2821         adjust_proxy_tun_qkey(dev, vhcr, context);
2822         update_pkey_index(dev, slave, inbox);
2823         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2824 }
2825
2826 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2827                              struct mlx4_vhcr *vhcr,
2828                              struct mlx4_cmd_mailbox *inbox,
2829                              struct mlx4_cmd_mailbox *outbox,
2830                              struct mlx4_cmd_info *cmd)
2831 {
2832         int err;
2833         struct mlx4_qp_context *qpc = inbox->buf + 8;
2834
2835         err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
2836         if (err)
2837                 return err;
2838
2839         update_pkey_index(dev, slave, inbox);
2840         update_gid(dev, inbox, (u8)slave);
2841         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2842         err = update_vport_qp_param(dev, inbox, slave);
2843         if (err)
2844                 return err;
2845
2846         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2847 }
2848
2849 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2850                             struct mlx4_vhcr *vhcr,
2851                             struct mlx4_cmd_mailbox *inbox,
2852                             struct mlx4_cmd_mailbox *outbox,
2853                             struct mlx4_cmd_info *cmd)
2854 {
2855         int err;
2856         struct mlx4_qp_context *context = inbox->buf + 8;
2857
2858         err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
2859         if (err)
2860                 return err;
2861
2862         update_pkey_index(dev, slave, inbox);
2863         update_gid(dev, inbox, (u8)slave);
2864         adjust_proxy_tun_qkey(dev, vhcr, context);
2865         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2866 }
2867
2868 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2869                             struct mlx4_vhcr *vhcr,
2870                             struct mlx4_cmd_mailbox *inbox,
2871                             struct mlx4_cmd_mailbox *outbox,
2872                             struct mlx4_cmd_info *cmd)
2873 {
2874         int err;
2875         struct mlx4_qp_context *context = inbox->buf + 8;
2876
2877         err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
2878         if (err)
2879                 return err;
2880
2881         update_pkey_index(dev, slave, inbox);
2882         update_gid(dev, inbox, (u8)slave);
2883         adjust_proxy_tun_qkey(dev, vhcr, context);
2884         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2885 }
2886
2888 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2889                               struct mlx4_vhcr *vhcr,
2890                               struct mlx4_cmd_mailbox *inbox,
2891                               struct mlx4_cmd_mailbox *outbox,
2892                               struct mlx4_cmd_info *cmd)
2893 {
2894         struct mlx4_qp_context *context = inbox->buf + 8;
2895         adjust_proxy_tun_qkey(dev, vhcr, context);
2896         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2897 }
2898
2899 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
2900                             struct mlx4_vhcr *vhcr,
2901                             struct mlx4_cmd_mailbox *inbox,
2902                             struct mlx4_cmd_mailbox *outbox,
2903                             struct mlx4_cmd_info *cmd)
2904 {
2905         int err;
2906         struct mlx4_qp_context *context = inbox->buf + 8;
2907
2908         err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
2909         if (err)
2910                 return err;
2911
2912         adjust_proxy_tun_qkey(dev, vhcr, context);
2913         update_gid(dev, inbox, (u8)slave);
2914         update_pkey_index(dev, slave, inbox);
2915         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2916 }
2917
2918 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2919                             struct mlx4_vhcr *vhcr,
2920                             struct mlx4_cmd_mailbox *inbox,
2921                             struct mlx4_cmd_mailbox *outbox,
2922                             struct mlx4_cmd_info *cmd)
2923 {
2924         int err;
2925         struct mlx4_qp_context *context = inbox->buf + 8;
2926
2927         err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
2928         if (err)
2929                 return err;
2930
2931         adjust_proxy_tun_qkey(dev, vhcr, context);
2932         update_gid(dev, inbox, (u8)slave);
2933         update_pkey_index(dev, slave, inbox);
2934         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2935 }
2936
2937 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2938                          struct mlx4_vhcr *vhcr,
2939                          struct mlx4_cmd_mailbox *inbox,
2940                          struct mlx4_cmd_mailbox *outbox,
2941                          struct mlx4_cmd_info *cmd)
2942 {
2943         int err;
2944         int qpn = vhcr->in_modifier & 0x7fffff;
2945         struct res_qp *qp;
2946
2947         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2948         if (err)
2949                 return err;
2950         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2951         if (err)
2952                 goto ex_abort;
2953
2954         atomic_dec(&qp->mtt->ref_count);
2955         atomic_dec(&qp->rcq->ref_count);
2956         atomic_dec(&qp->scq->ref_count);
2957         if (qp->srq)
2958                 atomic_dec(&qp->srq->ref_count);
2959         res_end_move(dev, slave, RES_QP, qpn);
2960         return 0;
2961
2962 ex_abort:
2963         res_abort_move(dev, slave, RES_QP, qpn);
2964
2965         return err;
2966 }
2967
2968 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2969                                 struct res_qp *rqp, u8 *gid)
2970 {
2971         struct res_gid *res;
2972
2973         list_for_each_entry(res, &rqp->mcg_list, list) {
2974                 if (!memcmp(res->gid, gid, 16))
2975                         return res;
2976         }
2977         return NULL;
2978 }
2979
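/*
 * Record a steering attachment on the QP so it can be found and
 * detached later; duplicate GIDs are rejected with -EEXIST.
 */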
2980 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2981                        u8 *gid, enum mlx4_protocol prot,
2982                        enum mlx4_steer_type steer, u64 reg_id)
2983 {
2984         struct res_gid *res;
2985         int err;
2986
2987         res = kzalloc(sizeof *res, GFP_KERNEL);
2988         if (!res)
2989                 return -ENOMEM;
2990
2991         spin_lock_irq(&rqp->mcg_spl);
2992         if (find_gid(dev, slave, rqp, gid)) {
2993                 kfree(res);
2994                 err = -EEXIST;
2995         } else {
2996                 memcpy(res->gid, gid, 16);
2997                 res->prot = prot;
2998                 res->steer = steer;
2999                 res->reg_id = reg_id;
3000                 list_add_tail(&res->list, &rqp->mcg_list);
3001                 err = 0;
3002         }
3003         spin_unlock_irq(&rqp->mcg_spl);
3004
3005         return err;
3006 }
3007
3008 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3009                        u8 *gid, enum mlx4_protocol prot,
3010                        enum mlx4_steer_type steer, u64 *reg_id)
3011 {
3012         struct res_gid *res;
3013         int err;
3014
3015         spin_lock_irq(&rqp->mcg_spl);
3016         res = find_gid(dev, slave, rqp, gid);
3017         if (!res || res->prot != prot || res->steer != steer)
3018                 err = -EINVAL;
3019         else {
3020                 *reg_id = res->reg_id;
3021                 list_del(&res->list);
3022                 kfree(res);
3023                 err = 0;
3024         }
3025         spin_unlock_irq(&rqp->mcg_spl);
3026
3027         return err;
3028 }
3029
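/*
 * Attach/detach helpers that pick the flavour matching the device's
 * steering mode: device-managed flow steering (reg_id based) or B0.
 */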
3030 static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3031                      int block_loopback, enum mlx4_protocol prot,
3032                      enum mlx4_steer_type type, u64 *reg_id)
3033 {
3034         switch (dev->caps.steering_mode) {
3035         case MLX4_STEERING_MODE_DEVICE_MANAGED:
3036                 return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
3037                                                 block_loopback, prot,
3038                                                 reg_id);
3039         case MLX4_STEERING_MODE_B0:
3040                 return mlx4_qp_attach_common(dev, qp, gid,
3041                                             block_loopback, prot, type);
3042         default:
3043                 return -EINVAL;
3044         }
3045 }
3046
3047 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3048                      enum mlx4_protocol prot, enum mlx4_steer_type type,
3049                      u64 reg_id)
3050 {
3051         switch (dev->caps.steering_mode) {
3052         case MLX4_STEERING_MODE_DEVICE_MANAGED:
3053                 return mlx4_flow_detach(dev, reg_id);
3054         case MLX4_STEERING_MODE_B0:
3055                 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3056         default:
3057                 return -EINVAL;
3058         }
3059 }
3060
3061 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3062                                struct mlx4_vhcr *vhcr,
3063                                struct mlx4_cmd_mailbox *inbox,
3064                                struct mlx4_cmd_mailbox *outbox,
3065                                struct mlx4_cmd_info *cmd)
3066 {
3067         struct mlx4_qp qp; /* dummy for calling attach/detach */
3068         u8 *gid = inbox->buf;
3069         enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3070         int err;
3071         int qpn;
3072         struct res_qp *rqp;
3073         u64 reg_id = 0;
3074         int attach = vhcr->op_modifier;
3075         int block_loopback = vhcr->in_modifier >> 31;
3076         u8 steer_type_mask = 2;
3077         enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3078
3079         qpn = vhcr->in_modifier & 0xffffff;
3080         err = get_res(dev, slave, qpn, RES_QP, &rqp);
3081         if (err)
3082                 return err;
3083
3084         qp.qpn = qpn;
3085         if (attach) {
3086                 err = qp_attach(dev, &qp, gid, block_loopback, prot,
3087                                 type, &reg_id);
3088                 if (err) {
3089                         pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3090                         goto ex_put;
3091                 }
3092                 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3093                 if (err)
3094                         goto ex_detach;
3095         } else {
3096                 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3097                 if (err)
3098                         goto ex_put;
3099
3100                 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3101                 if (err)
3102                         pr_err("Failed to detach rule from qp 0x%x, reg_id = 0x%llx\n",
3103                                qpn, reg_id);
3104         }
3105         put_res(dev, slave, qpn, RES_QP);
3106         return err;
3107
3108 ex_detach:
3109         qp_detach(dev, &qp, gid, prot, type, reg_id);
3110 ex_put:
3111         put_res(dev, slave, qpn, RES_QP);
3112         return err;
3113 }
3114
3115 /*
3116  * MAC validation for Flow Steering rules.
3117  * VF can attach rules only with a mac address which is assigned to it.
3118  */
3119 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3120                                    struct list_head *rlist)
3121 {
3122         struct mac_res *res, *tmp;
3123         __be64 be_mac;
3124
3125         /* make sure it isn't a multicast or broadcast MAC */
3126         if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3127             !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3128                 list_for_each_entry_safe(res, tmp, rlist, list) {
3129                         be_mac = cpu_to_be64(res->mac << 16);
3130                         if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3131                                 return 0;
3132                 }
3133                 pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
3134                        eth_header->eth.dst_mac, slave);
3135                 return -EINVAL;
3136         }
3137         return 0;
3138 }
3139
3140 /*
3141  * In case of a missing eth header, prepend one with a MAC address
3142  * assigned to the VF.
3143  */
3144 static int add_eth_header(struct mlx4_dev *dev, int slave,
3145                           struct mlx4_cmd_mailbox *inbox,
3146                           struct list_head *rlist, int header_id)
3147 {
3148         struct mac_res *res, *tmp;
3149         u8 port;
3150         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3151         struct mlx4_net_trans_rule_hw_eth *eth_header;
3152         struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3153         struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3154         __be64 be_mac = 0;
3155         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3156
3157         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3158         port = ctrl->port;
3159         eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3160
3161         /* Make room in the inbox for the eth header */
3162         switch (header_id) {
3163         case MLX4_NET_TRANS_RULE_ID_IPV4:
3164                 ip_header =
3165                         (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3166                 memmove(ip_header, eth_header,
3167                         sizeof(*ip_header) + sizeof(*l4_header));
3168                 break;
3169         case MLX4_NET_TRANS_RULE_ID_TCP:
3170         case MLX4_NET_TRANS_RULE_ID_UDP:
3171                 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3172                             (eth_header + 1);
3173                 memmove(l4_header, eth_header, sizeof(*l4_header));
3174                 break;
3175         default:
3176                 return -EINVAL;
3177         }
3178         list_for_each_entry_safe(res, tmp, rlist, list) {
3179                 if (port == res->port) {
3180                         be_mac = cpu_to_be64(res->mac << 16);
3181                         break;
3182                 }
3183         }
3184         if (!be_mac) {
3185                 pr_err("Failed adding eth header to FS rule: can't find matching MAC for port %d\n",
3186                        port);
3187                 return -EINVAL;
3188         }
3189
3190         memset(eth_header, 0, sizeof(*eth_header));
3191         eth_header->size = sizeof(*eth_header) >> 2;
3192         eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3193         memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3194         memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3195
3196         return 0;
3198 }
3199
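/*
 * Validate and forward a device-managed flow steering attach: Ethernet
 * rules must carry a MAC owned by the VF, and L3/L4-only rules get an
 * Ethernet header prepended (see add_eth_header()). On success the
 * rule is registered in the tracker and the QP's ref count is bumped.
 */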
3200 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3201                                          struct mlx4_vhcr *vhcr,
3202                                          struct mlx4_cmd_mailbox *inbox,
3203                                          struct mlx4_cmd_mailbox *outbox,
3204                                          struct mlx4_cmd_info *cmd)
3205 {
3207         struct mlx4_priv *priv = mlx4_priv(dev);
3208         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3209         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3210         int err;
3211         int qpn;
3212         struct res_qp *rqp;
3213         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3214         struct _rule_hw  *rule_header;
3215         int header_id;
3216
3217         if (dev->caps.steering_mode !=
3218             MLX4_STEERING_MODE_DEVICE_MANAGED)
3219                 return -EOPNOTSUPP;
3220
3221         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3222         qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3223         err = get_res(dev, slave, qpn, RES_QP, &rqp);
3224         if (err) {
3225                 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3226                 return err;
3227         }
3228         rule_header = (struct _rule_hw *)(ctrl + 1);
3229         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3230
3231         switch (header_id) {
3232         case MLX4_NET_TRANS_RULE_ID_ETH:
3233                 if (validate_eth_header_mac(slave, rule_header, rlist)) {
3234                         err = -EINVAL;
3235                         goto err_put;
3236                 }
3237                 break;
3238         case MLX4_NET_TRANS_RULE_ID_IB:
3239                 break;
3240         case MLX4_NET_TRANS_RULE_ID_IPV4:
3241         case MLX4_NET_TRANS_RULE_ID_TCP:
3242         case MLX4_NET_TRANS_RULE_ID_UDP:
3243                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3244                 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3245                         err = -EINVAL;
3246                         goto err_put;
3247                 }
3248                 vhcr->in_modifier +=
3249                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3250                 break;
3251         default:
3252                 pr_err("Corrupted mailbox.\n");
3253                 err = -EINVAL;
3254                 goto err_put;
3255         }
3256
3257         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3258                            vhcr->in_modifier, 0,
3259                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3260                            MLX4_CMD_NATIVE);
3261         if (err)
3262                 goto err_put;
3263
3264         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3265         if (err) {
3266                 mlx4_err(dev, "Failed to add flow steering resources\n");
3267                 /* detach rule */
3268                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3269                          MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3270                          MLX4_CMD_NATIVE);
3271                 goto err_put;
3272         }
3273         atomic_inc(&rqp->ref_count);
3274 err_put:
3275         put_res(dev, slave, qpn, RES_QP);
3276         return err;
3277 }
3278
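/*
 * Detach a flow steering rule on behalf of a slave: verify both the rule
 * and its QP are tracked for this slave, remove the rule from the
 * tracker, issue the detach command and drop the QP reference on success.
 */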
3279 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3280                                          struct mlx4_vhcr *vhcr,
3281                                          struct mlx4_cmd_mailbox *inbox,
3282                                          struct mlx4_cmd_mailbox *outbox,
3283                                          struct mlx4_cmd_info *cmd)
3284 {
3285         int err;
3286         struct res_qp *rqp;
3287         struct res_fs_rule *rrule;
3288
3289         if (dev->caps.steering_mode !=
3290             MLX4_STEERING_MODE_DEVICE_MANAGED)
3291                 return -EOPNOTSUPP;
3292
3293         err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3294         if (err)
3295                 return err;
3296         /* Release the rule from busy state before removal */
3297         put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3298         err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3299         if (err)
3300                 return err;
3301
3302         err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3303         if (err) {
3304                 mlx4_err(dev, "Failed to remove flow steering resources\n");
3305                 goto out;
3306         }
3307
3308         err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3309                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3310                        MLX4_CMD_NATIVE);
3311         if (!err)
3312                 atomic_dec(&rqp->ref_count);
3313 out:
3314         put_res(dev, slave, rrule->qpn, RES_QP);
3315         return err;
3316 }
3317
3318 enum {
3319         BUSY_MAX_RETRIES = 10
3320 };
3321
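/*
 * Wrap QUERY_IF_STAT so the counter stays marked busy in the tracker
 * while the query is DMA'd on behalf of the slave.
 */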
3322 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3323                                struct mlx4_vhcr *vhcr,
3324                                struct mlx4_cmd_mailbox *inbox,
3325                                struct mlx4_cmd_mailbox *outbox,
3326                                struct mlx4_cmd_info *cmd)
3327 {
3328         int err;
3329         int index = vhcr->in_modifier & 0xffff;
3330
3331         err = get_res(dev, slave, index, RES_COUNTER, NULL);
3332         if (err)
3333                 return err;
3334
3335         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3336         put_res(dev, slave, index, RES_COUNTER);
3337         return err;
3338 }
3339
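/*
 * Detach every multicast group still attached to the QP, using the
 * primitive that matches the device's steering mode, and free the per-QP
 * gid list entries.
 */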
3340 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3341 {
3342         struct res_gid *rgid;
3343         struct res_gid *tmp;
3344         struct mlx4_qp qp; /* dummy for calling attach/detach */
3345
3346         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3347                 switch (dev->caps.steering_mode) {
3348                 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3349                         mlx4_flow_detach(dev, rgid->reg_id);
3350                         break;
3351                 case MLX4_STEERING_MODE_B0:
3352                         qp.qpn = rqp->local_qpn;
3353                         (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3354                                                      rgid->prot, rgid->steer);
3355                         break;
3356                 }
3357                 list_del(&rgid->list);
3358                 kfree(rgid);
3359         }
3360 }
3361
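/*
 * Single pass over the slave's resources of @type: entries not already
 * busy are moved to RES_ANY_BUSY and flagged for removal; the return
 * value counts entries that were busy elsewhere and could not be claimed.
 */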
3362 static int _move_all_busy(struct mlx4_dev *dev, int slave,
3363                           enum mlx4_resource type, int print)
3364 {
3365         struct mlx4_priv *priv = mlx4_priv(dev);
3366         struct mlx4_resource_tracker *tracker =
3367                 &priv->mfunc.master.res_tracker;
3368         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3369         struct res_common *r;
3370         struct res_common *tmp;
3371         int busy;
3372
3373         busy = 0;
3374         spin_lock_irq(mlx4_tlock(dev));
3375         list_for_each_entry_safe(r, tmp, rlist, list) {
3376                 if (r->owner == slave) {
3377                         if (!r->removing) {
3378                                 if (r->state == RES_ANY_BUSY) {
3379                                         if (print)
3380                                                 mlx4_dbg(dev,
3381                                                          "%s id 0x%llx is busy\n",
3382                                                           ResourceType(type),
3383                                                           r->res_id);
3384                                         ++busy;
3385                                 } else {
3386                                         r->from_state = r->state;
3387                                         r->state = RES_ANY_BUSY;
3388                                         r->removing = 1;
3389                                 }
3390                         }
3391                 }
3392         }
3393         spin_unlock_irq(mlx4_tlock(dev));
3394
3395         return busy;
3396 }
3397
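/*
 * Keep claiming the slave's resources of @type, rescheduling between
 * attempts, for up to five seconds; a final pass logs whatever is still
 * busy.
 */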
3398 static int move_all_busy(struct mlx4_dev *dev, int slave,
3399                          enum mlx4_resource type)
3400 {
3401         unsigned long begin;
3402         int busy;
3403
3404         begin = jiffies;
3405         do {
3406                 busy = _move_all_busy(dev, slave, type, 0);
3407                 if (time_after(jiffies, begin + 5 * HZ))
3408                         break;
3409                 if (busy)
3410                         cond_resched();
3411         } while (busy);
3412
3413         if (busy)
3414                 busy = _move_all_busy(dev, slave, type, 1);
3415
3416         return busy;
3417 }
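
/*
 * The rem_slave_*() helpers below share one pattern: claim the slave's
 * resources of a given type, then walk its list and unwind each entry
 * from its last known state (e.g. HW owned -> mapped -> reserved),
 * issuing the matching firmware commands and dropping cross references
 * before the tracker entry is erased and freed.
 */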
3418 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3419 {
3420         struct mlx4_priv *priv = mlx4_priv(dev);
3421         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3422         struct list_head *qp_list =
3423                 &tracker->slave_list[slave].res_list[RES_QP];
3424         struct res_qp *qp;
3425         struct res_qp *tmp;
3426         int state;
3427         u64 in_param;
3428         int qpn;
3429         int err;
3430
3431         err = move_all_busy(dev, slave, RES_QP);
3432         if (err)
3433                 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
3434                           "for slave %d\n", slave);
3435
3436         spin_lock_irq(mlx4_tlock(dev));
3437         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3438                 spin_unlock_irq(mlx4_tlock(dev));
3439                 if (qp->com.owner == slave) {
3440                         qpn = qp->com.res_id;
3441                         detach_qp(dev, slave, qp);
3442                         state = qp->com.from_state;
3443                         while (state != 0) {
3444                                 switch (state) {
3445                                 case RES_QP_RESERVED:
3446                                         spin_lock_irq(mlx4_tlock(dev));
3447                                         rb_erase(&qp->com.node,
3448                                                  &tracker->res_tree[RES_QP]);
3449                                         list_del(&qp->com.list);
3450                                         spin_unlock_irq(mlx4_tlock(dev));
3451                                         kfree(qp);
3452                                         state = 0;
3453                                         break;
3454                                 case RES_QP_MAPPED:
3455                                         if (!valid_reserved(dev, slave, qpn))
3456                                                 __mlx4_qp_free_icm(dev, qpn);
3457                                         state = RES_QP_RESERVED;
3458                                         break;
3459                                 case RES_QP_HW:
3460                                         in_param = slave;
3461                                         err = mlx4_cmd(dev, in_param,
3462                                                        qp->local_qpn, 2,
3463                                                        MLX4_CMD_2RST_QP,
3464                                                        MLX4_CMD_TIME_CLASS_A,
3465                                                        MLX4_CMD_NATIVE);
3466                                         if (err)
3467                                                 mlx4_dbg(dev, "rem_slave_qps: failed"
3468                                                          " to move slave %d qpn %d to"
3469                                                          " reset\n", slave,
3470                                                          qp->local_qpn);
3471                                         atomic_dec(&qp->rcq->ref_count);
3472                                         atomic_dec(&qp->scq->ref_count);
3473                                         atomic_dec(&qp->mtt->ref_count);
3474                                         if (qp->srq)
3475                                                 atomic_dec(&qp->srq->ref_count);
3476                                         state = RES_QP_MAPPED;
3477                                         break;
3478                                 default:
3479                                         state = 0;
3480                                 }
3481                         }
3482                 }
3483                 spin_lock_irq(mlx4_tlock(dev));
3484         }
3485         spin_unlock_irq(mlx4_tlock(dev));
3486 }
3487
3488 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3489 {
3490         struct mlx4_priv *priv = mlx4_priv(dev);
3491         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3492         struct list_head *srq_list =
3493                 &tracker->slave_list[slave].res_list[RES_SRQ];
3494         struct res_srq *srq;
3495         struct res_srq *tmp;
3496         int state;
3497         u64 in_param;
3498         LIST_HEAD(tlist);
3499         int srqn;
3500         int err;
3501
3502         err = move_all_busy(dev, slave, RES_SRQ);
3503         if (err)
3504                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3505                           "busy for slave %d\n", slave);
3506
3507         spin_lock_irq(mlx4_tlock(dev));
3508         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3509                 spin_unlock_irq(mlx4_tlock(dev));
3510                 if (srq->com.owner == slave) {
3511                         srqn = srq->com.res_id;
3512                         state = srq->com.from_state;
3513                         while (state != 0) {
3514                                 switch (state) {
3515                                 case RES_SRQ_ALLOCATED:
3516                                         __mlx4_srq_free_icm(dev, srqn);
3517                                         spin_lock_irq(mlx4_tlock(dev));
3518                                         rb_erase(&srq->com.node,
3519                                                  &tracker->res_tree[RES_SRQ]);
3520                                         list_del(&srq->com.list);
3521                                         spin_unlock_irq(mlx4_tlock(dev));
3522                                         kfree(srq);
3523                                         state = 0;
3524                                         break;
3525
3526                                 case RES_SRQ_HW:
3527                                         in_param = slave;
3528                                         err = mlx4_cmd(dev, in_param, srqn, 1,
3529                                                        MLX4_CMD_HW2SW_SRQ,
3530                                                        MLX4_CMD_TIME_CLASS_A,
3531                                                        MLX4_CMD_NATIVE);
3532                                         if (err)
3533                                                 mlx4_dbg(dev, "rem_slave_srqs: failed"
3534                                                          " to move slave %d srq %d to"
3535                                                          " SW ownership\n",
3536                                                          slave, srqn);
3537
3538                                         atomic_dec(&srq->mtt->ref_count);
3539                                         if (srq->cq)
3540                                                 atomic_dec(&srq->cq->ref_count);
3541                                         state = RES_SRQ_ALLOCATED;
3542                                         break;
3543
3544                                 default:
3545                                         state = 0;
3546                                 }
3547                         }
3548                 }
3549                 spin_lock_irq(mlx4_tlock(dev));
3550         }
3551         spin_unlock_irq(mlx4_tlock(dev));
3552 }
3553
3554 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3555 {
3556         struct mlx4_priv *priv = mlx4_priv(dev);
3557         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3558         struct list_head *cq_list =
3559                 &tracker->slave_list[slave].res_list[RES_CQ];
3560         struct res_cq *cq;
3561         struct res_cq *tmp;
3562         int state;
3563         u64 in_param;
3564         LIST_HEAD(tlist);
3565         int cqn;
3566         int err;
3567
3568         err = move_all_busy(dev, slave, RES_CQ);
3569         if (err)
3570                 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
3571                           "busy for slave %d\n", slave);
3572
3573         spin_lock_irq(mlx4_tlock(dev));
3574         list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3575                 spin_unlock_irq(mlx4_tlock(dev));
3576                 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3577                         cqn = cq->com.res_id;
3578                         state = cq->com.from_state;
3579                         while (state != 0) {
3580                                 switch (state) {
3581                                 case RES_CQ_ALLOCATED:
3582                                         __mlx4_cq_free_icm(dev, cqn);
3583                                         spin_lock_irq(mlx4_tlock(dev));
3584                                         rb_erase(&cq->com.node,
3585                                                  &tracker->res_tree[RES_CQ]);
3586                                         list_del(&cq->com.list);
3587                                         spin_unlock_irq(mlx4_tlock(dev));
3588                                         kfree(cq);
3589                                         state = 0;
3590                                         break;
3591
3592                                 case RES_CQ_HW:
3593                                         in_param = slave;
3594                                         err = mlx4_cmd(dev, in_param, cqn, 1,
3595                                                        MLX4_CMD_HW2SW_CQ,
3596                                                        MLX4_CMD_TIME_CLASS_A,
3597                                                        MLX4_CMD_NATIVE);
3598                                         if (err)
3599                                                 mlx4_dbg(dev, "rem_slave_cqs: failed"
3600                                                          " to move slave %d cq %d to"
3601                                                          " SW ownership\n",
3602                                                          slave, cqn);
3603                                         atomic_dec(&cq->mtt->ref_count);
3604                                         state = RES_CQ_ALLOCATED;
3605                                         break;
3606
3607                                 default:
3608                                         state = 0;
3609                                 }
3610                         }
3611                 }
3612                 spin_lock_irq(mlx4_tlock(dev));
3613         }
3614         spin_unlock_irq(mlx4_tlock(dev));
3615 }
3616
3617 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3618 {
3619         struct mlx4_priv *priv = mlx4_priv(dev);
3620         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3621         struct list_head *mpt_list =
3622                 &tracker->slave_list[slave].res_list[RES_MPT];
3623         struct res_mpt *mpt;
3624         struct res_mpt *tmp;
3625         int state;
3626         u64 in_param;
3627         LIST_HEAD(tlist);
3628         int mptn;
3629         int err;
3630
3631         err = move_all_busy(dev, slave, RES_MPT);
3632         if (err)
3633                 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
3634                           "busy for slave %d\n", slave);
3635
3636         spin_lock_irq(mlx4_tlock(dev));
3637         list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3638                 spin_unlock_irq(mlx4_tlock(dev));
3639                 if (mpt->com.owner == slave) {
3640                         mptn = mpt->com.res_id;
3641                         state = mpt->com.from_state;
3642                         while (state != 0) {
3643                                 switch (state) {
3644                                 case RES_MPT_RESERVED:
3645                                         __mlx4_mpt_release(dev, mpt->key);
3646                                         spin_lock_irq(mlx4_tlock(dev));
3647                                         rb_erase(&mpt->com.node,
3648                                                  &tracker->res_tree[RES_MPT]);
3649                                         list_del(&mpt->com.list);
3650                                         spin_unlock_irq(mlx4_tlock(dev));
3651                                         kfree(mpt);
3652                                         state = 0;
3653                                         break;
3654
3655                                 case RES_MPT_MAPPED:
3656                                         __mlx4_mpt_free_icm(dev, mpt->key);
3657                                         state = RES_MPT_RESERVED;
3658                                         break;
3659
3660                                 case RES_MPT_HW:
3661                                         in_param = slave;
3662                                         err = mlx4_cmd(dev, in_param, mptn, 0,
3663                                                      MLX4_CMD_HW2SW_MPT,
3664                                                      MLX4_CMD_TIME_CLASS_A,
3665                                                      MLX4_CMD_NATIVE);
3666                                         if (err)
3667                                                 mlx4_dbg(dev, "rem_slave_mrs: failed"
3668                                                          " to move slave %d mpt %d to"
3669                                                          " SW ownership\n",
3670                                                          slave, mptn);
3671                                         if (mpt->mtt)
3672                                                 atomic_dec(&mpt->mtt->ref_count);
3673                                         state = RES_MPT_MAPPED;
3674                                         break;
3675                                 default:
3676                                         state = 0;
3677                                 }
3678                         }
3679                 }
3680                 spin_lock_irq(mlx4_tlock(dev));
3681         }
3682         spin_unlock_irq(mlx4_tlock(dev));
3683 }
3684
3685 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3686 {
3687         struct mlx4_priv *priv = mlx4_priv(dev);
3688         struct mlx4_resource_tracker *tracker =
3689                 &priv->mfunc.master.res_tracker;
3690         struct list_head *mtt_list =
3691                 &tracker->slave_list[slave].res_list[RES_MTT];
3692         struct res_mtt *mtt;
3693         struct res_mtt *tmp;
3694         int state;
3695         LIST_HEAD(tlist);
3696         int base;
3697         int err;
3698
3699         err = move_all_busy(dev, slave, RES_MTT);
3700         if (err)
3701                 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
3702                           "busy for slave %d\n", slave);
3703
3704         spin_lock_irq(mlx4_tlock(dev));
3705         list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3706                 spin_unlock_irq(mlx4_tlock(dev));
3707                 if (mtt->com.owner == slave) {
3708                         base = mtt->com.res_id;
3709                         state = mtt->com.from_state;
3710                         while (state != 0) {
3711                                 switch (state) {
3712                                 case RES_MTT_ALLOCATED:
3713                                         __mlx4_free_mtt_range(dev, base,
3714                                                               mtt->order);
3715                                         spin_lock_irq(mlx4_tlock(dev));
3716                                         rb_erase(&mtt->com.node,
3717                                                  &tracker->res_tree[RES_MTT]);
3718                                         list_del(&mtt->com.list);
3719                                         spin_unlock_irq(mlx4_tlock(dev));
3720                                         kfree(mtt);
3721                                         state = 0;
3722                                         break;
3723
3724                                 default:
3725                                         state = 0;
3726                                 }
3727                         }
3728                 }
3729                 spin_lock_irq(mlx4_tlock(dev));
3730         }
3731         spin_unlock_irq(mlx4_tlock(dev));
3732 }
3733
3734 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3735 {
3736         struct mlx4_priv *priv = mlx4_priv(dev);
3737         struct mlx4_resource_tracker *tracker =
3738                 &priv->mfunc.master.res_tracker;
3739         struct list_head *fs_rule_list =
3740                 &tracker->slave_list[slave].res_list[RES_FS_RULE];
3741         struct res_fs_rule *fs_rule;
3742         struct res_fs_rule *tmp;
3743         int state;
3744         u64 base;
3745         int err;
3746
3747         err = move_all_busy(dev, slave, RES_FS_RULE);
3748         if (err)
3749                 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
3750                           slave);
3751
3752         spin_lock_irq(mlx4_tlock(dev));
3753         list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3754                 spin_unlock_irq(mlx4_tlock(dev));
3755                 if (fs_rule->com.owner == slave) {
3756                         base = fs_rule->com.res_id;
3757                         state = fs_rule->com.from_state;
3758                         while (state != 0) {
3759                                 switch (state) {
3760                                 case RES_FS_RULE_ALLOCATED:
3761                                         /* detach rule */
3762                                         err = mlx4_cmd(dev, base, 0, 0,
3763                                                        MLX4_QP_FLOW_STEERING_DETACH,
3764                                                        MLX4_CMD_TIME_CLASS_A,
3765                                                        MLX4_CMD_NATIVE);
3766
3767                                         spin_lock_irq(mlx4_tlock(dev));
3768                                         rb_erase(&fs_rule->com.node,
3769                                                  &tracker->res_tree[RES_FS_RULE]);
3770                                         list_del(&fs_rule->com.list);
3771                                         spin_unlock_irq(mlx4_tlock(dev));
3772                                         kfree(fs_rule);
3773                                         state = 0;
3774                                         break;
3775
3776                                 default:
3777                                         state = 0;
3778                                 }
3779                         }
3780                 }
3781                 spin_lock_irq(mlx4_tlock(dev));
3782         }
3783         spin_unlock_irq(mlx4_tlock(dev));
3784 }
3785
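/*
 * EQ cleanup allocates a command mailbox before invoking HW2SW_EQ; if
 * the allocation fails, the loop reschedules and retries rather than
 * aborting the unwind.
 */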
3786 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3787 {
3788         struct mlx4_priv *priv = mlx4_priv(dev);
3789         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3790         struct list_head *eq_list =
3791                 &tracker->slave_list[slave].res_list[RES_EQ];
3792         struct res_eq *eq;
3793         struct res_eq *tmp;
3794         int err;
3795         int state;
3796         LIST_HEAD(tlist);
3797         int eqn;
3798         struct mlx4_cmd_mailbox *mailbox;
3799
3800         err = move_all_busy(dev, slave, RES_EQ);
3801         if (err)
3802                 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3803                           "busy for slave %d\n", slave);
3804
3805         spin_lock_irq(mlx4_tlock(dev));
3806         list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3807                 spin_unlock_irq(mlx4_tlock(dev));
3808                 if (eq->com.owner == slave) {
3809                         eqn = eq->com.res_id;
3810                         state = eq->com.from_state;
3811                         while (state != 0) {
3812                                 switch (state) {
3813                                 case RES_EQ_RESERVED:
3814                                         spin_lock_irq(mlx4_tlock(dev));
3815                                         rb_erase(&eq->com.node,
3816                                                  &tracker->res_tree[RES_EQ]);
3817                                         list_del(&eq->com.list);
3818                                         spin_unlock_irq(mlx4_tlock(dev));
3819                                         kfree(eq);
3820                                         state = 0;
3821                                         break;
3822
3823                                 case RES_EQ_HW:
3824                                         mailbox = mlx4_alloc_cmd_mailbox(dev);
3825                                         if (IS_ERR(mailbox)) {
3826                                                 cond_resched();
3827                                                 continue;
3828                                         }
3829                                         err = mlx4_cmd_box(dev, slave, 0,
3830                                                            eqn & 0xff, 0,
3831                                                            MLX4_CMD_HW2SW_EQ,
3832                                                            MLX4_CMD_TIME_CLASS_A,
3833                                                            MLX4_CMD_NATIVE);
3834                                         if (err)
3835                                                 mlx4_dbg(dev, "rem_slave_eqs: failed"
3836                                                          " to move slave %d eqs %d to"
3837                                                          " SW ownership\n", slave, eqn);
3838                                         mlx4_free_cmd_mailbox(dev, mailbox);
3839                                         atomic_dec(&eq->mtt->ref_count);
3840                                         state = RES_EQ_RESERVED;
3841                                         break;
3842
3843                                 default:
3844                                         state = 0;
3845                                 }
3846                         }
3847                 }
3848                 spin_lock_irq(mlx4_tlock(dev));
3849         }
3850         spin_unlock_irq(mlx4_tlock(dev));
3851 }
3852
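/*
 * Counters and XRC domains have a single tracked state, so their entries
 * are erased and handed back to the allocator directly under the tracker
 * lock.
 */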
3853 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3854 {
3855         struct mlx4_priv *priv = mlx4_priv(dev);
3856         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3857         struct list_head *counter_list =
3858                 &tracker->slave_list[slave].res_list[RES_COUNTER];
3859         struct res_counter *counter;
3860         struct res_counter *tmp;
3861         int err;
3862         int index;
3863
3864         err = move_all_busy(dev, slave, RES_COUNTER);
3865         if (err)
3866                 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
3867                           "busy for slave %d\n", slave);
3868
3869         spin_lock_irq(mlx4_tlock(dev));
3870         list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3871                 if (counter->com.owner == slave) {
3872                         index = counter->com.res_id;
3873                         rb_erase(&counter->com.node,
3874                                  &tracker->res_tree[RES_COUNTER]);
3875                         list_del(&counter->com.list);
3876                         kfree(counter);
3877                         __mlx4_counter_free(dev, index);
3878                 }
3879         }
3880         spin_unlock_irq(mlx4_tlock(dev));
3881 }
3882
3883 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3884 {
3885         struct mlx4_priv *priv = mlx4_priv(dev);
3886         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3887         struct list_head *xrcdn_list =
3888                 &tracker->slave_list[slave].res_list[RES_XRCD];
3889         struct res_xrcdn *xrcd;
3890         struct res_xrcdn *tmp;
3891         int err;
3892         int xrcdn;
3893
3894         err = move_all_busy(dev, slave, RES_XRCD);
3895         if (err)
3896                 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
3897                           "busy for slave %d\n", slave);
3898
3899         spin_lock_irq(mlx4_tlock(dev));
3900         list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3901                 if (xrcd->com.owner == slave) {
3902                         xrcdn = xrcd->com.res_id;
3903                         rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
3904                         list_del(&xrcd->com.list);
3905                         kfree(xrcd);
3906                         __mlx4_xrcd_free(dev, xrcdn);
3907                 }
3908         }
3909         spin_unlock_irq(mlx4_tlock(dev));
3910 }
3911
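/*
 * Tear down everything the given slave still owns, under the per-slave
 * mutex so the teardown cannot race with the command wrappers above.
 * Rules and QPs are released before the CQs, MTTs and counters they may
 * reference.
 */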
3912 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3913 {
3914         struct mlx4_priv *priv = mlx4_priv(dev);
3915
3916         mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3917         /* VLAN */
3918         rem_slave_macs(dev, slave);
3919         rem_slave_fs_rule(dev, slave);
3920         rem_slave_qps(dev, slave);
3921         rem_slave_srqs(dev, slave);
3922         rem_slave_cqs(dev, slave);
3923         rem_slave_mrs(dev, slave);
3924         rem_slave_eqs(dev, slave);
3925         rem_slave_mtts(dev, slave);
3926         rem_slave_counters(dev, slave);
3927         rem_slave_xrcdns(dev, slave);
3928         mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3929 }