/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID		(1ull << 63)
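
/* The resource tracker below is the PF-side bookkeeping for SR-IOV: every
 * QP, CQ, SRQ, MPT, MTT, EQ, counter, XRCD, MAC, VLAN and flow-steering
 * rule that a slave (VF) obtains through the command channel is recorded
 * here, so ownership can be enforced on every wrapped command and so all
 * resources can be reclaimed if the slave goes away.
 */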
struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
	u64			reg_id;
};
enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};
struct res_qp {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *rcq;
	struct res_cq	       *scq;
	struct res_srq	       *srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
	atomic_t		ref_count;
	u32			qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8			sched_queue;
	__be32			param3;
	u8			vlan_control;
	u8			fvl_rx;
	u8			pri_path_fl;
	u8			vlan_index;
	u8			feup;
};
enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}
struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt	       *mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt	       *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
	int			qpn;
};
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}
static const char *resource_str(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
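
/* Admission control for slave allocations.  Each resource type has, per
 * slave, a hard "quota" and a "guaranteed" floor; the guaranteed amounts
 * are pre-reserved, and anything above them must fit in the shared free
 * pool without eating into other slaves' reservations.  Illustrative
 * example (numbers assumed, not taken from the code): with quota 10,
 * guaranteed 4 and allocated 3, a request for count 5 takes 1 unit from
 * the reservation (up to the floor) and 4 from the free pool, and is
 * refused if free - 4 < reserved.
 */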
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;
	int from_rsvd;

	if (slave > dev->persist->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave]) {
		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
			  slave, port, resource_str(res_type), count,
			  allocated, res_alloc->quota[slave]);
		goto out;
	}

	if (allocated + count <= guaranteed) {
		err = 0;
		from_rsvd = count;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		from_rsvd = count - from_free;

		if (free - from_free >= reserved)
			err = 0;
		else
			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
				  slave, port, resource_str(res_type), free,
				  from_free, reserved);
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
			res_alloc->res_reserved -= from_rsvd;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int allocated, guaranteed, from_rsvd;

	if (slave > dev->persist->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);

	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated - count >= guaranteed) {
		from_rsvd = 0;
	} else {
		/* portion may need to be returned to reserved area */
		if (allocated - guaranteed > 0)
			from_rsvd = count - (allocated - guaranteed);
		else
			from_rsvd = count;
	}

	if (port > 0) {
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
		res_alloc->res_reserved += from_rsvd;
	}

	spin_unlock(&res_alloc->alloc_lock);
}
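
/* Default quota policy: every function (PF + num_vfs VFs) is guaranteed
 * num_instances / (2 * (num_vfs + 1)) units and may allocate up to half
 * of num_instances on top of its guarantee.  For example (illustrative
 * numbers only): with 64K QPs and 7 VFs, each function is guaranteed
 * 64K / 16 = 4K QPs and has a quota of 32K + 4K = 36K.
 */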
static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances /
				    (2 * (dev->persist->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}
void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
					   sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
						sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->persist->num_vfs
						       + 1) *
						       sizeof(int), GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->persist->
							num_vfs + 1) *
						       sizeof(int), GFP_KERNEL);

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->persist->num_vfs + 1; t++) {
			struct mlx4_active_ports actv_ports =
				mlx4_get_active_ports(dev, t);
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					int max_vfs_pport = 0;
					/* Calculate the max vfs per port for
					 * both ports.
					 */
					for (j = 0; j < dev->caps.num_ports;
					     j++) {
						struct mlx4_slaves_pport slaves_pport =
							mlx4_phys_to_slaves_pport(dev, j + 1);
						unsigned current_slaves =
							bitmap_weight(slaves_pport.slaves,
								      dev->caps.num_ports) - 1;
						if (max_vfs_pport < current_slaves)
							max_vfs_pport =
								current_slaves;
					}
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM -
						2 * max_vfs_pport;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				res_alloc->guaranteed[t] = 0;
				if (t == mlx4_master_func_num(dev))
					res_alloc->res_free = res_alloc->quota[t];
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < dev->caps.num_ports; j++)
					if (test_bit(j, actv_ports.ports))
						res_alloc->res_port_rsvd[j] +=
							res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mlx4_reset_roce_gids(dev, i);
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}
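
/* The helpers below rewrite mailbox contents on behalf of a slave:
 * pkey indices are translated through the master's virt2phys_pkey map,
 * so each slave sees a private, contiguous pkey table.
 */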
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index =
				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}
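
/* update_gid() above remaps the slave's mgid_index into its own window of
 * the physical GID table (base index from mlx4_get_base_gid_ix()).
 * update_vport_qp_param() below enforces the VST (host-assigned VLAN)
 * configuration on QP contexts the slave submits: VLAN stripping and
 * insertion, the vlan_control blocking policy, the QoS priority in
 * sched_queue, and the spoof-check MAC when enabled.
 */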
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context	*qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port, err = 0;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
		if (qp_type == MLX4_QP_ST_UD ||
		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
				*(__be32 *)inbox->buf =
					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
					MLX4_QP_OPTPAR_VLAN_STRIPPING);
				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
			} else {
				struct mlx4_update_qp_params params = {.flags = 0};

				err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
				if (err)
					goto out;
			}
		}

		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
		} else { /* priority tagged */
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
		qpc->qos_vport = vp_oper->state.qos_vport;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
out:
	return err;
}
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}
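
/* get_res()/put_res() implement a simple busy-bit protocol: taking a
 * resource moves it to RES_ANY_BUSY (remembering from_state), which keeps
 * concurrent wrapper invocations from mutating or deleting it; put_res()
 * restores the saved state.  Everything runs under the tracker spinlock
 * (mlx4_tlock).
 */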
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENOENT;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;

	return &ret->com;
}
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		pr_err("implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
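
/* add_res_range() registers [base, base + count) for a slave: entries are
 * first allocated outside the lock, then inserted into the per-type
 * rb-tree and the slave's list under the tracker lock, with full rollback
 * (the "undo" path) if any id already exists.
 */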
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	/* i is an array index here, so unwind down to 0, not base */
	for (--i; i >= 0; --i)
		rb_erase(&res_arr[i]->node, root);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		pr_devel("%s-%d: state %s, ref_count %d\n",
			 __func__, __LINE__,
			 mtt_states_str(res->com.state),
			 atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
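
/* The *_res_start_move_to() helpers below validate a state transition
 * (e.g. RES_QP_RESERVED -> RES_QP_MAPPED -> RES_QP_HW), then park the
 * resource in the BUSY state with to_state recorded.  The caller
 * completes the move with res_end_move() once the firmware command
 * succeeds, or rolls back with res_abort_move() on failure.
 */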
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}
			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_CQ_ALLOCATED) {
		if (r->com.state != RES_CQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
		else
			err = 0;
	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
		err = -EINVAL;
	} else {
		err = 0;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_CQ_BUSY;
		if (cq)
			*cq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_SRQ_ALLOCATED) {
		if (r->com.state != RES_SRQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
		err = -EINVAL;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_SRQ_BUSY;
		if (srq)
			*srq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
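
/* QP allocation from a slave is two-staged, mirroring the op_modifier of
 * the ALLOC_RES command: RES_OP_RESERVE grabs a range of QP numbers
 * (count and flags packed into the low dword of in_param, alignment in
 * the high dword), and RES_OP_MAP_ICM later maps ICM for one QP context.
 */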
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;
	u8 flags;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param) & 0xffffff;
		/* Turn off all unsupported QP allocation flags that the
		 * slave tries to set.
		 */
		flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
		align = get_param_h(&in_param);
		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
		if (err)
			return err;

		err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			return err;
		}

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	order = get_param_l(&in_param);

	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
	if (err)
		return err;

	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		return -ENOMEM;
	}

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	} else {
		set_param_l(out_param, base);
	}

	return err;
}
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
		if (err)
			break;

		index = __mlx4_mpt_reserve(dev);
		if (index == -1) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			break;
		}
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
		if (err)
			break;

		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
		if (err)
			break;

		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
				     u8 smac_index, u64 *mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->smac_index == smac_index && res->port == (u8) port) {
			*mac = res->mac;
			return 0;
		}
	}
	return -ENOENT;
}
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			/* mac found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
		return -ENOMEM;
	}
	res->mac = mac;
	res->port = (u8) port;
	res->smac_index = smac_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}
static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
				kfree(res);
			}
			break;
		}
	}
}
static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		/* dereference the mac the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_mac(dev, res->port, res->mac);
		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
		kfree(res);
	}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int in_port)
{
	int err = -EINVAL;
	int port;
	u64 mac;
	u8 smac_index;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = !in_port ? get_param_l(out_param) : in_port;
	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		smac_index = err;
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}
static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
			     int port, int vlan_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			/* vlan found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
		return -ENOMEM;
	}
	res->vlan = vlan;
	res->port = (u8) port;
	res->vlan_index = vlan_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_VLAN]);
	return 0;
}
static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
				int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_VLAN,
						      1, port);
				kfree(res);
			}
			break;
		}
	}
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		list_del(&res->list);
		/* dereference the vlan the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_vlan(dev, res->port, res->vlan);
		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
		kfree(res);
	}
}
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param, int in_port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err;
	u16 vlan;
	int vlan_index;
	int port;

	port = !in_port ? get_param_l(out_param) : in_port;

	if (!port || op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
		slave_state[slave].old_vlan_api = true;
		return 0;
	}

	vlan = (u16) in_param;

	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (!err) {
		set_param_l(out_param, (u32) vlan_index);
		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
		if (err)
			__mlx4_unregister_vlan(dev, port, vlan);
	}
	return err;
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
	if (err)
		return err;

	err = __mlx4_counter_alloc(dev, &index);
	if (err) {
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		return err;
	}

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err) {
		__mlx4_counter_free(dev, index);
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
	} else {
		set_param_l(out_param, index);
	}

	return err;
}
static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}
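
/* mlx4_ALLOC_RES_wrapper() is the command-channel entry point: the low
 * byte of in_modifier selects the resource type, op_modifier selects
 * RES_OP_*, and for MAC/VLAN the port travels in bits 8-15 of
 * in_modifier.  Results are returned to the slave via vhcr->out_param.
 */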
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_QP, count, 0);
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	}
	return err;
}
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param, int in_port)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = !in_port ? get_param_l(out_param) : in_port;
		port = mlx4_slave_convert_port(
				dev, slave, port);

		if (port < 0)
			return -EINVAL;
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err = 0;

	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		if (slave_state[slave].old_vlan_api)
			return 0;
		if (!port)
			return -EINVAL;
		vlan_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_vlan(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);
	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

	return err;
}
static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param,
				   (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}
static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_mem;
	int total_pages;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}
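
/* Illustrative sizing (numbers assumed, not from the code): log_sq_size
 * = 6 and log_sq_stride = 2 give an SQ of 1 << (6 + 2 + 4) = 4KB; with
 * the RQ suppressed (SRQ/RSS/XRC) and page_shift = 12, total_pages
 * rounds 4KB >> 12 up to a single MTT page.
 */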
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {
		err = -EPERM;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && --pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state == RES_MPT_MAPPED) {
		/* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
		 * that, the VF must read the MPT. But since the MPT entry memory is not
		 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
		 * entry contents. To guarantee that the MPT cannot be changed, the driver
		 * must perform HW2SW_MPT before this query and return the MPT entry to HW
		 * ownership following the change. The change here allows the VF to
		 * perform QUERY_MPT also when the entry is in SW ownership.
		 */
		struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
					&mlx4_priv(dev)->mr_table.dmpt_table,
					mpt->key, NULL);

		if (NULL == mpt_entry || NULL == outbox->buf) {
			err = -EINVAL;
			goto out;
		}

		memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));

		err = 0;
	} else if (mpt->com.from_state == RES_MPT_HW) {
		err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	} else {
		err = -EBUSY;
		goto out;
	}

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey = 0;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;
	qp->sched_queue = 0;
	qp->param3 = 0;
	qp->vlan_control = 0;
	qp->fvl_rx = 0;
	qp->pri_path_fl = 0;
	qp->vlan_index = 0;
	qp->feup = 0;
	qp->qpc_flags = be32_to_cpu(qpc->flags);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 10) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
2889 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
2890 struct mlx4_vhcr *vhcr,
2891 struct mlx4_cmd_mailbox *inbox,
2892 struct mlx4_cmd_mailbox *outbox,
2893 struct mlx4_cmd_info *cmd)
2896 u8 get = vhcr->op_modifier;
2901 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2906 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2907 int len, struct res_mtt **res)
2909 struct mlx4_priv *priv = mlx4_priv(dev);
2910 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2911 struct res_mtt *mtt;
2914 spin_lock_irq(mlx4_tlock(dev));
2915 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2917 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2919 mtt->com.from_state = mtt->com.state;
2920 mtt->com.state = RES_MTT_BUSY;
2925 spin_unlock_irq(mlx4_tlock(dev));
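/* On success the containing range is returned with its state forced to
 * RES_MTT_BUSY; the caller must drop that with put_res() when done, as
 * mlx4_WRITE_MTT_wrapper below does.
 */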
2930 static int verify_qp_parameters(struct mlx4_dev *dev,
2931 struct mlx4_vhcr *vhcr,
2932 struct mlx4_cmd_mailbox *inbox,
2933 enum qp_transition transition, u8 slave)
2937 struct mlx4_qp_context *qp_ctx;
2938 enum mlx4_qp_optpar optpar;
2942 qp_ctx = inbox->buf + 8;
2943 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2944 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
2946 if (slave != mlx4_master_func_num(dev)) {
2947 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
2948 /* setting QP rate-limit is disallowed for VFs */
2949 if (qp_ctx->rate_limit_params)
2955 case MLX4_QP_ST_XRC:
2957 switch (transition) {
2958 case QP_TRANS_INIT2RTR:
2959 case QP_TRANS_RTR2RTS:
2960 case QP_TRANS_RTS2RTS:
2961 case QP_TRANS_SQD2SQD:
2962 case QP_TRANS_SQD2RTS:
2963 if (slave != mlx4_master_func_num(dev))
2964 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2965 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2966 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2967 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2970 if (qp_ctx->pri_path.mgid_index >= num_gids)
2973 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2974 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2975 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2976 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2979 if (qp_ctx->alt_path.mgid_index >= num_gids)
2988 case MLX4_QP_ST_MLX:
2989 qpn = vhcr->in_modifier & 0x7fffff;
2990 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2991 if (transition == QP_TRANS_INIT2RTR &&
2992 slave != mlx4_master_func_num(dev) &&
2993 mlx4_is_qp_reserved(dev, qpn) &&
2994 !mlx4_vf_smi_enabled(dev, slave, port)) {
2995 /* only enabled VFs may create MLX proxy QPs */
2996 mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
2997 __func__, slave, port);
3009 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3010 struct mlx4_vhcr *vhcr,
3011 struct mlx4_cmd_mailbox *inbox,
3012 struct mlx4_cmd_mailbox *outbox,
3013 struct mlx4_cmd_info *cmd)
3015 struct mlx4_mtt mtt;
3016 __be64 *page_list = inbox->buf;
3017 u64 *pg_list = (u64 *)page_list;
3019 struct res_mtt *rmtt = NULL;
3020 int start = be64_to_cpu(page_list[0]);
3021 int npages = vhcr->in_modifier;
3024 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3028 /* Call the SW implementation of write_mtt:
3029 * - Prepare a dummy mtt struct
3030 * - Translate inbox contents to simple addresses in host endianness */
3031 mtt.offset = 0; /* TBD: not strictly correct, but left unhandled
3032 * since the offset is never actually used here */
3035 for (i = 0; i < npages; ++i)
3036 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3038 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3039 ((u64 *)page_list + 2));
3042 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
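/* Mailbox layout assumed by this wrapper: page_list[0] carries the
 * starting MTT offset, page_list[1] is reserved, and the DMA addresses
 * begin at index 2, each with its low bit (presumably the hardware
 * present flag) masked off via ~1ULL before __mlx4_write_mtt() runs.
 */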
3047 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3048 struct mlx4_vhcr *vhcr,
3049 struct mlx4_cmd_mailbox *inbox,
3050 struct mlx4_cmd_mailbox *outbox,
3051 struct mlx4_cmd_info *cmd)
3053 int eqn = vhcr->in_modifier;
3054 int res_id = eqn | (slave << 10);
3058 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3062 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3066 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3070 atomic_dec(&eq->mtt->ref_count);
3071 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3072 res_end_move(dev, slave, RES_EQ, res_id);
3073 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3078 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3080 res_abort_move(dev, slave, RES_EQ, res_id);
3085 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3087 struct mlx4_priv *priv = mlx4_priv(dev);
3088 struct mlx4_slave_event_eq_info *event_eq;
3089 struct mlx4_cmd_mailbox *mailbox;
3090 u32 in_modifier = 0;
3095 if (!priv->mfunc.master.slave_state)
3098 /* check that the slave is valid, is not the PF, and is active */
3099 if (slave < 0 || slave > dev->persist->num_vfs ||
3100 slave == dev->caps.function ||
3101 !priv->mfunc.master.slave_state[slave].active)
3104 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3106 /* Create the event only if the slave is registered */
3107 if (event_eq->eqn < 0)
3110 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3111 res_id = (slave << 10) | event_eq->eqn;
3112 err = get_res(dev, slave, res_id, RES_EQ, &req);
3116 if (req->com.from_state != RES_EQ_HW) {
3121 mailbox = mlx4_alloc_cmd_mailbox(dev);
3122 if (IS_ERR(mailbox)) {
3123 err = PTR_ERR(mailbox);
3127 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3129 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
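/* Only 28 of the EQE's 32 bytes are copied below; the last dword,
 * which holds the ownership bit, is presumably left for the GEN_EQE
 * command itself to complete.
 */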
3132 memcpy(mailbox->buf, (u8 *) eqe, 28);
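/* GEN_EQE in_modifier packing, per the line below: the target slave
 * goes in bits 0-7 and the slave's EQN in bits 16-25.
 */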
3134 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3136 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3137 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3140 put_res(dev, slave, res_id, RES_EQ);
3141 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3142 mlx4_free_cmd_mailbox(dev, mailbox);
3146 put_res(dev, slave, res_id, RES_EQ);
3149 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3153 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3154 struct mlx4_vhcr *vhcr,
3155 struct mlx4_cmd_mailbox *inbox,
3156 struct mlx4_cmd_mailbox *outbox,
3157 struct mlx4_cmd_info *cmd)
3159 int eqn = vhcr->in_modifier;
3160 int res_id = eqn | (slave << 10);
3164 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3168 if (eq->com.from_state != RES_EQ_HW) {
3173 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3176 put_res(dev, slave, res_id, RES_EQ);
3180 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3181 struct mlx4_vhcr *vhcr,
3182 struct mlx4_cmd_mailbox *inbox,
3183 struct mlx4_cmd_mailbox *outbox,
3184 struct mlx4_cmd_info *cmd)
3187 int cqn = vhcr->in_modifier;
3188 struct mlx4_cq_context *cqc = inbox->buf;
3189 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3190 struct res_cq *cq = NULL;
3191 struct res_mtt *mtt;
3193 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3196 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3199 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3202 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3205 atomic_inc(&mtt->ref_count);
3207 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3208 res_end_move(dev, slave, RES_CQ, cqn);
3212 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3214 res_abort_move(dev, slave, RES_CQ, cqn);
3218 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3219 struct mlx4_vhcr *vhcr,
3220 struct mlx4_cmd_mailbox *inbox,
3221 struct mlx4_cmd_mailbox *outbox,
3222 struct mlx4_cmd_info *cmd)
3225 int cqn = vhcr->in_modifier;
3226 struct res_cq *cq = NULL;
3228 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3231 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3234 atomic_dec(&cq->mtt->ref_count);
3235 res_end_move(dev, slave, RES_CQ, cqn);
3239 res_abort_move(dev, slave, RES_CQ, cqn);
3243 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3244 struct mlx4_vhcr *vhcr,
3245 struct mlx4_cmd_mailbox *inbox,
3246 struct mlx4_cmd_mailbox *outbox,
3247 struct mlx4_cmd_info *cmd)
3249 int cqn = vhcr->in_modifier;
3253 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3257 if (cq->com.from_state != RES_CQ_HW)
3260 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3262 put_res(dev, slave, cqn, RES_CQ);
3267 static int handle_resize(struct mlx4_dev *dev, int slave,
3268 struct mlx4_vhcr *vhcr,
3269 struct mlx4_cmd_mailbox *inbox,
3270 struct mlx4_cmd_mailbox *outbox,
3271 struct mlx4_cmd_info *cmd,
3275 struct res_mtt *orig_mtt;
3276 struct res_mtt *mtt;
3277 struct mlx4_cq_context *cqc = inbox->buf;
3278 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3280 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3284 if (orig_mtt != cq->mtt) {
3289 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3293 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3296 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3299 atomic_dec(&orig_mtt->ref_count);
3300 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3301 atomic_inc(&mtt->ref_count);
3303 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3307 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3309 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3315 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3316 struct mlx4_vhcr *vhcr,
3317 struct mlx4_cmd_mailbox *inbox,
3318 struct mlx4_cmd_mailbox *outbox,
3319 struct mlx4_cmd_info *cmd)
3321 int cqn = vhcr->in_modifier;
3325 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3329 if (cq->com.from_state != RES_CQ_HW)
3332 if (vhcr->op_modifier == 0) {
3333 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3337 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3339 put_res(dev, slave, cqn, RES_CQ);
3344 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3346 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3347 int log_rq_stride = srqc->logstride & 7;
3348 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3350 if (log_srq_size + log_rq_stride + 4 < page_shift)
3353 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
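/* Same pattern as the EQ/CQ helpers: log_rq_stride + 4 is log2 of the
 * WQE stride (base stride 2^4 = 16 bytes). E.g. 1024 SRQ WQEs (log
 * size 10) with a 64-byte stride (log_rq_stride 2) on 4K pages give
 * 1 << (10 + 2 + 4 - 12) = 16 MTT entries.
 */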
3356 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3357 struct mlx4_vhcr *vhcr,
3358 struct mlx4_cmd_mailbox *inbox,
3359 struct mlx4_cmd_mailbox *outbox,
3360 struct mlx4_cmd_info *cmd)
3363 int srqn = vhcr->in_modifier;
3364 struct res_mtt *mtt;
3365 struct res_srq *srq = NULL;
3366 struct mlx4_srq_context *srqc = inbox->buf;
3367 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3369 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3372 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3375 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3378 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3383 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3387 atomic_inc(&mtt->ref_count);
3389 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3390 res_end_move(dev, slave, RES_SRQ, srqn);
3394 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3396 res_abort_move(dev, slave, RES_SRQ, srqn);
3401 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3402 struct mlx4_vhcr *vhcr,
3403 struct mlx4_cmd_mailbox *inbox,
3404 struct mlx4_cmd_mailbox *outbox,
3405 struct mlx4_cmd_info *cmd)
3408 int srqn = vhcr->in_modifier;
3409 struct res_srq *srq = NULL;
3411 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3414 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3417 atomic_dec(&srq->mtt->ref_count);
3419 atomic_dec(&srq->cq->ref_count);
3420 res_end_move(dev, slave, RES_SRQ, srqn);
3425 res_abort_move(dev, slave, RES_SRQ, srqn);
3430 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3431 struct mlx4_vhcr *vhcr,
3432 struct mlx4_cmd_mailbox *inbox,
3433 struct mlx4_cmd_mailbox *outbox,
3434 struct mlx4_cmd_info *cmd)
3437 int srqn = vhcr->in_modifier;
3438 struct res_srq *srq;
3440 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3443 if (srq->com.from_state != RES_SRQ_HW) {
3447 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3449 put_res(dev, slave, srqn, RES_SRQ);
3453 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3454 struct mlx4_vhcr *vhcr,
3455 struct mlx4_cmd_mailbox *inbox,
3456 struct mlx4_cmd_mailbox *outbox,
3457 struct mlx4_cmd_info *cmd)
3460 int srqn = vhcr->in_modifier;
3461 struct res_srq *srq;
3463 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3467 if (srq->com.from_state != RES_SRQ_HW) {
3472 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3474 put_res(dev, slave, srqn, RES_SRQ);
3478 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3479 struct mlx4_vhcr *vhcr,
3480 struct mlx4_cmd_mailbox *inbox,
3481 struct mlx4_cmd_mailbox *outbox,
3482 struct mlx4_cmd_info *cmd)
3485 int qpn = vhcr->in_modifier & 0x7fffff;
3488 err = get_res(dev, slave, qpn, RES_QP, &qp);
3491 if (qp->com.from_state != RES_QP_HW) {
3496 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3498 put_res(dev, slave, qpn, RES_QP);
3502 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3503 struct mlx4_vhcr *vhcr,
3504 struct mlx4_cmd_mailbox *inbox,
3505 struct mlx4_cmd_mailbox *outbox,
3506 struct mlx4_cmd_info *cmd)
3508 struct mlx4_qp_context *context = inbox->buf + 8;
3509 adjust_proxy_tun_qkey(dev, vhcr, context);
3510 update_pkey_index(dev, slave, inbox);
3511 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3514 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3515 struct mlx4_qp_context *qpc,
3516 struct mlx4_cmd_mailbox *inbox)
3518 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3520 int port = mlx4_slave_convert_port(
3521 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3526 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3529 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
3530 mlx4_is_eth(dev, port + 1)) {
3531 qpc->pri_path.sched_queue = pri_sched_queue;
3534 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3535 port = mlx4_slave_convert_port(
3536 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3540 qpc->alt_path.sched_queue =
3541 (qpc->alt_path.sched_queue & ~(1 << 6)) |
3547 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3548 struct mlx4_qp_context *qpc,
3549 struct mlx4_cmd_mailbox *inbox)
3553 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3554 u8 sched = *(u8 *)(inbox->buf + 64);
3557 port = (sched >> 6 & 1) + 1;
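/* For a RoCE QP (Ethernet port, non-MLX transport) the smac index the
 * VF programmed must resolve to a MAC actually assigned to that VF;
 * anything else fails the verification.
 */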
3558 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3559 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3560 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3566 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3567 struct mlx4_vhcr *vhcr,
3568 struct mlx4_cmd_mailbox *inbox,
3569 struct mlx4_cmd_mailbox *outbox,
3570 struct mlx4_cmd_info *cmd)
3573 struct mlx4_qp_context *qpc = inbox->buf + 8;
3574 int qpn = vhcr->in_modifier & 0x7fffff;
3576 u8 orig_sched_queue;
3577 __be32 orig_param3 = qpc->param3;
3578 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3579 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3580 u8 orig_pri_path_fl = qpc->pri_path.fl;
3581 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3582 u8 orig_feup = qpc->pri_path.feup;
3584 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3587 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3591 if (roce_verify_mac(dev, slave, qpc, inbox))
3594 update_pkey_index(dev, slave, inbox);
3595 update_gid(dev, inbox, (u8)slave);
3596 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3597 orig_sched_queue = qpc->pri_path.sched_queue;
3598 err = update_vport_qp_param(dev, inbox, slave, qpn);
3602 err = get_res(dev, slave, qpn, RES_QP, &qp);
3605 if (qp->com.from_state != RES_QP_HW) {
3610 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3612 /* if no error, save the sched queue value passed in by the VF. This is
3613 * essentially the QoS value provided by the VF, and will be useful
3614 * if we allow dynamic changes from VST back to VGT
3617 qp->sched_queue = orig_sched_queue;
3618 qp->param3 = orig_param3;
3619 qp->vlan_control = orig_vlan_control;
3620 qp->fvl_rx = orig_fvl_rx;
3621 qp->pri_path_fl = orig_pri_path_fl;
3622 qp->vlan_index = orig_vlan_index;
3623 qp->feup = orig_feup;
3625 put_res(dev, slave, qpn, RES_QP);
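/* The orig_* values saved above are the pre-VST parameters that
 * mlx4_vf_immed_vlan_work_handler() further down restores when the VF
 * is moved back to VGT.
 */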
3629 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3630 struct mlx4_vhcr *vhcr,
3631 struct mlx4_cmd_mailbox *inbox,
3632 struct mlx4_cmd_mailbox *outbox,
3633 struct mlx4_cmd_info *cmd)
3636 struct mlx4_qp_context *context = inbox->buf + 8;
3638 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3641 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3645 update_pkey_index(dev, slave, inbox);
3646 update_gid(dev, inbox, (u8)slave);
3647 adjust_proxy_tun_qkey(dev, vhcr, context);
3648 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3651 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3652 struct mlx4_vhcr *vhcr,
3653 struct mlx4_cmd_mailbox *inbox,
3654 struct mlx4_cmd_mailbox *outbox,
3655 struct mlx4_cmd_info *cmd)
3658 struct mlx4_qp_context *context = inbox->buf + 8;
3660 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3663 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3667 update_pkey_index(dev, slave, inbox);
3668 update_gid(dev, inbox, (u8)slave);
3669 adjust_proxy_tun_qkey(dev, vhcr, context);
3670 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3674 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3675 struct mlx4_vhcr *vhcr,
3676 struct mlx4_cmd_mailbox *inbox,
3677 struct mlx4_cmd_mailbox *outbox,
3678 struct mlx4_cmd_info *cmd)
3680 struct mlx4_qp_context *context = inbox->buf + 8;
3681 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3684 adjust_proxy_tun_qkey(dev, vhcr, context);
3685 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3688 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3689 struct mlx4_vhcr *vhcr,
3690 struct mlx4_cmd_mailbox *inbox,
3691 struct mlx4_cmd_mailbox *outbox,
3692 struct mlx4_cmd_info *cmd)
3695 struct mlx4_qp_context *context = inbox->buf + 8;
3697 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3700 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3704 adjust_proxy_tun_qkey(dev, vhcr, context);
3705 update_gid(dev, inbox, (u8)slave);
3706 update_pkey_index(dev, slave, inbox);
3707 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3710 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3711 struct mlx4_vhcr *vhcr,
3712 struct mlx4_cmd_mailbox *inbox,
3713 struct mlx4_cmd_mailbox *outbox,
3714 struct mlx4_cmd_info *cmd)
3717 struct mlx4_qp_context *context = inbox->buf + 8;
3719 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3722 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3726 adjust_proxy_tun_qkey(dev, vhcr, context);
3727 update_gid(dev, inbox, (u8)slave);
3728 update_pkey_index(dev, slave, inbox);
3729 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3732 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3733 struct mlx4_vhcr *vhcr,
3734 struct mlx4_cmd_mailbox *inbox,
3735 struct mlx4_cmd_mailbox *outbox,
3736 struct mlx4_cmd_info *cmd)
3739 int qpn = vhcr->in_modifier & 0x7fffff;
3742 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3745 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3749 atomic_dec(&qp->mtt->ref_count);
3750 atomic_dec(&qp->rcq->ref_count);
3751 atomic_dec(&qp->scq->ref_count);
3753 atomic_dec(&qp->srq->ref_count);
3754 res_end_move(dev, slave, RES_QP, qpn);
3758 res_abort_move(dev, slave, RES_QP, qpn);
3763 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3764 struct res_qp *rqp, u8 *gid)
3766 struct res_gid *res;
3768 list_for_each_entry(res, &rqp->mcg_list, list) {
3769 if (!memcmp(res->gid, gid, 16))
3775 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3776 u8 *gid, enum mlx4_protocol prot,
3777 enum mlx4_steer_type steer, u64 reg_id)
3779 struct res_gid *res;
3782 res = kzalloc(sizeof(*res), GFP_KERNEL);
3786 spin_lock_irq(&rqp->mcg_spl);
3787 if (find_gid(dev, slave, rqp, gid)) {
3791 memcpy(res->gid, gid, 16);
3794 res->reg_id = reg_id;
3795 list_add_tail(&res->list, &rqp->mcg_list);
3798 spin_unlock_irq(&rqp->mcg_spl);
3803 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3804 u8 *gid, enum mlx4_protocol prot,
3805 enum mlx4_steer_type steer, u64 *reg_id)
3807 struct res_gid *res;
3810 spin_lock_irq(&rqp->mcg_spl);
3811 res = find_gid(dev, slave, rqp, gid);
3812 if (!res || res->prot != prot || res->steer != steer)
3815 *reg_id = res->reg_id;
3816 list_del(&res->list);
3820 spin_unlock_irq(&rqp->mcg_spl);
3825 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3826 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3827 enum mlx4_steer_type type, u64 *reg_id)
3829 switch (dev->caps.steering_mode) {
3830 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3831 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3834 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
3835 block_loopback, prot,
3838 case MLX4_STEERING_MODE_B0:
3839 if (prot == MLX4_PROT_ETH) {
3840 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3845 return mlx4_qp_attach_common(dev, qp, gid,
3846 block_loopback, prot, type);
3852 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3853 u8 gid[16], enum mlx4_protocol prot,
3854 enum mlx4_steer_type type, u64 reg_id)
3856 switch (dev->caps.steering_mode) {
3857 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3858 return mlx4_flow_detach(dev, reg_id);
3859 case MLX4_STEERING_MODE_B0:
3860 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
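/* Note the asymmetry between the steering modes: device-managed rules
 * detach by the reg_id returned at attach time, while B0 steering
 * detaches by the original (gid, prot, type) tuple.
 */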
3866 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3867 u8 *gid, enum mlx4_protocol prot)
3871 if (prot != MLX4_PROT_ETH)
3874 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3875 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3876 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
3885 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3886 struct mlx4_vhcr *vhcr,
3887 struct mlx4_cmd_mailbox *inbox,
3888 struct mlx4_cmd_mailbox *outbox,
3889 struct mlx4_cmd_info *cmd)
3891 struct mlx4_qp qp; /* dummy for calling attach/detach */
3892 u8 *gid = inbox->buf;
3893 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3898 int attach = vhcr->op_modifier;
3899 int block_loopback = vhcr->in_modifier >> 31;
3900 u8 steer_type_mask = 2;
3901 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
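/* Decoding above: the protocol sits in in_modifier bits 28-30,
 * block-loopback in bit 31, and the steering type in bit 1 of gid[7].
 */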
3903 qpn = vhcr->in_modifier & 0xffffff;
3904 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3910 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
3913 pr_err("Fail to attach rule to qp 0x%x\n", qpn);
3916 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3920 err = mlx4_adjust_port(dev, slave, gid, prot);
3924 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3928 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3930 pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
3933 put_res(dev, slave, qpn, RES_QP);
3937 qp_detach(dev, &qp, gid, prot, type, reg_id);
3939 put_res(dev, slave, qpn, RES_QP);
3944 * MAC validation for Flow Steering rules.
3945 * A VF may attach rules only with a MAC address that is assigned to it.
3947 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3948 struct list_head *rlist)
3950 struct mac_res *res, *tmp;
3953 /* make sure it isn't a multicast or broadcast MAC */
3954 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3955 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3956 list_for_each_entry_safe(res, tmp, rlist, list) {
3957 be_mac = cpu_to_be64(res->mac << 16);
3958 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
3961 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3962 eth_header->eth.dst_mac, slave);
3969 * In case of a missing eth header, append one with a MAC address
3970 * assigned to the VF.
3972 static int add_eth_header(struct mlx4_dev *dev, int slave,
3973 struct mlx4_cmd_mailbox *inbox,
3974 struct list_head *rlist, int header_id)
3976 struct mac_res *res, *tmp;
3978 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3979 struct mlx4_net_trans_rule_hw_eth *eth_header;
3980 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3981 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3983 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3985 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3987 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3989 /* Clear a space in the inbox for eth header */
3990 switch (header_id) {
3991 case MLX4_NET_TRANS_RULE_ID_IPV4:
3993 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3994 memmove(ip_header, eth_header,
3995 sizeof(*ip_header) + sizeof(*l4_header));
3997 case MLX4_NET_TRANS_RULE_ID_TCP:
3998 case MLX4_NET_TRANS_RULE_ID_UDP:
3999 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4001 memmove(l4_header, eth_header, sizeof(*l4_header));
4006 list_for_each_entry_safe(res, tmp, rlist, list) {
4007 if (port == res->port) {
4008 be_mac = cpu_to_be64(res->mac << 16);
4013 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
4018 memset(eth_header, 0, sizeof(*eth_header));
4019 eth_header->size = sizeof(*eth_header) >> 2;
4020 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4021 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4022 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4028 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
4029 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4030 struct mlx4_vhcr *vhcr,
4031 struct mlx4_cmd_mailbox *inbox,
4032 struct mlx4_cmd_mailbox *outbox,
4033 struct mlx4_cmd_info *cmd_info)
4036 u32 qpn = vhcr->in_modifier & 0xffffff;
4040 u64 pri_addr_path_mask;
4041 struct mlx4_update_qp_context *cmd;
4044 cmd = (struct mlx4_update_qp_context *)inbox->buf;
4046 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4047 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4048 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4051 /* Just change the smac for the QP */
4052 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4054 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4058 port = (rqp->sched_queue >> 6 & 1) + 1;
4060 if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4061 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4062 err = mac_find_smac_ix_in_slave(dev, slave, port,
4066 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4072 err = mlx4_cmd(dev, inbox->dma,
4073 vhcr->in_modifier, 0,
4074 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4077 mlx4_err(dev, "Failed to update qp 0x%x, command failed\n", qpn);
4082 put_res(dev, slave, qpn, RES_QP);
4086 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4087 struct mlx4_vhcr *vhcr,
4088 struct mlx4_cmd_mailbox *inbox,
4089 struct mlx4_cmd_mailbox *outbox,
4090 struct mlx4_cmd_info *cmd)
4093 struct mlx4_priv *priv = mlx4_priv(dev);
4094 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4095 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4099 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4100 struct _rule_hw *rule_header;
4103 if (dev->caps.steering_mode !=
4104 MLX4_STEERING_MODE_DEVICE_MANAGED)
4107 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4108 ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
4109 if (ctrl->port <= 0)
4111 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4112 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4114 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4117 rule_header = (struct _rule_hw *)(ctrl + 1);
4118 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4120 switch (header_id) {
4121 case MLX4_NET_TRANS_RULE_ID_ETH:
4122 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4127 case MLX4_NET_TRANS_RULE_ID_IB:
4129 case MLX4_NET_TRANS_RULE_ID_IPV4:
4130 case MLX4_NET_TRANS_RULE_ID_TCP:
4131 case MLX4_NET_TRANS_RULE_ID_UDP:
4132 pr_warn("Can't attach FS rule without L2 headers; adding L2 header\n");
4133 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4137 vhcr->in_modifier +=
4138 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
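/* in_modifier carries the rule size in dwords (hence the >> 2 above),
 * so it grows by the size of the L2 header just prepended.
 */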
4141 pr_err("Corrupted mailbox\n");
4146 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4147 vhcr->in_modifier, 0,
4148 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4153 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4155 mlx4_err(dev, "Fail to add flow steering resources\n");
4157 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4158 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4162 atomic_inc(&rqp->ref_count);
4164 put_res(dev, slave, qpn, RES_QP);
4168 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4169 struct mlx4_vhcr *vhcr,
4170 struct mlx4_cmd_mailbox *inbox,
4171 struct mlx4_cmd_mailbox *outbox,
4172 struct mlx4_cmd_info *cmd)
4176 struct res_fs_rule *rrule;
4178 if (dev->caps.steering_mode !=
4179 MLX4_STEERING_MODE_DEVICE_MANAGED)
4182 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4185 /* Release the rule from busy state before removal */
4186 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4187 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4191 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4193 mlx4_err(dev, "Fail to remove flow steering resources\n");
4197 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4198 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4201 atomic_dec(&rqp->ref_count);
4203 put_res(dev, slave, rrule->qpn, RES_QP);
4208 BUSY_MAX_RETRIES = 10
4211 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4212 struct mlx4_vhcr *vhcr,
4213 struct mlx4_cmd_mailbox *inbox,
4214 struct mlx4_cmd_mailbox *outbox,
4215 struct mlx4_cmd_info *cmd)
4218 int index = vhcr->in_modifier & 0xffff;
4220 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4224 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4225 put_res(dev, slave, index, RES_COUNTER);
4229 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4231 struct res_gid *rgid;
4232 struct res_gid *tmp;
4233 struct mlx4_qp qp; /* dummy for calling attach/detach */
4235 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4236 switch (dev->caps.steering_mode) {
4237 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4238 mlx4_flow_detach(dev, rgid->reg_id);
4240 case MLX4_STEERING_MODE_B0:
4241 qp.qpn = rqp->local_qpn;
4242 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4243 rgid->prot, rgid->steer);
4246 list_del(&rgid->list);
4251 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4252 enum mlx4_resource type, int print)
4254 struct mlx4_priv *priv = mlx4_priv(dev);
4255 struct mlx4_resource_tracker *tracker =
4256 &priv->mfunc.master.res_tracker;
4257 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4258 struct res_common *r;
4259 struct res_common *tmp;
4263 spin_lock_irq(mlx4_tlock(dev));
4264 list_for_each_entry_safe(r, tmp, rlist, list) {
4265 if (r->owner == slave) {
4267 if (r->state == RES_ANY_BUSY) {
4270 "%s id 0x%llx is busy\n",
4275 r->from_state = r->state;
4276 r->state = RES_ANY_BUSY;
4282 spin_unlock_irq(mlx4_tlock(dev));
4287 static int move_all_busy(struct mlx4_dev *dev, int slave,
4288 enum mlx4_resource type)
4290 unsigned long begin;
4295 busy = _move_all_busy(dev, slave, type, 0);
4296 if (time_after(jiffies, begin + 5 * HZ))
4303 busy = _move_all_busy(dev, slave, type, 1);
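/* The loop above polls quietly for up to 5 seconds (5 * HZ); only the
 * final pass runs with print enabled so still-busy resources are
 * reported once.
 */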
4307 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4309 struct mlx4_priv *priv = mlx4_priv(dev);
4310 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4311 struct list_head *qp_list =
4312 &tracker->slave_list[slave].res_list[RES_QP];
4320 err = move_all_busy(dev, slave, RES_QP);
4322 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4325 spin_lock_irq(mlx4_tlock(dev));
4326 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4327 spin_unlock_irq(mlx4_tlock(dev));
4328 if (qp->com.owner == slave) {
4329 qpn = qp->com.res_id;
4330 detach_qp(dev, slave, qp);
4331 state = qp->com.from_state;
4332 while (state != 0) {
4334 case RES_QP_RESERVED:
4335 spin_lock_irq(mlx4_tlock(dev));
4336 rb_erase(&qp->com.node,
4337 &tracker->res_tree[RES_QP]);
4338 list_del(&qp->com.list);
4339 spin_unlock_irq(mlx4_tlock(dev));
4340 if (!valid_reserved(dev, slave, qpn)) {
4341 __mlx4_qp_release_range(dev, qpn, 1);
4342 mlx4_release_resource(dev, slave,
4349 if (!valid_reserved(dev, slave, qpn))
4350 __mlx4_qp_free_icm(dev, qpn);
4351 state = RES_QP_RESERVED;
4355 err = mlx4_cmd(dev, in_param,
4358 MLX4_CMD_TIME_CLASS_A,
4361 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4362 slave, qp->local_qpn);
4363 atomic_dec(&qp->rcq->ref_count);
4364 atomic_dec(&qp->scq->ref_count);
4365 atomic_dec(&qp->mtt->ref_count);
4367 atomic_dec(&qp->srq->ref_count);
4368 state = RES_QP_MAPPED;
4375 spin_lock_irq(mlx4_tlock(dev));
4377 spin_unlock_irq(mlx4_tlock(dev));
4380 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4382 struct mlx4_priv *priv = mlx4_priv(dev);
4383 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4384 struct list_head *srq_list =
4385 &tracker->slave_list[slave].res_list[RES_SRQ];
4386 struct res_srq *srq;
4387 struct res_srq *tmp;
4394 err = move_all_busy(dev, slave, RES_SRQ);
4396 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4399 spin_lock_irq(mlx4_tlock(dev));
4400 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4401 spin_unlock_irq(mlx4_tlock(dev));
4402 if (srq->com.owner == slave) {
4403 srqn = srq->com.res_id;
4404 state = srq->com.from_state;
4405 while (state != 0) {
4407 case RES_SRQ_ALLOCATED:
4408 __mlx4_srq_free_icm(dev, srqn);
4409 spin_lock_irq(mlx4_tlock(dev));
4410 rb_erase(&srq->com.node,
4411 &tracker->res_tree[RES_SRQ]);
4412 list_del(&srq->com.list);
4413 spin_unlock_irq(mlx4_tlock(dev));
4414 mlx4_release_resource(dev, slave,
4422 err = mlx4_cmd(dev, in_param, srqn, 1,
4424 MLX4_CMD_TIME_CLASS_A,
4427 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4430 atomic_dec(&srq->mtt->ref_count);
4432 atomic_dec(&srq->cq->ref_count);
4433 state = RES_SRQ_ALLOCATED;
4441 spin_lock_irq(mlx4_tlock(dev));
4443 spin_unlock_irq(mlx4_tlock(dev));
4446 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4448 struct mlx4_priv *priv = mlx4_priv(dev);
4449 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4450 struct list_head *cq_list =
4451 &tracker->slave_list[slave].res_list[RES_CQ];
4460 err = move_all_busy(dev, slave, RES_CQ);
4462 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4465 spin_lock_irq(mlx4_tlock(dev));
4466 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4467 spin_unlock_irq(mlx4_tlock(dev));
4468 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4469 cqn = cq->com.res_id;
4470 state = cq->com.from_state;
4471 while (state != 0) {
4473 case RES_CQ_ALLOCATED:
4474 __mlx4_cq_free_icm(dev, cqn);
4475 spin_lock_irq(mlx4_tlock(dev));
4476 rb_erase(&cq->com.node,
4477 &tracker->res_tree[RES_CQ]);
4478 list_del(&cq->com.list);
4479 spin_unlock_irq(mlx4_tlock(dev));
4480 mlx4_release_resource(dev, slave,
4488 err = mlx4_cmd(dev, in_param, cqn, 1,
4490 MLX4_CMD_TIME_CLASS_A,
4493 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4495 atomic_dec(&cq->mtt->ref_count);
4496 state = RES_CQ_ALLOCATED;
4504 spin_lock_irq(mlx4_tlock(dev));
4506 spin_unlock_irq(mlx4_tlock(dev));
4509 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4511 struct mlx4_priv *priv = mlx4_priv(dev);
4512 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4513 struct list_head *mpt_list =
4514 &tracker->slave_list[slave].res_list[RES_MPT];
4515 struct res_mpt *mpt;
4516 struct res_mpt *tmp;
4523 err = move_all_busy(dev, slave, RES_MPT);
4525 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4528 spin_lock_irq(mlx4_tlock(dev));
4529 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4530 spin_unlock_irq(mlx4_tlock(dev));
4531 if (mpt->com.owner == slave) {
4532 mptn = mpt->com.res_id;
4533 state = mpt->com.from_state;
4534 while (state != 0) {
4536 case RES_MPT_RESERVED:
4537 __mlx4_mpt_release(dev, mpt->key);
4538 spin_lock_irq(mlx4_tlock(dev));
4539 rb_erase(&mpt->com.node,
4540 &tracker->res_tree[RES_MPT]);
4541 list_del(&mpt->com.list);
4542 spin_unlock_irq(mlx4_tlock(dev));
4543 mlx4_release_resource(dev, slave,
4549 case RES_MPT_MAPPED:
4550 __mlx4_mpt_free_icm(dev, mpt->key);
4551 state = RES_MPT_RESERVED;
4556 err = mlx4_cmd(dev, in_param, mptn, 0,
4558 MLX4_CMD_TIME_CLASS_A,
4561 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4564 atomic_dec(&mpt->mtt->ref_count);
4565 state = RES_MPT_MAPPED;
4572 spin_lock_irq(mlx4_tlock(dev));
4574 spin_unlock_irq(mlx4_tlock(dev));
4577 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4579 struct mlx4_priv *priv = mlx4_priv(dev);
4580 struct mlx4_resource_tracker *tracker =
4581 &priv->mfunc.master.res_tracker;
4582 struct list_head *mtt_list =
4583 &tracker->slave_list[slave].res_list[RES_MTT];
4584 struct res_mtt *mtt;
4585 struct res_mtt *tmp;
4591 err = move_all_busy(dev, slave, RES_MTT);
4593 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4596 spin_lock_irq(mlx4_tlock(dev));
4597 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4598 spin_unlock_irq(mlx4_tlock(dev));
4599 if (mtt->com.owner == slave) {
4600 base = mtt->com.res_id;
4601 state = mtt->com.from_state;
4602 while (state != 0) {
4604 case RES_MTT_ALLOCATED:
4605 __mlx4_free_mtt_range(dev, base,
4607 spin_lock_irq(mlx4_tlock(dev));
4608 rb_erase(&mtt->com.node,
4609 &tracker->res_tree[RES_MTT]);
4610 list_del(&mtt->com.list);
4611 spin_unlock_irq(mlx4_tlock(dev));
4612 mlx4_release_resource(dev, slave, RES_MTT,
4613 1 << mtt->order, 0);
4623 spin_lock_irq(mlx4_tlock(dev));
4625 spin_unlock_irq(mlx4_tlock(dev));
4628 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4630 struct mlx4_priv *priv = mlx4_priv(dev);
4631 struct mlx4_resource_tracker *tracker =
4632 &priv->mfunc.master.res_tracker;
4633 struct list_head *fs_rule_list =
4634 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4635 struct res_fs_rule *fs_rule;
4636 struct res_fs_rule *tmp;
4641 err = move_all_busy(dev, slave, RES_FS_RULE);
4643 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4646 spin_lock_irq(mlx4_tlock(dev));
4647 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4648 spin_unlock_irq(mlx4_tlock(dev));
4649 if (fs_rule->com.owner == slave) {
4650 base = fs_rule->com.res_id;
4651 state = fs_rule->com.from_state;
4652 while (state != 0) {
4654 case RES_FS_RULE_ALLOCATED:
4656 err = mlx4_cmd(dev, base, 0, 0,
4657 MLX4_QP_FLOW_STEERING_DETACH,
4658 MLX4_CMD_TIME_CLASS_A,
4661 spin_lock_irq(mlx4_tlock(dev));
4662 rb_erase(&fs_rule->com.node,
4663 &tracker->res_tree[RES_FS_RULE]);
4664 list_del(&fs_rule->com.list);
4665 spin_unlock_irq(mlx4_tlock(dev));
4675 spin_lock_irq(mlx4_tlock(dev));
4677 spin_unlock_irq(mlx4_tlock(dev));
4680 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4682 struct mlx4_priv *priv = mlx4_priv(dev);
4683 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4684 struct list_head *eq_list =
4685 &tracker->slave_list[slave].res_list[RES_EQ];
4693 err = move_all_busy(dev, slave, RES_EQ);
4695 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4698 spin_lock_irq(mlx4_tlock(dev));
4699 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4700 spin_unlock_irq(mlx4_tlock(dev));
4701 if (eq->com.owner == slave) {
4702 eqn = eq->com.res_id;
4703 state = eq->com.from_state;
4704 while (state != 0) {
4706 case RES_EQ_RESERVED:
4707 spin_lock_irq(mlx4_tlock(dev));
4708 rb_erase(&eq->com.node,
4709 &tracker->res_tree[RES_EQ]);
4710 list_del(&eq->com.list);
4711 spin_unlock_irq(mlx4_tlock(dev));
4717 err = mlx4_cmd(dev, slave, eqn & 0x3ff,
4718 1, MLX4_CMD_HW2SW_EQ,
4719 MLX4_CMD_TIME_CLASS_A,
4722 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4723 slave, eqn & 0x3ff);
4724 atomic_dec(&eq->mtt->ref_count);
4725 state = RES_EQ_RESERVED;
4733 spin_lock_irq(mlx4_tlock(dev));
4735 spin_unlock_irq(mlx4_tlock(dev));
4738 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4740 struct mlx4_priv *priv = mlx4_priv(dev);
4741 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4742 struct list_head *counter_list =
4743 &tracker->slave_list[slave].res_list[RES_COUNTER];
4744 struct res_counter *counter;
4745 struct res_counter *tmp;
4749 err = move_all_busy(dev, slave, RES_COUNTER);
4751 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4754 spin_lock_irq(mlx4_tlock(dev));
4755 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4756 if (counter->com.owner == slave) {
4757 index = counter->com.res_id;
4758 rb_erase(&counter->com.node,
4759 &tracker->res_tree[RES_COUNTER]);
4760 list_del(&counter->com.list);
4762 __mlx4_counter_free(dev, index);
4763 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4766 spin_unlock_irq(mlx4_tlock(dev));
4769 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4771 struct mlx4_priv *priv = mlx4_priv(dev);
4772 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4773 struct list_head *xrcdn_list =
4774 &tracker->slave_list[slave].res_list[RES_XRCD];
4775 struct res_xrcdn *xrcd;
4776 struct res_xrcdn *tmp;
4780 err = move_all_busy(dev, slave, RES_XRCD);
4782 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4785 spin_lock_irq(mlx4_tlock(dev));
4786 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4787 if (xrcd->com.owner == slave) {
4788 xrcdn = xrcd->com.res_id;
4789 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4790 list_del(&xrcd->com.list);
4792 __mlx4_xrcd_free(dev, xrcdn);
4795 spin_unlock_irq(mlx4_tlock(dev));
4798 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4800 struct mlx4_priv *priv = mlx4_priv(dev);
4801 mlx4_reset_roce_gids(dev, slave);
4802 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4803 rem_slave_vlans(dev, slave);
4804 rem_slave_macs(dev, slave);
4805 rem_slave_fs_rule(dev, slave);
4806 rem_slave_qps(dev, slave);
4807 rem_slave_srqs(dev, slave);
4808 rem_slave_cqs(dev, slave);
4809 rem_slave_mrs(dev, slave);
4810 rem_slave_eqs(dev, slave);
4811 rem_slave_mtts(dev, slave);
4812 rem_slave_counters(dev, slave);
4813 rem_slave_xrcdns(dev, slave);
4814 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
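/* Teardown order matters: fs rules and QPs are removed first because
 * they hold ref_counts on CQs, SRQs and MTTs; the MTT ranges go near
 * the end, once every referencing object (QP, SRQ, CQ, MR, EQ) is gone.
 */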
4817 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4819 struct mlx4_vf_immed_vlan_work *work =
4820 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4821 struct mlx4_cmd_mailbox *mailbox;
4822 struct mlx4_update_qp_context *upd_context;
4823 struct mlx4_dev *dev = &work->priv->dev;
4824 struct mlx4_resource_tracker *tracker =
4825 &work->priv->mfunc.master.res_tracker;
4826 struct list_head *qp_list =
4827 &tracker->slave_list[work->slave].res_list[RES_QP];
4830 u64 qp_path_mask_vlan_ctrl =
4831 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
4832 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4833 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4834 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4835 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
4836 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
4838 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4839 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
4840 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
4841 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
4842 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
4843 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
4844 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4847 int port, errors = 0;
4850 if (mlx4_is_slave(dev)) {
4851 mlx4_warn(dev, "Trying to update-qp in slave %d\n",
4856 mailbox = mlx4_alloc_cmd_mailbox(dev);
4857 if (IS_ERR(mailbox))
4859 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4860 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4861 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4862 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4863 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4864 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4865 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4866 else if (!work->vlan_id)
4867 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4868 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4870 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4871 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4872 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
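/* Policy summary: link-disable blocks all traffic; vlan_id 0 blocks
 * tagged traffic in both directions; any other VST vlan blocks tagged
 * TX plus untagged/priority-tagged RX, so only the enforced VLAN flows.
 */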
4874 upd_context = mailbox->buf;
4875 upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
4877 spin_lock_irq(mlx4_tlock(dev));
4878 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4879 spin_unlock_irq(mlx4_tlock(dev));
4880 if (qp->com.owner == work->slave) {
4881 if (qp->com.from_state != RES_QP_HW ||
4882 !qp->sched_queue || /* no INIT2RTR trans yet */
4883 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4884 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4885 spin_lock_irq(mlx4_tlock(dev));
4888 port = (qp->sched_queue >> 6 & 1) + 1;
4889 if (port != work->port) {
4890 spin_lock_irq(mlx4_tlock(dev));
4893 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
4894 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
4896 upd_context->primary_addr_path_mask =
4897 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
4898 if (work->vlan_id == MLX4_VGT) {
4899 upd_context->qp_context.param3 = qp->param3;
4900 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
4901 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
4902 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
4903 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
4904 upd_context->qp_context.pri_path.feup = qp->feup;
4905 upd_context->qp_context.pri_path.sched_queue =
4908 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
4909 upd_context->qp_context.pri_path.vlan_control = vlan_control;
4910 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4911 upd_context->qp_context.pri_path.fvl_rx =
4912 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
4913 upd_context->qp_context.pri_path.fl =
4914 qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
4915 upd_context->qp_context.pri_path.feup =
4916 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
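/* sched_queue & 0xC7 below clears bits 3-5, the QoS/priority field,
 * which is then refilled from work->qos (3 bits shifted to bit 3).
 */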
4917 upd_context->qp_context.pri_path.sched_queue =
4918 qp->sched_queue & 0xC7;
4919 upd_context->qp_context.pri_path.sched_queue |=
4920 ((work->qos & 0x7) << 3);
4921 upd_context->qp_mask |=
4923 MLX4_UPD_QP_MASK_QOS_VPP);
4924 upd_context->qp_context.qos_vport =
4928 err = mlx4_cmd(dev, mailbox->dma,
4929 qp->local_qpn & 0xffffff,
4930 0, MLX4_CMD_UPDATE_QP,
4931 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4933 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4934 work->slave, port, qp->local_qpn, err);
4938 spin_lock_irq(mlx4_tlock(dev));
4940 spin_unlock_irq(mlx4_tlock(dev));
4941 mlx4_free_cmd_mailbox(dev, mailbox);
4944 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4945 errors, work->slave, work->port);
4947 /* unregister previous vlan_id if needed and we had no errors
4948 * while updating the QPs
4950 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4951 NO_INDX != work->orig_vlan_ix)
4952 __mlx4_unregister_vlan(&work->priv->dev, work->port,
4953 work->orig_vlan_id);