/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 *   All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#define MLX4_MAC_VALID		(1ull << 63)
struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
};

enum {
	RES_ANY_BUSY = 1,
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
	u64			reg_id;
};
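
/* Every tracked object embeds a res_common.  An object is indexed in the
 * per-type red-black tree (res_tracker.res_tree[type]) by its res_id, and
 * is also linked on its owning function's per-slave list so that all of a
 * function's resources can be reclaimed in bulk when it is torn down.
 */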
enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *rcq;
	struct res_cq	       *scq;
	struct res_srq	       *srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
	atomic_t		ref_count;
	u32			qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8			sched_queue;
	__be32			param3;
	u8			vlan_control;
	u8			fvl_rx;
	u8			pri_path_fl;
	u8			vlan_index;
	u8			feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt	       *mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt	       *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
	int			qpn;
};
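
/* Each resource type moves through its small state machine above.  A
 * transition happens in two phases: *_res_start_move_to() validates the
 * move for the owner and parks the object in the shared BUSY state
 * (recording from_state/to_state), and res_end_move()/res_abort_move()
 * later commit or roll back the transition under the tracker lock.
 */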
static int mlx4_is_eth(struct mlx4_dev *dev, int port)
{
	return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
}

static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}

enum qp_transition {
	QP_TRANS_INIT2RTR,
	QP_TRANS_RTR2RTS,
	QP_TRANS_RTS2RTS,
	QP_TRANS_SQERR2RTS,
	QP_TRANS_SQD2SQD,
	QP_TRANS_SQD2RTS
};

/* For Debug uses */
static const char *resource_str(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}
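
/* mlx4_grant_resource() implements the quota accounting used by all the
 * allocation wrappers below.  Each function (PF or VF) has a hard quota
 * and a guaranteed minimum per resource type; allocations within the
 * guarantee are charged to the reserved pool, while anything beyond it
 * must fit in the shared free pool without eating into other functions'
 * guarantees.  MAC and VLAN resources are accounted per port (port > 0),
 * everything else device-wide (port == 0).
 */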
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);

static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;
	int from_rsvd;

	if (slave > dev->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave]) {
		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
			  slave, port, resource_str(res_type), count,
			  allocated, res_alloc->quota[slave]);
		goto out;
	}

	if (allocated + count <= guaranteed) {
		err = 0;
		from_rsvd = count;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		from_rsvd = count - from_free;

		if (free - from_free >= reserved)
			err = 0;
		else
			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
				  slave, port, resource_str(res_type), free,
				  from_free, reserved);
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
			res_alloc->res_reserved -= from_rsvd;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int allocated, guaranteed, from_rsvd;

	if (slave > dev->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);

	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated - count >= guaranteed) {
		from_rsvd = 0;
	} else {
		/* portion may need to be returned to reserved area */
		if (allocated - guaranteed > 0)
			from_rsvd = count - (allocated - guaranteed);
		else
			from_rsvd = count;
	}

	if (port > 0) {
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
		res_alloc->res_reserved += from_rsvd;
	}

	spin_unlock(&res_alloc->alloc_lock);
}

static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}
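
/* For example, with dev->num_vfs == 2 (three functions including the PF)
 * and num_instances == 6000, each function is guaranteed
 * 6000 / (2 * 3) = 1000 instances and receives a quota of
 * 6000 / 2 + 1000 = 4000.  The free pool starts at 6000, of which
 * 3 * 1000 = 3000 ends up reserved to back the guarantees.
 */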
void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];

		res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->num_vfs + 1) * sizeof(int),
						       GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->num_vfs + 1; t++) {
			struct mlx4_active_ports actv_ports =
				mlx4_get_active_ports(dev, t);

			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					int max_vfs_pport = 0;

					/* Calculate the max vfs per port for
					 * the master designated function
					 */
					for (j = 0; j < dev->caps.num_ports;
					     j++) {
						struct mlx4_slaves_pport slaves_pport =
							mlx4_phys_to_slaves_pport(dev, j + 1);
						unsigned current_slaves =
							bitmap_weight(slaves_pport.slaves,
								      dev->caps.num_ports) - 1;

						if (max_vfs_pport < current_slaves)
							max_vfs_pport =
								current_slaves;
					}
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM -
						2 * max_vfs_pport;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				res_alloc->guaranteed[t] = 0;
				if (t == mlx4_master_func_num(dev))
					res_alloc->res_free = res_alloc->quota[t];
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < dev->caps.num_ports; j++)
					if (test_bit(j, actv_ports.ports))
						res_alloc->res_port_rsvd[j] +=
							res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mlx4_reset_roce_gids(dev, i);
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index =
				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context	*qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	int port;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd */
		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);

		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
		} else { /* priority tagged */
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
	return 0;
}
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}

static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;

	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		pr_err("implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= 0; --i)
		rb_erase(&res_arr[i]->node, root);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
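
/* A minimal usage sketch (not part of the upstream driver): the
 * allocation handlers further below all follow the same grant ->
 * allocate -> track pattern, unwinding in reverse order on failure.
 * The function name is hypothetical; the calls are the helpers defined
 * above plus the CQ ICM allocators declared in mlx4.h.
 */
static int __maybe_unused demo_track_one_cq(struct mlx4_dev *dev, int slave)
{
	int cqn;
	int err;

	err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);	/* quota */
	if (err)
		return err;

	err = __mlx4_cq_alloc_icm(dev, &cqn);			/* hw resource */
	if (err) {
		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
		return err;
	}

	err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);	/* tracker entry */
	if (err) {
		__mlx4_cq_free_icm(dev, cqn);
		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
	}
	return err;
}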
static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		pr_devel("%s-%d: state %s, ref_count %d\n",
			 __func__, __LINE__,
			 mtt_states_str(res->com.state),
			 atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
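
/* The *_res_start_move_to() helpers below implement the first half of a
 * guarded state transition: validate the requested move for the slave
 * that owns the resource, then hold the object in its BUSY state with
 * from_state/to_state recorded.  The transition is committed by
 * res_end_move() once the firmware command succeeds, or rolled back by
 * res_abort_move() on failure.
 */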
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_CQ_ALLOCATED) {
		if (r->com.state != RES_CQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
		else
			err = 0;
	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
		err = -EINVAL;
	} else {
		err = 0;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_CQ_BUSY;
		if (cq)
			*cq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_SRQ_ALLOCATED) {
		if (r->com.state != RES_SRQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
		err = -EINVAL;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_SRQ_BUSY;
		if (srq)
			*srq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
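
/* QP allocation is a two-step protocol mirroring the firmware command
 * flow: RES_OP_RESERVE grabs a range of QP numbers, and RES_OP_MAP_ICM
 * later maps ICM memory for one QP in that range.  QPs reserved by the
 * firmware itself never have ICM mapped on behalf of a slave.
 */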
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
		if (err)
			return err;

		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			return err;
		}

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);

	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
	if (err)
		return err;

	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		return -ENOMEM;
	}

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	} else {
		set_param_l(out_param, base);
	}

	return err;
}
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
		if (err)
			break;

		index = __mlx4_mpt_reserve(dev);
		if (index == -1) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			break;
		}
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
		if (err)
			break;

		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
		if (err)
			break;

		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
				     u8 smac_index, u64 *mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->smac_index == smac_index && res->port == (u8) port) {
			*mac = res->mac;
			return 0;
		}
	}
	return -ENOENT;
}

static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			/* mac found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
		return -ENOMEM;
	}
	res->mac = mac;
	res->port = (u8) port;
	res->smac_index = smac_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
				kfree(res);
			}
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		/* dereference the mac the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_mac(dev, res->port, res->mac);
		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
		kfree(res);
	}
}
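
/* __mlx4_register_mac() returns the allocated MAC table index (>= 0) on
 * success; the wrapper below both reports that index back to the slave
 * through out_param and records it as the smac_index of the slave's
 * reference-counted copy of the MAC.
 */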
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int in_port)
{
	int err = -EINVAL;
	int port;
	u64 mac;
	u8 smac_index;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = !in_port ? get_param_l(out_param) : in_port;
	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		smac_index = err;
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}

static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
			     int port, int vlan_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			/* vlan found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
		return -ENOMEM;
	}
	res->vlan = vlan;
	res->port = (u8) port;
	res->vlan_index = vlan_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_VLAN]);
	return 0;
}

static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
				int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_VLAN,
						      1, port);
				kfree(res);
			}
			break;
		}
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		list_del(&res->list);
		/* dereference the vlan the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_vlan(dev, res->port, res->vlan);
		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
		kfree(res);
	}
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param, int in_port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err;
	u16 vlan;
	int vlan_index;
	int port;

	port = !in_port ? get_param_l(out_param) : in_port;

	if (!port || op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
		slave_state[slave].old_vlan_api = true;
		return 0;
	}

	vlan = (u16) in_param;

	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (!err) {
		set_param_l(out_param, (u32) vlan_index);
		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
		if (err)
			__mlx4_unregister_vlan(dev, port, vlan);
	}
	return err;
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
	if (err)
		return err;

	err = __mlx4_counter_alloc(dev, &index);
	if (err) {
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		return err;
	}

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err) {
		__mlx4_counter_free(dev, index);
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
	} else {
		set_param_l(out_param, index);
	}

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}

int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
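
/* The ALLOC_RES/FREE_RES commands multiplex all resource types: the low
 * byte of vhcr->in_modifier selects the resource type, the next byte
 * optionally carries a port number (MAC/VLAN), and vhcr->op_modifier
 * selects the RES_OP_* operation.
 */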
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_QP, count, 0);
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	}
	return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param, int in_port)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = !in_port ? get_param_l(out_param) : in_port;
		port = mlx4_slave_convert_port(
				dev, slave, port);

		if (port < 0)
			return -EINVAL;
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err = 0;

	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		if (slave_state[slave].old_vlan_api)
			return 0;
		if (!port)
			return -EINVAL;
		vlan_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_vlan(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);
	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

	return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}

int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param,
				   (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_pages;
	int total_mem;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}
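
/* For example, log_sq_size = 6 and log_sq_stride = 4 give an SQ of
 * 1 << (6 + 4 + 4) = 16KB; with no RQ (SRQ/RSS/XRC), page_offset = 0 and
 * 4KB pages (page_shift = 12), total_pages =
 * roundup_pow_of_two(16384 >> 12) = 4 MTT entries.
 */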
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}

int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {
		err = -EPERM;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey = 0;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;
	qp->sched_queue = 0;
	qp->param3 = 0;
	qp->vlan_control = 0;
	qp->fvl_rx = 0;
	qp->pri_path_fl = 0;
	qp->vlan_index = 0;
	qp->feup = 0;
	qp->qpc_flags = be32_to_cpu(qpc->flags);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
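
/* While a QP is in hardware ownership, the reference counts taken above
 * on its MTT, receive/send CQs and SRQ keep those objects from being
 * removed out from under it; they are dropped again when the QP leaves
 * the HW state (in teardown paths outside this excerpt).
 */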
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}

int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
2863 static int verify_qp_parameters(struct mlx4_dev *dev,
2864 struct mlx4_vhcr *vhcr,
2865 struct mlx4_cmd_mailbox *inbox,
2866 enum qp_transition transition, u8 slave)
2870 struct mlx4_qp_context *qp_ctx;
2871 enum mlx4_qp_optpar optpar;
2875 qp_ctx = inbox->buf + 8;
2876 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2877 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
2881 case MLX4_QP_ST_XRC:
2883 switch (transition) {
2884 case QP_TRANS_INIT2RTR:
2885 case QP_TRANS_RTR2RTS:
2886 case QP_TRANS_RTS2RTS:
2887 case QP_TRANS_SQD2SQD:
2888 case QP_TRANS_SQD2RTS:
2889 if (slave != mlx4_master_func_num(dev))
2890 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2891 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2892 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2893 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2896 if (qp_ctx->pri_path.mgid_index >= num_gids)
2899 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2900 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2901 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2902 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2905 if (qp_ctx->alt_path.mgid_index >= num_gids)
2914 case MLX4_QP_ST_MLX:
2915 qpn = vhcr->in_modifier & 0x7fffff;
2916 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2917 if (transition == QP_TRANS_INIT2RTR &&
2918 slave != mlx4_master_func_num(dev) &&
2919 mlx4_is_qp_reserved(dev, qpn) &&
2920 !mlx4_vf_smi_enabled(dev, slave, port)) {
2921 /* only enabled VFs may create MLX proxy QPs */
2922 mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
2923 __func__, slave, port);
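/* WRITE_MTT for a slave is emulated in software: the inbox holds the
 * start index in page_list[0] and the page addresses from entry 2 on.
 * Each address is converted to host endianness with the low "present"
 * bit cleared before __mlx4_write_mtt() re-applies it.
 */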
2935 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2936 struct mlx4_vhcr *vhcr,
2937 struct mlx4_cmd_mailbox *inbox,
2938 struct mlx4_cmd_mailbox *outbox,
2939 struct mlx4_cmd_info *cmd)
2941 struct mlx4_mtt mtt;
2942 __be64 *page_list = inbox->buf;
2943 u64 *pg_list = (u64 *)page_list;
2945 struct res_mtt *rmtt = NULL;
2946 int start = be64_to_cpu(page_list[0]);
2947 int npages = vhcr->in_modifier;
2950 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2954 /* Call the SW implementation of write_mtt:
2955 * - Prepare a dummy mtt struct
2956 * - Translate inbox contents to simple addresses in host endianness */
2957 mtt.offset = 0; /* TBD: offset handling is broken, but it is left
2958 as 0 since we don't actually use it here */
2961 for (i = 0; i < npages; ++i)
2962 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2964 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2965 ((u64 *)page_list + 2));
2968 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2973 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2974 struct mlx4_vhcr *vhcr,
2975 struct mlx4_cmd_mailbox *inbox,
2976 struct mlx4_cmd_mailbox *outbox,
2977 struct mlx4_cmd_info *cmd)
2979 int eqn = vhcr->in_modifier;
2980 int res_id = eqn | (slave << 8);
2984 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2988 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2992 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2996 atomic_dec(&eq->mtt->ref_count);
2997 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2998 res_end_move(dev, slave, RES_EQ, res_id);
2999 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3004 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3006 res_abort_move(dev, slave, RES_EQ, res_id);
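/* Inject an asynchronous event into a slave's event EQ.  The event is
 * generated only if the slave registered an EQ for this event type and
 * only while that EQ is in HW ownership.  Just the first 28 bytes of
 * the 32-byte EQE are taken from the caller; the last dword, which
 * carries the ownership bit, is left for the EQE write logic to fill.
 */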
3011 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3013 struct mlx4_priv *priv = mlx4_priv(dev);
3014 struct mlx4_slave_event_eq_info *event_eq;
3015 struct mlx4_cmd_mailbox *mailbox;
3016 u32 in_modifier = 0;
3021 if (!priv->mfunc.master.slave_state)
3024 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3026 /* Create the event only if the slave is registered */
3027 if (event_eq->eqn < 0)
3030 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3031 res_id = (slave << 8) | event_eq->eqn;
3032 err = get_res(dev, slave, res_id, RES_EQ, &req);
3036 if (req->com.from_state != RES_EQ_HW) {
3041 mailbox = mlx4_alloc_cmd_mailbox(dev);
3042 if (IS_ERR(mailbox)) {
3043 err = PTR_ERR(mailbox);
3047 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3049 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3052 memcpy(mailbox->buf, (u8 *) eqe, 28);
3054 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
3056 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3057 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3060 put_res(dev, slave, res_id, RES_EQ);
3061 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3062 mlx4_free_cmd_mailbox(dev, mailbox);
3066 put_res(dev, slave, res_id, RES_EQ);
3069 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3073 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3074 struct mlx4_vhcr *vhcr,
3075 struct mlx4_cmd_mailbox *inbox,
3076 struct mlx4_cmd_mailbox *outbox,
3077 struct mlx4_cmd_info *cmd)
3079 int eqn = vhcr->in_modifier;
3080 int res_id = eqn | (slave << 8);
3084 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3088 if (eq->com.from_state != RES_EQ_HW) {
3093 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3096 put_res(dev, slave, res_id, RES_EQ);
3100 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3101 struct mlx4_vhcr *vhcr,
3102 struct mlx4_cmd_mailbox *inbox,
3103 struct mlx4_cmd_mailbox *outbox,
3104 struct mlx4_cmd_info *cmd)
3107 int cqn = vhcr->in_modifier;
3108 struct mlx4_cq_context *cqc = inbox->buf;
3109 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3111 struct res_mtt *mtt;
3113 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3116 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3119 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3122 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3125 atomic_inc(&mtt->ref_count);
3127 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3128 res_end_move(dev, slave, RES_CQ, cqn);
3132 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3134 res_abort_move(dev, slave, RES_CQ, cqn);
3138 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3139 struct mlx4_vhcr *vhcr,
3140 struct mlx4_cmd_mailbox *inbox,
3141 struct mlx4_cmd_mailbox *outbox,
3142 struct mlx4_cmd_info *cmd)
3145 int cqn = vhcr->in_modifier;
3148 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3151 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3154 atomic_dec(&cq->mtt->ref_count);
3155 res_end_move(dev, slave, RES_CQ, cqn);
3159 res_abort_move(dev, slave, RES_CQ, cqn);
3163 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3164 struct mlx4_vhcr *vhcr,
3165 struct mlx4_cmd_mailbox *inbox,
3166 struct mlx4_cmd_mailbox *outbox,
3167 struct mlx4_cmd_info *cmd)
3169 int cqn = vhcr->in_modifier;
3173 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3177 if (cq->com.from_state != RES_CQ_HW)
3180 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3182 put_res(dev, slave, cqn, RES_CQ);
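/* MODIFY_CQ with op_modifier 0 is a CQ resize: check that the CQ's
 * current MTT matches what the tracker holds, validate the new MTT
 * range, and on success move the CQ's reference from the old range to
 * the new one.
 */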
3187 static int handle_resize(struct mlx4_dev *dev, int slave,
3188 struct mlx4_vhcr *vhcr,
3189 struct mlx4_cmd_mailbox *inbox,
3190 struct mlx4_cmd_mailbox *outbox,
3191 struct mlx4_cmd_info *cmd,
3195 struct res_mtt *orig_mtt;
3196 struct res_mtt *mtt;
3197 struct mlx4_cq_context *cqc = inbox->buf;
3198 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3200 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3204 if (orig_mtt != cq->mtt) {
3209 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3213 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3216 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3219 atomic_dec(&orig_mtt->ref_count);
3220 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3221 atomic_inc(&mtt->ref_count);
3223 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3227 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3229 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3235 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3236 struct mlx4_vhcr *vhcr,
3237 struct mlx4_cmd_mailbox *inbox,
3238 struct mlx4_cmd_mailbox *outbox,
3239 struct mlx4_cmd_info *cmd)
3241 int cqn = vhcr->in_modifier;
3245 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3249 if (cq->com.from_state != RES_CQ_HW)
3252 if (vhcr->op_modifier == 0) {
3253 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3257 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3259 put_res(dev, slave, cqn, RES_CQ);
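/* An SRQ occupies 2^(log_srq_size + log_rq_stride + 4) bytes: the WQE
 * stride is encoded in units of 16 bytes (hence the "+ 4").  Dividing
 * by the context's page size yields the number of MTT entries needed.
 */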
3264 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3266 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3267 int log_rq_stride = srqc->logstride & 7;
3268 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3270 if (log_srq_size + log_rq_stride + 4 < page_shift)
3273 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3276 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3277 struct mlx4_vhcr *vhcr,
3278 struct mlx4_cmd_mailbox *inbox,
3279 struct mlx4_cmd_mailbox *outbox,
3280 struct mlx4_cmd_info *cmd)
3283 int srqn = vhcr->in_modifier;
3284 struct res_mtt *mtt;
3285 struct res_srq *srq;
3286 struct mlx4_srq_context *srqc = inbox->buf;
3287 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3289 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3292 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3295 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3298 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3303 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3307 atomic_inc(&mtt->ref_count);
3309 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3310 res_end_move(dev, slave, RES_SRQ, srqn);
3314 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3316 res_abort_move(dev, slave, RES_SRQ, srqn);
3321 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3322 struct mlx4_vhcr *vhcr,
3323 struct mlx4_cmd_mailbox *inbox,
3324 struct mlx4_cmd_mailbox *outbox,
3325 struct mlx4_cmd_info *cmd)
3328 int srqn = vhcr->in_modifier;
3329 struct res_srq *srq;
3331 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3334 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3337 atomic_dec(&srq->mtt->ref_count);
3339 atomic_dec(&srq->cq->ref_count);
3340 res_end_move(dev, slave, RES_SRQ, srqn);
3345 res_abort_move(dev, slave, RES_SRQ, srqn);
3350 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3351 struct mlx4_vhcr *vhcr,
3352 struct mlx4_cmd_mailbox *inbox,
3353 struct mlx4_cmd_mailbox *outbox,
3354 struct mlx4_cmd_info *cmd)
3357 int srqn = vhcr->in_modifier;
3358 struct res_srq *srq;
3360 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3363 if (srq->com.from_state != RES_SRQ_HW) {
3367 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3369 put_res(dev, slave, srqn, RES_SRQ);
3373 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3374 struct mlx4_vhcr *vhcr,
3375 struct mlx4_cmd_mailbox *inbox,
3376 struct mlx4_cmd_mailbox *outbox,
3377 struct mlx4_cmd_info *cmd)
3380 int srqn = vhcr->in_modifier;
3381 struct res_srq *srq;
3383 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3387 if (srq->com.from_state != RES_SRQ_HW) {
3392 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3394 put_res(dev, slave, srqn, RES_SRQ);
3398 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3399 struct mlx4_vhcr *vhcr,
3400 struct mlx4_cmd_mailbox *inbox,
3401 struct mlx4_cmd_mailbox *outbox,
3402 struct mlx4_cmd_info *cmd)
3405 int qpn = vhcr->in_modifier & 0x7fffff;
3408 err = get_res(dev, slave, qpn, RES_QP, &qp);
3411 if (qp->com.from_state != RES_QP_HW) {
3416 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3418 put_res(dev, slave, qpn, RES_QP);
3422 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3423 struct mlx4_vhcr *vhcr,
3424 struct mlx4_cmd_mailbox *inbox,
3425 struct mlx4_cmd_mailbox *outbox,
3426 struct mlx4_cmd_info *cmd)
3428 struct mlx4_qp_context *context = inbox->buf + 8;
3429 adjust_proxy_tun_qkey(dev, vhcr, context);
3430 update_pkey_index(dev, slave, inbox);
3431 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
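/* Bit 6 of sched_queue selects the port.  Rewrite it for both the
 * primary and the alternate path, translating the slave's virtual port
 * into the real physical port.  For Ethernet ports the primary path is
 * rewritten even when the optpar bit is not set.
 */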
3434 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3435 struct mlx4_qp_context *qpc,
3436 struct mlx4_cmd_mailbox *inbox)
3438 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3440 int port = mlx4_slave_convert_port(
3441 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3446 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3449 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
3450 mlx4_is_eth(dev, port + 1)) {
3451 qpc->pri_path.sched_queue = pri_sched_queue;
3454 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3455 port = mlx4_slave_convert_port(
3456 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3460 qpc->alt_path.sched_queue =
3461 (qpc->alt_path.sched_queue & ~(1 << 6)) |
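/* For RoCE (Ethernet, non-MLX transport) QPs, the smac index in the QP
 * context must resolve to a MAC address registered to this slave on the
 * QP's port.
 */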
3467 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3468 struct mlx4_qp_context *qpc,
3469 struct mlx4_cmd_mailbox *inbox)
3473 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3474 u8 sched = *(u8 *)(inbox->buf + 64);
3477 port = (sched >> 6 & 1) + 1;
3478 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3479 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3480 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3486 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3487 struct mlx4_vhcr *vhcr,
3488 struct mlx4_cmd_mailbox *inbox,
3489 struct mlx4_cmd_mailbox *outbox,
3490 struct mlx4_cmd_info *cmd)
3493 struct mlx4_qp_context *qpc = inbox->buf + 8;
3494 int qpn = vhcr->in_modifier & 0x7fffff;
3496 u8 orig_sched_queue;
3497 __be32 orig_param3 = qpc->param3;
3498 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3499 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3500 u8 orig_pri_path_fl = qpc->pri_path.fl;
3501 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3502 u8 orig_feup = qpc->pri_path.feup;
3504 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3507 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3511 if (roce_verify_mac(dev, slave, qpc, inbox))
3514 update_pkey_index(dev, slave, inbox);
3515 update_gid(dev, inbox, (u8)slave);
3516 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3517 orig_sched_queue = qpc->pri_path.sched_queue;
3518 err = update_vport_qp_param(dev, inbox, slave, qpn);
3522 err = get_res(dev, slave, qpn, RES_QP, &qp);
3525 if (qp->com.from_state != RES_QP_HW) {
3530 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3532 /* if no error, save the sched queue value passed in by the VF. This is
3533 * essentially the QoS value provided by the VF, and will be useful
3534 * if we later allow dynamic changes from VST back to VGT
3537 qp->sched_queue = orig_sched_queue;
3538 qp->param3 = orig_param3;
3539 qp->vlan_control = orig_vlan_control;
3540 qp->fvl_rx = orig_fvl_rx;
3541 qp->pri_path_fl = orig_pri_path_fl;
3542 qp->vlan_index = orig_vlan_index;
3543 qp->feup = orig_feup;
3545 put_res(dev, slave, qpn, RES_QP);
3549 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3550 struct mlx4_vhcr *vhcr,
3551 struct mlx4_cmd_mailbox *inbox,
3552 struct mlx4_cmd_mailbox *outbox,
3553 struct mlx4_cmd_info *cmd)
3556 struct mlx4_qp_context *context = inbox->buf + 8;
3558 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3561 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3565 update_pkey_index(dev, slave, inbox);
3566 update_gid(dev, inbox, (u8)slave);
3567 adjust_proxy_tun_qkey(dev, vhcr, context);
3568 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3571 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3572 struct mlx4_vhcr *vhcr,
3573 struct mlx4_cmd_mailbox *inbox,
3574 struct mlx4_cmd_mailbox *outbox,
3575 struct mlx4_cmd_info *cmd)
3578 struct mlx4_qp_context *context = inbox->buf + 8;
3580 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3583 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3587 update_pkey_index(dev, slave, inbox);
3588 update_gid(dev, inbox, (u8)slave);
3589 adjust_proxy_tun_qkey(dev, vhcr, context);
3590 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3594 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3595 struct mlx4_vhcr *vhcr,
3596 struct mlx4_cmd_mailbox *inbox,
3597 struct mlx4_cmd_mailbox *outbox,
3598 struct mlx4_cmd_info *cmd)
3600 struct mlx4_qp_context *context = inbox->buf + 8;
3601 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3604 adjust_proxy_tun_qkey(dev, vhcr, context);
3605 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3608 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3609 struct mlx4_vhcr *vhcr,
3610 struct mlx4_cmd_mailbox *inbox,
3611 struct mlx4_cmd_mailbox *outbox,
3612 struct mlx4_cmd_info *cmd)
3615 struct mlx4_qp_context *context = inbox->buf + 8;
3617 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3620 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3624 adjust_proxy_tun_qkey(dev, vhcr, context);
3625 update_gid(dev, inbox, (u8)slave);
3626 update_pkey_index(dev, slave, inbox);
3627 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3630 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3631 struct mlx4_vhcr *vhcr,
3632 struct mlx4_cmd_mailbox *inbox,
3633 struct mlx4_cmd_mailbox *outbox,
3634 struct mlx4_cmd_info *cmd)
3637 struct mlx4_qp_context *context = inbox->buf + 8;
3639 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3642 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3646 adjust_proxy_tun_qkey(dev, vhcr, context);
3647 update_gid(dev, inbox, (u8)slave);
3648 update_pkey_index(dev, slave, inbox);
3649 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3652 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3653 struct mlx4_vhcr *vhcr,
3654 struct mlx4_cmd_mailbox *inbox,
3655 struct mlx4_cmd_mailbox *outbox,
3656 struct mlx4_cmd_info *cmd)
3659 int qpn = vhcr->in_modifier & 0x7fffff;
3662 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3665 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3669 atomic_dec(&qp->mtt->ref_count);
3670 atomic_dec(&qp->rcq->ref_count);
3671 atomic_dec(&qp->scq->ref_count);
3673 atomic_dec(&qp->srq->ref_count);
3674 res_end_move(dev, slave, RES_QP, qpn);
3678 res_abort_move(dev, slave, RES_QP, qpn);
3683 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3684 struct res_qp *rqp, u8 *gid)
3686 struct res_gid *res;
3688 list_for_each_entry(res, &rqp->mcg_list, list) {
3689 if (!memcmp(res->gid, gid, 16))
3695 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3696 u8 *gid, enum mlx4_protocol prot,
3697 enum mlx4_steer_type steer, u64 reg_id)
3699 struct res_gid *res;
3702 res = kzalloc(sizeof *res, GFP_KERNEL);
3706 spin_lock_irq(&rqp->mcg_spl);
3707 if (find_gid(dev, slave, rqp, gid)) {
3711 memcpy(res->gid, gid, 16);
3714 res->reg_id = reg_id;
3715 list_add_tail(&res->list, &rqp->mcg_list);
3718 spin_unlock_irq(&rqp->mcg_spl);
3723 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3724 u8 *gid, enum mlx4_protocol prot,
3725 enum mlx4_steer_type steer, u64 *reg_id)
3727 struct res_gid *res;
3730 spin_lock_irq(&rqp->mcg_spl);
3731 res = find_gid(dev, slave, rqp, gid);
3732 if (!res || res->prot != prot || res->steer != steer)
3735 *reg_id = res->reg_id;
3736 list_del(&res->list);
3740 spin_unlock_irq(&rqp->mcg_spl);
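/* Steering-mode aware attach: device-managed steering translates the
 * request into a flow rule and hands back a reg_id for the later
 * detach, while B0 steering goes through the common QP attach path.
 * For Ethernet, gid[5] carries the (virtual) port to convert.
 */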
3745 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3746 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3747 enum mlx4_steer_type type, u64 *reg_id)
3749 switch (dev->caps.steering_mode) {
3750 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3751 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3754 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
3755 block_loopback, prot,
3758 case MLX4_STEERING_MODE_B0:
3759 if (prot == MLX4_PROT_ETH) {
3760 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3765 return mlx4_qp_attach_common(dev, qp, gid,
3766 block_loopback, prot, type);
3772 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3773 u8 gid[16], enum mlx4_protocol prot,
3774 enum mlx4_steer_type type, u64 reg_id)
3776 switch (dev->caps.steering_mode) {
3777 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3778 return mlx4_flow_detach(dev, reg_id);
3779 case MLX4_STEERING_MODE_B0:
3780 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3786 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3787 u8 *gid, enum mlx4_protocol prot)
3791 if (prot != MLX4_PROT_ETH)
3794 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3795 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3796 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
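/* QP_ATTACH encoding used below: in_modifier bits 0-23 hold the QPN,
 * bits 28-30 the protocol and bit 31 the block-loopback flag;
 * op_modifier selects attach vs. detach, and bit 1 of gid[7] selects
 * the steering type.
 */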
3805 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3806 struct mlx4_vhcr *vhcr,
3807 struct mlx4_cmd_mailbox *inbox,
3808 struct mlx4_cmd_mailbox *outbox,
3809 struct mlx4_cmd_info *cmd)
3811 struct mlx4_qp qp; /* dummy for calling attach/detach */
3812 u8 *gid = inbox->buf;
3813 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3818 int attach = vhcr->op_modifier;
3819 int block_loopback = vhcr->in_modifier >> 31;
3820 u8 steer_type_mask = 2;
3821 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3823 qpn = vhcr->in_modifier & 0xffffff;
3824 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3830 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
3833 pr_err("Fail to attach rule to qp 0x%x\n", qpn);
3836 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3840 err = mlx4_adjust_port(dev, slave, gid, prot);
3844 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3848 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3850 pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
3853 put_res(dev, slave, qpn, RES_QP);
3857 qp_detach(dev, &qp, gid, prot, type, reg_id);
3859 put_res(dev, slave, qpn, RES_QP);
3864 * MAC validation for flow steering rules.
3865 * A VF may attach rules only with a MAC address that is assigned to it.
3867 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3868 struct list_head *rlist)
3870 struct mac_res *res, *tmp;
3873 /* make sure it isn't a multicast or broadcast MAC */
3874 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3875 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3876 list_for_each_entry_safe(res, tmp, rlist, list) {
3877 be_mac = cpu_to_be64(res->mac << 16);
3878 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
3881 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3882 eth_header->eth.dst_mac, slave);
3889 * If the eth header is missing, prepend an eth header carrying a MAC
3890 * address assigned to the VF.
3892 static int add_eth_header(struct mlx4_dev *dev, int slave,
3893 struct mlx4_cmd_mailbox *inbox,
3894 struct list_head *rlist, int header_id)
3896 struct mac_res *res, *tmp;
3898 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3899 struct mlx4_net_trans_rule_hw_eth *eth_header;
3900 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3901 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3903 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3905 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3907 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3909 /* Open up space in the inbox for the eth header */
3910 switch (header_id) {
3911 case MLX4_NET_TRANS_RULE_ID_IPV4:
3913 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3914 memmove(ip_header, eth_header,
3915 sizeof(*ip_header) + sizeof(*l4_header));
3917 case MLX4_NET_TRANS_RULE_ID_TCP:
3918 case MLX4_NET_TRANS_RULE_ID_UDP:
3919 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3921 memmove(l4_header, eth_header, sizeof(*l4_header));
3926 list_for_each_entry_safe(res, tmp, rlist, list) {
3927 if (port == res->port) {
3928 be_mac = cpu_to_be64(res->mac << 16);
3933 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
3938 memset(eth_header, 0, sizeof(*eth_header));
3939 eth_header->size = sizeof(*eth_header) >> 2;
3940 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3941 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3942 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
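/* Slaves may use UPDATE_QP for one thing only: changing the MAC index
 * in the primary address path.  Any other bit set in the masks is
 * rejected up front.
 */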
3948 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
3949 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
3950 struct mlx4_vhcr *vhcr,
3951 struct mlx4_cmd_mailbox *inbox,
3952 struct mlx4_cmd_mailbox *outbox,
3953 struct mlx4_cmd_info *cmd_info)
3956 u32 qpn = vhcr->in_modifier & 0xffffff;
3960 u64 pri_addr_path_mask;
3961 struct mlx4_update_qp_context *cmd;
3964 cmd = (struct mlx4_update_qp_context *)inbox->buf;
3966 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
3967 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
3968 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
3971 /* Just change the smac for the QP */
3972 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3974 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
3978 port = (rqp->sched_queue >> 6 & 1) + 1;
3979 smac_index = cmd->qp_context.pri_path.grh_mylmc;
3980 err = mac_find_smac_ix_in_slave(dev, slave, port,
3983 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
3988 err = mlx4_cmd(dev, inbox->dma,
3989 vhcr->in_modifier, 0,
3990 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
3993 mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
3998 put_res(dev, slave, qpn, RES_QP);
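/* Attach a flow steering rule on behalf of a slave: the QPN must be a
 * resource the slave owns, a rule with an L2 header must use one of
 * the slave's MACs, and L3/L4 rules lacking L2 get an Ethernet header
 * prepended.  The resulting rule is registered in the tracker under
 * the firmware handle returned in out_param, pinning the QP via its
 * ref_count until the rule is detached.
 */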
4002 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4003 struct mlx4_vhcr *vhcr,
4004 struct mlx4_cmd_mailbox *inbox,
4005 struct mlx4_cmd_mailbox *outbox,
4006 struct mlx4_cmd_info *cmd)
4009 struct mlx4_priv *priv = mlx4_priv(dev);
4010 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4011 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4015 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4016 struct _rule_hw *rule_header;
4019 if (dev->caps.steering_mode !=
4020 MLX4_STEERING_MODE_DEVICE_MANAGED)
4023 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4024 ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
4025 if (ctrl->port <= 0)
4027 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4028 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4030 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4033 rule_header = (struct _rule_hw *)(ctrl + 1);
4034 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4036 switch (header_id) {
4037 case MLX4_NET_TRANS_RULE_ID_ETH:
4038 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4043 case MLX4_NET_TRANS_RULE_ID_IB:
4045 case MLX4_NET_TRANS_RULE_ID_IPV4:
4046 case MLX4_NET_TRANS_RULE_ID_TCP:
4047 case MLX4_NET_TRANS_RULE_ID_UDP:
4048 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4049 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4053 vhcr->in_modifier +=
4054 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4057 pr_err("Corrupted mailbox\n");
4062 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4063 vhcr->in_modifier, 0,
4064 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4069 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4071 mlx4_err(dev, "Fail to add flow steering resources\n");
4073 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4074 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4078 atomic_inc(&rqp->ref_count);
4080 put_res(dev, slave, qpn, RES_QP);
4084 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4085 struct mlx4_vhcr *vhcr,
4086 struct mlx4_cmd_mailbox *inbox,
4087 struct mlx4_cmd_mailbox *outbox,
4088 struct mlx4_cmd_info *cmd)
4092 struct res_fs_rule *rrule;
4094 if (dev->caps.steering_mode !=
4095 MLX4_STEERING_MODE_DEVICE_MANAGED)
4098 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4101 /* Release the rule from busy state before removal */
4102 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4103 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4107 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4109 mlx4_err(dev, "Fail to remove flow steering resources\n");
4113 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4114 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4117 atomic_dec(&rqp->ref_count);
4119 put_res(dev, slave, rrule->qpn, RES_QP);
4124 BUSY_MAX_RETRIES = 10
4127 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4128 struct mlx4_vhcr *vhcr,
4129 struct mlx4_cmd_mailbox *inbox,
4130 struct mlx4_cmd_mailbox *outbox,
4131 struct mlx4_cmd_info *cmd)
4134 int index = vhcr->in_modifier & 0xffff;
4136 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4140 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4141 put_res(dev, slave, index, RES_COUNTER);
4145 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4147 struct res_gid *rgid;
4148 struct res_gid *tmp;
4149 struct mlx4_qp qp; /* dummy for calling attach/detach */
4151 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4152 switch (dev->caps.steering_mode) {
4153 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4154 mlx4_flow_detach(dev, rgid->reg_id);
4156 case MLX4_STEERING_MODE_B0:
4157 qp.qpn = rqp->local_qpn;
4158 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4159 rgid->prot, rgid->steer);
4162 list_del(&rgid->list);
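/* Flush every resource of the given type still owned by the slave:
 * anything idle is moved to BUSY so no other flow can grab it during
 * cleanup, and the count of entries already busy elsewhere is returned
 * so the caller can retry (move_all_busy() below gives up after 5s).
 */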
4167 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4168 enum mlx4_resource type, int print)
4170 struct mlx4_priv *priv = mlx4_priv(dev);
4171 struct mlx4_resource_tracker *tracker =
4172 &priv->mfunc.master.res_tracker;
4173 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4174 struct res_common *r;
4175 struct res_common *tmp;
4179 spin_lock_irq(mlx4_tlock(dev));
4180 list_for_each_entry_safe(r, tmp, rlist, list) {
4181 if (r->owner == slave) {
4183 if (r->state == RES_ANY_BUSY) {
4186 "%s id 0x%llx is busy\n",
4191 r->from_state = r->state;
4192 r->state = RES_ANY_BUSY;
4198 spin_unlock_irq(mlx4_tlock(dev));
4203 static int move_all_busy(struct mlx4_dev *dev, int slave,
4204 enum mlx4_resource type)
4206 unsigned long begin;
4211 busy = _move_all_busy(dev, slave, type, 0);
4212 if (time_after(jiffies, begin + 5 * HZ))
4219 busy = _move_all_busy(dev, slave, type, 1);
4223 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4225 struct mlx4_priv *priv = mlx4_priv(dev);
4226 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4227 struct list_head *qp_list =
4228 &tracker->slave_list[slave].res_list[RES_QP];
4236 err = move_all_busy(dev, slave, RES_QP);
4238 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4241 spin_lock_irq(mlx4_tlock(dev));
4242 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4243 spin_unlock_irq(mlx4_tlock(dev));
4244 if (qp->com.owner == slave) {
4245 qpn = qp->com.res_id;
4246 detach_qp(dev, slave, qp);
4247 state = qp->com.from_state;
4248 while (state != 0) {
4250 case RES_QP_RESERVED:
4251 spin_lock_irq(mlx4_tlock(dev));
4252 rb_erase(&qp->com.node,
4253 &tracker->res_tree[RES_QP]);
4254 list_del(&qp->com.list);
4255 spin_unlock_irq(mlx4_tlock(dev));
4256 if (!valid_reserved(dev, slave, qpn)) {
4257 __mlx4_qp_release_range(dev, qpn, 1);
4258 mlx4_release_resource(dev, slave,
4265 if (!valid_reserved(dev, slave, qpn))
4266 __mlx4_qp_free_icm(dev, qpn);
4267 state = RES_QP_RESERVED;
4271 err = mlx4_cmd(dev, in_param,
4274 MLX4_CMD_TIME_CLASS_A,
4277 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4278 slave, qp->local_qpn);
4279 atomic_dec(&qp->rcq->ref_count);
4280 atomic_dec(&qp->scq->ref_count);
4281 atomic_dec(&qp->mtt->ref_count);
4283 atomic_dec(&qp->srq->ref_count);
4284 state = RES_QP_MAPPED;
4291 spin_lock_irq(mlx4_tlock(dev));
4293 spin_unlock_irq(mlx4_tlock(dev));
4296 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4298 struct mlx4_priv *priv = mlx4_priv(dev);
4299 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4300 struct list_head *srq_list =
4301 &tracker->slave_list[slave].res_list[RES_SRQ];
4302 struct res_srq *srq;
4303 struct res_srq *tmp;
4310 err = move_all_busy(dev, slave, RES_SRQ);
4312 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4315 spin_lock_irq(mlx4_tlock(dev));
4316 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4317 spin_unlock_irq(mlx4_tlock(dev));
4318 if (srq->com.owner == slave) {
4319 srqn = srq->com.res_id;
4320 state = srq->com.from_state;
4321 while (state != 0) {
4323 case RES_SRQ_ALLOCATED:
4324 __mlx4_srq_free_icm(dev, srqn);
4325 spin_lock_irq(mlx4_tlock(dev));
4326 rb_erase(&srq->com.node,
4327 &tracker->res_tree[RES_SRQ]);
4328 list_del(&srq->com.list);
4329 spin_unlock_irq(mlx4_tlock(dev));
4330 mlx4_release_resource(dev, slave,
4338 err = mlx4_cmd(dev, in_param, srqn, 1,
4340 MLX4_CMD_TIME_CLASS_A,
4343 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4346 atomic_dec(&srq->mtt->ref_count);
4348 atomic_dec(&srq->cq->ref_count);
4349 state = RES_SRQ_ALLOCATED;
4357 spin_lock_irq(mlx4_tlock(dev));
4359 spin_unlock_irq(mlx4_tlock(dev));
4362 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4364 struct mlx4_priv *priv = mlx4_priv(dev);
4365 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4366 struct list_head *cq_list =
4367 &tracker->slave_list[slave].res_list[RES_CQ];
4376 err = move_all_busy(dev, slave, RES_CQ);
4378 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4381 spin_lock_irq(mlx4_tlock(dev));
4382 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4383 spin_unlock_irq(mlx4_tlock(dev));
4384 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4385 cqn = cq->com.res_id;
4386 state = cq->com.from_state;
4387 while (state != 0) {
4389 case RES_CQ_ALLOCATED:
4390 __mlx4_cq_free_icm(dev, cqn);
4391 spin_lock_irq(mlx4_tlock(dev));
4392 rb_erase(&cq->com.node,
4393 &tracker->res_tree[RES_CQ]);
4394 list_del(&cq->com.list);
4395 spin_unlock_irq(mlx4_tlock(dev));
4396 mlx4_release_resource(dev, slave,
4404 err = mlx4_cmd(dev, in_param, cqn, 1,
4406 MLX4_CMD_TIME_CLASS_A,
4409 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4411 atomic_dec(&cq->mtt->ref_count);
4412 state = RES_CQ_ALLOCATED;
4420 spin_lock_irq(mlx4_tlock(dev));
4422 spin_unlock_irq(mlx4_tlock(dev));
4425 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4427 struct mlx4_priv *priv = mlx4_priv(dev);
4428 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4429 struct list_head *mpt_list =
4430 &tracker->slave_list[slave].res_list[RES_MPT];
4431 struct res_mpt *mpt;
4432 struct res_mpt *tmp;
4439 err = move_all_busy(dev, slave, RES_MPT);
4441 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4444 spin_lock_irq(mlx4_tlock(dev));
4445 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4446 spin_unlock_irq(mlx4_tlock(dev));
4447 if (mpt->com.owner == slave) {
4448 mptn = mpt->com.res_id;
4449 state = mpt->com.from_state;
4450 while (state != 0) {
4452 case RES_MPT_RESERVED:
4453 __mlx4_mpt_release(dev, mpt->key);
4454 spin_lock_irq(mlx4_tlock(dev));
4455 rb_erase(&mpt->com.node,
4456 &tracker->res_tree[RES_MPT]);
4457 list_del(&mpt->com.list);
4458 spin_unlock_irq(mlx4_tlock(dev));
4459 mlx4_release_resource(dev, slave,
4465 case RES_MPT_MAPPED:
4466 __mlx4_mpt_free_icm(dev, mpt->key);
4467 state = RES_MPT_RESERVED;
4472 err = mlx4_cmd(dev, in_param, mptn, 0,
4474 MLX4_CMD_TIME_CLASS_A,
4477 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4480 atomic_dec(&mpt->mtt->ref_count);
4481 state = RES_MPT_MAPPED;
4488 spin_lock_irq(mlx4_tlock(dev));
4490 spin_unlock_irq(mlx4_tlock(dev));
4493 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4495 struct mlx4_priv *priv = mlx4_priv(dev);
4496 struct mlx4_resource_tracker *tracker =
4497 &priv->mfunc.master.res_tracker;
4498 struct list_head *mtt_list =
4499 &tracker->slave_list[slave].res_list[RES_MTT];
4500 struct res_mtt *mtt;
4501 struct res_mtt *tmp;
4507 err = move_all_busy(dev, slave, RES_MTT);
4509 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4512 spin_lock_irq(mlx4_tlock(dev));
4513 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4514 spin_unlock_irq(mlx4_tlock(dev));
4515 if (mtt->com.owner == slave) {
4516 base = mtt->com.res_id;
4517 state = mtt->com.from_state;
4518 while (state != 0) {
4520 case RES_MTT_ALLOCATED:
4521 __mlx4_free_mtt_range(dev, base,
4523 spin_lock_irq(mlx4_tlock(dev));
4524 rb_erase(&mtt->com.node,
4525 &tracker->res_tree[RES_MTT]);
4526 list_del(&mtt->com.list);
4527 spin_unlock_irq(mlx4_tlock(dev));
4528 mlx4_release_resource(dev, slave, RES_MTT,
4529 1 << mtt->order, 0);
4539 spin_lock_irq(mlx4_tlock(dev));
4541 spin_unlock_irq(mlx4_tlock(dev));
4544 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4546 struct mlx4_priv *priv = mlx4_priv(dev);
4547 struct mlx4_resource_tracker *tracker =
4548 &priv->mfunc.master.res_tracker;
4549 struct list_head *fs_rule_list =
4550 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4551 struct res_fs_rule *fs_rule;
4552 struct res_fs_rule *tmp;
4557 err = move_all_busy(dev, slave, RES_FS_RULE);
4559 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4562 spin_lock_irq(mlx4_tlock(dev));
4563 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4564 spin_unlock_irq(mlx4_tlock(dev));
4565 if (fs_rule->com.owner == slave) {
4566 base = fs_rule->com.res_id;
4567 state = fs_rule->com.from_state;
4568 while (state != 0) {
4570 case RES_FS_RULE_ALLOCATED:
4572 err = mlx4_cmd(dev, base, 0, 0,
4573 MLX4_QP_FLOW_STEERING_DETACH,
4574 MLX4_CMD_TIME_CLASS_A,
4577 spin_lock_irq(mlx4_tlock(dev));
4578 rb_erase(&fs_rule->com.node,
4579 &tracker->res_tree[RES_FS_RULE]);
4580 list_del(&fs_rule->com.list);
4581 spin_unlock_irq(mlx4_tlock(dev));
4591 spin_lock_irq(mlx4_tlock(dev));
4593 spin_unlock_irq(mlx4_tlock(dev));
4596 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4598 struct mlx4_priv *priv = mlx4_priv(dev);
4599 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4600 struct list_head *eq_list =
4601 &tracker->slave_list[slave].res_list[RES_EQ];
4608 struct mlx4_cmd_mailbox *mailbox;
4610 err = move_all_busy(dev, slave, RES_EQ);
4612 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4615 spin_lock_irq(mlx4_tlock(dev));
4616 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4617 spin_unlock_irq(mlx4_tlock(dev));
4618 if (eq->com.owner == slave) {
4619 eqn = eq->com.res_id;
4620 state = eq->com.from_state;
4621 while (state != 0) {
4623 case RES_EQ_RESERVED:
4624 spin_lock_irq(mlx4_tlock(dev));
4625 rb_erase(&eq->com.node,
4626 &tracker->res_tree[RES_EQ]);
4627 list_del(&eq->com.list);
4628 spin_unlock_irq(mlx4_tlock(dev));
4634 mailbox = mlx4_alloc_cmd_mailbox(dev);
4635 if (IS_ERR(mailbox)) {
4639 err = mlx4_cmd_box(dev, slave, 0,
4642 MLX4_CMD_TIME_CLASS_A,
4645 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4647 mlx4_free_cmd_mailbox(dev, mailbox);
4648 atomic_dec(&eq->mtt->ref_count);
4649 state = RES_EQ_RESERVED;
4657 spin_lock_irq(mlx4_tlock(dev));
4659 spin_unlock_irq(mlx4_tlock(dev));
4662 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4664 struct mlx4_priv *priv = mlx4_priv(dev);
4665 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4666 struct list_head *counter_list =
4667 &tracker->slave_list[slave].res_list[RES_COUNTER];
4668 struct res_counter *counter;
4669 struct res_counter *tmp;
4673 err = move_all_busy(dev, slave, RES_COUNTER);
4675 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4678 spin_lock_irq(mlx4_tlock(dev));
4679 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4680 if (counter->com.owner == slave) {
4681 index = counter->com.res_id;
4682 rb_erase(&counter->com.node,
4683 &tracker->res_tree[RES_COUNTER]);
4684 list_del(&counter->com.list);
4686 __mlx4_counter_free(dev, index);
4687 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4690 spin_unlock_irq(mlx4_tlock(dev));
4693 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4695 struct mlx4_priv *priv = mlx4_priv(dev);
4696 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4697 struct list_head *xrcdn_list =
4698 &tracker->slave_list[slave].res_list[RES_XRCD];
4699 struct res_xrcdn *xrcd;
4700 struct res_xrcdn *tmp;
4704 err = move_all_busy(dev, slave, RES_XRCD);
4706 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4709 spin_lock_irq(mlx4_tlock(dev));
4710 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4711 if (xrcd->com.owner == slave) {
4712 xrcdn = xrcd->com.res_id;
4713 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4714 list_del(&xrcd->com.list);
4716 __mlx4_xrcd_free(dev, xrcdn);
4719 spin_unlock_irq(mlx4_tlock(dev));
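/* Teardown runs in dependency order: VLANs, MACs, flow rules and QPs
 * go first and drop their references on CQs, SRQs and MTTs; then SRQs,
 * CQs, MRs and EQs; and only then the MTTs, counters and XRCDs they
 * pointed at.
 */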
4722 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4724 struct mlx4_priv *priv = mlx4_priv(dev);
4725 mlx4_reset_roce_gids(dev, slave);
4726 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4727 rem_slave_vlans(dev, slave);
4728 rem_slave_macs(dev, slave);
4729 rem_slave_fs_rule(dev, slave);
4730 rem_slave_qps(dev, slave);
4731 rem_slave_srqs(dev, slave);
4732 rem_slave_cqs(dev, slave);
4733 rem_slave_mrs(dev, slave);
4734 rem_slave_eqs(dev, slave);
4735 rem_slave_mtts(dev, slave);
4736 rem_slave_counters(dev, slave);
4737 rem_slave_xrcdns(dev, slave);
4738 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
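/* Deferred work used when an immediate VLAN/QoS change is forced on a
 * VF: runs on the master, walks all QPs owned by the slave and issues
 * UPDATE_QP on each to enforce the new VST settings, or, when moving
 * back to VGT, to restore the values the VF originally programmed
 * (saved at INIT2RTR time).
 */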
4741 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4743 struct mlx4_vf_immed_vlan_work *work =
4744 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4745 struct mlx4_cmd_mailbox *mailbox;
4746 struct mlx4_update_qp_context *upd_context;
4747 struct mlx4_dev *dev = &work->priv->dev;
4748 struct mlx4_resource_tracker *tracker =
4749 &work->priv->mfunc.master.res_tracker;
4750 struct list_head *qp_list =
4751 &tracker->slave_list[work->slave].res_list[RES_QP];
4754 u64 qp_path_mask_vlan_ctrl =
4755 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
4756 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4757 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4758 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4759 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
4760 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
4762 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4763 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
4764 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
4765 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
4766 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
4767 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
4768 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4771 int port, errors = 0;
4774 if (mlx4_is_slave(dev)) {
4775 mlx4_warn(dev, "Trying to update a QP on slave %d\n",
4780 mailbox = mlx4_alloc_cmd_mailbox(dev);
4781 if (IS_ERR(mailbox))
4783 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4784 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4785 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4786 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4787 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4788 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4789 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4790 else if (!work->vlan_id)
4791 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4792 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4794 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4795 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4796 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4798 upd_context = mailbox->buf;
4799 upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
4801 spin_lock_irq(mlx4_tlock(dev));
4802 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4803 spin_unlock_irq(mlx4_tlock(dev));
4804 if (qp->com.owner == work->slave) {
4805 if (qp->com.from_state != RES_QP_HW ||
4806 !qp->sched_queue || /* no INIT2RTR trans yet */
4807 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4808 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4809 spin_lock_irq(mlx4_tlock(dev));
4812 port = (qp->sched_queue >> 6 & 1) + 1;
4813 if (port != work->port) {
4814 spin_lock_irq(mlx4_tlock(dev));
4817 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
4818 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
4820 upd_context->primary_addr_path_mask =
4821 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
4822 if (work->vlan_id == MLX4_VGT) {
4823 upd_context->qp_context.param3 = qp->param3;
4824 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
4825 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
4826 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
4827 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
4828 upd_context->qp_context.pri_path.feup = qp->feup;
4829 upd_context->qp_context.pri_path.sched_queue =
4832 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
4833 upd_context->qp_context.pri_path.vlan_control = vlan_control;
4834 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4835 upd_context->qp_context.pri_path.fvl_rx =
4836 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
4837 upd_context->qp_context.pri_path.fl =
4838 qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
4839 upd_context->qp_context.pri_path.feup =
4840 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
4841 upd_context->qp_context.pri_path.sched_queue =
4842 qp->sched_queue & 0xC7;
4843 upd_context->qp_context.pri_path.sched_queue |=
4844 ((work->qos & 0x7) << 3);
4847 err = mlx4_cmd(dev, mailbox->dma,
4848 qp->local_qpn & 0xffffff,
4849 0, MLX4_CMD_UPDATE_QP,
4850 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4852 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4853 work->slave, port, qp->local_qpn, err);
4857 spin_lock_irq(mlx4_tlock(dev));
4859 spin_unlock_irq(mlx4_tlock(dev));
4860 mlx4_free_cmd_mailbox(dev, mailbox);
4863 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4864 errors, work->slave, work->port);
4866 /* unregister the previous vlan_id if needed, and only if we had
4867 * no errors while updating the QPs
4869 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4870 NO_INDX != work->orig_vlan_ix)
4871 __mlx4_unregister_vlan(&work->priv->dev, work->port,
4872 work->orig_vlan_id);