/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int i;
        unsigned int offset = 0;

        if (up && up != MLX4_EN_NUM_UP)
                return -EINVAL;

        netdev_set_num_tc(dev, up);

        /* Partition Tx queues evenly amongst UP's */
        for (i = 0; i < up; i++) {
                netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
                offset += priv->num_tx_rings_p_up;
        }

        return 0;
}
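
/*
 * Illustrative sketch (not part of the driver): the loop above hands each
 * user priority (UP) a contiguous, equally sized slice of the Tx queues.
 * A minimal userspace model of that mapping, assuming 8 UPs and
 * num_tx_rings_p_up == 4 (both values are illustrative only):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned int num_tx_rings_p_up = 4;	// assumed value
 *		unsigned int offset = 0;
 *		int up;
 *
 *		for (up = 0; up < 8; up++) {
 *			printf("UP %d -> queues [%u..%u]\n", up, offset,
 *			       offset + num_tx_rings_p_up - 1);
 *			offset += num_tx_rings_p_up;
 *		}
 *		return 0;
 *	}
 *
 * which prints "UP 0 -> queues [0..3]", "UP 1 -> queues [4..7]", and so on.
 */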
#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
        struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
        struct net_device *dev = cq->dev;
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
        int done;

        if (!priv->port_up)
                return LL_FLUSH_FAILED;

        if (!mlx4_en_cq_lock_poll(cq))
                return LL_FLUSH_BUSY;

        done = mlx4_en_process_rx_cq(dev, cq, 4);
        if (likely(done))
                rx_ring->cleaned += done;
        else
                rx_ring->misses++;

        mlx4_en_cq_unlock_poll(cq);

        return done;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
        struct list_head next;
        struct work_struct work;

        u8     ip_proto;
        __be32 src_ip;
        __be32 dst_ip;
        __be16 src_port;
        __be16 dst_port;

        int rxq_index;
        struct mlx4_en_priv *priv;
        u32 flow_id;			/* RFS infrastructure id */
        int id;				/* mlx4_en driver id */
        u64 reg_id;			/* Flow steering API id */
        u8 activated;			/* Used to prevent expiry before filter
                                         * is attached
                                         */
        struct hlist_node filter_chain;
};
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
        switch (ip_proto) {
        case IPPROTO_UDP:
                return MLX4_NET_TRANS_RULE_ID_UDP;
        case IPPROTO_TCP:
                return MLX4_NET_TRANS_RULE_ID_TCP;
        default:
                return MLX4_NET_TRANS_RULE_NUM;
        }
}
static void mlx4_en_filter_work(struct work_struct *work)
{
        struct mlx4_en_filter *filter = container_of(work,
                                                     struct mlx4_en_filter,
                                                     work);
        struct mlx4_en_priv *priv = filter->priv;
        struct mlx4_spec_list spec_tcp_udp = {
                .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
                {
                        .tcp_udp = {
                                .dst_port = filter->dst_port,
                                .dst_port_msk = (__force __be16)-1,
                                .src_port = filter->src_port,
                                .src_port_msk = (__force __be16)-1,
                        },
                },
        };
        struct mlx4_spec_list spec_ip = {
                .id = MLX4_NET_TRANS_RULE_ID_IPV4,
                {
                        .ipv4 = {
                                .dst_ip = filter->dst_ip,
                                .dst_ip_msk = (__force __be32)-1,
                                .src_ip = filter->src_ip,
                                .src_ip_msk = (__force __be32)-1,
                        },
                },
        };
        struct mlx4_spec_list spec_eth = {
                .id = MLX4_NET_TRANS_RULE_ID_ETH,
        };
        struct mlx4_net_trans_rule rule = {
                .list = LIST_HEAD_INIT(rule.list),
                .queue_mode = MLX4_NET_TRANS_Q_LIFO,
                .exclusive = 1,
                .allow_loopback = 1,
                .promisc_mode = MLX4_FS_REGULAR,
                .port = priv->port,
                .priority = MLX4_DOMAIN_RFS,
        };
        int rc;
        __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

        if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
                en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
                        filter->ip_proto);
                goto ignore;
        }
        list_add_tail(&spec_eth.list, &rule.list);
        list_add_tail(&spec_ip.list, &rule.list);
        list_add_tail(&spec_tcp_udp.list, &rule.list);

        rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
        memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
        memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

        filter->activated = 0;

        if (filter->reg_id) {
                rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
                if (rc && rc != -ENOENT)
                        en_err(priv, "Error detaching flow. rc = %d\n", rc);
        }

        rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
        if (rc)
                en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
        mlx4_en_filter_rfs_expire(priv);

        filter->activated = 1;
}
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
                   __be16 src_port, __be16 dst_port)
{
        unsigned long l;
        int bucket_idx;

        l = (__force unsigned long)src_port |
            ((__force unsigned long)dst_port << 2);
        l ^= (__force unsigned long)(src_ip ^ dst_ip);

        bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

        return &priv->filter_hash[bucket_idx];
}
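
/*
 * Illustrative sketch (not part of the driver): the function above packs the
 * two L4 ports into the low bits, XORs in the two IPv4 addresses, and lets
 * hash_long() spread the result over 1 << MLX4_EN_FILTER_HASH_SHIFT buckets.
 * A self-contained userspace model, assuming 64-bit longs and using the
 * multiplicative hash that older kernels used for hash_long(); the shift and
 * the example flow values are stand-ins, not the driver's constants:
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	#define HASH_SHIFT 10	// stand-in for MLX4_EN_FILTER_HASH_SHIFT
 *
 *	static unsigned int bucket(uint16_t sport, uint16_t dport,
 *				   uint32_t sip, uint32_t dip)
 *	{
 *		unsigned long l = (unsigned long)sport |
 *				  ((unsigned long)dport << 2);
 *		l ^= (unsigned long)(sip ^ dip);
 *		// multiplicative hash_long(), pre-4.7 kernel constant
 *		return (unsigned int)((l * 0x9e37fffffffc0001UL) >>
 *				      (64 - HASH_SHIFT));
 *	}
 *
 *	int main(void)
 *	{
 *		printf("bucket = %u\n", bucket(0x1234, 0x0050,
 *					       0xc0a80001, 0xc0a80002));
 *		return 0;
 *	}
 */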
static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
                     __be32 dst_ip, u8 ip_proto, __be16 src_port,
                     __be16 dst_port, u32 flow_id)
{
        struct mlx4_en_filter *filter = NULL;

        filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
        if (!filter)
                return NULL;

        filter->priv = priv;
        filter->rxq_index = rxq_index;
        INIT_WORK(&filter->work, mlx4_en_filter_work);

        filter->src_ip = src_ip;
        filter->dst_ip = dst_ip;
        filter->ip_proto = ip_proto;
        filter->src_port = src_port;
        filter->dst_port = dst_port;

        filter->flow_id = flow_id;

        filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

        list_add_tail(&filter->next, &priv->filters);
        hlist_add_head(&filter->filter_chain,
                       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
                                          dst_port));

        return filter;
}
static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
        struct mlx4_en_priv *priv = filter->priv;
        int rc;

        list_del(&filter->next);

        rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
        if (rc && rc != -ENOENT)
                en_err(priv, "Error detaching flow. rc = %d\n", rc);

        kfree(filter);
}
static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
                    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
        struct mlx4_en_filter *filter;
        struct mlx4_en_filter *ret = NULL;

        hlist_for_each_entry(filter,
                             filter_hash_bucket(priv, src_ip, dst_ip,
                                                src_port, dst_port),
                             filter_chain) {
                if (filter->src_ip == src_ip &&
                    filter->dst_ip == dst_ip &&
                    filter->ip_proto == ip_proto &&
                    filter->src_port == src_port &&
                    filter->dst_port == dst_port) {
                        ret = filter;
                        break;
                }
        }

        return ret;
}
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
                   u16 rxq_index, u32 flow_id)
{
        struct mlx4_en_priv *priv = netdev_priv(net_dev);
        struct mlx4_en_filter *filter;
        const struct iphdr *ip;
        const __be16 *ports;
        u8 ip_proto;
        __be32 src_ip;
        __be32 dst_ip;
        __be16 src_port;
        __be16 dst_port;
        int nhoff = skb_network_offset(skb);
        int ret = 0;

        if (skb->protocol != htons(ETH_P_IP))
                return -EPROTONOSUPPORT;

        ip = (const struct iphdr *)(skb->data + nhoff);
        if (ip_is_fragment(ip))
                return -EPROTONOSUPPORT;

        if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
                return -EPROTONOSUPPORT;
        ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

        ip_proto = ip->protocol;
        src_ip = ip->saddr;
        dst_ip = ip->daddr;
        src_port = ports[0];
        dst_port = ports[1];

        spin_lock_bh(&priv->filters_lock);
        filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
                                     src_port, dst_port);
        if (filter) {
                if (filter->rxq_index == rxq_index)
                        goto out;

                filter->rxq_index = rxq_index;
        } else {
                filter = mlx4_en_filter_alloc(priv, rxq_index,
                                              src_ip, dst_ip, ip_proto,
                                              src_port, dst_port, flow_id);
                if (!filter) {
                        ret = -ENOMEM;
                        goto err;
                }
        }

        queue_work(priv->mdev->workqueue, &filter->work);

out:
        ret = filter->id;
err:
        spin_unlock_bh(&priv->filters_lock);

        return ret;
}
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
        struct mlx4_en_filter *filter, *tmp;
        LIST_HEAD(del_list);

        spin_lock_bh(&priv->filters_lock);
        list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
                list_move(&filter->next, &del_list);
                hlist_del(&filter->filter_chain);
        }
        spin_unlock_bh(&priv->filters_lock);

        list_for_each_entry_safe(filter, tmp, &del_list, next) {
                cancel_work_sync(&filter->work);
                mlx4_en_filter_free(filter);
        }
}
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
        struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
        LIST_HEAD(del_list);
        int i = 0;

        spin_lock_bh(&priv->filters_lock);
        list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
                if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
                        break;

                if (filter->activated &&
                    !work_pending(&filter->work) &&
                    rps_may_expire_flow(priv->dev,
                                        filter->rxq_index, filter->flow_id,
                                        filter->id)) {
                        list_move(&filter->next, &del_list);
                        hlist_del(&filter->filter_chain);
                } else
                        last_filter = filter;

                i++;
        }

        if (last_filter && (&last_filter->next != priv->filters.next))
                list_move(&priv->filters, &last_filter->next);

        spin_unlock_bh(&priv->filters_lock);

        list_for_each_entry_safe(filter, tmp, &del_list, next)
                mlx4_en_filter_free(filter);
}
#endif
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
                                   __be16 proto, u16 vid)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;
        int idx;

        en_dbg(HW, priv, "adding VLAN:%d\n", vid);

        set_bit(vid, priv->active_vlans);

        /* Add VID to port VLAN filter */
        mutex_lock(&mdev->state_lock);
        if (mdev->device_up && priv->port_up) {
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
                if (err)
                        en_err(priv, "Failed configuring VLAN filter\n");
        }
        if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
                en_dbg(HW, priv, "failed adding vlan %d\n", vid);
        mutex_unlock(&mdev->state_lock);

        return 0;
}
static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
                                    __be16 proto, u16 vid)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;

        en_dbg(HW, priv, "Killing VID:%d\n", vid);

        clear_bit(vid, priv->active_vlans);

        /* Remove VID from port VLAN filter */
        mutex_lock(&mdev->state_lock);
        mlx4_unregister_vlan(mdev->dev, priv->port, vid);

        if (mdev->device_up && priv->port_up) {
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
                if (err)
                        en_err(priv, "Failed configuring VLAN filter\n");
        }
        mutex_unlock(&mdev->state_lock);

        return 0;
}
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
        int i;

        for (i = ETH_ALEN - 1; i >= 0; --i) {
                dst_mac[i] = src_mac & 0xff;
                src_mac >>= 8;
        }
        memset(&dst_mac[ETH_ALEN], 0, 2);
}
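
/*
 * Illustrative sketch (not part of the driver): the helper above unpacks a
 * MAC held in the low 48 bits of a u64 into big-endian byte order, then
 * zeroes the two bytes of padding. A userspace round-trip, assuming the
 * inverse packs bytes the same way mlx4_mac_to_u64() does (the address used
 * is an example value):
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	static uint64_t mac_to_u64(const unsigned char mac[6])
 *	{
 *		uint64_t v = 0;
 *		int i;
 *
 *		for (i = 0; i < 6; i++)
 *			v = (v << 8) | mac[i];
 *		return v;
 *	}
 *
 *	static void u64_to_mac(unsigned char dst[8], uint64_t src)
 *	{
 *		int i;
 *
 *		for (i = 5; i >= 0; --i) {
 *			dst[i] = src & 0xff;
 *			src >>= 8;
 *		}
 *		memset(&dst[6], 0, 2);
 *	}
 *
 *	int main(void)
 *	{
 *		unsigned char mac[6] = { 0x00, 0x02, 0xc9, 0x01, 0x02, 0x03 };
 *		unsigned char out[8];
 *
 *		u64_to_mac(out, mac_to_u64(mac));
 *		printf("round trip %s\n",
 *		       memcmp(mac, out, 6) == 0 ? "ok" : "broken");
 *		return 0;
 *	}
 */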
static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
                                    int qpn, u64 *reg_id)
{
        int err;

        if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
            priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
                return 0; /* do nothing */

        err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
                                    MLX4_DOMAIN_NIC, reg_id);
        if (err) {
                en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
                return err;
        }
        en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
        return 0;
}
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
                                unsigned char *mac, int *qpn, u64 *reg_id)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_dev *dev = mdev->dev;
        int err;

        switch (dev->caps.steering_mode) {
        case MLX4_STEERING_MODE_B0: {
                struct mlx4_qp qp;
                u8 gid[16] = {0};

                qp.qpn = *qpn;
                memcpy(&gid[10], mac, ETH_ALEN);
                gid[5] = priv->port;

                err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
                break;
        }
        case MLX4_STEERING_MODE_DEVICE_MANAGED: {
                struct mlx4_spec_list spec_eth = { {NULL} };
                __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

                struct mlx4_net_trans_rule rule = {
                        .queue_mode = MLX4_NET_TRANS_Q_FIFO,
                        .exclusive = 0,
                        .allow_loopback = 1,
                        .promisc_mode = MLX4_FS_REGULAR,
                        .priority = MLX4_DOMAIN_NIC,
                };

                rule.port = priv->port;
                rule.qpn = *qpn;
                INIT_LIST_HEAD(&rule.list);

                spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
                memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
                memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
                list_add_tail(&spec_eth.list, &rule.list);

                err = mlx4_flow_attach(dev, &rule, reg_id);
                break;
        }
        default:
                return -EINVAL;
        }
        if (err)
                en_warn(priv, "Failed Attaching Unicast\n");

        return err;
}
static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
                                     unsigned char *mac, int qpn, u64 reg_id)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_dev *dev = mdev->dev;

        switch (dev->caps.steering_mode) {
        case MLX4_STEERING_MODE_B0: {
                struct mlx4_qp qp;
                u8 gid[16] = {0};

                qp.qpn = qpn;
                memcpy(&gid[10], mac, ETH_ALEN);
                gid[5] = priv->port;

                mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
                break;
        }
        case MLX4_STEERING_MODE_DEVICE_MANAGED: {
                mlx4_flow_detach(dev, reg_id);
                break;
        }
        default:
                en_err(priv, "Invalid steering mode.\n");
        }
}
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_dev *dev = mdev->dev;
        struct mlx4_mac_entry *entry;
        int index = 0;
        int err = 0;
        u64 reg_id = 0;
        int *qpn = &priv->base_qpn;
        u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);

        en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
               priv->dev->dev_addr);
        index = mlx4_register_mac(dev, priv->port, mac);
        if (index < 0) {
                err = index;
                en_err(priv, "Failed adding MAC: %pM\n",
                       priv->dev->dev_addr);
                return err;
        }

        if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
                int base_qpn = mlx4_get_base_qpn(dev, priv->port);
                *qpn = base_qpn + index;
                return 0;
        }

        err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
        en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
        if (err) {
                en_err(priv, "Failed to reserve qp for mac registration\n");
                goto qp_err;
        }

        err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
        if (err)
                goto steer_err;

        err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
                                       &priv->tunnel_reg_id);
        if (err)
                goto tunnel_err;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                err = -ENOMEM;
                goto alloc_err;
        }
        memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
        memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
        entry->reg_id = reg_id;

        hlist_add_head_rcu(&entry->hlist,
                           &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

        return 0;

alloc_err:
        if (priv->tunnel_reg_id)
                mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);

tunnel_err:
        mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);

steer_err:
        mlx4_qp_release_range(dev, *qpn, 1);

qp_err:
        mlx4_unregister_mac(dev, priv->port, mac);
        return err;
}
static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_dev *dev = mdev->dev;
        int qpn = priv->base_qpn;
        u64 mac;

        if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
                mac = mlx4_mac_to_u64(priv->dev->dev_addr);
                en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
                       priv->dev->dev_addr);
                mlx4_unregister_mac(dev, priv->port, mac);
        } else {
                struct mlx4_mac_entry *entry;
                struct hlist_node *tmp;
                struct hlist_head *bucket;
                unsigned int i;

                for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
                        bucket = &priv->mac_hash[i];
                        hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
                                mac = mlx4_mac_to_u64(entry->mac);
                                en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
                                       entry->mac);
                                mlx4_en_uc_steer_release(priv, entry->mac,
                                                         qpn, entry->reg_id);
                                mlx4_unregister_mac(dev, priv->port, mac);
                                hlist_del_rcu(&entry->hlist);
                                kfree_rcu(entry, rcu);
                        }
                }

                if (priv->tunnel_reg_id) {
                        mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
                        priv->tunnel_reg_id = 0;
                }

                en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
                       priv->port, qpn);
                mlx4_qp_release_range(dev, qpn, 1);
                priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
        }
}
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
                               unsigned char *new_mac, unsigned char *prev_mac)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_dev *dev = mdev->dev;
        int err = 0;
        u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);

        if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
                struct hlist_head *bucket;
                unsigned int mac_hash;
                struct mlx4_mac_entry *entry;
                struct hlist_node *tmp;
                u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);

                bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
                hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
                        if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
                                mlx4_en_uc_steer_release(priv, entry->mac,
                                                         qpn, entry->reg_id);
                                mlx4_unregister_mac(dev, priv->port,
                                                    prev_mac_u64);
                                hlist_del_rcu(&entry->hlist);
                                synchronize_rcu();
                                memcpy(entry->mac, new_mac, ETH_ALEN);
                                entry->reg_id = 0;
                                mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
                                hlist_add_head_rcu(&entry->hlist,
                                                   &priv->mac_hash[mac_hash]);
                                mlx4_register_mac(dev, priv->port, new_mac_u64);
                                err = mlx4_en_uc_steer_add(priv, new_mac,
                                                           &qpn,
                                                           &entry->reg_id);
                                if (err)
                                        return err;
                                if (priv->tunnel_reg_id) {
                                        mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
                                        priv->tunnel_reg_id = 0;
                                }
                                err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
                                                               &priv->tunnel_reg_id);
                                return err;
                        }
                }
                return -EINVAL;
        }

        return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}
static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
                              unsigned char new_mac[ETH_ALEN + 2])
{
        int err = 0;

        if (priv->port_up) {
                /* Remove old MAC and insert the new one */
                err = mlx4_en_replace_mac(priv, priv->base_qpn,
                                          new_mac, priv->current_mac);
                if (err)
                        en_err(priv, "Failed changing HW MAC address\n");
        } else
                en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

        if (!err)
                memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));

        return err;
}
static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct sockaddr *saddr = addr;
        unsigned char new_mac[ETH_ALEN + 2];
        int err;

        if (!is_valid_ether_addr(saddr->sa_data))
                return -EADDRNOTAVAIL;

        mutex_lock(&mdev->state_lock);
        memcpy(new_mac, saddr->sa_data, ETH_ALEN);
        err = mlx4_en_do_set_mac(priv, new_mac);
        if (!err)
                memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
        mutex_unlock(&mdev->state_lock);

        return err;
}
static void mlx4_en_clear_list(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_mc_list *tmp, *mc_to_del;

        list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
                list_del(&mc_to_del->list);
                kfree(mc_to_del);
        }
}
static void mlx4_en_cache_mclist(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct netdev_hw_addr *ha;
        struct mlx4_en_mc_list *tmp;

        mlx4_en_clear_list(dev);
        netdev_for_each_mc_addr(ha, dev) {
                tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
                if (!tmp) {
                        mlx4_en_clear_list(dev);
                        return;
                }
                memcpy(tmp->addr, ha->addr, ETH_ALEN);
                list_add_tail(&tmp->list, &priv->mc_list);
        }
}
static void update_mclist_flags(struct mlx4_en_priv *priv,
                                struct list_head *dst,
                                struct list_head *src)
{
        struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
        bool found;

        /* Find all the entries that should be removed from dst,
         * These are the entries that are not found in src
         */
        list_for_each_entry(dst_tmp, dst, list) {
                found = false;
                list_for_each_entry(src_tmp, src, list) {
                        if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
                                found = true;
                                break;
                        }
                }
                if (!found)
                        dst_tmp->action = MCLIST_REM;
        }

        /* Add entries that exist in src but not in dst
         * mark them as need to add
         */
        list_for_each_entry(src_tmp, src, list) {
                found = false;
                list_for_each_entry(dst_tmp, dst, list) {
                        if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
                                dst_tmp->action = MCLIST_NONE;
                                found = true;
                                break;
                        }
                }
                if (!found) {
                        new_mc = kmemdup(src_tmp,
                                         sizeof(struct mlx4_en_mc_list),
                                         GFP_KERNEL);
                        if (!new_mc)
                                return;

                        new_mc->action = MCLIST_ADD;
                        list_add_tail(&new_mc->list, dst);
                }
        }
}
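
/*
 * Illustrative sketch (not part of the driver): update_mclist_flags() is a
 * two-pass diff. Pass one marks dst entries missing from src as MCLIST_REM;
 * pass two marks survivors MCLIST_NONE and appends src-only entries as
 * MCLIST_ADD. The same idea over plain arrays, assuming single-byte
 * "addresses" for brevity:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		char dst[] = { 'a', 'b', 'c' };	// currently programmed
 *		char src[] = { 'b', 'c', 'd' };	// newly requested
 *		size_t i, j;
 *
 *		for (i = 0; i < sizeof(dst); i++) {	// pass 1: removals
 *			if (!memchr(src, dst[i], sizeof(src)))
 *				printf("%c -> REM\n", dst[i]);
 *			else
 *				printf("%c -> NONE\n", dst[i]);
 *		}
 *		for (j = 0; j < sizeof(src); j++)	// pass 2: additions
 *			if (!memchr(dst, src[j], sizeof(dst)))
 *				printf("%c -> ADD\n", src[j]);
 *		return 0;
 *	}
 *
 * yielding "a -> REM", "b/c -> NONE", "d -> ADD", mirroring the list walk
 * above in O(n * m) time.
 */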
static void mlx4_en_set_rx_mode(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);

        if (!priv->port_up)
                return;

        queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}
static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
                                     struct mlx4_en_dev *mdev)
{
        int err = 0;

        if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
                if (netif_msg_rx_status(priv))
                        en_warn(priv, "Entering promiscuous mode\n");
                priv->flags |= MLX4_EN_FLAG_PROMISC;

                /* Enable promiscuous mode */
                switch (mdev->dev->caps.steering_mode) {
                case MLX4_STEERING_MODE_DEVICE_MANAGED:
                        err = mlx4_flow_steer_promisc_add(mdev->dev,
                                                          priv->port,
                                                          priv->base_qpn,
                                                          MLX4_FS_ALL_DEFAULT);
                        if (err)
                                en_err(priv, "Failed enabling promiscuous mode\n");
                        priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
                        break;

                case MLX4_STEERING_MODE_B0:
                        err = mlx4_unicast_promisc_add(mdev->dev,
                                                       priv->base_qpn,
                                                       priv->port);
                        if (err)
                                en_err(priv, "Failed enabling unicast promiscuous mode\n");

                        /* Add the default qp number as multicast
                         * promisc
                         */
                        if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
                                err = mlx4_multicast_promisc_add(mdev->dev,
                                                                 priv->base_qpn,
                                                                 priv->port);
                                if (err)
                                        en_err(priv, "Failed enabling multicast promiscuous mode\n");
                                priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
                        }
                        break;

                case MLX4_STEERING_MODE_A0:
                        err = mlx4_SET_PORT_qpn_calc(mdev->dev,
                                                     priv->port,
                                                     priv->base_qpn,
                                                     1);
                        if (err)
                                en_err(priv, "Failed enabling promiscuous mode\n");
                        break;
                }

                /* Disable port multicast filter (unconditionally) */
                err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                          0, MLX4_MCAST_DISABLE);
                if (err)
                        en_err(priv, "Failed disabling multicast filter\n");
        }
}
static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
                                       struct mlx4_en_dev *mdev)
{
        int err = 0;

        if (netif_msg_rx_status(priv))
                en_warn(priv, "Leaving promiscuous mode\n");
        priv->flags &= ~MLX4_EN_FLAG_PROMISC;

        /* Disable promiscuous mode */
        switch (mdev->dev->caps.steering_mode) {
        case MLX4_STEERING_MODE_DEVICE_MANAGED:
                err = mlx4_flow_steer_promisc_remove(mdev->dev,
                                                     priv->port,
                                                     MLX4_FS_ALL_DEFAULT);
                if (err)
                        en_err(priv, "Failed disabling promiscuous mode\n");
                priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
                break;

        case MLX4_STEERING_MODE_B0:
                err = mlx4_unicast_promisc_remove(mdev->dev,
                                                  priv->base_qpn,
                                                  priv->port);
                if (err)
                        en_err(priv, "Failed disabling unicast promiscuous mode\n");
                /* Disable Multicast promisc */
                if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
                        err = mlx4_multicast_promisc_remove(mdev->dev,
                                                            priv->base_qpn,
                                                            priv->port);
                        if (err)
                                en_err(priv, "Failed disabling multicast promiscuous mode\n");
                        priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
                }
                break;

        case MLX4_STEERING_MODE_A0:
                err = mlx4_SET_PORT_qpn_calc(mdev->dev,
                                             priv->port,
                                             priv->base_qpn, 0);
                if (err)
                        en_err(priv, "Failed disabling promiscuous mode\n");
                break;
        }
}
static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
                                 struct net_device *dev,
                                 struct mlx4_en_dev *mdev)
{
        struct mlx4_en_mc_list *mclist, *tmp;
        u64 mcast_addr = 0;
        u8 mc_list[16] = {0};
        int err = 0;

        /* Enable/disable the multicast filter according to IFF_ALLMULTI */
        if (dev->flags & IFF_ALLMULTI) {
                err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                          0, MLX4_MCAST_DISABLE);
                if (err)
                        en_err(priv, "Failed disabling multicast filter\n");

                /* Add the default qp number as multicast promisc */
                if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
                        switch (mdev->dev->caps.steering_mode) {
                        case MLX4_STEERING_MODE_DEVICE_MANAGED:
                                err = mlx4_flow_steer_promisc_add(mdev->dev,
                                                                  priv->port,
                                                                  priv->base_qpn,
                                                                  MLX4_FS_MC_DEFAULT);
                                break;

                        case MLX4_STEERING_MODE_B0:
                                err = mlx4_multicast_promisc_add(mdev->dev,
                                                                 priv->base_qpn,
                                                                 priv->port);
                                break;

                        case MLX4_STEERING_MODE_A0:
                                break;
                        }
                        if (err)
                                en_err(priv, "Failed entering multicast promisc mode\n");
                        priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
                }
        } else {
                /* Disable Multicast promisc */
                if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
                        switch (mdev->dev->caps.steering_mode) {
                        case MLX4_STEERING_MODE_DEVICE_MANAGED:
                                err = mlx4_flow_steer_promisc_remove(mdev->dev,
                                                                     priv->port,
                                                                     MLX4_FS_MC_DEFAULT);
                                break;

                        case MLX4_STEERING_MODE_B0:
                                err = mlx4_multicast_promisc_remove(mdev->dev,
                                                                    priv->base_qpn,
                                                                    priv->port);
                                break;

                        case MLX4_STEERING_MODE_A0:
                                break;
                        }
                        if (err)
                                en_err(priv, "Failed disabling multicast promiscuous mode\n");
                        priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
                }

                err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                          0, MLX4_MCAST_DISABLE);
                if (err)
                        en_err(priv, "Failed disabling multicast filter\n");

                /* Flush mcast filter and init it with broadcast address */
                mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
                                    1, MLX4_MCAST_CONFIG);

                /* Update multicast list - we cache all addresses so they won't
                 * change while HW is updated holding the command semaphore
                 */
                netif_addr_lock_bh(dev);
                mlx4_en_cache_mclist(dev);
                netif_addr_unlock_bh(dev);
                list_for_each_entry(mclist, &priv->mc_list, list) {
                        mcast_addr = mlx4_mac_to_u64(mclist->addr);
                        mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
                                            mcast_addr, 0, MLX4_MCAST_CONFIG);
                }
                err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                          0, MLX4_MCAST_ENABLE);
                if (err)
                        en_err(priv, "Failed enabling multicast filter\n");

                update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
                list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
                        if (mclist->action == MCLIST_REM) {
                                /* detach this address and delete from list */
                                memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
                                mc_list[5] = priv->port;
                                err = mlx4_multicast_detach(mdev->dev,
                                                            &priv->rss_map.indir_qp,
                                                            mc_list,
                                                            MLX4_PROT_ETH,
                                                            mclist->reg_id);
                                if (err)
                                        en_err(priv, "Fail to detach multicast address\n");

                                if (mclist->tunnel_reg_id) {
                                        err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
                                        if (err)
                                                en_err(priv, "Failed to detach multicast address\n");
                                }

                                /* remove from list */
                                list_del(&mclist->list);
                                kfree(mclist);
                        } else if (mclist->action == MCLIST_ADD) {
                                /* attach the address */
                                memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
                                /* needed for B0 steering support */
                                mc_list[5] = priv->port;
                                err = mlx4_multicast_attach(mdev->dev,
                                                            &priv->rss_map.indir_qp,
                                                            mc_list,
                                                            priv->port, 0,
                                                            MLX4_PROT_ETH,
                                                            &mclist->reg_id);
                                if (err)
                                        en_err(priv, "Fail to attach multicast address\n");

                                err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
                                                               &mclist->tunnel_reg_id);
                                if (err)
                                        en_err(priv, "Failed to attach multicast address\n");
                        }
                }
        }
}
static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
                                 struct net_device *dev,
                                 struct mlx4_en_dev *mdev)
{
        struct netdev_hw_addr *ha;
        struct mlx4_mac_entry *entry;
        struct hlist_node *tmp;
        bool found;
        u64 mac;
        int err = 0;
        struct hlist_head *bucket;
        unsigned int i;
        int removed = 0;
        u32 prev_flags;

        /* Note that we do not need to protect our mac_hash traversal with rcu,
         * since all modification code is protected by mdev->state_lock
         */

        /* find what to remove */
        for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
                bucket = &priv->mac_hash[i];
                hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
                        found = false;
                        netdev_for_each_uc_addr(ha, dev) {
                                if (ether_addr_equal_64bits(entry->mac,
                                                            ha->addr)) {
                                        found = true;
                                        break;
                                }
                        }

                        /* MAC address of the port is not in uc list */
                        if (ether_addr_equal_64bits(entry->mac,
                                                    priv->current_mac))
                                found = true;

                        if (!found) {
                                mac = mlx4_mac_to_u64(entry->mac);
                                mlx4_en_uc_steer_release(priv, entry->mac,
                                                         priv->base_qpn,
                                                         entry->reg_id);
                                mlx4_unregister_mac(mdev->dev, priv->port, mac);

                                hlist_del_rcu(&entry->hlist);
                                kfree_rcu(entry, rcu);
                                en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
                                       entry->mac, priv->port);
                                ++removed;
                        }
                }
        }

        /* if we didn't remove anything, there is no use in trying to add
         * again once we are in a forced promisc mode state
         */
        if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
                return;

        prev_flags = priv->flags;
        priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

        /* find what to add */
        netdev_for_each_uc_addr(ha, dev) {
                found = false;
                bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
                hlist_for_each_entry(entry, bucket, hlist) {
                        if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
                        if (!entry) {
                                en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
                                       ha->addr, priv->port);
                                priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
                                break;
                        }
                        mac = mlx4_mac_to_u64(ha->addr);
                        memcpy(entry->mac, ha->addr, ETH_ALEN);
                        err = mlx4_register_mac(mdev->dev, priv->port, mac);
                        if (err < 0) {
                                en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
                                       ha->addr, priv->port, err);
                                kfree(entry);
                                priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
                                break;
                        }
                        err = mlx4_en_uc_steer_add(priv, ha->addr,
                                                   &priv->base_qpn,
                                                   &entry->reg_id);
                        if (err) {
                                en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
                                       ha->addr, priv->port, err);
                                mlx4_unregister_mac(mdev->dev, priv->port, mac);
                                kfree(entry);
                                priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
                                break;
                        } else {
                                unsigned int mac_hash;
                                en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
                                       ha->addr, priv->port);
                                mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
                                bucket = &priv->mac_hash[mac_hash];
                                hlist_add_head_rcu(&entry->hlist, bucket);
                        }
                }
        }

        if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
                en_warn(priv, "Forcing promiscuous mode on port:%d\n",
                        priv->port);
        } else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
                en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
                        priv->port);
        }
}
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 rx_mode_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct net_device *dev = priv->dev;

        mutex_lock(&mdev->state_lock);
        if (!mdev->device_up) {
                en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
                goto out;
        }
        if (!priv->port_up) {
                en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
                goto out;
        }

        if (!netif_carrier_ok(dev)) {
                if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
                        if (priv->port_state.link_state) {
                                priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
                                netif_carrier_on(dev);
                                en_dbg(LINK, priv, "Link Up\n");
                        }
                }
        }

        if (dev->priv_flags & IFF_UNICAST_FLT)
                mlx4_en_do_uc_filter(priv, dev, mdev);

        /* Promiscuous mode: disable all filters */
        if ((dev->flags & IFF_PROMISC) ||
            (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
                mlx4_en_set_promisc_mode(priv, mdev);
                goto out;
        }

        /* Not in promiscuous mode */
        if (priv->flags & MLX4_EN_FLAG_PROMISC)
                mlx4_en_clear_promisc_mode(priv, mdev);

        mlx4_en_do_multicast(priv, dev, mdev);
out:
        mutex_unlock(&mdev->state_lock);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_cq *cq;
        int i;

        for (i = 0; i < priv->rx_ring_num; i++) {
                cq = priv->rx_cq[i];
                napi_schedule(&cq->napi);
        }
}
#endif
static void mlx4_en_tx_timeout(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int i;

        if (netif_msg_timer(priv))
                en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

        for (i = 0; i < priv->tx_ring_num; i++) {
                if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
                        continue;
                en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
                        i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
                        priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
        }

        priv->port_stats.tx_timeout++;
        en_dbg(DRV, priv, "Scheduling watchdog\n");
        queue_work(mdev->workqueue, &priv->watchdog_task);
}
static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);

        spin_lock_bh(&priv->stats_lock);
        memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
        spin_unlock_bh(&priv->stats_lock);

        return &priv->ret_stats;
}
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
        struct mlx4_en_cq *cq;
        int i;

        /* If we haven't received a specific coalescing setting
         * (module param), we set the moderation parameters as follows:
         * - moder_cnt is set to the number of mtu sized packets to
         *   satisfy our coalescing target.
         * - moder_time is set to a fixed value.
         */
        priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
        priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
        priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
        priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
        en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
               priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

        /* Setup cq moderation params */
        for (i = 0; i < priv->rx_ring_num; i++) {
                cq = priv->rx_cq[i];
                cq->moder_cnt = priv->rx_frames;
                cq->moder_time = priv->rx_usecs;
                priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
                priv->last_moder_packets[i] = 0;
                priv->last_moder_bytes[i] = 0;
        }

        for (i = 0; i < priv->tx_ring_num; i++) {
                cq = priv->tx_cq[i];
                cq->moder_cnt = priv->tx_frames;
                cq->moder_time = priv->tx_usecs;
        }

        /* Reset auto-moderation params */
        priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
        priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
        priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
        priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
        priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
        priv->adaptive_rx_coal = 1;
        priv->last_moder_jiffies = 0;
        priv->last_moder_tx_packets = 0;
}
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
        unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
        struct mlx4_en_cq *cq;
        unsigned long packets;
        unsigned long rate;
        unsigned long avg_pkt_size;
        unsigned long rx_packets;
        unsigned long rx_bytes;
        unsigned long rx_pkt_diff;
        int moder_time;
        int ring, err;

        if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
                return;

        for (ring = 0; ring < priv->rx_ring_num; ring++) {
                spin_lock_bh(&priv->stats_lock);
                rx_packets = priv->rx_ring[ring]->packets;
                rx_bytes = priv->rx_ring[ring]->bytes;
                spin_unlock_bh(&priv->stats_lock);

                rx_pkt_diff = ((unsigned long) (rx_packets -
                                priv->last_moder_packets[ring]));
                packets = rx_pkt_diff;
                rate = packets * HZ / period;
                avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
                                priv->last_moder_bytes[ring])) / packets : 0;

                /* Apply auto-moderation only when packet rate
                 * exceeds a rate that it matters */
                if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
                    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
                        if (rate < priv->pkt_rate_low)
                                moder_time = priv->rx_usecs_low;
                        else if (rate > priv->pkt_rate_high)
                                moder_time = priv->rx_usecs_high;
                        else
                                moder_time = (rate - priv->pkt_rate_low) *
                                        (priv->rx_usecs_high - priv->rx_usecs_low) /
                                        (priv->pkt_rate_high - priv->pkt_rate_low) +
                                        priv->rx_usecs_low;
                } else {
                        moder_time = priv->rx_usecs_low;
                }

                if (moder_time != priv->last_moder_time[ring]) {
                        priv->last_moder_time[ring] = moder_time;
                        cq = priv->rx_cq[ring];
                        cq->moder_time = moder_time;
                        cq->moder_cnt = priv->rx_frames;
                        err = mlx4_en_set_cq_moder(priv, cq);
                        if (err)
                                en_err(priv, "Failed modifying moderation for cq:%d\n",
                                       ring);
                }
                priv->last_moder_packets[ring] = rx_packets;
                priv->last_moder_bytes[ring] = rx_bytes;
        }

        priv->last_moder_jiffies = jiffies;
}
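
/*
 * Illustrative sketch (not part of the driver): between pkt_rate_low and
 * pkt_rate_high the moderation time above is linearly interpolated between
 * rx_usecs_low and rx_usecs_high. A userspace model with made-up thresholds
 * (the real values come from the MLX4_EN_RX_RATE_* / MLX4_EN_RX_COAL_TIME_*
 * defaults in mlx4_en.h):
 *
 *	#include <stdio.h>
 *
 *	static int moder_time(unsigned long rate)
 *	{
 *		const unsigned long rate_low = 400000, rate_high = 450000;
 *		const int usecs_low = 0, usecs_high = 128;
 *
 *		if (rate < rate_low)
 *			return usecs_low;
 *		if (rate > rate_high)
 *			return usecs_high;
 *		return (rate - rate_low) * (usecs_high - usecs_low) /
 *		       (rate_high - rate_low) + usecs_low;
 *	}
 *
 *	int main(void)
 *	{
 *		// halfway between the thresholds -> halfway between the times
 *		printf("%d usecs\n", moder_time(425000));	// prints 64
 *		return 0;
 *	}
 */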
static void mlx4_en_do_get_stats(struct work_struct *work)
{
        struct delayed_work *delay = to_delayed_work(work);
        struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
                                                 stats_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;

        mutex_lock(&mdev->state_lock);
        if (mdev->device_up) {
                if (priv->port_up) {
                        err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
                        if (err)
                                en_dbg(HW, priv, "Could not update stats\n");

                        mlx4_en_auto_moderation(priv);
                }

                queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
        }
        if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
                mlx4_en_do_set_mac(priv, priv->current_mac);
                mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
        }
        mutex_unlock(&mdev->state_lock);
}
/* mlx4_en_service_task - Run service task for tasks that need to be done
 * periodically
 */
static void mlx4_en_service_task(struct work_struct *work)
{
        struct delayed_work *delay = to_delayed_work(work);
        struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
                                                 service_task);
        struct mlx4_en_dev *mdev = priv->mdev;

        mutex_lock(&mdev->state_lock);
        if (mdev->device_up) {
                if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
                        mlx4_en_ptp_overflow_check(mdev);

                queue_delayed_work(mdev->workqueue, &priv->service_task,
                                   SERVICE_TASK_DELAY);
        }
        mutex_unlock(&mdev->state_lock);
}
static void mlx4_en_linkstate(struct work_struct *work)
{
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 linkstate_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        int linkstate = priv->link_state;

        mutex_lock(&mdev->state_lock);
        /* If observable port state changed set carrier state and
         * report to system log */
        if (priv->last_link_state != linkstate) {
                if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
                        en_info(priv, "Link Down\n");
                        netif_carrier_off(priv->dev);
                } else {
                        en_info(priv, "Link Up\n");
                        netif_carrier_on(priv->dev);
                }
        }
        priv->last_link_state = linkstate;
        mutex_unlock(&mdev->state_lock);
}
static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
        struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
        int numa_node = priv->mdev->dev->numa_node;
        int ret = 0;

        if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
                return -ENOMEM;

        ret = cpumask_set_cpu_local_first(ring_idx, numa_node,
                                          ring->affinity_mask);
        if (ret)
                free_cpumask_var(ring->affinity_mask);

        return ret;
}

static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
        free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
}
int mlx4_en_start_port(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_cq *cq;
        struct mlx4_en_tx_ring *tx_ring;
        int rx_index = 0;
        int tx_index = 0;
        int err = 0;
        int i;
        int j = 0;
        u8 mc_list[16] = {0};

        if (priv->port_up) {
                en_dbg(DRV, priv, "start port called while port already up\n");
                return 0;
        }

        INIT_LIST_HEAD(&priv->mc_list);
        INIT_LIST_HEAD(&priv->curr_list);
        INIT_LIST_HEAD(&priv->ethtool_list);
        memset(&priv->ethtool_rules[0], 0,
               sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);

        /* Calculate Rx buf size */
        dev->mtu = min(dev->mtu, priv->max_mtu);
        mlx4_en_calc_rx_buf(dev);
        en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

        /* Configure rx cq's and rings */
        err = mlx4_en_activate_rx_rings(priv);
        if (err) {
                en_err(priv, "Failed to activate RX rings\n");
                return err;
        }
        for (i = 0; i < priv->rx_ring_num; i++) {
                cq = priv->rx_cq[i];

                mlx4_en_cq_init_lock(cq);

                err = mlx4_en_init_affinity_hint(priv, i);
                if (err) {
                        en_err(priv, "Failed preparing IRQ affinity hint\n");
                        goto cq_err;
                }

                err = mlx4_en_activate_cq(priv, cq, i);
                if (err) {
                        en_err(priv, "Failed activating Rx CQ\n");
                        mlx4_en_free_affinity_hint(priv, i);
                        goto cq_err;
                }

                for (j = 0; j < cq->size; j++) {
                        struct mlx4_cqe *cqe = NULL;

                        cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
                              priv->cqe_factor;
                        cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
                }

                err = mlx4_en_set_cq_moder(priv, cq);
                if (err) {
                        en_err(priv, "Failed setting cq moderation parameters\n");
                        mlx4_en_deactivate_cq(priv, cq);
                        mlx4_en_free_affinity_hint(priv, i);
                        goto cq_err;
                }
                mlx4_en_arm_cq(priv, cq);
                priv->rx_ring[i]->cqn = cq->mcq.cqn;
                ++rx_index;
        }

        /* Set qp number */
        en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
        err = mlx4_en_get_qp(priv);
        if (err) {
                en_err(priv, "Failed getting eth qp\n");
                goto cq_err;
        }
        mdev->mac_removed[priv->port] = 0;

        err = mlx4_en_config_rss_steer(priv);
        if (err) {
                en_err(priv, "Failed configuring rss steering\n");
                goto mac_err;
        }

        err = mlx4_en_create_drop_qp(priv);
        if (err)
                goto rss_err;

        /* Configure tx cq's and rings */
        for (i = 0; i < priv->tx_ring_num; i++) {
                /* Configure cq */
                cq = priv->tx_cq[i];
                err = mlx4_en_activate_cq(priv, cq, i);
                if (err) {
                        en_err(priv, "Failed allocating Tx CQ\n");
                        goto tx_err;
                }
                err = mlx4_en_set_cq_moder(priv, cq);
                if (err) {
                        en_err(priv, "Failed setting cq moderation parameters\n");
                        mlx4_en_deactivate_cq(priv, cq);
                        goto tx_err;
                }
                en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
                cq->buf->wqe_index = cpu_to_be16(0xffff);

                /* Configure ring */
                tx_ring = priv->tx_ring[i];
                err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
                                               i / priv->num_tx_rings_p_up);
                if (err) {
                        en_err(priv, "Failed allocating Tx ring\n");
                        mlx4_en_deactivate_cq(priv, cq);
                        goto tx_err;
                }
                tx_ring->tx_queue = netdev_get_tx_queue(dev, i);

                /* Arm CQ for TX completions */
                mlx4_en_arm_cq(priv, cq);

                /* Set initial ownership of all Tx TXBBs to SW (1) */
                for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
                        *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
                ++tx_index;
        }

        /* Configure port */
        err = mlx4_SET_PORT_general(mdev->dev, priv->port,
                                    priv->rx_skb_size + ETH_FCS_LEN,
                                    priv->prof->tx_pause,
                                    priv->prof->tx_ppp,
                                    priv->prof->rx_pause,
                                    priv->prof->rx_ppp);
        if (err) {
                en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
                       priv->port, err);
                goto tx_err;
        }
        /* Set default qp number */
        err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
        if (err) {
                en_err(priv, "Failed setting default qp numbers\n");
                goto tx_err;
        }

        if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
                err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
                if (err) {
                        en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
                               err);
                        goto tx_err;
                }
        }

        /* Init port */
        en_dbg(HW, priv, "Initializing port\n");
        err = mlx4_INIT_PORT(mdev->dev, priv->port);
        if (err) {
                en_err(priv, "Failed Initializing port\n");
                goto tx_err;
        }

        /* Attach rx QP to broadcast address */
        memset(&mc_list[10], 0xff, ETH_ALEN);
        mc_list[5] = priv->port; /* needed for B0 steering support */
        if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
                                  priv->port, 0, MLX4_PROT_ETH,
                                  &priv->broadcast_id))
                mlx4_warn(mdev, "Failed Attaching Broadcast\n");

        /* Must redo promiscuous mode setup. */
        priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

        /* Schedule multicast task to populate multicast list */
        queue_work(mdev->workqueue, &priv->rx_mode_task);

#ifdef CONFIG_MLX4_EN_VXLAN
        if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
                vxlan_get_rx_port(dev);
#endif
        priv->port_up = true;
        netif_tx_start_all_queues(dev);
        netif_device_attach(dev);

        return 0;

tx_err:
        while (tx_index--) {
                mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
                mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
        }
        mlx4_en_destroy_drop_qp(priv);
rss_err:
        mlx4_en_release_rss_steer(priv);
mac_err:
        mlx4_en_put_qp(priv);
cq_err:
        while (rx_index--) {
                mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
                mlx4_en_free_affinity_hint(priv, i);
        }
        for (i = 0; i < priv->rx_ring_num; i++)
                mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);

        return err; /* need to close devices */
}
void mlx4_en_stop_port(struct net_device *dev, int detach)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_mc_list *mclist, *tmp;
        struct ethtool_flow_id *flow, *tmp_flow;
        int i;
        u8 mc_list[16] = {0};

        if (!priv->port_up) {
                en_dbg(DRV, priv, "stop port called while port already down\n");
                return;
        }

        /* close port*/
        mlx4_CLOSE_PORT(mdev->dev, priv->port);

        /* Synchronize with tx routine */
        netif_tx_lock_bh(dev);
        if (detach)
                netif_device_detach(dev);
        netif_tx_stop_all_queues(dev);
        netif_tx_unlock_bh(dev);

        netif_tx_disable(dev);

        /* Set port as not active */
        priv->port_up = false;

        /* Promiscuous mode */
        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED) {
                priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
                                 MLX4_EN_FLAG_MC_PROMISC);
                mlx4_flow_steer_promisc_remove(mdev->dev,
                                               priv->port,
                                               MLX4_FS_ALL_DEFAULT);
                mlx4_flow_steer_promisc_remove(mdev->dev,
                                               priv->port,
                                               MLX4_FS_MC_DEFAULT);
        } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
                priv->flags &= ~MLX4_EN_FLAG_PROMISC;

                /* Disable promiscuous mode */
                mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
                                            priv->port);

                /* Disable Multicast promisc */
                if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
                        mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
                                                      priv->port);
                        priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
                }
        }

        /* Detach All multicasts */
        memset(&mc_list[10], 0xff, ETH_ALEN);
        mc_list[5] = priv->port; /* needed for B0 steering support */
        mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
                              MLX4_PROT_ETH, priv->broadcast_id);
        list_for_each_entry(mclist, &priv->curr_list, list) {
                memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
                mc_list[5] = priv->port;
                mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
                                      mc_list, MLX4_PROT_ETH, mclist->reg_id);
                if (mclist->tunnel_reg_id)
                        mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
        }
        mlx4_en_clear_list(dev);
        list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
                list_del(&mclist->list);
                kfree(mclist);
        }

        /* Flush multicast filter */
        mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

        /* Remove flow steering rules for the port*/
        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED) {
                ASSERT_RTNL();
                list_for_each_entry_safe(flow, tmp_flow,
                                         &priv->ethtool_list, list) {
                        mlx4_flow_detach(mdev->dev, flow->id);
                        list_del(&flow->list);
                }
        }

        mlx4_en_destroy_drop_qp(priv);

        /* Free TX Rings */
        for (i = 0; i < priv->tx_ring_num; i++) {
                mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
                mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
        }
        msleep(10);

        for (i = 0; i < priv->tx_ring_num; i++)
                mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);

        /* Free RSS qps */
        mlx4_en_release_rss_steer(priv);

        /* Unregister Mac address for the port */
        mlx4_en_put_qp(priv);
        if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
                mdev->mac_removed[priv->port] = 1;

        /* Free RX Rings */
        for (i = 0; i < priv->rx_ring_num; i++) {
                struct mlx4_en_cq *cq = priv->rx_cq[i];

                local_bh_disable();
                while (!mlx4_en_cq_lock_napi(cq)) {
                        pr_info("CQ %d locked\n", i);
                        mdelay(1);
                }
                local_bh_enable();

                napi_synchronize(&cq->napi);
                mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
                mlx4_en_deactivate_cq(priv, cq);

                mlx4_en_free_affinity_hint(priv, i);
        }
}
static void mlx4_en_restart(struct work_struct *work)
{
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 watchdog_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct net_device *dev = priv->dev;

        en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

        mutex_lock(&mdev->state_lock);
        if (priv->port_up) {
                mlx4_en_stop_port(dev, 1);
                if (mlx4_en_start_port(dev))
                        en_err(priv, "Failed restarting port %d\n", priv->port);
        }
        mutex_unlock(&mdev->state_lock);
}
static void mlx4_en_clear_stats(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int i;

        if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
                en_dbg(HW, priv, "Failed dumping statistics\n");

        memset(&priv->stats, 0, sizeof(priv->stats));
        memset(&priv->pstats, 0, sizeof(priv->pstats));
        memset(&priv->pkstats, 0, sizeof(priv->pkstats));
        memset(&priv->port_stats, 0, sizeof(priv->port_stats));

        for (i = 0; i < priv->tx_ring_num; i++) {
                priv->tx_ring[i]->bytes = 0;
                priv->tx_ring[i]->packets = 0;
                priv->tx_ring[i]->tx_csum = 0;
        }
        for (i = 0; i < priv->rx_ring_num; i++) {
                priv->rx_ring[i]->bytes = 0;
                priv->rx_ring[i]->packets = 0;
                priv->rx_ring[i]->csum_ok = 0;
                priv->rx_ring[i]->csum_none = 0;
                priv->rx_ring[i]->csum_complete = 0;
        }
}
static int mlx4_en_open(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err = 0;

        mutex_lock(&mdev->state_lock);

        if (!mdev->device_up) {
                en_err(priv, "Cannot open - device down/disabled\n");
                err = -EBUSY;
                goto out;
        }

        /* Reset HW statistics and SW counters */
        mlx4_en_clear_stats(dev);

        err = mlx4_en_start_port(dev);
        if (err)
                en_err(priv, "Failed starting port:%d\n", priv->port);

out:
        mutex_unlock(&mdev->state_lock);
        return err;
}
static int mlx4_en_close(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;

        en_dbg(IFDOWN, priv, "Close port called\n");

        mutex_lock(&mdev->state_lock);

        mlx4_en_stop_port(dev, 0);
        netif_carrier_off(dev);

        mutex_unlock(&mdev->state_lock);
        return 0;
}
void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
        int i;

#ifdef CONFIG_RFS_ACCEL
        free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
        priv->dev->rx_cpu_rmap = NULL;
#endif

        for (i = 0; i < priv->tx_ring_num; i++) {
                if (priv->tx_ring && priv->tx_ring[i])
                        mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
                if (priv->tx_cq && priv->tx_cq[i])
                        mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
        }

        for (i = 0; i < priv->rx_ring_num; i++) {
                if (priv->rx_ring[i])
                        mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
                                priv->prof->rx_ring_size, priv->stride);
                if (priv->rx_cq[i])
                        mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
        }

        if (priv->base_tx_qpn) {
                mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
                priv->base_tx_qpn = 0;
        }
}
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
        struct mlx4_en_port_profile *prof = priv->prof;
        int i;
        int node;

        /* Create tx Rings */
        for (i = 0; i < priv->tx_ring_num; i++) {
                node = cpu_to_node(i % num_online_cpus());
                if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
                                      prof->tx_ring_size, i, TX, node))
                        goto err;

                if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
                                           prof->tx_ring_size, TXBB_SIZE,
                                           node, i))
                        goto err;
        }

        /* Create rx Rings */
        for (i = 0; i < priv->rx_ring_num; i++) {
                node = cpu_to_node(i % num_online_cpus());
                if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
                                      prof->rx_ring_size, i, RX, node))
                        goto err;

                if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
                                           prof->rx_ring_size, priv->stride,
                                           node))
                        goto err;
        }

#ifdef CONFIG_RFS_ACCEL
        if (priv->mdev->dev->caps.comp_pool) {
                priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
                if (!priv->dev->rx_cpu_rmap)
                        goto err;
        }
#endif

        return 0;

err:
        en_err(priv, "Failed to allocate NIC resources\n");
        for (i = 0; i < priv->rx_ring_num; i++) {
                if (priv->rx_ring[i])
                        mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
                                                prof->rx_ring_size,
                                                priv->stride);
                if (priv->rx_cq[i])
                        mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
        }
        for (i = 0; i < priv->tx_ring_num; i++) {
                if (priv->tx_ring[i])
                        mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
                if (priv->tx_cq[i])
                        mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
        }
        return -ENOMEM;
}
void mlx4_en_destroy_netdev(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;

        en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

        /* Unregister device - this will close the port if it was up */
        if (priv->registered)
                unregister_netdev(dev);

        if (priv->allocated)
                mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

        cancel_delayed_work(&priv->stats_task);
        cancel_delayed_work(&priv->service_task);
        /* flush any pending task for this netdev */
        flush_workqueue(mdev->workqueue);

        /* Detach the netdev so tasks would not attempt to access it */
        mutex_lock(&mdev->state_lock);
        mdev->pndev[priv->port] = NULL;
        mdev->upper[priv->port] = NULL;
        mutex_unlock(&mdev->state_lock);

        mlx4_en_free_resources(priv);

        kfree(priv->tx_ring);
        kfree(priv->tx_cq);

        free_netdev(dev);
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err = 0;

        en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
               dev->mtu, new_mtu);

        if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
                en_err(priv, "Bad MTU size:%d.\n", new_mtu);
                return -EPERM;
        }
        dev->mtu = new_mtu;

        if (netif_running(dev)) {
                mutex_lock(&mdev->state_lock);
                if (!mdev->device_up) {
                        /* NIC is probably restarting - let watchdog task reset
                         * the port */
                        en_dbg(DRV, priv, "Change MTU called with card down!?\n");
                } else {
                        mlx4_en_stop_port(dev, 1);
                        err = mlx4_en_start_port(dev);
                        if (err) {
                                en_err(priv, "Failed restarting port:%d\n",
                                       priv->port);
                                queue_work(mdev->workqueue, &priv->watchdog_task);
                        }
                }
                mutex_unlock(&mdev->state_lock);
        }
        return 0;
}
static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct hwtstamp_config config;

        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;

        /* reserved for future extensions */
        if (config.flags)
                return -EINVAL;

        /* device doesn't support time stamping */
        if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
                return -EINVAL;

        /* TX HW timestamp */
        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
        case HWTSTAMP_TX_ON:
                break;
        default:
                return -ERANGE;
        }

        /* RX HW timestamp */
        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                break;
        case HWTSTAMP_FILTER_ALL:
        case HWTSTAMP_FILTER_SOME:
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        default:
                return -ERANGE;
        }

        if (mlx4_en_reset_config(dev, config, dev->features)) {
                config.tx_type = HWTSTAMP_TX_OFF;
                config.rx_filter = HWTSTAMP_FILTER_NONE;
        }

        return copy_to_user(ifr->ifr_data, &config,
                            sizeof(config)) ? -EFAULT : 0;
}

static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);

        return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
                            sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
}
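
/*
 * Illustrative sketch (not part of the driver): the two handlers above
 * service the standard SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls. A minimal
 * userspace caller that enables TX and RX timestamping on an interface
 * (the interface name is an example):
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *
 *	int main(void)
 *	{
 *		struct hwtstamp_config cfg = {
 *			.tx_type   = HWTSTAMP_TX_ON,
 *			.rx_filter = HWTSTAMP_FILTER_ALL,
 *		};
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (void *)&cfg;
 *		if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
 *			perror("SIOCSHWTSTAMP");
 *		else	// the kernel may widen the filter it grants
 *			printf("rx_filter granted: %d\n", cfg.rx_filter);
 *		close(fd);
 *		return 0;
 *	}
 */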
static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        switch (cmd) {
        case SIOCSHWTSTAMP:
                return mlx4_en_hwtstamp_set(dev, ifr);
        case SIOCGHWTSTAMP:
                return mlx4_en_hwtstamp_get(dev, ifr);
        default:
                return -EOPNOTSUPP;
        }
}
static int mlx4_en_set_features(struct net_device *netdev,
                                netdev_features_t features)
{
        struct mlx4_en_priv *priv = netdev_priv(netdev);
        int ret = 0;

        if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
                en_info(priv, "Turn %s RX vlan strip offload\n",
                        (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
                ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
                                           features);
                if (ret)
                        return ret;
        }

        if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
                en_info(priv, "Turn %s TX vlan strip offload\n",
                        (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");

        if (features & NETIF_F_LOOPBACK)
                priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
        else
                priv->ctrl_flags &=
                        cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);

        mlx4_en_update_loopback_state(netdev, features);

        return 0;
}
static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
{
        struct mlx4_en_priv *en_priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = en_priv->mdev;
        u64 mac_u64 = mlx4_mac_to_u64(mac);

        if (!is_valid_ether_addr(mac))
                return -EINVAL;

        return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
}

static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
{
        struct mlx4_en_priv *en_priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = en_priv->mdev;

        return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
}

static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
        struct mlx4_en_priv *en_priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = en_priv->mdev;

        return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
}

static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
{
        struct mlx4_en_priv *en_priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = en_priv->mdev;

        return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
}

static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
        struct mlx4_en_priv *en_priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = en_priv->mdev;

        return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}
#define PORT_ID_BYTE_LEN 8
static int mlx4_en_get_phys_port_id(struct net_device *dev,
                                    struct netdev_phys_item_id *ppid)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_dev *mdev = priv->mdev->dev;
        int i;
        u64 phys_port_id = mdev->caps.phys_port_id[priv->port];

        if (!phys_port_id)
                return -EOPNOTSUPP;

        ppid->id_len = sizeof(phys_port_id);
        for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
                ppid->id[i] = phys_port_id & 0xff;
                phys_port_id >>= 8;
        }
        return 0;
}
#ifdef CONFIG_MLX4_EN_VXLAN
static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
{
        int ret;
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 vxlan_add_task);

        ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
        if (ret)
                goto out;

        ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
                                  VXLAN_STEER_BY_OUTER_MAC, 1);
out:
        if (ret) {
                en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
                return;
        }

        /* set offloads */
        priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
                                      NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
        priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
        priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
}

static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
{
        int ret;
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 vxlan_del_task);
        /* unset offloads */
        priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
                                        NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
        priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
        priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;

        ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
                                  VXLAN_STEER_BY_OUTER_MAC, 0);
        if (ret)
                en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);

        priv->vxlan_port = 0;
}
static void mlx4_en_add_vxlan_port(struct net_device *dev,
				   sa_family_t sa_family, __be16 port)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	__be16 current_port;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		return;

	if (sa_family == AF_INET6)
		return;

	current_port = priv->vxlan_port;
	if (current_port && current_port != port) {
		en_warn(priv, "vxlan port %d configured, can't add port %d\n",
			ntohs(current_port), ntohs(port));
		return;
	}

	priv->vxlan_port = port;
	queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
}
static void mlx4_en_del_vxlan_port(struct net_device *dev,
				   sa_family_t sa_family, __be16 port)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	__be16 current_port;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		return;

	if (sa_family == AF_INET6)
		return;

	current_port = priv->vxlan_port;
	if (current_port != port) {
		en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
		return;
	}

	queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
}
static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	return vxlan_features_check(skb, features);
}
#endif
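
/* Two ndo tables are kept: mlx4_netdev_ops for plain/slave functions and
 * mlx4_netdev_ops_master, which additionally exposes the SR-IOV VF
 * management hooks. mlx4_en_init_netdev() picks one based on
 * mlx4_is_master().
 */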
static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_do_ioctl		= mlx4_en_ioctl,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= mlx4_en_low_latency_recv,
#endif
	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
#ifdef CONFIG_MLX4_EN_VXLAN
	.ndo_add_vxlan_port	= mlx4_en_add_vxlan_port,
	.ndo_del_vxlan_port	= mlx4_en_del_vxlan_port,
	.ndo_features_check	= mlx4_en_features_check,
#endif
};
static const struct net_device_ops mlx4_netdev_ops_master = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= mlx4_en_set_vf_mac,
	.ndo_set_vf_vlan	= mlx4_en_set_vf_vlan,
	.ndo_set_vf_spoofchk	= mlx4_en_set_vf_spoofchk,
	.ndo_set_vf_link_state	= mlx4_en_set_vf_link_state,
	.ndo_get_vf_config	= mlx4_en_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
#ifdef CONFIG_MLX4_EN_VXLAN
	.ndo_add_vxlan_port	= mlx4_en_add_vxlan_port,
	.ndo_del_vxlan_port	= mlx4_en_del_vxlan_port,
	.ndo_features_check	= mlx4_en_features_check,
#endif
};
struct mlx4_en_bond {
	struct work_struct work;
	struct mlx4_en_priv *priv;
	int is_bonded;
	struct mlx4_port_map port_map;
};
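
/* Bond/unbond requests are handed off to the mdev workqueue; the work item
 * carries the desired bonding state and the virtual-to-physical port map,
 * and owns a reference on the netdev until the work completes.
 */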
static void mlx4_en_bond_work(struct work_struct *work)
{
	struct mlx4_en_bond *bond = container_of(work,
						 struct mlx4_en_bond,
						 work);
	int err = 0;
	struct mlx4_dev *dev = bond->priv->mdev->dev;

	if (bond->is_bonded) {
		if (!mlx4_is_bonded(dev)) {
			err = mlx4_bond(dev);
			if (err)
				en_err(bond->priv, "Fail to bond device\n");
		}
		if (!err) {
			err = mlx4_port_map_set(dev, &bond->port_map);
			if (err)
				en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n",
				       bond->port_map.port1,
				       bond->port_map.port2,
				       err);
		}
	} else if (mlx4_is_bonded(dev)) {
		err = mlx4_unbond(dev);
		if (err)
			en_err(bond->priv, "Fail to unbond device\n");
	}
	dev_put(bond->priv->dev);
	kfree(bond);
}
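
/* Called from the netdev notifier path, hence the atomic allocation; takes
 * a reference on the netdev that mlx4_en_bond_work() drops when done.
 */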
static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
				   u8 v2p_p1, u8 v2p_p2)
{
	struct mlx4_en_bond *bond = NULL;

	bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
	if (!bond)
		return -ENOMEM;

	INIT_WORK(&bond->work, mlx4_en_bond_work);
	bond->priv = priv;
	bond->is_bonded = is_bonded;
	bond->port_map.port1 = v2p_p1;
	bond->port_map.port2 = v2p_p2;
	dev_hold(priv->dev);
	queue_work(priv->mdev->workqueue, &bond->work);
	return 0;
}
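
/* Netdev notifier: detect when both Ethernet ports of the same mlx4 device
 * are enslaved to one bonding master in active-backup, XOR or 802.3ad mode,
 * and program the virtual-to-physical port map accordingly (HA / link
 * aggregation on a dual-port HCA).
 */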
int mlx4_en_netdev_event(struct notifier_block *this,
			 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	u8 port = 0;
	struct mlx4_en_dev *mdev;
	struct mlx4_dev *dev;
	int i, num_eth_ports = 0;
	bool do_bond = true;
	struct mlx4_en_priv *priv;
	u8 v2p_port1 = 0;
	u8 v2p_port2 = 0;

	if (!net_eq(dev_net(ndev), &init_net))
		return NOTIFY_DONE;

	mdev = container_of(this, struct mlx4_en_dev, nb);
	dev = mdev->dev;

	/* Go into this mode only when two network devices set on two ports
	 * of the same mlx4 device are slaves of the same bonding master
	 */
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		++num_eth_ports;
		if (!port && (mdev->pndev[i] == ndev))
			port = i;
		mdev->upper[i] = mdev->pndev[i] ?
			netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
		/* condition not met: network device is a slave */
		if (!mdev->upper[i])
			do_bond = false;
		if (num_eth_ports < 2)
			continue;
		/* condition not met: same master */
		if (mdev->upper[i] != mdev->upper[i-1])
			do_bond = false;
	}
	/* condition not met: 2 slaves */
	do_bond = (num_eth_ports == 2) ? do_bond : false;

	/* handle only events that come with enough info */
	if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
		return NOTIFY_DONE;

	priv = netdev_priv(ndev);
	if (do_bond) {
		struct netdev_notifier_bonding_info *notifier_info = ptr;
		struct netdev_bonding_info *bonding_info =
			&notifier_info->bonding_info;

		/* required mode 1, 2 or 4 */
		if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
		    (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
		    (bonding_info->master.bond_mode != BOND_MODE_8023AD))
			do_bond = false;

		/* require exactly 2 slaves */
		if (bonding_info->master.num_slaves != 2)
			do_bond = false;

		/* calc v2p */
		if (do_bond) {
			if (bonding_info->master.bond_mode ==
			    BOND_MODE_ACTIVEBACKUP) {
				/* in active-backup mode virtual ports are
				 * mapped to the physical port of the active
				 * slave */
				if (bonding_info->slave.state ==
				    BOND_STATE_BACKUP) {
					if (port == 1) {
						v2p_port1 = 2;
						v2p_port2 = 2;
					} else {
						v2p_port1 = 1;
						v2p_port2 = 1;
					}
				} else { /* BOND_STATE_ACTIVE */
					if (port == 1) {
						v2p_port1 = 1;
						v2p_port2 = 1;
					} else {
						v2p_port1 = 2;
						v2p_port2 = 2;
					}
				}
			} else { /* Active-Active */
				/* in active-active mode a virtual port is
				 * mapped to the native physical port if and only
				 * if the physical port is up */
				__s8 link = bonding_info->slave.link;

				if (port == 1)
					v2p_port2 = 2;
				else
					v2p_port1 = 1;
				if ((link == BOND_LINK_UP) ||
				    (link == BOND_LINK_FAIL)) {
					if (port == 1)
						v2p_port1 = 1;
					else
						v2p_port2 = 2;
				} else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
					if (port == 1)
						v2p_port1 = 2;
					else
						v2p_port2 = 1;
				}
			}
		}
	}

	mlx4_en_queue_bond_work(priv, do_bond,
				v2p_port1, v2p_port2);

	return NOTIFY_DONE;
}
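
/* Create and register the netdev for one physical port: allocate the
 * multi-queue Ethernet device, initialize the driver private state and work
 * items, set up features and offloads, configure the port in firmware and
 * finally register with the network stack.
 */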
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;
	u64 mac_u64;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 MAX_TX_RINGS, MAX_RX_RINGS);
	if (dev == NULL)
		return -ENOMEM;

	netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);

	SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
	dev->dev_port = port - 1;

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
#ifdef CONFIG_MLX4_EN_VXLAN
	INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
	INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
#endif
#ifdef CONFIG_RFS_ACCEL
	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
			MLX4_WQE_CTRL_SOLICITED);
	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
	priv->tx_ring_num = prof->tx_ring_num;
	priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
	netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));

	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
				GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
			      GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}
	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->cqe_size = mdev->dev->caps.cqe_size;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
		} else {
			en_info(priv, "enabling only PFC DCB ops\n");
			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
		}
	}
#endif

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
		INIT_HLIST_HEAD(&priv->mac_hash[i]);
	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];

	if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
	    MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
		priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
	if (!is_valid_ether_addr(dev->dev_addr)) {
		if (mlx4_is_slave(priv->mdev->dev)) {
			eth_hw_addr_random(dev);
			en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
			mac_u64 = mlx4_mac_to_u64(dev->dev_addr);
			mdev->dev->caps.def_mac[priv->port] = mac_u64;
		} else {
			en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
			       priv->port, dev->dev_addr);
			err = -EINVAL;
			goto out;
		}
	}

	memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Initialize time stamping config */
	priv->hwtstamp_config.flags = 0;
	priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
	priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;
	/*
	 * Initialize netdev entry points
	 */
	if (mlx4_is_master(priv->mdev->dev))
		dev->netdev_ops = &mlx4_netdev_ops_master;
	else
		dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	dev->ethtool_ops = &mlx4_en_ethtool_ops;

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_VLAN_CTAG_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
		dev->hw_features |= NETIF_F_NTUPLE;

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		dev->priv_flags |= IFF_UNICAST_FLT;
	/* Setting a default hash function value */
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
	} else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
		priv->rss_hash_fn = ETH_RSS_HASH_XOR;
	} else {
		en_warn(priv,
			"No RSS hash capabilities exposed, using Toeplitz\n");
		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
	}

	mdev->pndev[port] = dev;
	mdev->upper[port] = NULL;

	netif_carrier_off(dev);
	mlx4_en_set_default_moderation(priv);

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
	/* Configure port */
	mlx4_en_calc_rx_buf(dev);
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto out;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto out;
		}
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);

	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);

	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}

	priv->registered = 1;

	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}
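
/* Apply a new hardware timestamping config and/or RX vlan offload setting.
 * The port is stopped and its resources reallocated so the change takes
 * effect on freshly created rings and CQs; hardware RX time-stamping and
 * RX vlan stripping are treated as mutually exclusive here.
 */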
int mlx4_en_reset_config(struct net_device *dev,
			 struct hwtstamp_config ts_config,
			 netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;

	if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
	    priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
	    !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX))
		return 0; /* Nothing to change */

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
	    (features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
		en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
		return -EINVAL;
	}

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_free_resources(priv);

	en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
		ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));

	priv->hwtstamp_config.tx_type = ts_config.tx_type;
	priv->hwtstamp_config.rx_filter = ts_config.rx_filter;

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
		else
			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	} else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
		/* RX time-stamping is OFF, update the RX vlan offload
		 * to the latest wanted state
		 */
		if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
		else
			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	}

	/* RX vlan offload and RX time-stamping can't co-exist.
	 * Regardless of the caller's choice, turn off RX vlan offload
	 * when time-stamping is on.
	 */
	if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
			en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	}

	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

out:
	mutex_unlock(&mdev->state_lock);
	netdev_features_change(dev);
	return err;
}