/*
 * batman-adv: refactoring API: find generalized name for bat_ogm_init_primary callback
 * [firefly-linux-kernel-4.4.55.git] / net / batman-adv / hard-interface.c
 */
1 /*
2  * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3  *
4  * Marek Lindner, Simon Wunderlich
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of version 2 of the GNU General Public
8  * License as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13  * General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18  * 02110-1301, USA
19  *
20  */
21
22 #include "main.h"
23 #include "hard-interface.h"
24 #include "soft-interface.h"
25 #include "send.h"
26 #include "translation-table.h"
27 #include "routing.h"
28 #include "bat_sysfs.h"
29 #include "originator.h"
30 #include "hash.h"
31 #include "bridge_loop_avoidance.h"
32
33 #include <linux/if_arp.h>
34
35
36 static int batman_skb_recv(struct sk_buff *skb,
37                            struct net_device *dev,
38                            struct packet_type *ptype,
39                            struct net_device *orig_dev);
40
41 void hardif_free_rcu(struct rcu_head *rcu)
42 {
43         struct hard_iface *hard_iface;
44
45         hard_iface = container_of(rcu, struct hard_iface, rcu);
46         dev_put(hard_iface->net_dev);
47         kfree(hard_iface);
48 }
49
50 struct hard_iface *hardif_get_by_netdev(const struct net_device *net_dev)
51 {
52         struct hard_iface *hard_iface;
53
54         rcu_read_lock();
55         list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
56                 if (hard_iface->net_dev == net_dev &&
57                     atomic_inc_not_zero(&hard_iface->refcount))
58                         goto out;
59         }
60
61         hard_iface = NULL;
62
63 out:
64         rcu_read_unlock();
65         return hard_iface;
66 }
67
68 static int is_valid_iface(const struct net_device *net_dev)
69 {
70         if (net_dev->flags & IFF_LOOPBACK)
71                 return 0;
72
73         if (net_dev->type != ARPHRD_ETHER)
74                 return 0;
75
76         if (net_dev->addr_len != ETH_ALEN)
77                 return 0;
78
79         /* no batman over batman */
80         if (softif_is_valid(net_dev))
81                 return 0;
82
83         /* Device is being bridged */
84         /* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
85                 return 0; */
86
87         return 1;
88 }
89
90 static struct hard_iface *hardif_get_active(const struct net_device *soft_iface)
91 {
92         struct hard_iface *hard_iface;
93
94         rcu_read_lock();
95         list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
96                 if (hard_iface->soft_iface != soft_iface)
97                         continue;
98
99                 if (hard_iface->if_status == IF_ACTIVE &&
100                     atomic_inc_not_zero(&hard_iface->refcount))
101                         goto out;
102         }
103
104         hard_iface = NULL;
105
106 out:
107         rcu_read_unlock();
108         return hard_iface;
109 }
110
/* Copy the current primary interface MAC address into our own vis
 * packet (both the originator and the sender field) and let the bridge
 * loop avoidance code re-announce its claims under the new address.
 *
 * @oldif: the previously selected primary interface, or NULL when only
 *         the address of the current primary interface changed
 */
static void primary_if_update_addr(struct bat_priv *bat_priv,
				   struct hard_iface *oldif)
{
	struct vis_packet *vis_packet;
	struct hard_iface *primary_if;

	/* nothing to update without a primary interface */
	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	vis_packet = (struct vis_packet *)
				bat_priv->my_vis_info->skb_packet->data;
	memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(vis_packet->sender_orig,
	       primary_if->net_dev->dev_addr, ETH_ALEN);

	bla_update_orig_address(bat_priv, primary_if, oldif);
out:
	/* drop the reference taken by primary_if_get_selected() */
	if (primary_if)
		hardif_free_ref(primary_if);
}
132
/* Make @new_hard_iface the primary interface of this mesh.
 *
 * Takes its own reference on @new_hard_iface (the caller's reference is
 * untouched) and publishes the pointer via rcu_assign_pointer(); the
 * reference previously held through bat_priv->primary_if is dropped at
 * the end. Passing NULL (or an interface whose refcount already hit
 * zero) clears the primary interface selection.
 *
 * Must be called while holding the RTNL lock.
 */
static void primary_if_select(struct bat_priv *bat_priv,
			      struct hard_iface *new_hard_iface)
{
	struct hard_iface *curr_hard_iface;

	ASSERT_RTNL();

	/* take a reference for bat_priv->primary_if; fall back to NULL
	 * if the interface is already on its way out */
	if (new_hard_iface && !atomic_inc_not_zero(&new_hard_iface->refcount))
		new_hard_iface = NULL;

	/* writers are serialized by RTNL, hence the "1" as the lockdep
	 * condition for rcu_dereference_protected() */
	curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1);
	rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);

	if (!new_hard_iface)
		goto out;

	/* let the routing algorithm adapt its OGMs to the new primary
	 * interface, then re-announce our address */
	bat_priv->bat_algo_ops->bat_primary_iface_set(new_hard_iface);
	primary_if_update_addr(bat_priv, curr_hard_iface);

out:
	/* release the reference the old primary_if pointer was holding */
	if (curr_hard_iface)
		hardif_free_ref(curr_hard_iface);
}
156
157 static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
158 {
159         if (hard_iface->net_dev->flags & IFF_UP)
160                 return true;
161
162         return false;
163 }
164
165 static void check_known_mac_addr(const struct net_device *net_dev)
166 {
167         const struct hard_iface *hard_iface;
168
169         rcu_read_lock();
170         list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
171                 if ((hard_iface->if_status != IF_ACTIVE) &&
172                     (hard_iface->if_status != IF_TO_BE_ACTIVATED))
173                         continue;
174
175                 if (hard_iface->net_dev == net_dev)
176                         continue;
177
178                 if (!compare_eth(hard_iface->net_dev->dev_addr,
179                                  net_dev->dev_addr))
180                         continue;
181
182                 pr_warning("The newly added mac address (%pM) already exists on: %s\n",
183                            net_dev->dev_addr, hard_iface->net_dev->name);
184                 pr_warning("It is strongly recommended to keep mac addresses unique to avoid problems!\n");
185         }
186         rcu_read_unlock();
187 }
188
189 int hardif_min_mtu(struct net_device *soft_iface)
190 {
191         const struct bat_priv *bat_priv = netdev_priv(soft_iface);
192         const struct hard_iface *hard_iface;
193         /* allow big frames if all devices are capable to do so
194          * (have MTU > 1500 + BAT_HEADER_LEN) */
195         int min_mtu = ETH_DATA_LEN;
196
197         if (atomic_read(&bat_priv->fragmentation))
198                 goto out;
199
200         rcu_read_lock();
201         list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
202                 if ((hard_iface->if_status != IF_ACTIVE) &&
203                     (hard_iface->if_status != IF_TO_BE_ACTIVATED))
204                         continue;
205
206                 if (hard_iface->soft_iface != soft_iface)
207                         continue;
208
209                 min_mtu = min_t(int, hard_iface->net_dev->mtu - BAT_HEADER_LEN,
210                                 min_mtu);
211         }
212         rcu_read_unlock();
213 out:
214         return min_mtu;
215 }
216
217 /* adjusts the MTU if a new interface with a smaller MTU appeared. */
218 void update_min_mtu(struct net_device *soft_iface)
219 {
220         int min_mtu;
221
222         min_mtu = hardif_min_mtu(soft_iface);
223         if (soft_iface->mtu != min_mtu)
224                 soft_iface->mtu = min_mtu;
225 }
226
/* Move @hard_iface from IF_INACTIVE to IF_TO_BE_ACTIVATED: refresh the
 * MAC address stored in its OGM buffer, promote it to primary
 * interface if the mesh has none yet, and recompute the soft interface
 * MTU.
 */
static void hardif_activate_interface(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv;
	struct hard_iface *primary_if = NULL;

	/* only interfaces attached to a mesh but currently inactive
	 * can be activated */
	if (hard_iface->if_status != IF_INACTIVE)
		goto out;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface);
	hard_iface->if_status = IF_TO_BE_ACTIVATED;

	/**
	 * the first active interface becomes our primary interface or
	 * the next active interface after the old primary interface was removed
	 */
	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		primary_if_select(bat_priv, hard_iface);

	bat_info(hard_iface->soft_iface, "Interface activated: %s\n",
		 hard_iface->net_dev->name);

	update_min_mtu(hard_iface->soft_iface);

out:
	/* drop the reference taken by primary_if_get_selected() */
	if (primary_if)
		hardif_free_ref(primary_if);
}
257
258 static void hardif_deactivate_interface(struct hard_iface *hard_iface)
259 {
260         if ((hard_iface->if_status != IF_ACTIVE) &&
261             (hard_iface->if_status != IF_TO_BE_ACTIVATED))
262                 return;
263
264         hard_iface->if_status = IF_INACTIVE;
265
266         bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
267                  hard_iface->net_dev->name);
268
269         update_min_mtu(hard_iface->soft_iface);
270 }
271
272 int hardif_enable_interface(struct hard_iface *hard_iface,
273                             const char *iface_name)
274 {
275         struct bat_priv *bat_priv;
276         struct net_device *soft_iface;
277         int ret;
278
279         if (hard_iface->if_status != IF_NOT_IN_USE)
280                 goto out;
281
282         if (!atomic_inc_not_zero(&hard_iface->refcount))
283                 goto out;
284
285         /* hard-interface is part of a bridge */
286         if (hard_iface->net_dev->priv_flags & IFF_BRIDGE_PORT)
287                 pr_err("You are about to enable batman-adv on '%s' which already is part of a bridge. Unless you know exactly what you are doing this is probably wrong and won't work the way you think it would.\n",
288                        hard_iface->net_dev->name);
289
290         soft_iface = dev_get_by_name(&init_net, iface_name);
291
292         if (!soft_iface) {
293                 soft_iface = softif_create(iface_name);
294
295                 if (!soft_iface) {
296                         ret = -ENOMEM;
297                         goto err;
298                 }
299
300                 /* dev_get_by_name() increases the reference counter for us */
301                 dev_hold(soft_iface);
302         }
303
304         if (!softif_is_valid(soft_iface)) {
305                 pr_err("Can't create batman mesh interface %s: already exists as regular interface\n",
306                        soft_iface->name);
307                 ret = -EINVAL;
308                 goto err_dev;
309         }
310
311         hard_iface->soft_iface = soft_iface;
312         bat_priv = netdev_priv(hard_iface->soft_iface);
313
314         ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface);
315         if (ret < 0) {
316                 ret = -ENOMEM;
317                 goto err_dev;
318         }
319
320         hard_iface->if_num = bat_priv->num_ifaces;
321         bat_priv->num_ifaces++;
322         hard_iface->if_status = IF_INACTIVE;
323         orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
324
325         hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
326         hard_iface->batman_adv_ptype.func = batman_skb_recv;
327         hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
328         dev_add_pack(&hard_iface->batman_adv_ptype);
329
330         atomic_set(&hard_iface->frag_seqno, 1);
331         bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
332                  hard_iface->net_dev->name);
333
334         if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
335                 ETH_DATA_LEN + BAT_HEADER_LEN)
336                 bat_info(hard_iface->soft_iface,
337                          "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n",
338                          hard_iface->net_dev->name, hard_iface->net_dev->mtu,
339                          ETH_DATA_LEN + BAT_HEADER_LEN);
340
341         if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
342                 ETH_DATA_LEN + BAT_HEADER_LEN)
343                 bat_info(hard_iface->soft_iface,
344                          "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n",
345                          hard_iface->net_dev->name, hard_iface->net_dev->mtu,
346                          ETH_DATA_LEN + BAT_HEADER_LEN);
347
348         if (hardif_is_iface_up(hard_iface))
349                 hardif_activate_interface(hard_iface);
350         else
351                 bat_err(hard_iface->soft_iface,
352                         "Not using interface %s (retrying later): interface not active\n",
353                         hard_iface->net_dev->name);
354
355         /* begin scheduling originator messages on that interface */
356         schedule_bat_ogm(hard_iface);
357
358 out:
359         return 0;
360
361 err_dev:
362         dev_put(soft_iface);
363 err:
364         hardif_free_ref(hard_iface);
365         return ret;
366 }
367
/* Detach @hard_iface from its mesh: deactivate it, unregister the
 * batman ethertype handler, elect a new primary interface if needed,
 * purge all references to it and release the references taken by
 * hardif_enable_interface(). The soft interface is destroyed when the
 * last hard interface leaves the mesh.
 */
void hardif_disable_interface(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hard_iface *primary_if = NULL;

	if (hard_iface->if_status == IF_ACTIVE)
		hardif_deactivate_interface(hard_iface);

	/* bail out if the interface did not end up in IF_INACTIVE -
	 * then it was never (fully) enabled */
	if (hard_iface->if_status != IF_INACTIVE)
		goto out;

	bat_info(hard_iface->soft_iface, "Removing interface: %s\n",
		 hard_iface->net_dev->name);
	dev_remove_pack(&hard_iface->batman_adv_ptype);

	bat_priv->num_ifaces--;
	orig_hash_del_if(hard_iface, bat_priv->num_ifaces);

	/* if we were the primary interface, promote another active one */
	primary_if = primary_if_get_selected(bat_priv);
	if (hard_iface == primary_if) {
		struct hard_iface *new_if;

		new_if = hardif_get_active(hard_iface->soft_iface);
		primary_if_select(bat_priv, new_if);

		if (new_if)
			hardif_free_ref(new_if);
	}

	bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
	hard_iface->if_status = IF_NOT_IN_USE;

	/* delete all references to this hard_iface */
	purge_orig_ref(bat_priv);
	purge_outstanding_packets(bat_priv, hard_iface);
	/* release the soft interface reference taken on enable */
	dev_put(hard_iface->soft_iface);

	/* nobody uses this interface anymore */
	if (!bat_priv->num_ifaces)
		softif_destroy(hard_iface->soft_iface);

	hard_iface->soft_iface = NULL;
	/* drop the reference taken by hardif_enable_interface() */
	hardif_free_ref(hard_iface);

out:
	/* drop the reference taken by primary_if_get_selected() */
	if (primary_if)
		hardif_free_ref(primary_if);
}
416
/* Allocate and register a hard_iface for @net_dev (called on
 * NETDEV_REGISTER while the RTNL lock is held).
 *
 * Returns the new hard_iface carrying an extra reference for the
 * caller, or NULL if the device is unsuitable or allocation/sysfs
 * setup failed.
 */
static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
{
	struct hard_iface *hard_iface;
	int ret;

	ASSERT_RTNL();

	ret = is_valid_iface(net_dev);
	if (ret != 1)
		goto out;

	/* hold the net_device for as long as the hard_iface exists;
	 * released again in hardif_free_rcu() */
	dev_hold(net_dev);

	hard_iface = kmalloc(sizeof(*hard_iface), GFP_ATOMIC);
	if (!hard_iface)
		goto release_dev;

	ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
	if (ret)
		goto free_if;

	hard_iface->if_num = -1;
	hard_iface->net_dev = net_dev;
	hard_iface->soft_iface = NULL;
	hard_iface->if_status = IF_NOT_IN_USE;
	INIT_LIST_HEAD(&hard_iface->list);
	/* extra reference for return */
	atomic_set(&hard_iface->refcount, 2);

	check_known_mac_addr(hard_iface->net_dev);
	list_add_tail_rcu(&hard_iface->list, &hardif_list);

	/**
	 * This can't be called via a bat_priv callback because
	 * we have no bat_priv yet.
	 */
	atomic_set(&hard_iface->seqno, 1);
	hard_iface->packet_buff = NULL;

	return hard_iface;

free_if:
	kfree(hard_iface);
release_dev:
	dev_put(net_dev);
out:
	return NULL;
}
465
/* Tear down a hard_iface: disable it if still in use, remove the sysfs
 * entry and drop the list reference. Callers must hold the RTNL lock
 * and have already unlinked the entry from hardif_list.
 */
static void hardif_remove_interface(struct hard_iface *hard_iface)
{
	ASSERT_RTNL();

	/* first deactivate interface */
	if (hard_iface->if_status != IF_NOT_IN_USE)
		hardif_disable_interface(hard_iface);

	/* hardif_disable_interface() may bail out without resetting the
	 * status to IF_NOT_IN_USE; in that case leave the entry alone */
	if (hard_iface->if_status != IF_NOT_IN_USE)
		return;

	hard_iface->if_status = IF_TO_BE_REMOVED;
	sysfs_del_hardif(&hard_iface->hardif_obj);
	/* drop the list reference taken in hardif_add_interface() */
	hardif_free_ref(hard_iface);
}
481
482 void hardif_remove_interfaces(void)
483 {
484         struct hard_iface *hard_iface, *hard_iface_tmp;
485
486         rtnl_lock();
487         list_for_each_entry_safe(hard_iface, hard_iface_tmp,
488                                  &hardif_list, list) {
489                 list_del_rcu(&hard_iface->list);
490                 hardif_remove_interface(hard_iface);
491         }
492         rtnl_unlock();
493 }
494
/* netdev notifier callback: keep the hard_iface state machine in sync
 * with NETDEV_* events of the underlying network device.
 */
static int hard_if_event(struct notifier_block *this,
			 unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;
	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
	struct hard_iface *primary_if = NULL;
	struct bat_priv *bat_priv;

	/* unknown devices are picked up on registration */
	if (!hard_iface && event == NETDEV_REGISTER)
		hard_iface = hardif_add_interface(net_dev);

	if (!hard_iface)
		goto out;

	switch (event) {
	case NETDEV_UP:
		hardif_activate_interface(hard_iface);
		break;
	case NETDEV_GOING_DOWN:
	case NETDEV_DOWN:
		hardif_deactivate_interface(hard_iface);
		break;
	case NETDEV_UNREGISTER:
		list_del_rcu(&hard_iface->list);

		hardif_remove_interface(hard_iface);
		break;
	case NETDEV_CHANGEMTU:
		if (hard_iface->soft_iface)
			update_min_mtu(hard_iface->soft_iface);
		break;
	case NETDEV_CHANGEADDR:
		/* an unused interface has no mesh to inform */
		if (hard_iface->if_status == IF_NOT_IN_USE)
			goto hardif_put;

		check_known_mac_addr(hard_iface->net_dev);

		bat_priv = netdev_priv(hard_iface->soft_iface);
		bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface);

		primary_if = primary_if_get_selected(bat_priv);
		if (!primary_if)
			goto hardif_put;

		/* re-announce the new address mesh-wide if we are the
		 * primary interface (oldif is NULL: the interface
		 * itself did not change, only its address) */
		if (hard_iface == primary_if)
			primary_if_update_addr(bat_priv, NULL);
		break;
	default:
		break;
	}

hardif_put:
	/* drop the reference taken by hardif_get_by_netdev() or
	 * hardif_add_interface() */
	hardif_free_ref(hard_iface);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return NOTIFY_DONE;
}
553
/* incoming packets with the batman ethertype received on any active hard
 * interface
 *
 * Validates the frame (shared skb, minimal length, ethernet header,
 * active mesh/interface, compatible protocol version) and dispatches it
 * to the handler matching its packet type. Handlers take ownership of
 * the skb unless they return NET_RX_DROP, in which case it is freed
 * here.
 */
static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct bat_priv *bat_priv;
	struct batman_ogm_packet *batman_ogm_packet;
	struct hard_iface *hard_iface;
	int ret;

	hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != sizeof(struct ethhdr) ||
		     !skb_mac_header(skb)))
		goto err_free;

	/* interface is not attached to a mesh (yet) */
	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	/* mesh is shutting down or not fully up */
	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != IF_ACTIVE)
		goto err_free;

	batman_ogm_packet = (struct batman_ogm_packet *)skb->data;

	if (batman_ogm_packet->header.version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_ogm_packet->header.version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb. */

	switch (batman_ogm_packet->header.packet_type) {
		/* batman originator packet */
	case BAT_OGM:
		ret = recv_bat_ogm_packet(skb, hard_iface);
		break;

		/* batman icmp packet */
	case BAT_ICMP:
		ret = recv_icmp_packet(skb, hard_iface);
		break;

		/* unicast packet */
	case BAT_UNICAST:
		ret = recv_unicast_packet(skb, hard_iface);
		break;

		/* fragmented unicast packet */
	case BAT_UNICAST_FRAG:
		ret = recv_ucast_frag_packet(skb, hard_iface);
		break;

		/* broadcast packet */
	case BAT_BCAST:
		ret = recv_bcast_packet(skb, hard_iface);
		break;

		/* vis packet */
	case BAT_VIS:
		ret = recv_vis_packet(skb, hard_iface);
		break;
		/* Translation table query (request or response) */
	case BAT_TT_QUERY:
		ret = recv_tt_query(skb, hard_iface);
		break;
		/* Roaming advertisement */
	case BAT_ROAM_ADV:
		ret = recv_roam_adv(skb, hard_iface);
		break;
	default:
		ret = NET_RX_DROP;
	}

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons. */

	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}
661
662 /* This function returns true if the interface represented by ifindex is a
663  * 802.11 wireless device */
664 bool is_wifi_iface(int ifindex)
665 {
666         struct net_device *net_device = NULL;
667         bool ret = false;
668
669         if (ifindex == NULL_IFINDEX)
670                 goto out;
671
672         net_device = dev_get_by_index(&init_net, ifindex);
673         if (!net_device)
674                 goto out;
675
676 #ifdef CONFIG_WIRELESS_EXT
677         /* pre-cfg80211 drivers have to implement WEXT, so it is possible to
678          * check for wireless_handlers != NULL */
679         if (net_device->wireless_handlers)
680                 ret = true;
681         else
682 #endif
683                 /* cfg80211 drivers have to set ieee80211_ptr */
684                 if (net_device->ieee80211_ptr)
685                         ret = true;
686 out:
687         if (net_device)
688                 dev_put(net_device);
689         return ret;
690 }
691
/* netdev notifier block: delivers NETDEV_* events to hard_if_event() */
struct notifier_block hard_if_notifier = {
	.notifier_call = hard_if_event,
};