/*
 * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "types.h"
#include "hash.h"

struct hashtable_t *hna_local_hash;
static struct hashtable_t *hna_global_hash;
atomic_t hna_local_changed;

DEFINE_SPINLOCK(hna_local_hash_lock);
static DEFINE_SPINLOCK(hna_global_hash_lock);

static void hna_local_purge(struct work_struct *work);
static DECLARE_DELAYED_WORK(hna_local_purge_wq, hna_local_purge);
static void _hna_global_del_orig(struct hna_global_entry *hna_global_entry,
                                 char *message);

static void hna_local_start_timer(void)
{
        queue_delayed_work(bat_event_workqueue, &hna_local_purge_wq, 10 * HZ);
}

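/* initialise the local translation table hash and start the purge timer */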
int hna_local_init(void)
{
        if (hna_local_hash)
                return 1;

        hna_local_hash = hash_new(128, compare_orig, choose_orig);

        if (!hna_local_hash)
                return 0;

        atomic_set(&hna_local_changed, 0);
        hna_local_start_timer();

        return 1;
}

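/*
 * hna_local_add - announce a new locally attached (non-mesh) client
 * @addr: the MAC address to announce
 *
 * Refreshes the timestamp if the address is already known, otherwise creates
 * a new local entry, marks the translation table as changed and drops any
 * global entry for the same address.
 */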
void hna_local_add(uint8_t *addr)
{
        /* FIXME: each orig_node->batman_if will be attached to a softif */
        struct bat_priv *bat_priv = netdev_priv(soft_device);
        struct hna_local_entry *hna_local_entry;
        struct hna_global_entry *hna_global_entry;
        struct hashtable_t *swaphash;
        unsigned long flags;

        spin_lock_irqsave(&hna_local_hash_lock, flags);
        hna_local_entry =
                ((struct hna_local_entry *)hash_find(hna_local_hash, addr));
        spin_unlock_irqrestore(&hna_local_hash_lock, flags);

        if (hna_local_entry != NULL) {
                hna_local_entry->last_seen = jiffies;
                return;
        }
        /* only announce as many hosts as fit into the batman-packet and as
           can be counted in batman_packet->num_hna; this also limits the
           impact of MAC flooding */
        if ((num_hna + 1 > (ETH_DATA_LEN - BAT_PACKET_LEN) / ETH_ALEN) ||
            (num_hna + 1 > 255)) {
                bat_dbg(DBG_ROUTES, bat_priv,
                        "Can't add new local hna entry (%pM): "
                        "number of local hna entries exceeds packet size\n",
                        addr);
                return;
        }

        bat_dbg(DBG_ROUTES, bat_priv,
                "Creating new local hna entry: %pM\n", addr);

        hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC);
        if (!hna_local_entry)
                return;

        memcpy(hna_local_entry->addr, addr, ETH_ALEN);
        hna_local_entry->last_seen = jiffies;

        /* the batman interface mac address should never be purged */
        if (compare_orig(addr, soft_device->dev_addr))
                hna_local_entry->never_purge = 1;
        else
                hna_local_entry->never_purge = 0;

        spin_lock_irqsave(&hna_local_hash_lock, flags);

        hash_add(hna_local_hash, hna_local_entry);
        num_hna++;
        atomic_set(&hna_local_changed, 1);

        if (hna_local_hash->elements * 4 > hna_local_hash->size) {
                swaphash = hash_resize(hna_local_hash,
                                       hna_local_hash->size * 2);

                if (swaphash == NULL)
                        pr_err("Couldn't resize local hna hash table\n");
                else
                        hna_local_hash = swaphash;
        }

        spin_unlock_irqrestore(&hna_local_hash_lock, flags);

        /* remove address from global hash if present */
        spin_lock_irqsave(&hna_global_hash_lock, flags);

        hna_global_entry =
                ((struct hna_global_entry *)hash_find(hna_global_hash, addr));

        if (hna_global_entry != NULL)
                _hna_global_del_orig(hna_global_entry, "local hna received");

        spin_unlock_irqrestore(&hna_global_hash_lock, flags);
}

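/*
 * hna_local_fill_buffer - copy the locally announced addresses into a packet
 * buffer
 *
 * Returns the number of addresses written; clears hna_local_changed once all
 * local entries fit into the buffer.
 */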
int hna_local_fill_buffer(unsigned char *buff, int buff_len)
{
        struct hna_local_entry *hna_local_entry;
        HASHIT(hashit);
        int i = 0;
        unsigned long flags;

        spin_lock_irqsave(&hna_local_hash_lock, flags);

        while (hash_iterate(hna_local_hash, &hashit)) {

                if (buff_len < (i + 1) * ETH_ALEN)
                        break;

                hna_local_entry = hashit.bucket->data;
                memcpy(buff + (i * ETH_ALEN), hna_local_entry->addr, ETH_ALEN);

                i++;
        }

        /* if we did not get all new local hnas see you next time  ;-) */
        if (i == num_hna)
                atomic_set(&hna_local_changed, 0);

        spin_unlock_irqrestore(&hna_local_hash_lock, flags);

        return i;
}

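/* debugfs output: list all addresses announced by the local node via HNA */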
int hna_local_seq_print_text(struct seq_file *seq, void *offset)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hna_local_entry *hna_local_entry;
        HASHIT(hashit);
        HASHIT(hashit_count);
        unsigned long flags;
        size_t buf_size, pos;
        char *buff;

        if (!bat_priv->primary_if) {
                return seq_printf(seq, "BATMAN mesh %s disabled - "
                               "please specify interfaces to enable it\n",
                               net_dev->name);
        }

        seq_printf(seq, "Locally retrieved addresses (from %s) "
                   "announced via HNA:\n",
                   net_dev->name);

        spin_lock_irqsave(&hna_local_hash_lock, flags);

        buf_size = 1;
        /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
        while (hash_iterate(hna_local_hash, &hashit_count))
                buf_size += 21;

        buff = kmalloc(buf_size, GFP_ATOMIC);
        if (!buff) {
                spin_unlock_irqrestore(&hna_local_hash_lock, flags);
                return -ENOMEM;
        }
        buff[0] = '\0';
        pos = 0;

        while (hash_iterate(hna_local_hash, &hashit)) {
                hna_local_entry = hashit.bucket->data;

                pos += snprintf(buff + pos, 22, " * %pM\n",
                                hna_local_entry->addr);
        }

        spin_unlock_irqrestore(&hna_local_hash_lock, flags);

        seq_printf(seq, "%s", buff);
        kfree(buff);
        return 0;
}

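/* free an entry and update the counters; used directly as the hash_delete
 * callback and by hna_local_del() below */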
static void _hna_local_del(void *data)
{
        kfree(data);
        num_hna--;
        atomic_set(&hna_local_changed, 1);
}

static void hna_local_del(struct hna_local_entry *hna_local_entry,
                          char *message)
{
        /* FIXME: each orig_node->batman_if will be attached to a softif */
        struct bat_priv *bat_priv = netdev_priv(soft_device);
        bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
                hna_local_entry->addr, message);

        hash_remove(hna_local_hash, hna_local_entry->addr);
        _hna_local_del(hna_local_entry);
}

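/* remove a single address from the local translation table, if present */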
void hna_local_remove(uint8_t *addr, char *message)
{
        struct hna_local_entry *hna_local_entry;
        unsigned long flags;

        spin_lock_irqsave(&hna_local_hash_lock, flags);

        hna_local_entry = (struct hna_local_entry *)
                hash_find(hna_local_hash, addr);
        if (hna_local_entry)
                hna_local_del(hna_local_entry, message);

        spin_unlock_irqrestore(&hna_local_hash_lock, flags);
}

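/* periodic work: drop local entries that have not been seen for
 * LOCAL_HNA_TIMEOUT seconds, then re-arm the purge timer */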
static void hna_local_purge(struct work_struct *work)
{
        struct hna_local_entry *hna_local_entry;
        HASHIT(hashit);
        unsigned long flags;
        unsigned long timeout;

        spin_lock_irqsave(&hna_local_hash_lock, flags);

        while (hash_iterate(hna_local_hash, &hashit)) {
                hna_local_entry = hashit.bucket->data;

                timeout = hna_local_entry->last_seen + LOCAL_HNA_TIMEOUT * HZ;
                if ((!hna_local_entry->never_purge) &&
                    time_after(jiffies, timeout))
                        hna_local_del(hna_local_entry, "address timed out");
        }

        spin_unlock_irqrestore(&hna_local_hash_lock, flags);
        hna_local_start_timer();
}

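/* stop the purge timer and free the whole local translation table */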
void hna_local_free(void)
{
        if (!hna_local_hash)
                return;

        cancel_delayed_work_sync(&hna_local_purge_wq);
        hash_delete(hna_local_hash, _hna_local_del);
        hna_local_hash = NULL;
}

int hna_global_init(void)
{
        if (hna_global_hash)
                return 1;

        hna_global_hash = hash_new(128, compare_orig, choose_orig);

        if (!hna_global_hash)
                return 0;

        return 1;
}

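/*
 * hna_global_add_orig - add the HNAs announced by an originator to the global
 * translation table
 * @orig_node: the originator that announced the addresses
 * @hna_buff: buffer of announced MAC addresses (ETH_ALEN bytes each)
 * @hna_buff_len: length of the buffer in bytes
 *
 * Existing entries are re-pointed to @orig_node, conflicting local entries
 * are removed, and a copy of the buffer is stored in the orig_node.
 */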
void hna_global_add_orig(struct orig_node *orig_node,
                         unsigned char *hna_buff, int hna_buff_len)
{
        /* FIXME: each orig_node->batman_if will be attached to a softif */
        struct bat_priv *bat_priv = netdev_priv(soft_device);
        struct hna_global_entry *hna_global_entry;
        struct hna_local_entry *hna_local_entry;
        struct hashtable_t *swaphash;
        int hna_buff_count = 0;
        unsigned long flags;
        unsigned char *hna_ptr;

        while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
                spin_lock_irqsave(&hna_global_hash_lock, flags);

                hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
                hna_global_entry = (struct hna_global_entry *)
                        hash_find(hna_global_hash, hna_ptr);

                if (hna_global_entry == NULL) {
                        spin_unlock_irqrestore(&hna_global_hash_lock, flags);

                        hna_global_entry =
                                kmalloc(sizeof(struct hna_global_entry),
                                        GFP_ATOMIC);

                        if (!hna_global_entry)
                                break;

                        memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN);

                        bat_dbg(DBG_ROUTES, bat_priv,
                                "Creating new global hna entry: "
                                "%pM (via %pM)\n",
                                hna_global_entry->addr, orig_node->orig);

                        spin_lock_irqsave(&hna_global_hash_lock, flags);
                        hash_add(hna_global_hash, hna_global_entry);

                }

                hna_global_entry->orig_node = orig_node;
                spin_unlock_irqrestore(&hna_global_hash_lock, flags);

                /* remove address from local hash if present */
                spin_lock_irqsave(&hna_local_hash_lock, flags);

                hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
                hna_local_entry = (struct hna_local_entry *)
                        hash_find(hna_local_hash, hna_ptr);

                if (hna_local_entry != NULL)
                        hna_local_del(hna_local_entry, "global hna received");

                spin_unlock_irqrestore(&hna_local_hash_lock, flags);

                hna_buff_count++;
        }

        /* initialize, and overwrite if malloc succeeds */
        orig_node->hna_buff = NULL;
        orig_node->hna_buff_len = 0;

        if (hna_buff_len > 0) {
                orig_node->hna_buff = kmalloc(hna_buff_len, GFP_ATOMIC);
                if (orig_node->hna_buff) {
                        memcpy(orig_node->hna_buff, hna_buff, hna_buff_len);
                        orig_node->hna_buff_len = hna_buff_len;
                }
        }

        spin_lock_irqsave(&hna_global_hash_lock, flags);

        if (hna_global_hash->elements * 4 > hna_global_hash->size) {
                swaphash = hash_resize(hna_global_hash,
                                       hna_global_hash->size * 2);

                if (swaphash == NULL)
                        pr_err("Couldn't resize global hna hash table\n");
                else
                        hna_global_hash = swaphash;
        }

        spin_unlock_irqrestore(&hna_global_hash_lock, flags);
}

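/* debugfs output: list all HNAs learned from the mesh and the originator
 * announcing them */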
int hna_global_seq_print_text(struct seq_file *seq, void *offset)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hna_global_entry *hna_global_entry;
        HASHIT(hashit);
        HASHIT(hashit_count);
        unsigned long flags;
        size_t buf_size, pos;
        char *buff;

        if (!bat_priv->primary_if) {
                return seq_printf(seq, "BATMAN mesh %s disabled - "
                                  "please specify interfaces to enable it\n",
                                  net_dev->name);
        }

        seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
                   net_dev->name);

        spin_lock_irqsave(&hna_global_hash_lock, flags);

        buf_size = 1;
        /* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
        while (hash_iterate(hna_global_hash, &hashit_count))
                buf_size += 43;

        buff = kmalloc(buf_size, GFP_ATOMIC);
        if (!buff) {
                spin_unlock_irqrestore(&hna_global_hash_lock, flags);
                return -ENOMEM;
        }
        buff[0] = '\0';
        pos = 0;

        while (hash_iterate(hna_global_hash, &hashit)) {
                hna_global_entry = hashit.bucket->data;

                pos += snprintf(buff + pos, 44,
                                " * %pM via %pM\n", hna_global_entry->addr,
                                hna_global_entry->orig_node->orig);
        }

        spin_unlock_irqrestore(&hna_global_hash_lock, flags);

        seq_printf(seq, "%s", buff);
        kfree(buff);
        return 0;
}

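/* remove a global entry from the hash and free it; the caller must hold
 * hna_global_hash_lock */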
static void _hna_global_del_orig(struct hna_global_entry *hna_global_entry,
                                 char *message)
{
        /* FIXME: each orig_node->batman_if will be attached to a softif */
        struct bat_priv *bat_priv = netdev_priv(soft_device);
        bat_dbg(DBG_ROUTES, bat_priv,
                "Deleting global hna entry %pM (via %pM): %s\n",
                hna_global_entry->addr, hna_global_entry->orig_node->orig,
                message);

        hash_remove(hna_global_hash, hna_global_entry->addr);
        kfree(hna_global_entry);
}

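/* drop all global entries that were announced by @orig_node and release the
 * buffer copy stored with the originator */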
void hna_global_del_orig(struct orig_node *orig_node, char *message)
{
        struct hna_global_entry *hna_global_entry;
        int hna_buff_count = 0;
        unsigned long flags;
        unsigned char *hna_ptr;

        if (orig_node->hna_buff_len == 0)
                return;

        spin_lock_irqsave(&hna_global_hash_lock, flags);

        while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
                hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
                hna_global_entry = (struct hna_global_entry *)
                        hash_find(hna_global_hash, hna_ptr);

                if ((hna_global_entry != NULL) &&
                    (hna_global_entry->orig_node == orig_node))
                        _hna_global_del_orig(hna_global_entry, message);

                hna_buff_count++;
        }

        spin_unlock_irqrestore(&hna_global_hash_lock, flags);

        orig_node->hna_buff_len = 0;
        kfree(orig_node->hna_buff);
        orig_node->hna_buff = NULL;
}

static void hna_global_del(void *data)
{
        kfree(data);
}

void hna_global_free(void)
{
        if (!hna_global_hash)
                return;

        hash_delete(hna_global_hash, hna_global_del);
        hna_global_hash = NULL;
}

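/* look up the originator currently announcing @addr; returns NULL if the
 * address is not known in the global translation table */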
struct orig_node *transtable_search(uint8_t *addr)
{
        struct hna_global_entry *hna_global_entry;
        unsigned long flags;

        spin_lock_irqsave(&hna_global_hash_lock, flags);
        hna_global_entry = (struct hna_global_entry *)
                hash_find(hna_global_hash, addr);
        spin_unlock_irqrestore(&hna_global_hash_lock, flags);

        if (hna_global_entry == NULL)
                return NULL;

        return hna_global_entry->orig_node;
}