/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "unicast.h"
#include "soft-interface.h"
#include "bridge_loop_avoidance.h"
#include "network-coding.h"

/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/* returns 1 if they are the same originator */
static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

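/* allocate the originator hash table and schedule the periodic
 * originator purge work
 */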
int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_hash_set_lock_class(bat_priv->orig_hash,
				   &batadv_orig_hash_lock_class_key);

	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

	return 0;

err:
	return -ENOMEM;
}

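/* decrement the neighbor refcounter and free the entry once it hits zero */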
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		kfree_rcu(neigh_node, rcu);
}

/* increases the refcounter of a found router */
struct batadv_neigh_node *
batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *router;

	rcu_read_lock();
	router = rcu_dereference(orig_node->router);

	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}

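/* allocate and initialise a new neighbor entry; one of the two initial
 * references belongs to the caller
 */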
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
		      const uint8_t *neigh_addr, uint32_t seqno)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_neigh_node *neigh_node;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	INIT_HLIST_NODE(&neigh_node->list);

	memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
	spin_lock_init(&neigh_node->lq_update_lock);

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new neighbor %pM, initial seqno %d\n",
		   neigh_addr, seqno);

out:
	return neigh_node;
}

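/* RCU callback: drop all neighbor references, purge the per-originator
 * state (network coding, fragments, global translation table entries) and
 * free the originator itself
 */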
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
	struct batadv_orig_node *orig_node;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all bonding members ... */
	list_for_each_entry_safe(neigh_node, tmp_neigh_node,
				 &orig_node->bond_list, bonding_list) {
		list_del_rcu(&neigh_node->bonding_list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	batadv_frag_list_free(&orig_node->frag_list);
	batadv_tt_global_del_orig(orig_node->bat_priv, orig_node,
				  "originator timed out");

	kfree(orig_node->tt_buff);
	kfree(orig_node->bcast_own);
	kfree(orig_node->bcast_own_sum);
	kfree(orig_node);
}

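/* decrement the originator refcounter and schedule an RCU free once it
 * hits zero
 */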
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

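/* cancel the purge work, drop every originator from the hash and destroy
 * the hash table
 */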
void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}

/* this function finds or creates an originator entry for the given
 * address if it does not exist
 */
struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
					      const uint8_t *addr)
{
	struct batadv_orig_node *orig_node;
	int size;
	int hash_added;
	unsigned long reset_time;

	orig_node = batadv_orig_hash_find(bat_priv, addr);
	if (orig_node)
		return orig_node;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->bond_list);
	spin_lock_init(&orig_node->ogm_cnt_lock);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->tt_initialised = false;
	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	batadv_dat_init_orig_node_addr(orig_node);
	orig_node->router = NULL;
	orig_node->tt_crc = 0;
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	atomic_set(&orig_node->tt_size, 0);
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;
	orig_node->batman_seqno_reset = reset_time;

	atomic_set(&orig_node->bond_candidates, 0);

	size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;

	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
	if (!orig_node->bcast_own)
		goto free_orig_node;

	size = bat_priv->num_ifaces * sizeof(uint8_t);
	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);

	INIT_LIST_HEAD(&orig_node->frag_list);
	orig_node->last_frag_packet = 0;

	if (!orig_node->bcast_own_sum)
		goto free_bcast_own;

	hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
				     batadv_choose_orig, orig_node,
				     &orig_node->hash_entry);
	if (hash_added != 0)
		goto free_bcast_own_sum;

	return orig_node;
free_bcast_own_sum:
	kfree(orig_node->bcast_own_sum);
free_bcast_own:
	kfree(orig_node->bcast_own);
free_orig_node:
	kfree(orig_node);
	return NULL;
}

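/* remove all neighbors of this originator that have timed out or whose
 * incoming interface is gone; returns true if at least one neighbor was
 * purged and reports the best remaining neighbor via best_neigh_node
 */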
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node,
			    struct batadv_neigh_node **best_neigh_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	*best_neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			batadv_bonding_candidate_del(orig_node, neigh_node);
			batadv_neigh_node_free_ref(neigh_node);
		} else {
			if ((!*best_neigh_node) ||
			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
				*best_neigh_node = neigh_node;
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

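/* returns true if the originator has not been seen for twice the purge
 * timeout and can be removed; otherwise purges its stale neighbors and
 * updates the route to the best remaining neighbor when any were purged
 */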
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;

	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	} else {
		if (batadv_purge_orig_neighbors(bat_priv, orig_node,
						&best_neigh_node))
			batadv_update_route(bat_priv, orig_node,
					    best_neigh_node);
	}

	return false;
}

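/* walk the originator hash, remove timed out originators (including their
 * gateway entries), expire stale fragment lists and re-run the gateway
 * purge/election
 */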
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				if (orig_node->gw_flags)
					batadv_gw_node_delete(bat_priv,
							      orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				batadv_orig_node_free_ref(orig_node);
				continue;
			}

			if (batadv_has_timed_out(orig_node->last_frag_packet,
						 BATADV_FRAG_TIMEOUT))
				batadv_frag_list_free(&orig_node->frag_list);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_node_purge(bat_priv);
	batadv_gw_election(bat_priv);
}

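/* periodic worker: purge the originator table and re-arm the delayed work */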
static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}

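/* trigger an immediate originator purge outside of the periodic worker */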
void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}

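/* seq_file handler that prints the originator table including the currently
 * selected router and all potential next hops for every originator
 */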
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *primary_if;
	struct batadv_orig_node *orig_node;
	struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
	int batman_count = 0;
	int last_seen_secs;
	int last_seen_msecs;
	unsigned long last_seen_jiffies;
	uint32_t i;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name);
	seq_printf(seq, "  %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
		   "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
		   "Nexthop", "outgoingIF", "Potential nexthops");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			neigh_node = batadv_orig_node_get_router(orig_node);
			if (!neigh_node)
				continue;

			if (neigh_node->tq_avg == 0)
				goto next;

			last_seen_jiffies = jiffies - orig_node->last_seen;
			last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
			last_seen_secs = last_seen_msecs / 1000;
			last_seen_msecs = last_seen_msecs % 1000;

			seq_printf(seq, "%pM %4i.%03is   (%3i) %pM [%10s]:",
				   orig_node->orig, last_seen_secs,
				   last_seen_msecs, neigh_node->tq_avg,
				   neigh_node->addr,
				   neigh_node->if_incoming->net_dev->name);

			hlist_for_each_entry_rcu(neigh_node_tmp,
						 &orig_node->neigh_list, list) {
				seq_printf(seq, " %pM (%3i)",
					   neigh_node_tmp->addr,
					   neigh_node_tmp->tq_avg);
			}

			seq_puts(seq, "\n");
			batman_count++;

next:
			batadv_neigh_node_free_ref(neigh_node);
		}
		rcu_read_unlock();
	}

	if (batman_count == 0)
		seq_puts(seq, "No batman nodes in range ...\n");

out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return 0;
}

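/* grow the per-interface OGM counters (bcast_own and bcast_own_sum) of a
 * single originator to make room for a newly added hard interface
 */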
static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node,
				   int max_if_num)
{
	void *data_ptr;
	size_t data_size, old_size;

	data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
	old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
	data_ptr = kmalloc(data_size, GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	memcpy(data_ptr, orig_node->bcast_own, old_size);
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       (max_if_num - 1) * sizeof(uint8_t));
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

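/* resize the OGM counter buffers of every originator after a new hard
 * interface has been added to the mesh
 */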
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = batadv_orig_node_add_if(orig_node, max_if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

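/* shrink the OGM counters of a single originator by cutting out the slot of
 * the interface being removed; del_if_num is the index of that interface
 */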
static int batadv_orig_node_del_if(struct batadv_orig_node *orig_node,
				   int max_if_num, int del_if_num)
{
	void *data_ptr = NULL;
	int chunk_size;

	/* last interface was removed */
	if (max_if_num == 0)
		goto free_bcast_own;

	chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
	data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	/* copy first part */
	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);

	/* copy second part */
	memcpy((char *)data_ptr + del_if_num * chunk_size,
	       orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
	       (max_if_num - del_if_num) * chunk_size);

free_bcast_own:
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	if (max_if_num == 0)
		goto free_own_sum;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       del_if_num * sizeof(uint8_t));

	memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
	       (max_if_num - del_if_num) * sizeof(uint8_t));

free_own_sum:
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

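/* resize the OGM counter buffers of every originator after a hard interface
 * has been removed and renumber the remaining interfaces of this mesh
 */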
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = batadv_orig_node_del_if(orig_node, max_if_num,
						      hard_iface->if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}