2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
15 * This work is based on the LPC-trie which is originally described in:
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.csc.kth.se/~snilsson/software/dyntrie2/
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
26 * Code from fib_hash has been reused which includes the following header:
29 * INET An implementation of the TCP/IP protocol suite for the LINUX
30 * operating system. INET is implemented using the BSD Socket
31 * interface as the means of communication with the user level.
33 * IPv4 FIB: lookup engine and maintenance routines.
36 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
38 * This program is free software; you can redistribute it and/or
39 * modify it under the terms of the GNU General Public License
40 * as published by the Free Software Foundation; either version
41 * 2 of the License, or (at your option) any later version.
43 * Substantial contributions to this work come from:
45 * David S. Miller, <davem@davemloft.net>
46 * Stephen Hemminger <shemminger@osdl.org>
47 * Paul E. McKenney <paulmck@us.ibm.com>
48 * Patrick McHardy <kaber@trash.net>
51 #define VERSION "0.409"
53 #include <asm/uaccess.h>
54 #include <linux/bitops.h>
55 #include <linux/types.h>
56 #include <linux/kernel.h>
58 #include <linux/string.h>
59 #include <linux/socket.h>
60 #include <linux/sockios.h>
61 #include <linux/errno.h>
63 #include <linux/inet.h>
64 #include <linux/inetdevice.h>
65 #include <linux/netdevice.h>
66 #include <linux/if_arp.h>
67 #include <linux/proc_fs.h>
68 #include <linux/rcupdate.h>
69 #include <linux/skbuff.h>
70 #include <linux/netlink.h>
71 #include <linux/init.h>
72 #include <linux/list.h>
73 #include <linux/slab.h>
74 #include <linux/export.h>
75 #include <net/net_namespace.h>
77 #include <net/protocol.h>
78 #include <net/route.h>
81 #include <net/ip_fib.h>
82 #include "fib_lookup.h"
84 #define MAX_STAT_DEPTH 32
86 #define KEYLENGTH (8*sizeof(t_key))
88 typedef unsigned int t_key;
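/* illustrative note (not in the original source): with t_key defined as
 * unsigned int, KEYLENGTH evaluates to 32, which matches the 31..0 bit
 * diagram further down in this file.
 */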
90 #define IS_TNODE(n) ((n)->bits)
91 #define IS_LEAF(n) (!(n)->bits)
93 #define get_index(_key, _kv) (((_key) ^ (_kv)->key) >> (_kv)->pos)
97 unsigned char bits; /* log2(KEYLENGTH) bits needed */
98 unsigned char pos; /* log2(KEYLENGTH) bits needed */
100 struct tnode __rcu *parent;
103 /* The fields in this struct are valid if bits > 0 (TNODE) */
105 unsigned int full_children; /* KEYLENGTH bits needed */
106 unsigned int empty_children; /* KEYLENGTH bits needed */
107 struct tnode __rcu *child[0];
109 /* This list pointer is valid if bits == 0 (LEAF) */
110 struct hlist_head list;
115 struct hlist_node hlist;
117 u32 mask_plen; /* ntohl(inet_make_mask(plen)) */
118 struct list_head falh;
122 #ifdef CONFIG_IP_FIB_TRIE_STATS
123 struct trie_use_stats {
125 unsigned int backtrack;
126 unsigned int semantic_match_passed;
127 unsigned int semantic_match_miss;
128 unsigned int null_node_hit;
129 unsigned int resize_node_skipped;
134 unsigned int totdepth;
135 unsigned int maxdepth;
138 unsigned int nullpointers;
139 unsigned int prefixes;
140 unsigned int nodesizes[MAX_STAT_DEPTH];
144 struct tnode __rcu *trie;
145 #ifdef CONFIG_IP_FIB_TRIE_STATS
146 struct trie_use_stats __percpu *stats;
150 static void resize(struct trie *t, struct tnode *tn);
151 static size_t tnode_free_size;
154 * synchronize_rcu after call_rcu for that many pages; it should be especially
155 * useful before resizing the root node with PREEMPT_NONE configs; the value was
156 * obtained experimentally, aiming to avoid visible slowdown.
158 static const int sync_pages = 128;
160 static struct kmem_cache *fn_alias_kmem __read_mostly;
161 static struct kmem_cache *trie_leaf_kmem __read_mostly;
163 /* caller must hold RTNL */
164 #define node_parent(n) rtnl_dereference((n)->parent)
166 /* caller must hold RCU read lock or RTNL */
167 #define node_parent_rcu(n) rcu_dereference_rtnl((n)->parent)
169 /* wrapper for rcu_assign_pointer */
170 static inline void node_set_parent(struct tnode *n, struct tnode *tp)
173 rcu_assign_pointer(n->parent, tp);
176 #define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER((n)->parent, p)
178 /* This provides us with the number of children in this node; in the case of a
179 * leaf this will return 0, meaning none of the children are accessible.
181 static inline unsigned long tnode_child_length(const struct tnode *tn)
183 return (1ul << tn->bits) & ~(1ul);
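/* illustrative note (not in the original source): the "& ~(1ul)" clears bit
 * zero, so a leaf (bits == 0) yields (1ul << 0) & ~1ul == 0, while a tnode
 * with, say, bits == 4 yields (1ul << 4) & ~1ul == 16, i.e. all 2^bits
 * child slots.
 */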
186 /* caller must hold RTNL */
187 static inline struct tnode *tnode_get_child(const struct tnode *tn,
190 return rtnl_dereference(tn->child[i]);
193 /* caller must hold RCU read lock or RTNL */
194 static inline struct tnode *tnode_get_child_rcu(const struct tnode *tn,
197 return rcu_dereference_rtnl(tn->child[i]);
200 /* To understand this stuff, an understanding of keys and all their bits is
201 * necessary. Every node in the trie has a key associated with it, but not
202 * all of the bits in that key are significant.
204 * Consider a node 'n' and its parent 'tp'.
206 * If n is a leaf, every bit in its key is significant. Its presence is
207 * necessitated by path compression, since during a tree traversal (when
208 * searching for a leaf - unless we are doing an insertion) we will completely
209 * ignore all skipped bits we encounter. Thus we need to verify, at the end of
210 * a potentially successful search, that we have indeed been walking the correct key path.
213 * Note that we can never "miss" the correct key in the tree if present by
214 * following the wrong path. Path compression ensures that segments of the key
215 * that are the same for all keys with a given prefix are skipped, but the
216 * skipped part *is* identical for each node in the subtrie below the skipped
217 * bit! trie_insert() in this implementation takes care of that.
219 * if n is an internal node - a 'tnode' here, the various parts of its key
220 * have many different meanings.
223 * _________________________________________________________________
224 * | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
225 * -----------------------------------------------------------------
226 * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
228 * _________________________________________________________________
229 * | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
230 * -----------------------------------------------------------------
231 * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
238 * First, let's just ignore the bits that come before the parent tp, that is
239 * the bits from (tp->pos + tp->bits) to 31. They are *known* but at this
240 * point we do not use them for anything.
242 * The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
243 * index into the parent's child array. That is, they will be used to find
244 * 'n' among tp's children.
246 * The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits in the child node n.
249 * All the bits we have seen so far are significant to the node n. The rest
250 * of the bits are really not needed or indeed known in n->key.
252 * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
253 * n's child array, and will of course be different for each child.
255 * The rest of the bits, from 0 to (n->pos - 1), are completely unknown at this point.
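 *
 * Illustrative example (added here, values are hypothetical): consider a
 * tnode with key = 0xC0A80000, pos = 16 and bits = 2, so its children are
 * indexed by key bits 17..16. For a lookup key of 0xC0A90000,
 * get_index() = (0xC0A90000 ^ 0xC0A80000) >> 16 = 1 and (1 >> bits) == 0,
 * so the key agrees with the node's prefix and we descend into child 1.
 * For a lookup key of 0xC0AC0000, (0xC0AC0000 ^ 0xC0A80000) >> 16 = 4 and
 * (4 >> bits) != 0, which signals a mismatch in the skipped/prefix bits.
 */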
259 static const int halve_threshold = 25;
260 static const int inflate_threshold = 50;
261 static const int halve_threshold_root = 15;
262 static const int inflate_threshold_root = 30;
264 static void __alias_free_mem(struct rcu_head *head)
266 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
267 kmem_cache_free(fn_alias_kmem, fa);
270 static inline void alias_free_mem_rcu(struct fib_alias *fa)
272 call_rcu(&fa->rcu, __alias_free_mem);
275 #define TNODE_KMALLOC_MAX \
276 ilog2((PAGE_SIZE - sizeof(struct tnode)) / sizeof(struct tnode *))
278 static void __node_free_rcu(struct rcu_head *head)
280 struct tnode *n = container_of(head, struct tnode, rcu);
283 kmem_cache_free(trie_leaf_kmem, n);
284 else if (n->bits <= TNODE_KMALLOC_MAX)
290 #define node_free(n) call_rcu(&n->rcu, __node_free_rcu)
292 static inline void free_leaf_info(struct leaf_info *leaf)
294 kfree_rcu(leaf, rcu);
297 static struct tnode *tnode_alloc(size_t size)
299 if (size <= PAGE_SIZE)
300 return kzalloc(size, GFP_KERNEL);
302 return vzalloc(size);
305 static struct tnode *leaf_new(t_key key)
307 struct tnode *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
310 /* set key and pos to reflect full key value
311 * any trailing zeros in the key should be ignored
312 * as the nodes are searched
317 /* set bits to 0 indicating we are not a tnode */
320 INIT_HLIST_HEAD(&l->list);
325 static struct leaf_info *leaf_info_new(int plen)
327 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
330 li->mask_plen = ntohl(inet_make_mask(plen));
331 INIT_LIST_HEAD(&li->falh);
336 static struct tnode *tnode_new(t_key key, int pos, int bits)
338 size_t sz = offsetof(struct tnode, child[1 << bits]);
339 struct tnode *tn = tnode_alloc(sz);
340 unsigned int shift = pos + bits;
342 /* verify that bits and pos have their msb bits clear and that the values are valid */
343 BUG_ON(!bits || (shift > KEYLENGTH));
350 tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
351 tn->full_children = 0;
352 tn->empty_children = 1<<bits;
355 pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
356 sizeof(struct tnode *) << bits);
360 /* Check whether a tnode 'n' is "full", i.e. it is an internal node
361 * and no bits are skipped. See discussion in dyntree paper p. 6
363 static inline int tnode_full(const struct tnode *tn, const struct tnode *n)
365 return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n);
368 /* Add a child at position i overwriting the old value.
369 * Update the value of full_children and empty_children.
371 static void put_child(struct tnode *tn, unsigned long i, struct tnode *n)
373 struct tnode *chi = tnode_get_child(tn, i);
376 BUG_ON(i >= tnode_child_length(tn));
378 /* update emptyChildren */
379 if (n == NULL && chi != NULL)
380 tn->empty_children++;
381 else if (n != NULL && chi == NULL)
382 tn->empty_children--;
384 /* update fullChildren */
385 wasfull = tnode_full(tn, chi);
386 isfull = tnode_full(tn, n);
388 if (wasfull && !isfull)
390 else if (!wasfull && isfull)
393 if (n && (tn->slen < n->slen))
396 rcu_assign_pointer(tn->child[i], n);
399 static void put_child_root(struct tnode *tp, struct trie *t,
400 t_key key, struct tnode *n)
403 put_child(tp, get_index(key, tp), n);
405 rcu_assign_pointer(t->trie, n);
408 static inline void tnode_free_init(struct tnode *tn)
413 static inline void tnode_free_append(struct tnode *tn, struct tnode *n)
415 n->rcu.next = tn->rcu.next;
416 tn->rcu.next = &n->rcu;
419 static void tnode_free(struct tnode *tn)
421 struct callback_head *head = &tn->rcu;
425 tnode_free_size += offsetof(struct tnode, child[1 << tn->bits]);
428 tn = container_of(head, struct tnode, rcu);
431 if (tnode_free_size >= PAGE_SIZE * sync_pages) {
437 static int inflate(struct trie *t, struct tnode *oldtnode)
439 struct tnode *inode, *node0, *node1, *tn, *tp;
440 unsigned long i, j, k;
443 pr_debug("In inflate\n");
445 tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1);
449 /* Assemble all of the pointers in our cluster, in this case that
450 * represents all of the pointers out of our allocated nodes that
451 * point to existing tnodes and the links between our allocated nodes.
454 for (i = tnode_child_length(oldtnode), m = 1u << tn->pos; i;) {
455 inode = tnode_get_child(oldtnode, --i);
461 /* A leaf or an internal node with skipped bits */
462 if (!tnode_full(oldtnode, inode)) {
463 put_child(tn, get_index(inode->key, tn), inode);
467 /* An internal node with two children */
468 if (inode->bits == 1) {
469 put_child(tn, 2 * i + 1, tnode_get_child(inode, 1));
470 put_child(tn, 2 * i, tnode_get_child(inode, 0));
474 /* We will replace this node 'inode' with two new
475 * ones, 'node0' and 'node1', each with half of the
476 * original children. The two new nodes will have
477 * a position one bit further down the key and this
478 * means that the "significant" part of their keys
479 * (see the discussion near the top of this file)
480 * will differ by one bit, which will be "0" in
481 * node0's key and "1" in node1's key. Since we are
482 * moving the key position by one step, the bit that
483 * we are moving away from - the bit at position
484 * (tn->pos) - is the one that will differ between
485 * node0 and node1. So... we synthesize that bit in the two new keys.
488 node1 = tnode_new(inode->key | m, inode->pos, inode->bits - 1);
491 tnode_free_append(tn, node1);
493 node0 = tnode_new(inode->key & ~m, inode->pos, inode->bits - 1);
496 tnode_free_append(tn, node0);
498 /* populate child pointers in new nodes */
499 for (k = tnode_child_length(inode), j = k / 2; j;) {
500 put_child(node1, --j, tnode_get_child(inode, --k));
501 put_child(node0, j, tnode_get_child(inode, j));
502 put_child(node1, --j, tnode_get_child(inode, --k));
503 put_child(node0, j, tnode_get_child(inode, j));
506 /* link new nodes to parent */
507 NODE_INIT_PARENT(node1, tn);
508 NODE_INIT_PARENT(node0, tn);
510 /* link parent to nodes */
511 put_child(tn, 2 * i + 1, node1);
512 put_child(tn, 2 * i, node0);
515 /* setup the parent pointer into and out of this node */
516 tp = node_parent(oldtnode);
517 NODE_INIT_PARENT(tn, tp);
518 put_child_root(tp, t, tn->key, tn);
520 /* prepare oldtnode to be freed */
521 tnode_free_init(oldtnode);
523 /* update all child nodes parent pointers to route to us */
524 for (i = tnode_child_length(oldtnode); i;) {
525 inode = tnode_get_child(oldtnode, --i);
527 /* A leaf or an internal node with skipped bits */
528 if (!tnode_full(oldtnode, inode)) {
529 node_set_parent(inode, tn);
533 /* drop the node in the old tnode free list */
534 tnode_free_append(oldtnode, inode);
536 /* fetch new nodes */
537 node1 = tnode_get_child(tn, 2 * i + 1);
538 node0 = tnode_get_child(tn, 2 * i);
540 /* bits == 1 then node0 and node1 represent inode's children */
541 if (inode->bits == 1) {
542 node_set_parent(node1, tn);
543 node_set_parent(node0, tn);
547 /* update parent pointers in child node's children */
548 for (k = tnode_child_length(inode), j = k / 2; j;) {
549 node_set_parent(tnode_get_child(inode, --k), node1);
550 node_set_parent(tnode_get_child(inode, --j), node0);
551 node_set_parent(tnode_get_child(inode, --k), node1);
552 node_set_parent(tnode_get_child(inode, --j), node0);
555 /* resize child nodes */
560 /* we completed without error, prepare to free old node */
561 tnode_free(oldtnode);
564 /* all pointers should be clean so we are done */
569 static int halve(struct trie *t, struct tnode *oldtnode)
571 struct tnode *tn, *tp, *inode, *node0, *node1;
574 pr_debug("In halve\n");
576 tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1);
580 /* Assemble all of the pointers in our cluster, in this case that
581 * represents all of the pointers out of our allocated nodes that
582 * point to existing tnodes and the links between our allocated nodes.
585 for (i = tnode_child_length(oldtnode); i;) {
586 node1 = tnode_get_child(oldtnode, --i);
587 node0 = tnode_get_child(oldtnode, --i);
589 /* At least one of the children is empty */
590 if (!node1 || !node0) {
591 put_child(tn, i / 2, node1 ? : node0);
595 /* Two nonempty children */
596 inode = tnode_new(node0->key, oldtnode->pos, 1);
601 tnode_free_append(tn, inode);
603 /* initialize pointers out of node */
604 put_child(inode, 1, node1);
605 put_child(inode, 0, node0);
606 NODE_INIT_PARENT(inode, tn);
608 /* link parent to node */
609 put_child(tn, i / 2, inode);
612 /* setup the parent pointer out of and back into this node */
613 tp = node_parent(oldtnode);
614 NODE_INIT_PARENT(tn, tp);
615 put_child_root(tp, t, tn->key, tn);
617 /* prepare oldtnode to be freed */
618 tnode_free_init(oldtnode);
620 /* update all of the child parent pointers */
621 for (i = tnode_child_length(tn); i;) {
622 inode = tnode_get_child(tn, --i);
624 /* only new tnodes will be considered "full" nodes */
625 if (!tnode_full(tn, inode)) {
626 node_set_parent(inode, tn);
630 /* Two nonempty children */
631 node_set_parent(tnode_get_child(inode, 1), inode);
632 node_set_parent(tnode_get_child(inode, 0), inode);
634 /* resize child node */
638 /* all pointers should be clean so we are done */
639 tnode_free(oldtnode);
644 static unsigned char update_suffix(struct tnode *tn)
646 unsigned char slen = tn->pos;
647 unsigned long stride, i;
649 /* search through the list of children looking for nodes that might
650 * have a suffix greater than the one we currently have. This is
651 * why we start with a stride of 2 since a stride of 1 would
652 * represent the nodes with suffix length equal to tn->pos
654 for (i = 0, stride = 0x2ul ; i < tnode_child_length(tn); i += stride) {
655 struct tnode *n = tnode_get_child(tn, i);
657 if (!n || (n->slen <= slen))
660 /* update stride and slen based on new value */
661 stride <<= (n->slen - slen);
665 /* if slen covers all but the last bit we can stop here
666 * there will be nothing longer than that since only node
667 * 0 and 1 << (bits - 1) could have that as their suffix length.
670 if ((slen + 1) >= (tn->pos + tn->bits))
679 /* From "Implementing a dynamic compressed trie" by Stefan Nilsson of
680 * the Helsinki University of Technology and Matti Tikkanen of Nokia
681 * Telecommunications, page 6:
682 * "A node is doubled if the ratio of non-empty children to all
683 * children in the *doubled* node is at least 'high'."
685 * 'high' in this instance is the variable 'inflate_threshold'. It
686 * is expressed as a percentage, so we multiply it with
687 * tnode_child_length() and instead of multiplying by 2 (since the
688 * child array will be doubled by inflate()) and multiplying
689 * the left-hand side by 100 (to handle the percentage thing) we
690 * multiply the left-hand side by 50.
692 * The left-hand side may look a bit weird: tnode_child_length(tn)
693 * - tn->empty_children is of course the number of non-null children
694 * in the current node. tn->full_children is the number of "full"
695 * children, that is non-null tnodes with a skip value of 0.
696 * All of those will be doubled in the resulting inflated tnode, so
697 * we just count them one extra time here.
699 * A clearer way to write this would be:
701 * to_be_doubled = tn->full_children;
702 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
705 * new_child_length = tnode_child_length(tn) * 2;
707 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
709 * if (new_fill_factor >= inflate_threshold)
711 * ...and so on, though it would mess up the while () loop.
714 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >= inflate_threshold
718 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
719 * inflate_threshold * new_child_length
721 * expand not_to_be_doubled and to_be_doubled, and shorten:
722 * 100 * (tnode_child_length(tn) - tn->empty_children +
723 * tn->full_children) >= inflate_threshold * new_child_length
725 * expand new_child_length:
726 * 100 * (tnode_child_length(tn) - tn->empty_children +
727 * tn->full_children) >=
728 * inflate_threshold * tnode_child_length(tn) * 2
731 * 50 * (tn->full_children + tnode_child_length(tn) -
732 * tn->empty_children) >= inflate_threshold *
733 * tnode_child_length(tn)
736 static bool should_inflate(const struct tnode *tp, const struct tnode *tn)
738 unsigned long used = tnode_child_length(tn);
739 unsigned long threshold = used;
741 /* Keep root node larger */
742 threshold *= tp ? inflate_threshold : inflate_threshold_root;
743 used += tn->full_children;
744 used -= tn->empty_children;
746 return tn->pos && ((50 * used) >= threshold);
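/* Worked example (added for illustration, numbers are hypothetical): for a
 * non-root tnode with tnode_child_length() == 16, full_children == 6 and
 * empty_children == 2, used becomes 16 + 6 - 2 == 20 and threshold becomes
 * 16 * inflate_threshold == 800. Since 50 * 20 == 1000 >= 800, the node
 * would be inflated (provided tn->pos is non-zero).
 */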
749 static bool should_halve(const struct tnode *tp, const struct tnode *tn)
751 unsigned long used = tnode_child_length(tn);
752 unsigned long threshold = used;
754 /* Keep root node larger */
755 threshold *= tp ? halve_threshold : halve_threshold_root;
756 used -= tn->empty_children;
758 return (tn->bits > 1) && ((100 * used) < threshold);
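/* Worked example (added for illustration, numbers are hypothetical): for a
 * non-root tnode with tnode_child_length() == 16 and empty_children == 13,
 * used becomes 16 - 13 == 3 and threshold becomes 16 * halve_threshold ==
 * 400. Since 100 * 3 == 300 < 400, the node would be halved (provided
 * tn->bits > 1).
 */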
762 static void resize(struct trie *t, struct tnode *tn)
764 struct tnode *tp = node_parent(tn), *n = NULL;
765 struct tnode __rcu **cptr;
768 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
769 tn, inflate_threshold, halve_threshold);
771 /* track the tnode via the pointer from the parent instead of
772 * doing it ourselves. This way we can let RCU fully do its
773 * thing without us interfering
775 cptr = tp ? &tp->child[get_index(tn->key, tp)] : &t->trie;
776 BUG_ON(tn != rtnl_dereference(*cptr));
779 if (tn->empty_children > (tnode_child_length(tn) - 1))
783 if (tn->empty_children == (tnode_child_length(tn) - 1))
786 /* Double as long as the resulting node has a number of
787 * nonempty nodes that is above the threshold.
790 while (should_inflate(tp, tn) && max_work--) {
791 if (inflate(t, tn)) {
792 #ifdef CONFIG_IP_FIB_TRIE_STATS
793 this_cpu_inc(t->stats->resize_node_skipped);
798 tn = rtnl_dereference(*cptr);
801 /* Return if at least one inflate was run */
802 if (max_work != MAX_WORK)
805 /* Halve as long as the number of empty children in this
806 * node is above threshold.
809 while (should_halve(tp, tn) && max_work--) {
811 #ifdef CONFIG_IP_FIB_TRIE_STATS
812 this_cpu_inc(t->stats->resize_node_skipped);
817 tn = rtnl_dereference(*cptr);
820 /* Only one child remains */
821 if (tn->empty_children == (tnode_child_length(tn) - 1)) {
824 for (i = tnode_child_length(tn); !n && i;)
825 n = tnode_get_child(tn, --i);
827 /* compress one level */
828 put_child_root(tp, t, tn->key, n);
829 node_set_parent(n, tp);
837 /* Return if at least one deflate was run */
838 if (max_work != MAX_WORK)
841 /* push the suffix length to the parent node */
842 if (tn->slen > tn->pos) {
843 unsigned char slen = update_suffix(tn);
845 if (tp && (slen > tp->slen))
850 /* the read side must use rcu_read_lock; currently the dump routines
851 use this via get_fa_head and dump */
853 static struct leaf_info *find_leaf_info(struct tnode *l, int plen)
855 struct hlist_head *head = &l->list;
856 struct leaf_info *li;
858 hlist_for_each_entry_rcu(li, head, hlist)
859 if (li->plen == plen)
865 static inline struct list_head *get_fa_head(struct tnode *l, int plen)
867 struct leaf_info *li = find_leaf_info(l, plen);
875 static void leaf_pull_suffix(struct tnode *l)
877 struct tnode *tp = node_parent(l);
879 while (tp && (tp->slen > tp->pos) && (tp->slen > l->slen)) {
880 if (update_suffix(tp) > l->slen)
882 tp = node_parent(tp);
886 static void leaf_push_suffix(struct tnode *l)
888 struct tnode *tn = node_parent(l);
890 /* if this is a new leaf then tn will be NULL and we can sort
891 * out parent suffix lengths as a part of trie_rebalance
893 while (tn && (tn->slen < l->slen)) {
895 tn = node_parent(tn);
899 static void remove_leaf_info(struct tnode *l, struct leaf_info *old)
901 struct hlist_node *prev;
903 /* record the location of the pointer to this object */
904 prev = rtnl_dereference(hlist_pprev_rcu(&old->hlist));
906 /* remove the leaf info from the list */
907 hlist_del_rcu(&old->hlist);
909 /* if we emptied the list this leaf will be freed and we can sort
910 * out parent suffix lengths as a part of trie_rebalance
912 if (hlist_empty(&l->list))
915 /* if we removed the tail then we need to update slen */
916 if (!rcu_access_pointer(hlist_next_rcu(prev))) {
917 struct leaf_info *li = hlist_entry(prev, typeof(*li), hlist);
919 l->slen = KEYLENGTH - li->plen;
924 static void insert_leaf_info(struct tnode *l, struct leaf_info *new)
926 struct hlist_head *head = &l->list;
927 struct leaf_info *li = NULL, *last = NULL;
929 if (hlist_empty(head)) {
930 hlist_add_head_rcu(&new->hlist, head);
932 hlist_for_each_entry(li, head, hlist) {
933 if (new->plen > li->plen)
939 hlist_add_behind_rcu(&new->hlist, &last->hlist);
941 hlist_add_before_rcu(&new->hlist, &li->hlist);
944 /* if we added to the tail node then we need to update slen */
945 if (!rcu_access_pointer(hlist_next_rcu(&new->hlist))) {
946 l->slen = KEYLENGTH - new->plen;
951 /* rcu_read_lock needs to be held by the caller on the read side */
952 static struct tnode *fib_find_node(struct trie *t, u32 key)
954 struct tnode *n = rcu_dereference_rtnl(t->trie);
957 unsigned long index = get_index(key, n);
959 /* This bit of code is a bit tricky but it combines multiple
960 * checks into a single check. The prefix consists of the
961 * prefix plus zeros for the bits in the cindex. The index
962 * is the difference between the key and this value. From
963 * this we can actually derive several pieces of data.
964 * if !(index >> bits)
965 * we know the value is cindex
967 * else we have a mismatch in skip bits and failed
969 if (index >> n->bits)
972 /* we have found a leaf. Prefixes have already been compared */
976 n = tnode_get_child_rcu(n, index);
982 static void trie_rebalance(struct trie *t, struct tnode *tn)
986 while ((tp = node_parent(tn)) != NULL) {
991 /* Handle last (top) tnode */
996 /* only used from updater-side */
998 static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
1000 struct list_head *fa_head = NULL;
1001 struct tnode *l, *n, *tp = NULL;
1002 struct leaf_info *li;
1004 li = leaf_info_new(plen);
1007 fa_head = &li->falh;
1009 n = rtnl_dereference(t->trie);
1011 /* If we point to NULL, stop. Either the tree is empty and we should
1012 * just put a new leaf in it, or we have reached an empty child slot,
1013 * and we should just put our new leaf in that.
1015 * If we hit a node with a key that doesn't match then we should stop
1016 * and create a new tnode to replace that node and insert ourselves
1017 * and the other node into the new tnode.
1020 unsigned long index = get_index(key, n);
1022 /* This bit of code is a bit tricky but it combines multiple
1023 * checks into a single check. The prefix consists of the
1024 * prefix plus zeros for the "bits" in the prefix. The index
1025 * is the difference between the key and this value. From
1026 * this we can actually derive several pieces of data.
1027 * if !(index >> bits)
1028 * we know the value is child index
1030 * else we have a mismatch in skip bits and failed
1032 if (index >> n->bits)
1035 /* we have found a leaf. Prefixes have already been compared */
1037 /* Case 1: n is a leaf, and prefixes match*/
1038 insert_leaf_info(n, li);
1043 n = tnode_get_child_rcu(n, index);
1052 insert_leaf_info(l, li);
1054 /* Case 2: n is a LEAF or a TNODE and the key doesn't match.
1056 * Add a new tnode here
1057 * the first tnode needs some special handling
1058 * leaves us in position for handling as case 3
1063 tn = tnode_new(key, __fls(key ^ n->key), 1);
1070 /* initialize routes out of node */
1071 NODE_INIT_PARENT(tn, tp);
1072 put_child(tn, get_index(key, tn) ^ 1, n);
1074 /* start adding routes into the node */
1075 put_child_root(tp, t, key, tn);
1076 node_set_parent(n, tn);
1078 /* parent now has a NULL spot where the leaf can go */
1082 /* Case 3: n is NULL, and will just insert a new leaf */
1084 NODE_INIT_PARENT(l, tp);
1085 put_child(tp, get_index(key, tp), l);
1086 trie_rebalance(t, tp);
1088 rcu_assign_pointer(t->trie, l);
1095 * Caller must hold RTNL.
1097 int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
1099 struct trie *t = (struct trie *) tb->tb_data;
1100 struct fib_alias *fa, *new_fa;
1101 struct list_head *fa_head = NULL;
1102 struct fib_info *fi;
1103 int plen = cfg->fc_dst_len;
1104 u8 tos = cfg->fc_tos;
1112 key = ntohl(cfg->fc_dst);
1114 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
1116 mask = ntohl(inet_make_mask(plen));
1123 fi = fib_create_info(cfg);
1129 l = fib_find_node(t, key);
1133 fa_head = get_fa_head(l, plen);
1134 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1137 /* Now fa, if non-NULL, points to the first fib alias
1138 * with the same keys [prefix,tos,priority], if such key already
1139 * exists or to the node before which we will insert new one.
1141 * If fa is NULL, we will need to allocate a new one and
1142 * insert to the head of f.
1144 * If f is NULL, no fib node matched the destination key
1145 * and we need to allocate a new one of those as well.
1148 if (fa && fa->fa_tos == tos &&
1149 fa->fa_info->fib_priority == fi->fib_priority) {
1150 struct fib_alias *fa_first, *fa_match;
1153 if (cfg->fc_nlflags & NLM_F_EXCL)
1157 * 1. Find exact match for type, scope, fib_info to avoid duplicate routes
1159 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
1163 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1164 list_for_each_entry_continue(fa, fa_head, fa_list) {
1165 if (fa->fa_tos != tos)
1167 if (fa->fa_info->fib_priority != fi->fib_priority)
1169 if (fa->fa_type == cfg->fc_type &&
1170 fa->fa_info == fi) {
1176 if (cfg->fc_nlflags & NLM_F_REPLACE) {
1177 struct fib_info *fi_drop;
1187 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1191 fi_drop = fa->fa_info;
1192 new_fa->fa_tos = fa->fa_tos;
1193 new_fa->fa_info = fi;
1194 new_fa->fa_type = cfg->fc_type;
1195 state = fa->fa_state;
1196 new_fa->fa_state = state & ~FA_S_ACCESSED;
1198 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1199 alias_free_mem_rcu(fa);
1201 fib_release_info(fi_drop);
1202 if (state & FA_S_ACCESSED)
1203 rt_cache_flush(cfg->fc_nlinfo.nl_net);
1204 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1205 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
1209 /* Error if we find a perfect match which
1210 * uses the same scope, type, and nexthop information.
1216 if (!(cfg->fc_nlflags & NLM_F_APPEND))
1220 if (!(cfg->fc_nlflags & NLM_F_CREATE))
1224 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1228 new_fa->fa_info = fi;
1229 new_fa->fa_tos = tos;
1230 new_fa->fa_type = cfg->fc_type;
1231 new_fa->fa_state = 0;
1233 * Insert new entry to the list.
1237 fa_head = fib_insert_node(t, key, plen);
1238 if (unlikely(!fa_head)) {
1240 goto out_free_new_fa;
1245 tb->tb_num_default++;
1247 list_add_tail_rcu(&new_fa->fa_list,
1248 (fa ? &fa->fa_list : fa_head));
1250 rt_cache_flush(cfg->fc_nlinfo.nl_net);
1251 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
1252 &cfg->fc_nlinfo, 0);
1257 kmem_cache_free(fn_alias_kmem, new_fa);
1259 fib_release_info(fi);
1264 static inline t_key prefix_mismatch(t_key key, struct tnode *n)
1266 t_key prefix = n->key;
1268 return (key ^ prefix) & (prefix | -prefix);
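/* Illustrative example (added for clarity, values are hypothetical): with
 * prefix = 0xC0A80000 we have -prefix = 0x3F580000, so (prefix | -prefix)
 * = 0xFFF80000, i.e. every bit from the prefix's least significant set bit
 * upward. A key of 0xC0A80105 gives (key ^ prefix) & 0xFFF80000 == 0, so
 * no mismatch is reported in that region, while a key of 0xD0A80105 gives
 * a non-zero result, signalling a mismatch.
 */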
1271 /* should be called with rcu_read_lock */
1272 int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
1273 struct fib_result *res, int fib_flags)
1275 struct trie *t = (struct trie *)tb->tb_data;
1276 #ifdef CONFIG_IP_FIB_TRIE_STATS
1277 struct trie_use_stats __percpu *stats = t->stats;
1279 const t_key key = ntohl(flp->daddr);
1280 struct tnode *n, *pn;
1281 struct leaf_info *li;
1284 n = rcu_dereference(t->trie);
1288 #ifdef CONFIG_IP_FIB_TRIE_STATS
1289 this_cpu_inc(stats->gets);
1295 /* Step 1: Travel to the longest prefix match in the trie */
1297 unsigned long index = get_index(key, n);
1299 /* This bit of code is a bit tricky but it combines multiple
1300 * checks into a single check. The prefix consists of the
1301 * prefix plus zeros for the "bits" in the prefix. The index
1302 * is the difference between the key and this value. From
1303 * this we can actually derive several pieces of data.
1304 * if !(index >> bits)
1305 * we know the value is child index
1307 * else we have a mismatch in skip bits and failed
1309 if (index >> n->bits)
1312 /* we have found a leaf. Prefixes have already been compared */
1316 /* only record pn and cindex if we are going to be chopping
1317 * bits later. Otherwise we are just wasting cycles.
1319 if (n->slen > n->pos) {
1324 n = tnode_get_child_rcu(n, index);
1329 /* Step 2: Sort out leaves and begin backtracing for longest prefix */
1331 /* record the pointer where our next node pointer is stored */
1332 struct tnode __rcu **cptr = n->child;
1334 /* This test verifies that none of the bits that differ
1335 * between the key and the prefix exist in the region of
1336 * the lsb and higher in the prefix.
1338 if (unlikely(prefix_mismatch(key, n)) || (n->slen == n->pos))
1341 /* exit out and process leaf */
1342 if (unlikely(IS_LEAF(n)))
1345 /* Don't bother recording parent info. Since we are in
1346 * prefix match mode we will have to come back to wherever
1347 * we started this traversal anyway
1350 while ((n = rcu_dereference(*cptr)) == NULL) {
1352 #ifdef CONFIG_IP_FIB_TRIE_STATS
1354 this_cpu_inc(stats->null_node_hit);
1356 /* If we are at cindex 0 there are no more bits for
1357 * us to strip at this level so we must ascend back
1358 * up one level to see if there are any more bits to
1359 * be stripped there.
1362 t_key pkey = pn->key;
1364 pn = node_parent_rcu(pn);
1367 #ifdef CONFIG_IP_FIB_TRIE_STATS
1368 this_cpu_inc(stats->backtrack);
1370 /* Get Child's index */
1371 cindex = get_index(pkey, pn);
1374 /* strip the least significant bit from the cindex */
1375 cindex &= cindex - 1;
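/* example (comment added for illustration): a cindex of 0b0110 becomes
 * 0b0100 here, and 0b0000 on the next pass, at which point we ascend to
 * the parent in the check above.
 */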
1377 /* grab pointer for next child node */
1378 cptr = &pn->child[cindex];
1383 /* Step 3: Process the leaf, if that fails fall back to backtracing */
1384 hlist_for_each_entry_rcu(li, &n->list, hlist) {
1385 struct fib_alias *fa;
1387 if ((key ^ n->key) & li->mask_plen)
1390 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
1391 struct fib_info *fi = fa->fa_info;
1394 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
1398 if (fa->fa_info->fib_scope < flp->flowi4_scope)
1400 fib_alias_accessed(fa);
1401 err = fib_props[fa->fa_type].error;
1402 if (unlikely(err < 0)) {
1403 #ifdef CONFIG_IP_FIB_TRIE_STATS
1404 this_cpu_inc(stats->semantic_match_passed);
1408 if (fi->fib_flags & RTNH_F_DEAD)
1410 for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
1411 const struct fib_nh *nh = &fi->fib_nh[nhsel];
1413 if (nh->nh_flags & RTNH_F_DEAD)
1415 if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
1418 if (!(fib_flags & FIB_LOOKUP_NOREF))
1419 atomic_inc(&fi->fib_clntref);
1421 res->prefixlen = li->plen;
1422 res->nh_sel = nhsel;
1423 res->type = fa->fa_type;
1424 res->scope = fi->fib_scope;
1427 res->fa_head = &li->falh;
1428 #ifdef CONFIG_IP_FIB_TRIE_STATS
1429 this_cpu_inc(stats->semantic_match_passed);
1435 #ifdef CONFIG_IP_FIB_TRIE_STATS
1436 this_cpu_inc(stats->semantic_match_miss);
1441 EXPORT_SYMBOL_GPL(fib_table_lookup);
1444 * Remove the leaf and return parent.
1446 static void trie_leaf_remove(struct trie *t, struct tnode *l)
1448 struct tnode *tp = node_parent(l);
1450 pr_debug("entering trie_leaf_remove(%p)\n", l);
1453 put_child(tp, get_index(l->key, tp), NULL);
1454 trie_rebalance(t, tp);
1456 RCU_INIT_POINTER(t->trie, NULL);
1463 * Caller must hold RTNL.
1465 int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
1467 struct trie *t = (struct trie *) tb->tb_data;
1469 int plen = cfg->fc_dst_len;
1470 u8 tos = cfg->fc_tos;
1471 struct fib_alias *fa, *fa_to_delete;
1472 struct list_head *fa_head;
1474 struct leaf_info *li;
1479 key = ntohl(cfg->fc_dst);
1480 mask = ntohl(inet_make_mask(plen));
1486 l = fib_find_node(t, key);
1491 li = find_leaf_info(l, plen);
1496 fa_head = &li->falh;
1497 fa = fib_find_alias(fa_head, tos, 0);
1502 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1504 fa_to_delete = NULL;
1505 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1506 list_for_each_entry_continue(fa, fa_head, fa_list) {
1507 struct fib_info *fi = fa->fa_info;
1509 if (fa->fa_tos != tos)
1512 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1513 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
1514 fa->fa_info->fib_scope == cfg->fc_scope) &&
1515 (!cfg->fc_prefsrc ||
1516 fi->fib_prefsrc == cfg->fc_prefsrc) &&
1517 (!cfg->fc_protocol ||
1518 fi->fib_protocol == cfg->fc_protocol) &&
1519 fib_nh_match(cfg, fi) == 0) {
1529 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
1530 &cfg->fc_nlinfo, 0);
1532 list_del_rcu(&fa->fa_list);
1535 tb->tb_num_default--;
1537 if (list_empty(fa_head)) {
1538 remove_leaf_info(l, li);
1542 if (hlist_empty(&l->list))
1543 trie_leaf_remove(t, l);
1545 if (fa->fa_state & FA_S_ACCESSED)
1546 rt_cache_flush(cfg->fc_nlinfo.nl_net);
1548 fib_release_info(fa->fa_info);
1549 alias_free_mem_rcu(fa);
1553 static int trie_flush_list(struct list_head *head)
1555 struct fib_alias *fa, *fa_node;
1558 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1559 struct fib_info *fi = fa->fa_info;
1561 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1562 list_del_rcu(&fa->fa_list);
1563 fib_release_info(fa->fa_info);
1564 alias_free_mem_rcu(fa);
1571 static int trie_flush_leaf(struct tnode *l)
1574 struct hlist_head *lih = &l->list;
1575 struct hlist_node *tmp;
1576 struct leaf_info *li = NULL;
1578 hlist_for_each_entry_safe(li, tmp, lih, hlist) {
1579 found += trie_flush_list(&li->falh);
1581 if (list_empty(&li->falh)) {
1582 hlist_del_rcu(&li->hlist);
1590 * Scan for the next right leaf starting at node p->child[idx]
1591 * Since we have a back pointer, no recursion is necessary.
1593 static struct tnode *leaf_walk_rcu(struct tnode *p, struct tnode *c)
1596 unsigned long idx = c ? get_index(c->key, p) + 1 : 0;
1598 while (idx < tnode_child_length(p)) {
1599 c = tnode_get_child_rcu(p, idx++);
1606 /* Rescan: start scanning in the new node */
1611 /* Node empty, walk back up to parent */
1613 } while ((p = node_parent_rcu(c)) != NULL);
1615 return NULL; /* Root of trie */
1618 static struct tnode *trie_firstleaf(struct trie *t)
1620 struct tnode *n = rcu_dereference_rtnl(t->trie);
1625 if (IS_LEAF(n)) /* trie is just a leaf */
1628 return leaf_walk_rcu(n, NULL);
1631 static struct tnode *trie_nextleaf(struct tnode *l)
1633 struct tnode *p = node_parent_rcu(l);
1636 return NULL; /* trie with just one leaf */
1638 return leaf_walk_rcu(p, l);
1641 static struct tnode *trie_leafindex(struct trie *t, int index)
1643 struct tnode *l = trie_firstleaf(t);
1645 while (l && index-- > 0)
1646 l = trie_nextleaf(l);
1653 * Caller must hold RTNL.
1655 int fib_table_flush(struct fib_table *tb)
1657 struct trie *t = (struct trie *) tb->tb_data;
1658 struct tnode *l, *ll = NULL;
1661 for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
1662 found += trie_flush_leaf(l);
1664 if (ll && hlist_empty(&ll->list))
1665 trie_leaf_remove(t, ll);
1669 if (ll && hlist_empty(&ll->list))
1670 trie_leaf_remove(t, ll);
1672 pr_debug("trie_flush found=%d\n", found);
1676 void fib_free_table(struct fib_table *tb)
1678 #ifdef CONFIG_IP_FIB_TRIE_STATS
1679 struct trie *t = (struct trie *)tb->tb_data;
1681 free_percpu(t->stats);
1682 #endif /* CONFIG_IP_FIB_TRIE_STATS */
1686 static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
1687 struct fib_table *tb,
1688 struct sk_buff *skb, struct netlink_callback *cb)
1691 struct fib_alias *fa;
1692 __be32 xkey = htonl(key);
1697 /* rcu_read_lock is held by the caller */
1699 list_for_each_entry_rcu(fa, fah, fa_list) {
1705 if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
1713 fa->fa_info, NLM_F_MULTI) < 0) {
1723 static int fn_trie_dump_leaf(struct tnode *l, struct fib_table *tb,
1724 struct sk_buff *skb, struct netlink_callback *cb)
1726 struct leaf_info *li;
1732 /* rcu_read_lock is held by the caller */
1733 hlist_for_each_entry_rcu(li, &l->list, hlist) {
1742 if (list_empty(&li->falh))
1745 if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) {
1756 int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
1757 struct netlink_callback *cb)
1760 struct trie *t = (struct trie *) tb->tb_data;
1761 t_key key = cb->args[2];
1762 int count = cb->args[3];
1765 /* Dump starting at last key.
1766 * Note: 0.0.0.0/0 (ie default) is first key.
1769 l = trie_firstleaf(t);
1771 /* Normally, continue from last key, but if that is missing
1772 * fall back to using the slow rescan
1774 l = fib_find_node(t, key);
1776 l = trie_leafindex(t, count);
1780 cb->args[2] = l->key;
1781 if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
1782 cb->args[3] = count;
1788 l = trie_nextleaf(l);
1789 memset(&cb->args[4], 0,
1790 sizeof(cb->args) - 4*sizeof(cb->args[0]));
1792 cb->args[3] = count;
1798 void __init fib_trie_init(void)
1800 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
1801 sizeof(struct fib_alias),
1802 0, SLAB_PANIC, NULL);
1804 trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
1805 max(sizeof(struct tnode),
1806 sizeof(struct leaf_info)),
1807 0, SLAB_PANIC, NULL);
1811 struct fib_table *fib_trie_table(u32 id)
1813 struct fib_table *tb;
1816 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
1822 tb->tb_default = -1;
1823 tb->tb_num_default = 0;
1825 t = (struct trie *) tb->tb_data;
1826 RCU_INIT_POINTER(t->trie, NULL);
1827 #ifdef CONFIG_IP_FIB_TRIE_STATS
1828 t->stats = alloc_percpu(struct trie_use_stats);
1838 #ifdef CONFIG_PROC_FS
1839 /* Depth first Trie walk iterator */
1840 struct fib_trie_iter {
1841 struct seq_net_private p;
1842 struct fib_table *tb;
1843 struct tnode *tnode;
1848 static struct tnode *fib_trie_get_next(struct fib_trie_iter *iter)
1850 unsigned long cindex = iter->index;
1851 struct tnode *tn = iter->tnode;
1854 /* A single entry routing table */
1858 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
1859 iter->tnode, iter->index, iter->depth);
1861 while (cindex < tnode_child_length(tn)) {
1862 struct tnode *n = tnode_get_child_rcu(tn, cindex);
1867 iter->index = cindex + 1;
1869 /* push down one level */
1880 /* Current node exhausted, pop back up */
1881 p = node_parent_rcu(tn);
1883 cindex = get_index(tn->key, p) + 1;
1893 static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter,
1901 n = rcu_dereference(t->trie);
1918 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
1921 struct fib_trie_iter iter;
1923 memset(s, 0, sizeof(*s));
1926 for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
1928 struct leaf_info *li;
1931 s->totdepth += iter.depth;
1932 if (iter.depth > s->maxdepth)
1933 s->maxdepth = iter.depth;
1935 hlist_for_each_entry_rcu(li, &n->list, hlist)
1941 if (n->bits < MAX_STAT_DEPTH)
1942 s->nodesizes[n->bits]++;
1944 for (i = tnode_child_length(n); i--;) {
1945 if (!rcu_access_pointer(n->child[i]))
1954 * This outputs /proc/net/fib_triestat
1956 static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
1958 unsigned int i, max, pointers, bytes, avdepth;
1961 avdepth = stat->totdepth*100 / stat->leaves;
1965 seq_printf(seq, "\tAver depth: %u.%02d\n",
1966 avdepth / 100, avdepth % 100);
1967 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
1969 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
1970 bytes = sizeof(struct tnode) * stat->leaves;
1972 seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes);
1973 bytes += sizeof(struct leaf_info) * stat->prefixes;
1975 seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
1976 bytes += sizeof(struct tnode) * stat->tnodes;
1978 max = MAX_STAT_DEPTH;
1979 while (max > 0 && stat->nodesizes[max-1] == 0)
1983 for (i = 1; i < max; i++)
1984 if (stat->nodesizes[i] != 0) {
1985 seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
1986 pointers += (1<<i) * stat->nodesizes[i];
1988 seq_putc(seq, '\n');
1989 seq_printf(seq, "\tPointers: %u\n", pointers);
1991 bytes += sizeof(struct tnode *) * pointers;
1992 seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
1993 seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
1996 #ifdef CONFIG_IP_FIB_TRIE_STATS
1997 static void trie_show_usage(struct seq_file *seq,
1998 const struct trie_use_stats __percpu *stats)
2000 struct trie_use_stats s = { 0 };
2003 /* loop through all of the CPUs and gather up the stats */
2004 for_each_possible_cpu(cpu) {
2005 const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu);
2007 s.gets += pcpu->gets;
2008 s.backtrack += pcpu->backtrack;
2009 s.semantic_match_passed += pcpu->semantic_match_passed;
2010 s.semantic_match_miss += pcpu->semantic_match_miss;
2011 s.null_node_hit += pcpu->null_node_hit;
2012 s.resize_node_skipped += pcpu->resize_node_skipped;
2015 seq_printf(seq, "\nCounters:\n---------\n");
2016 seq_printf(seq, "gets = %u\n", s.gets);
2017 seq_printf(seq, "backtracks = %u\n", s.backtrack);
2018 seq_printf(seq, "semantic match passed = %u\n",
2019 s.semantic_match_passed);
2020 seq_printf(seq, "semantic match miss = %u\n", s.semantic_match_miss);
2021 seq_printf(seq, "null node hit= %u\n", s.null_node_hit);
2022 seq_printf(seq, "skipped node resize = %u\n\n", s.resize_node_skipped);
2024 #endif /* CONFIG_IP_FIB_TRIE_STATS */
2026 static void fib_table_print(struct seq_file *seq, struct fib_table *tb)
2028 if (tb->tb_id == RT_TABLE_LOCAL)
2029 seq_puts(seq, "Local:\n");
2030 else if (tb->tb_id == RT_TABLE_MAIN)
2031 seq_puts(seq, "Main:\n");
2033 seq_printf(seq, "Id %d:\n", tb->tb_id);
2037 static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2039 struct net *net = (struct net *)seq->private;
2043 "Basic info: size of leaf:"
2044 " %Zd bytes, size of tnode: %Zd bytes.\n",
2045 sizeof(struct tnode), sizeof(struct tnode));
2047 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
2048 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2049 struct fib_table *tb;
2051 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
2052 struct trie *t = (struct trie *) tb->tb_data;
2053 struct trie_stat stat;
2058 fib_table_print(seq, tb);
2060 trie_collect_stats(t, &stat);
2061 trie_show_stats(seq, &stat);
2062 #ifdef CONFIG_IP_FIB_TRIE_STATS
2063 trie_show_usage(seq, t->stats);
2071 static int fib_triestat_seq_open(struct inode *inode, struct file *file)
2073 return single_open_net(inode, file, fib_triestat_seq_show);
2076 static const struct file_operations fib_triestat_fops = {
2077 .owner = THIS_MODULE,
2078 .open = fib_triestat_seq_open,
2080 .llseek = seq_lseek,
2081 .release = single_release_net,
2084 static struct tnode *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
2086 struct fib_trie_iter *iter = seq->private;
2087 struct net *net = seq_file_net(seq);
2091 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
2092 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2093 struct fib_table *tb;
2095 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
2098 for (n = fib_trie_get_first(iter,
2099 (struct trie *) tb->tb_data);
2100 n; n = fib_trie_get_next(iter))
2111 static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
2115 return fib_trie_get_idx(seq, *pos);
2118 static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2120 struct fib_trie_iter *iter = seq->private;
2121 struct net *net = seq_file_net(seq);
2122 struct fib_table *tb = iter->tb;
2123 struct hlist_node *tb_node;
2128 /* next node in same table */
2129 n = fib_trie_get_next(iter);
2133 /* walk rest of this hash chain */
2134 h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
2135 while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
2136 tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
2137 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2142 /* new hash chain */
2143 while (++h < FIB_TABLE_HASHSZ) {
2144 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2145 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
2146 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2158 static void fib_trie_seq_stop(struct seq_file *seq, void *v)
2164 static void seq_indent(struct seq_file *seq, int n)
2170 static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
2173 case RT_SCOPE_UNIVERSE: return "universe";
2174 case RT_SCOPE_SITE: return "site";
2175 case RT_SCOPE_LINK: return "link";
2176 case RT_SCOPE_HOST: return "host";
2177 case RT_SCOPE_NOWHERE: return "nowhere";
2179 snprintf(buf, len, "scope=%d", s);
2184 static const char *const rtn_type_names[__RTN_MAX] = {
2185 [RTN_UNSPEC] = "UNSPEC",
2186 [RTN_UNICAST] = "UNICAST",
2187 [RTN_LOCAL] = "LOCAL",
2188 [RTN_BROADCAST] = "BROADCAST",
2189 [RTN_ANYCAST] = "ANYCAST",
2190 [RTN_MULTICAST] = "MULTICAST",
2191 [RTN_BLACKHOLE] = "BLACKHOLE",
2192 [RTN_UNREACHABLE] = "UNREACHABLE",
2193 [RTN_PROHIBIT] = "PROHIBIT",
2194 [RTN_THROW] = "THROW",
2196 [RTN_XRESOLVE] = "XRESOLVE",
2199 static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
2201 if (t < __RTN_MAX && rtn_type_names[t])
2202 return rtn_type_names[t];
2203 snprintf(buf, len, "type %u", t);
2207 /* Pretty print the trie */
2208 static int fib_trie_seq_show(struct seq_file *seq, void *v)
2210 const struct fib_trie_iter *iter = seq->private;
2211 struct tnode *n = v;
2213 if (!node_parent_rcu(n))
2214 fib_table_print(seq, iter->tb);
2217 __be32 prf = htonl(n->key);
2219 seq_indent(seq, iter->depth-1);
2220 seq_printf(seq, " +-- %pI4/%zu %u %u %u\n",
2221 &prf, KEYLENGTH - n->pos - n->bits, n->bits,
2222 n->full_children, n->empty_children);
2224 struct leaf_info *li;
2225 __be32 val = htonl(n->key);
2227 seq_indent(seq, iter->depth);
2228 seq_printf(seq, " |-- %pI4\n", &val);
2230 hlist_for_each_entry_rcu(li, &n->list, hlist) {
2231 struct fib_alias *fa;
2233 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2234 char buf1[32], buf2[32];
2236 seq_indent(seq, iter->depth+1);
2237 seq_printf(seq, " /%d %s %s", li->plen,
2238 rtn_scope(buf1, sizeof(buf1),
2239 fa->fa_info->fib_scope),
2240 rtn_type(buf2, sizeof(buf2),
2243 seq_printf(seq, " tos=%d", fa->fa_tos);
2244 seq_putc(seq, '\n');
2252 static const struct seq_operations fib_trie_seq_ops = {
2253 .start = fib_trie_seq_start,
2254 .next = fib_trie_seq_next,
2255 .stop = fib_trie_seq_stop,
2256 .show = fib_trie_seq_show,
2259 static int fib_trie_seq_open(struct inode *inode, struct file *file)
2261 return seq_open_net(inode, file, &fib_trie_seq_ops,
2262 sizeof(struct fib_trie_iter));
2265 static const struct file_operations fib_trie_fops = {
2266 .owner = THIS_MODULE,
2267 .open = fib_trie_seq_open,
2269 .llseek = seq_lseek,
2270 .release = seq_release_net,
2273 struct fib_route_iter {
2274 struct seq_net_private p;
2275 struct trie *main_trie;
2280 static struct tnode *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
2282 struct tnode *l = NULL;
2283 struct trie *t = iter->main_trie;
2285 /* use cache location of last found key */
2286 if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key)))
2290 l = trie_firstleaf(t);
2293 while (l && pos-- > 0) {
2295 l = trie_nextleaf(l);
2299 iter->key = pos; /* remember it */
2301 iter->pos = 0; /* forget it */
2306 static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
2309 struct fib_route_iter *iter = seq->private;
2310 struct fib_table *tb;
2313 tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
2317 iter->main_trie = (struct trie *) tb->tb_data;
2319 return SEQ_START_TOKEN;
2321 return fib_route_get_idx(iter, *pos - 1);
2324 static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2326 struct fib_route_iter *iter = seq->private;
2327 struct tnode *l = v;
2330 if (v == SEQ_START_TOKEN) {
2332 l = trie_firstleaf(iter->main_trie);
2335 l = trie_nextleaf(l);
2345 static void fib_route_seq_stop(struct seq_file *seq, void *v)
2351 static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
2353 unsigned int flags = 0;
2355 if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT)
2357 if (fi && fi->fib_nh->nh_gw)
2358 flags |= RTF_GATEWAY;
2359 if (mask == htonl(0xFFFFFFFF))
2366 * This outputs /proc/net/route.
2367 * The format of the file is not supposed to be changed
2368 * and needs to be the same as the fib_hash output to avoid breaking legacy utilities
2371 static int fib_route_seq_show(struct seq_file *seq, void *v)
2373 struct tnode *l = v;
2374 struct leaf_info *li;
2376 if (v == SEQ_START_TOKEN) {
2377 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
2378 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
2383 hlist_for_each_entry_rcu(li, &l->list, hlist) {
2384 struct fib_alias *fa;
2385 __be32 mask, prefix;
2387 mask = inet_make_mask(li->plen);
2388 prefix = htonl(l->key);
2390 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2391 const struct fib_info *fi = fa->fa_info;
2392 unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);
2394 if (fa->fa_type == RTN_BROADCAST
2395 || fa->fa_type == RTN_MULTICAST)
2398 seq_setwidth(seq, 127);
2402 "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
2403 "%d\t%08X\t%d\t%u\t%u",
2404 fi->fib_dev ? fi->fib_dev->name : "*",
2406 fi->fib_nh->nh_gw, flags, 0, 0,
2410 fi->fib_advmss + 40 : 0),
2415 "*\t%08X\t%08X\t%04X\t%d\t%u\t"
2416 "%d\t%08X\t%d\t%u\t%u",
2417 prefix, 0, flags, 0, 0, 0,
2427 static const struct seq_operations fib_route_seq_ops = {
2428 .start = fib_route_seq_start,
2429 .next = fib_route_seq_next,
2430 .stop = fib_route_seq_stop,
2431 .show = fib_route_seq_show,
2434 static int fib_route_seq_open(struct inode *inode, struct file *file)
2436 return seq_open_net(inode, file, &fib_route_seq_ops,
2437 sizeof(struct fib_route_iter));
2440 static const struct file_operations fib_route_fops = {
2441 .owner = THIS_MODULE,
2442 .open = fib_route_seq_open,
2444 .llseek = seq_lseek,
2445 .release = seq_release_net,
2448 int __net_init fib_proc_init(struct net *net)
2450 if (!proc_create("fib_trie", S_IRUGO, net->proc_net, &fib_trie_fops))
2453 if (!proc_create("fib_triestat", S_IRUGO, net->proc_net,
2454 &fib_triestat_fops))
2457 if (!proc_create("route", S_IRUGO, net->proc_net, &fib_route_fops))
2463 remove_proc_entry("fib_triestat", net->proc_net);
2465 remove_proc_entry("fib_trie", net->proc_net);
2470 void __net_exit fib_proc_exit(struct net *net)
2472 remove_proc_entry("fib_trie", net->proc_net);
2473 remove_proc_entry("fib_triestat", net->proc_net);
2474 remove_proc_entry("route", net->proc_net);
2477 #endif /* CONFIG_PROC_FS */