/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>
/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb != NULL) {
		write_pnet(&tb->ib_net, hold_net(net));
		tb->port      = snum;
		tb->fastreuse = 0;
		tb->fastreuseport = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}
/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		release_net(ib_net(tb));
		kmem_cache_free(cachep, tb);
	}
}
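/*
 * Associate a socket with the bind bucket that owns local port @snum.
 * The caller must hold the bind hash bucket lock for this chain.
 */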
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;

	atomic_inc(&hashinfo->bsockets);

	inet_sk(sk)->inet_num = snum;
	sk_add_bind_node(sk, &tb->owners);
	inet_csk(sk)->icsk_bind_hash = tb;
}
/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
			hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;

	atomic_dec(&hashinfo->bsockets);

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);
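/*
 * __inet_inherit_port - make a child socket (e.g. one created by accept())
 * share the bind bucket of its listener so the local port stays reserved.
 * Normally the listener's icsk_bind_hash can be reused directly; see the
 * tproxy note below for the exception.
 */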
int __inet_inherit_port(struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
	unsigned short port = inet_sk(child)->inet_num;
	const int bhash = inet_bhashfn(sock_net(sk), port,
			table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	if (tb->port != port) {
		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here. */
		struct hlist_node *node;
		inet_bind_bucket_for_each(tb, node, &head->chain) {
			if (net_eq(ib_net(tb), sock_net(sk)) &&
			    tb->port == port)
				break;
		}
		if (!node) {
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     sock_net(sk), head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
		}
	}
	inet_bind_hash(child, tb, port);
	spin_unlock(&head->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);
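/*
 * Score a listening socket against an incoming (daddr, hnum, dif) tuple:
 * the socket must be in the right namespace and bound to the right port;
 * binding to a specific local address or device narrows the match and
 * raises the score.  A negative score means "no match".
 */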
static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum, const __be32 daddr,
				const int dif)
{
	int score = -1;
	struct inet_sock *inet = inet_sk(sk);

	if (net_eq(sock_net(sk), net) && inet->inet_num == hnum &&
			!ipv6_only_sock(sk)) {
		__be32 rcv_saddr = inet->inet_rcv_saddr;
		score = sk->sk_family == PF_INET ? 2 : 1;
		if (rcv_saddr) {
			if (rcv_saddr != daddr)
				return -1;
			score += 4;
		}
		if (sk->sk_bound_dev_if) {
			if (sk->sk_bound_dev_if != dif)
				return -1;
			score += 4;
		}
	}
	return score;
}
/*
 * Don't inline this cruft. Here are some nice properties to exploit here. The
 * BSD API does not allow a listening sock to specify the remote port nor the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */
struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    const __be32 saddr, __be16 sport,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	unsigned int hash = inet_lhashfn(net, hnum);
	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
	int score, hiscore, matches = 0, reuseport = 0;
	u32 phash = 0;

	rcu_read_lock();
begin:
	result = NULL;
	hiscore = 0;
	sk_nulls_for_each_rcu(sk, node, &ilb->head) {
		score = compute_score(sk, net, hnum, daddr, dif);
		if (score > hiscore) {
			result = sk;
			hiscore = score;
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				phash = inet_ehashfn(net, daddr, hnum,
						     saddr, sport);
				matches = 1;
			}
		} else if (score == hiscore && reuseport) {
			matches++;
			if (((u64)phash * matches) >> 32 == 0)
				result = sk;
			phash = next_pseudo_random32(phash);
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
		goto begin;
	if (result) {
		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
			result = NULL;
		else if (unlikely(compute_score(result, net, hnum, daddr,
				  dif) < hiscore)) {
			sock_put(result);
			goto begin;
		}
	}
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);
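/*
 * Lockless (RCU) lookup of an exact 4-tuple match in the established and
 * TIME_WAIT hash chains.  Returns the socket with its refcount taken, or
 * NULL if nothing matches.
 */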
struct sock *__inet_lookup_established(struct net *net,
				  struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

	rcu_read_lock();
begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(INET_MATCH(sk, net, acookie,
				      saddr, daddr, ports, dif))) {
			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
				goto begintw;
			if (unlikely(!INET_MATCH(sk, net, acookie,
						 saddr, daddr, ports, dif))) {
				sock_put(sk);
				goto begin;
			}
			goto out;
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;

begintw:
	/* Must check for a TIME_WAIT'er before going to listener hash. */
	sk_nulls_for_each_rcu(sk, node, &head->twchain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(INET_TW_MATCH(sk, net, acookie,
					 saddr, daddr, ports, dif))) {
			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) {
				sk = NULL;
				goto out;
			}
			if (unlikely(!INET_TW_MATCH(sk, net, acookie,
						    saddr, daddr, ports, dif))) {
				inet_twsk_put(inet_twsk(sk));
				goto begintw;
			}
			goto out;
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begintw;
	sk = NULL;
out:
	rcu_read_unlock();
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);
/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	struct net *net = sock_net(sk);
	unsigned int hash = inet_ehashfn(net, daddr, lport,
					 saddr, inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw;
	int twrefcnt = 0;

	spin_lock(lock);

	/* Check TIME-WAIT sockets first. */
	sk_nulls_for_each(sk2, node, &head->twchain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(INET_TW_MATCH(sk2, net, acookie,
					 saddr, daddr, ports, dif))) {
			tw = inet_twsk(sk2);
			if (twsk_unique(sk, sk2, twp))
				goto unique;
			else
				goto not_unique;
		}
	}
	tw = NULL;

	/* And established part... */
	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;
		if (likely(INET_MATCH(sk2, net, acookie,
				      saddr, daddr, ports, dif)))
			goto not_unique;
	}

unique:
	/* Must record num and sport now. Otherwise we will see
	 * in hash table socket with a funny identity. */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		twrefcnt = inet_twsk_unhash(tw);
		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	if (twrefcnt)
		inet_twsk_put(tw);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule(tw, death_row);
		inet_twsk_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}
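/*
 * Per-destination starting offset into the ephemeral port range, derived
 * from the connection's addresses and destination port so that different
 * destinations probe local ports in different orders.
 */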
static inline u32 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
					  inet->inet_daddr,
					  inet->inet_dport);
}
int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_nulls_head *list;
	spinlock_t *lock;
	struct inet_ehash_bucket *head;
	int twrefcnt = 0;

	WARN_ON(!sk_unhashed(sk));

	sk->sk_hash = inet_sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	__sk_nulls_add_node_rcu(sk, list);
	if (tw) {
		WARN_ON(sk->sk_hash != tw->tw_hash);
		twrefcnt = inet_twsk_unhash(tw);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	return twrefcnt;
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
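/*
 * Hash a socket into the appropriate table: listening sockets go into the
 * listening hash (under the per-bucket lock), everything else into the
 * established hash via __inet_hash_nolisten().
 */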
static void __inet_hash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_listen_hashbucket *ilb;

	if (sk->sk_state != TCP_LISTEN) {
		__inet_hash_nolisten(sk, NULL);
		return;
	}

	WARN_ON(!sk_unhashed(sk));
	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];

	spin_lock(&ilb->lock);
	__sk_nulls_add_node_rcu(sk, &ilb->head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	spin_unlock(&ilb->lock);
}

void inet_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__inet_hash(sk);
		local_bh_enable();
	}
}
EXPORT_SYMBOL_GPL(inet_hash);
void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	spinlock_t *lock;
	int done;

	if (sk_unhashed(sk))
		return;

	if (sk->sk_state == TCP_LISTEN)
		lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
	else
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock_bh(lock);
	done = __sk_nulls_del_node_init_rcu(sk);
	if (done)
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	spin_unlock_bh(lock);
}
EXPORT_SYMBOL_GPL(inet_unhash);
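/*
 * __inet_hash_connect - bind an ephemeral local port (when none is set yet)
 * and hash the socket into the established table.  The search starts at an
 * offset derived from the connection's addresses, skips reserved ports, and
 * relies on check_established() to reject ports whose 4-tuple would clash;
 * a matching TIME_WAIT socket may be recycled instead.
 */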
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u32 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **),
		int (*hash)(struct sock *sk, struct inet_timewait_sock *twp))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	const unsigned short snum = inet_sk(sk)->inet_num;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret;
	struct net *net = sock_net(sk);
	int twrefcnt = 1;

	if (!snum) {
		int i, remaining, low, high, port;
		static u32 hint;
		u32 offset = hint + port_offset;
		struct hlist_node *node;
		struct inet_timewait_sock *tw = NULL;

		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;

		local_bh_disable();
		for (i = 1; i <= remaining; i++) {
			port = low + (i + offset) % remaining;
			if (inet_is_reserved_local_port(port))
				continue;
			head = &hinfo->bhash[inet_bhashfn(net, port,
					hinfo->bhash_size)];
			spin_lock(&head->lock);

			/* Does not bother with rcv_saddr checks,
			 * because the established check is already
			 * unique enough.
			 */
			inet_bind_bucket_for_each(tb, node, &head->chain) {
				if (net_eq(ib_net(tb), net) &&
				    tb->port == port) {
					if (tb->fastreuse >= 0 ||
					    tb->fastreuseport >= 0)
						goto next_port;
					WARN_ON(hlist_empty(&tb->owners));
					if (!check_established(death_row, sk,
							       port, &tw))
						goto ok;
					goto next_port;
				}
			}

			tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					net, head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				break;
			}
			tb->fastreuse = -1;
			tb->fastreuseport = -1;
			goto ok;

		next_port:
			spin_unlock(&head->lock);
		}
		local_bh_enable();

		return -EADDRNOTAVAIL;

ok:
		hint += i;

		/* Head lock still held and bh's disabled */
		inet_bind_hash(sk, tb, port);
		if (sk_unhashed(sk)) {
			inet_sk(sk)->inet_sport = htons(port);
			twrefcnt += hash(sk, tw);
		}
		if (tw)
			twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
		spin_unlock(&head->lock);

		if (tw) {
			inet_twsk_deschedule(tw, death_row);
			while (twrefcnt) {
				twrefcnt--;
				inet_twsk_put(tw);
			}
		}

		ret = 0;
		goto out;
	}

	head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)];
	tb = inet_csk(sk)->icsk_bind_hash;
	spin_lock_bh(&head->lock);
	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
		hash(sk, NULL);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = check_established(death_row, sk, snum, NULL);
out:
		local_bh_enable();
		return ret;
	}
}
/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
			__inet_check_established, __inet_hash_nolisten);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);
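/*
 * Initialize the listening hash buckets.  Each nulls list gets a distinct
 * end marker (i + LISTENING_NULLS_BASE) so lockless lookups can detect
 * that an entry was moved to another chain and restart.
 */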
void inet_hashinfo_init(struct inet_hashinfo *h)
{
	int i;

	atomic_set(&h->bsockets, 0);
	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
		spin_lock_init(&h->listening_hash[i].lock);
		INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
				      i + LISTENING_NULLS_BASE);
	}
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);