2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * IPv4 specific functions
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
16 * See tcp.c for author information
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and Alexey Kuznetsov:
 *					Support the IPV6_V6ONLY socket option,
 *					which allows IPv4 and IPv6 sockets to
 *					bind a single port at the same time.
53 #define pr_fmt(fmt) "TCP: " fmt
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
66 #include <net/net_namespace.h>
68 #include <net/inet_hashtables.h>
70 #include <net/transp_v6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
75 #include <net/secure_seq.h>
76 #include <net/tcp_memcontrol.h>
77 #include <net/busy_poll.h>
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
92 #ifdef CONFIG_TCP_MD5SIG
93 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
94 __be32 daddr, __be32 saddr, const struct tcphdr *th);
97 struct inet_hashinfo tcp_hashinfo;
98 EXPORT_SYMBOL(tcp_hashinfo);
static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
110 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
111 struct tcp_sock *tp = tcp_sk(sk);
	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one: only the timestamp cache is
	   held not per host, but per port pair, and the TW bucket is used as
	   the state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
124 if (tcptw->tw_ts_recent_stamp &&
125 (!twp || (sysctl_tcp_tw_reuse &&
126 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
127 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
130 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
131 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
138 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
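/* Illustrative sketch (not part of this file): the reuse decision above is
 * gated by the tcp_tw_reuse sysctl declared at the top of this file, which
 * an administrator flips from userspace, e.g.:
 *
 *	echo 1 > /proc/sys/net/ipv4/tcp_tw_reuse
 *
 * The write_seq offset of 65535 + 2 keeps the new incarnation's sequence
 * space strictly ahead of anything the old connection could still have
 * in flight.
 */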
140 /* This will initiate an outgoing connection. */
141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
143 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
144 struct inet_sock *inet = inet_sk(sk);
145 struct tcp_sock *tp = tcp_sk(sk);
146 __be16 orig_sport, orig_dport;
147 __be32 daddr, nexthop;
151 struct ip_options_rcu *inet_opt;
153 if (addr_len < sizeof(struct sockaddr_in))
156 if (usin->sin_family != AF_INET)
157 return -EAFNOSUPPORT;
159 nexthop = daddr = usin->sin_addr.s_addr;
160 inet_opt = rcu_dereference_protected(inet->inet_opt,
161 sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}
168 orig_sport = inet->inet_sport;
169 orig_dport = usin->sin_port;
170 fl4 = &inet->cork.fl.u.ip4;
171 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
172 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
174 orig_sport, orig_dport, sk);
177 if (err == -ENETUNREACH)
178 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
182 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
187 if (!inet_opt || !inet_opt->opt.srr)
190 if (!inet->inet_saddr)
191 inet->inet_saddr = fl4->saddr;
192 sk_rcv_saddr_set(sk, inet->inet_saddr);
194 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
195 /* Reset inherited state */
196 tp->rx_opt.ts_recent = 0;
197 tp->rx_opt.ts_recent_stamp = 0;
198 if (likely(!tp->repair))
202 if (tcp_death_row.sysctl_tw_recycle &&
203 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
204 tcp_fetch_timewait_stamp(sk, &rt->dst);
206 inet->inet_dport = usin->sin_port;
207 sk_daddr_set(sk, daddr);
209 inet_csk(sk)->icsk_ext_hdr_len = 0;
211 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
213 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
215 /* Socket identity is still unknown (sport may be zero).
	 * However we set the state to SYN-SENT and, without releasing the
	 * socket lock, select a source port, enter ourselves into the hash
	 * tables and complete the initialization after this.
	 */
220 tcp_set_state(sk, TCP_SYN_SENT);
221 err = inet_hash_connect(&tcp_death_row, sk);
227 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228 inet->inet_sport, inet->inet_dport, sk);
234 /* OK, now commit destination to socket. */
235 sk->sk_gso_type = SKB_GSO_TCPV4;
236 sk_setup_caps(sk, &rt->dst);
238 if (!tp->write_seq && likely(!tp->repair))
239 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
244 inet->inet_id = tp->write_seq ^ jiffies;
246 err = tcp_connect(sk);
 * This unhashes the socket and releases the local port, if necessary.
259 tcp_set_state(sk, TCP_CLOSE);
261 sk->sk_route_caps = 0;
262 inet->inet_dport = 0;
265 EXPORT_SYMBOL(tcp_v4_connect);
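/* Minimal userspace sketch of the path above (standard sockets API,
 * illustrative only; the address is a documentation example). The
 * connect(2) call lands here via the protocol ops: tcp_v4_connect()
 * routes the flow, picks a source port in inet_hash_connect() and sends
 * the SYN via tcp_connect().
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 */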
/*
 * This routine reacts to ICMP_FRAG_NEEDED MTU indications as defined in
 * RFC 1191. It can be called through tcp_release_cb() if the socket was
 * owned by the user at the time tcp_v4_err() was called to handle the
 * ICMP message.
 */
272 void tcp_v4_mtu_reduced(struct sock *sk)
274 struct dst_entry *dst;
275 struct inet_sock *inet = inet_sk(sk);
276 u32 mtu = tcp_sk(sk)->mtu_info;
278 dst = inet_csk_update_pmtu(sk, mtu);
	/* Something is about to go wrong... Remember the soft error
	 * in case this connection is not able to recover.
	 */
285 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
286 sk->sk_err_soft = EMSGSIZE;
290 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
291 ip_sk_accept_pmtu(sk) &&
292 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
293 tcp_sync_mss(sk, mtu);
		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path MTU
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
301 } /* else let the usual retransmit timer handle it */
303 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
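/* Illustrative sketch (userspace, not part of this file): whether the MTU
 * reduction path above is taken at all can be controlled per socket with
 * the standard IP_MTU_DISCOVER option; IP_PMTUDISC_DONT makes the check
 * above bail out and leaves fragmentation to the IP layer instead.
 *
 *	int val = IP_PMTUDISC_DO;
 *	setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));
 */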
305 static void do_redirect(struct sk_buff *skb, struct sock *sk)
307 struct dst_entry *dst = __sk_dst_check(sk, 0);
310 dst->ops->redirect(dst, sk, skb);
314 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
315 void tcp_req_err(struct sock *sk, u32 seq)
317 struct request_sock *req = inet_reqsk(sk);
318 struct net *net = sock_net(sk);
320 /* ICMPs are not backlogged, hence we cannot get
321 * an established socket here.
325 if (seq != tcp_rsk(req)->snt_isn) {
326 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
329 * Still in SYN_RECV, just remove it silently.
330 * There is no good way to pass the error to the newly
331 * created socket, and POSIX does not want network
332 * errors returned from accept().
334 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
335 NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
339 EXPORT_SYMBOL(tcp_req_err);
342 * This routine is called by the ICMP module when it gets some
343 * sort of error condition. If err < 0 then the socket should
344 * be closed and the error returned to the user. If err > 0
345 * it's just the icmp type << 8 | icmp code. After adjustment
346 * header points to the first 8 bytes of the tcp header. We need
347 * to find the appropriate port.
349 * The locking strategy used here is very "optimistic". When
350 * someone else accesses the socket the ICMP is just dropped
351 * and for some paths there is no check at all.
352 * A more general error queue to queue errors for later handling
353 * is probably better.
357 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
359 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
360 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
361 struct inet_connection_sock *icsk;
363 struct inet_sock *inet;
364 const int type = icmp_hdr(icmp_skb)->type;
365 const int code = icmp_hdr(icmp_skb)->code;
368 struct request_sock *fastopen;
372 struct net *net = dev_net(icmp_skb->dev);
374 sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
375 th->dest, iph->saddr, ntohs(th->source),
378 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
381 if (sk->sk_state == TCP_TIME_WAIT) {
382 inet_twsk_put(inet_twsk(sk));
385 seq = ntohl(th->seq);
386 if (sk->sk_state == TCP_NEW_SYN_RECV)
387 return tcp_req_err(sk, seq);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of the PMTU discovery (RFC 1191) special case:
	 * we can receive locally generated ICMP messages while the socket
	 * is held.
	 */
395 if (sock_owned_by_user(sk)) {
396 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
397 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
399 if (sk->sk_state == TCP_CLOSE)
402 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
403 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
	/* XXX (TFO) - tp->snd_una should be the ISN (tcp_create_openreq_child()) */
410 fastopen = tp->fastopen_rsk;
411 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
412 if (sk->sk_state != TCP_LISTEN &&
413 !between(seq, snd_una, tp->snd_nxt)) {
414 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
420 do_redirect(icmp_skb, sk);
422 case ICMP_SOURCE_QUENCH:
423 /* Just silently ignore these. */
425 case ICMP_PARAMETERPROB:
428 case ICMP_DEST_UNREACH:
429 if (code > NR_ICMP_UNREACH)
432 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
			 * they should go through unfragmented).
			 */
437 if (sk->sk_state == TCP_LISTEN)
441 if (!sock_owned_by_user(sk)) {
442 tcp_v4_mtu_reduced(sk);
444 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
450 err = icmp_err_convert[code].errno;
451 /* check if icmp_skb allows revert of backoff
452 * (see draft-zimmermann-tcp-lcd) */
453 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
455 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
456 !icsk->icsk_backoff || fastopen)
459 if (sock_owned_by_user(sk))
462 icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
465 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
467 skb = tcp_write_queue_head(sk);
		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));
475 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
476 remaining, TCP_RTO_MAX);
			/* The RTO revert clocked out the retransmission;
			 * retransmit now. */
480 tcp_retransmit_timer(sk);
484 case ICMP_TIME_EXCEEDED:
491 switch (sk->sk_state) {
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
497 if (fastopen && !fastopen->sk)
500 if (!sock_owned_by_user(sk)) {
503 sk->sk_error_report(sk);
507 sk->sk_err_soft = err;
512 /* If we've already connected we will keep trying
513 * until we time out, or the user gives up.
	 * RFC 1122 4.2.3.9 allows us to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by PMTU discovery).
	 *
	 * Note that in the modern Internet, where routing is unreliable
	 * and broken firewalls sit in every dark corner sending random
	 * errors as ordered by their masters, even these two messages have
	 * finally lost their original sense (even Linux sends invalid
	 * PORT_UNREACHs).
	 *
	 * Now we are in compliance with the RFCs.
	 */
529 if (!sock_owned_by_user(sk) && inet->recverr) {
531 sk->sk_error_report(sk);
532 } else { /* Only an error on timeout */
533 sk->sk_err_soft = err;
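	/* Illustrative sketch (userspace, not part of this file): whether
	 * the error above is reported immediately or only remembered in
	 * sk_err_soft depends on inet->recverr, which applications enable
	 * with the standard IP_RECVERR socket option:
	 *
	 *	int on = 1;
	 *	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
	 */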
541 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
543 struct tcphdr *th = tcp_hdr(skb);
545 if (skb->ip_summed == CHECKSUM_PARTIAL) {
546 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
547 skb->csum_start = skb_transport_header(skb) - skb->head;
548 skb->csum_offset = offsetof(struct tcphdr, check);
550 th->check = tcp_v4_check(skb->len, saddr, daddr,
557 /* This routine computes an IPv4 TCP checksum. */
558 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
560 const struct inet_sock *inet = inet_sk(sk);
562 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
564 EXPORT_SYMBOL(tcp_v4_send_check);
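/* A minimal software-only sketch of what the CHECKSUM_PARTIAL branch above
 * defers to the NIC (illustrative; the real non-offload branch folds in the
 * skb's accumulated csum rather than recomputing it from scratch):
 *
 *	th->check = 0;
 *	th->check = tcp_v4_check(skb->len, saddr, daddr,
 *				 csum_partial(th, skb->len, 0));
 */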
 *	This routine will send an RST to the other TCP.
 *
 *	Someone asks: why do we NEVER use socket parameters (TOS, TTL, etc.)
 *		      for a reset?
 *	Answer: if a packet caused the RST, it is not for a socket
 *		existing in our system; if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other
 *		side's TCP. So we build the reply based only on the
 *		parameters that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
579 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
581 const struct tcphdr *th = tcp_hdr(skb);
584 #ifdef CONFIG_TCP_MD5SIG
585 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
588 struct ip_reply_arg arg;
589 #ifdef CONFIG_TCP_MD5SIG
590 struct tcp_md5sig_key *key;
591 const __u8 *hash_location = NULL;
592 unsigned char newhash[16];
594 struct sock *sk1 = NULL;
	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. The prequeue might have dropped
	 * our dst.
	 */
605 if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
608 /* Swap the send and the receive. */
609 memset(&rep, 0, sizeof(rep));
610 rep.th.dest = th->source;
611 rep.th.source = th->dest;
612 rep.th.doff = sizeof(struct tcphdr) / 4;
616 rep.th.seq = th->ack_seq;
619 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
620 skb->len - (th->doff << 2));
623 memset(&arg, 0, sizeof(arg));
624 arg.iov[0].iov_base = (unsigned char *)&rep;
625 arg.iov[0].iov_len = sizeof(rep.th);
627 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
628 #ifdef CONFIG_TCP_MD5SIG
629 hash_location = tcp_parse_md5sig_option(th);
630 if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the MD5 key through
		 * the listening socket. We are not loosening security here:
		 * the incoming packet is checked with the MD5 hash of the
		 * found key, and no RST is generated if the hash doesn't match.
		 */
638 sk1 = __inet_lookup_listener(net,
639 &tcp_hashinfo, ip_hdr(skb)->saddr,
640 th->source, ip_hdr(skb)->daddr,
641 ntohs(th->source), inet_iif(skb));
		/* don't send an RST if we can't find the key */
646 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
647 &ip_hdr(skb)->saddr, AF_INET);
651 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
652 if (genhash || memcmp(hash_location, newhash, 16) != 0)
655 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
665 /* Update length and the length the header thinks exists */
666 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
667 rep.th.doff = arg.iov[0].iov_len / 4;
669 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
670 key, ip_hdr(skb)->saddr,
671 ip_hdr(skb)->daddr, &rep.th);
674 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
675 ip_hdr(skb)->saddr, /* XXX */
676 arg.iov[0].iov_len, IPPROTO_TCP, 0);
677 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
678 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When the socket is gone, all binding information is lost and
	 * routing might fail. No choice here: if we force the input
	 * interface, we will misroute in the case of an asymmetric route.
	 */
684 arg.bound_dev_if = sk->sk_bound_dev_if;
686 arg.tos = ip_hdr(skb)->tos;
687 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
688 skb, &TCP_SKB_CB(skb)->header.h4.opt,
689 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
690 &arg, arg.iov[0].iov_len);
692 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
693 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
695 #ifdef CONFIG_TCP_MD5SIG
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside of socket context, is certainly ugly. What can I do?
 */
708 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
709 u32 win, u32 tsval, u32 tsecr, int oif,
710 struct tcp_md5sig_key *key,
711 int reply_flags, u8 tos)
713 const struct tcphdr *th = tcp_hdr(skb);
716 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
717 #ifdef CONFIG_TCP_MD5SIG
718 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
722 struct ip_reply_arg arg;
723 struct net *net = dev_net(skb_dst(skb)->dev);
725 memset(&rep.th, 0, sizeof(struct tcphdr));
726 memset(&arg, 0, sizeof(arg));
728 arg.iov[0].iov_base = (unsigned char *)&rep;
729 arg.iov[0].iov_len = sizeof(rep.th);
731 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			   (TCPOPT_TIMESTAMP << 8) |
			   TCPOLEN_TIMESTAMP);
734 rep.opt[1] = htonl(tsval);
735 rep.opt[2] = htonl(tsecr);
736 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
739 /* Swap the send and the receive. */
740 rep.th.dest = th->source;
741 rep.th.source = th->dest;
742 rep.th.doff = arg.iov[0].iov_len / 4;
743 rep.th.seq = htonl(seq);
744 rep.th.ack_seq = htonl(ack);
746 rep.th.window = htons(win);
748 #ifdef CONFIG_TCP_MD5SIG
750 int offset = (tsecr) ? 3 : 0;
		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
756 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
757 rep.th.doff = arg.iov[0].iov_len/4;
759 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
760 key, ip_hdr(skb)->saddr,
761 ip_hdr(skb)->daddr, &rep.th);
764 arg.flags = reply_flags;
765 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
766 ip_hdr(skb)->saddr, /* XXX */
767 arg.iov[0].iov_len, IPPROTO_TCP, 0);
768 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
770 arg.bound_dev_if = oif;
772 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
773 skb, &TCP_SKB_CB(skb)->header.h4.opt,
774 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
775 &arg, arg.iov[0].iov_len);
777 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
780 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
782 struct inet_timewait_sock *tw = inet_twsk(sk);
783 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
785 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
786 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
787 tcp_time_stamp + tcptw->tw_ts_offset,
790 tcp_twsk_md5_key(tcptw),
791 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
798 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
799 struct request_sock *req)
801 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
802 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
804 tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
805 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
806 tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
810 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
812 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
817 * Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big socket.
821 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
823 struct request_sock *req,
824 struct tcp_fastopen_cookie *foc,
827 const struct inet_request_sock *ireq = inet_rsk(req);
832 /* First, grab a route. */
833 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
836 skb = tcp_make_synack(sk, dst, req, foc, attach_req);
839 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
841 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
844 err = net_xmit_eval(err);
851 * IPv4 request_sock destructor.
853 static void tcp_v4_reqsk_destructor(struct request_sock *req)
855 kfree(inet_rsk(req)->opt);
859 #ifdef CONFIG_TCP_MD5SIG
861 * RFC2385 MD5 checksumming requires a mapping of
862 * IP address->MD5 Key.
863 * We need to maintain these in the sk structure.
866 /* Find the Key structure for an address. */
867 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
868 const union tcp_md5_addr *addr,
871 const struct tcp_sock *tp = tcp_sk(sk);
872 struct tcp_md5sig_key *key;
873 unsigned int size = sizeof(struct in_addr);
874 const struct tcp_md5sig_info *md5sig;
876 /* caller either holds rcu_read_lock() or socket lock */
877 md5sig = rcu_dereference_check(tp->md5sig_info,
878 sock_owned_by_user(sk) ||
879 lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
882 #if IS_ENABLED(CONFIG_IPV6)
883 if (family == AF_INET6)
884 size = sizeof(struct in6_addr);
886 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
887 if (key->family != family)
889 if (!memcmp(&key->addr, addr, size))
894 EXPORT_SYMBOL(tcp_md5_do_lookup);
896 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
897 const struct sock *addr_sk)
899 const union tcp_md5_addr *addr;
901 addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
902 return tcp_md5_do_lookup(sk, addr, AF_INET);
904 EXPORT_SYMBOL(tcp_v4_md5_lookup);
906 /* This can be called on a newly created socket, from other files */
907 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
908 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
910 /* Add Key to the list */
911 struct tcp_md5sig_key *key;
912 struct tcp_sock *tp = tcp_sk(sk);
913 struct tcp_md5sig_info *md5sig;
915 key = tcp_md5_do_lookup(sk, addr, family);
917 /* Pre-existing entry - just update that one. */
918 memcpy(key->key, newkey, newkeylen);
919 key->keylen = newkeylen;
923 md5sig = rcu_dereference_protected(tp->md5sig_info,
924 sock_owned_by_user(sk));
926 md5sig = kmalloc(sizeof(*md5sig), gfp);
930 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
931 INIT_HLIST_HEAD(&md5sig->head);
932 rcu_assign_pointer(tp->md5sig_info, md5sig);
935 key = sock_kmalloc(sk, sizeof(*key), gfp);
938 if (!tcp_alloc_md5sig_pool()) {
939 sock_kfree_s(sk, key, sizeof(*key));
943 memcpy(key->key, newkey, newkeylen);
944 key->keylen = newkeylen;
945 key->family = family;
946 memcpy(&key->addr, addr,
947 (family == AF_INET6) ? sizeof(struct in6_addr) :
948 sizeof(struct in_addr));
949 hlist_add_head_rcu(&key->node, &md5sig->head);
952 EXPORT_SYMBOL(tcp_md5_do_add);
954 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
956 struct tcp_md5sig_key *key;
958 key = tcp_md5_do_lookup(sk, addr, family);
961 hlist_del_rcu(&key->node);
962 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
966 EXPORT_SYMBOL(tcp_md5_do_del);
968 static void tcp_clear_md5_list(struct sock *sk)
970 struct tcp_sock *tp = tcp_sk(sk);
971 struct tcp_md5sig_key *key;
972 struct hlist_node *n;
973 struct tcp_md5sig_info *md5sig;
975 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
977 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
978 hlist_del_rcu(&key->node);
979 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
984 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
987 struct tcp_md5sig cmd;
988 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
990 if (optlen < sizeof(cmd))
993 if (copy_from_user(&cmd, optval, sizeof(cmd)))
996 if (sin->sin_family != AF_INET)
999 if (!cmd.tcpm_keylen)
1000 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1003 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1006 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1007 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
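/* Userspace sketch of driving the parser above via the standard TCP_MD5SIG
 * socket option (illustrative only; key and peer address are made-up
 * examples). A zero tcpm_keylen takes the tcp_md5_do_del() branch above.
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */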
1011 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1012 __be32 daddr, __be32 saddr, int nbytes)
1014 struct tcp4_pseudohdr *bp;
1015 struct scatterlist sg;
1017 bp = &hp->md5_blk.ip4;
	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
1027 bp->protocol = IPPROTO_TCP;
1028 bp->len = cpu_to_be16(nbytes);
1030 sg_init_one(&sg, bp, sizeof(*bp));
1031 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
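/* For reference: RFC 2385 defines the digest input as, in order, the
 * pseudo-header built above, the TCP header with a zeroed checksum field,
 * the segment data, and finally the key itself - the sequence the
 * _hash_hdr/_hash_skb helpers below feed into the MD5 transform.
 */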
1034 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1035 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1037 struct tcp_md5sig_pool *hp;
1038 struct hash_desc *desc;
1040 hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
1043 desc = &hp->md5_desc;
1045 if (crypto_hash_init(desc))
1047 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1049 if (tcp_md5_hash_header(hp, th))
1051 if (tcp_md5_hash_key(hp, key))
1053 if (crypto_hash_final(desc, md5_hash))
1056 tcp_put_md5sig_pool();
1060 tcp_put_md5sig_pool();
1062 memset(md5_hash, 0, 16);
1066 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1067 const struct sock *sk,
1068 const struct sk_buff *skb)
1070 struct tcp_md5sig_pool *hp;
1071 struct hash_desc *desc;
1072 const struct tcphdr *th = tcp_hdr(skb);
1073 __be32 saddr, daddr;
1075 if (sk) { /* valid for establish/request sockets */
1076 saddr = sk->sk_rcv_saddr;
1077 daddr = sk->sk_daddr;
1079 const struct iphdr *iph = ip_hdr(skb);
1084 hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
1087 desc = &hp->md5_desc;
1089 if (crypto_hash_init(desc))
1092 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1094 if (tcp_md5_hash_header(hp, th))
1096 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1098 if (tcp_md5_hash_key(hp, key))
1100 if (crypto_hash_final(desc, md5_hash))
1103 tcp_put_md5sig_pool();
1107 tcp_put_md5sig_pool();
1109 memset(md5_hash, 0, 16);
1112 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1116 /* Called with rcu_read_lock() */
1117 static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1118 const struct sk_buff *skb)
1120 #ifdef CONFIG_TCP_MD5SIG
1122 * This gets called for each TCP segment that arrives
1123 * so we want to be efficient.
1124 * We have 3 drop cases:
1125 * o No MD5 hash and one expected.
1126 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
1129 const __u8 *hash_location = NULL;
1130 struct tcp_md5sig_key *hash_expected;
1131 const struct iphdr *iph = ip_hdr(skb);
1132 const struct tcphdr *th = tcp_hdr(skb);
1134 unsigned char newhash[16];
1136 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1138 hash_location = tcp_parse_md5sig_option(th);
1140 /* We've parsed the options - do we have a hash? */
1141 if (!hash_expected && !hash_location)
1144 if (hash_expected && !hash_location) {
1145 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1149 if (!hash_expected && hash_location) {
1150 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
	/* Okay, so we have both hash_expected and hash_location -
	 * we need to calculate the checksum.
	 */
1157 genhash = tcp_v4_md5_hash_skb(newhash,
1161 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1162 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1163 &iph->saddr, ntohs(th->source),
1164 &iph->daddr, ntohs(th->dest),
1165 genhash ? " tcp_v4_calc_md5_hash failed"
1174 static void tcp_v4_init_req(struct request_sock *req,
1175 const struct sock *sk_listener,
1176 struct sk_buff *skb)
1178 struct inet_request_sock *ireq = inet_rsk(req);
1180 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1181 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1182 ireq->no_srccheck = inet_sk(sk_listener)->transparent;
1183 ireq->opt = tcp_v4_save_options(skb);
1186 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1188 const struct request_sock *req,
1191 struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1194 if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1203 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1205 .obj_size = sizeof(struct tcp_request_sock),
1206 .rtx_syn_ack = tcp_rtx_synack,
1207 .send_ack = tcp_v4_reqsk_send_ack,
1208 .destructor = tcp_v4_reqsk_destructor,
1209 .send_reset = tcp_v4_send_reset,
1210 .syn_ack_timeout = tcp_syn_ack_timeout,
1213 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1214 .mss_clamp = TCP_MSS_DEFAULT,
1215 #ifdef CONFIG_TCP_MD5SIG
1216 .req_md5_lookup = tcp_v4_md5_lookup,
1217 .calc_md5_hash = tcp_v4_md5_hash_skb,
1219 .init_req = tcp_v4_init_req,
1220 #ifdef CONFIG_SYN_COOKIES
1221 .cookie_init_seq = cookie_v4_init_sequence,
1223 .route_req = tcp_v4_route_req,
1224 .init_seq = tcp_v4_init_sequence,
1225 .send_synack = tcp_v4_send_synack,
1228 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
	/* Never answer SYNs sent to broadcast or multicast addresses */
1231 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1234 return tcp_conn_request(&tcp_request_sock_ops,
1235 &tcp_request_sock_ipv4_ops, sk, skb);
1238 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1241 EXPORT_SYMBOL(tcp_v4_conn_request);
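/* Sketch (administrative, not part of this file): the syncookie fallback
 * that tcp_conn_request() may use when the SYN queue overflows is
 * controlled system-wide by:
 *
 *	echo 1 > /proc/sys/net/ipv4/tcp_syncookies
 */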
 * The three-way handshake has completed - we got a valid synack -
 * now create the new socket.
1248 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1249 struct request_sock *req,
1250 struct dst_entry *dst)
1252 struct inet_request_sock *ireq;
1253 struct inet_sock *newinet;
1254 struct tcp_sock *newtp;
1256 #ifdef CONFIG_TCP_MD5SIG
1257 struct tcp_md5sig_key *key;
1259 struct ip_options_rcu *inet_opt;
1261 if (sk_acceptq_is_full(sk))
1264 newsk = tcp_create_openreq_child(sk, req, skb);
1268 newsk->sk_gso_type = SKB_GSO_TCPV4;
1269 inet_sk_rx_dst_set(newsk, skb);
1271 newtp = tcp_sk(newsk);
1272 newinet = inet_sk(newsk);
1273 ireq = inet_rsk(req);
1274 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1275 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1276 newinet->inet_saddr = ireq->ir_loc_addr;
1277 inet_opt = ireq->opt;
1278 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1280 newinet->mc_index = inet_iif(skb);
1281 newinet->mc_ttl = ip_hdr(skb)->ttl;
1282 newinet->rcv_tos = ip_hdr(skb)->tos;
1283 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1285 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1286 newinet->inet_id = newtp->write_seq ^ jiffies;
1289 dst = inet_csk_route_child_sock(sk, newsk, req);
1293 /* syncookie case : see end of cookie_v4_check() */
1295 sk_setup_caps(newsk, dst);
1297 tcp_ca_openreq_child(newsk, dst);
1299 tcp_sync_mss(newsk, dst_mtu(dst));
1300 newtp->advmss = dst_metric_advmss(dst);
1301 if (tcp_sk(sk)->rx_opt.user_mss &&
1302 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1303 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1305 tcp_initialize_rcv_mss(newsk);
1307 #ifdef CONFIG_TCP_MD5SIG
1308 /* Copy over the MD5 key from the original socket */
1309 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
1318 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1319 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1320 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1324 if (__inet_inherit_port(sk, newsk) < 0)
1326 __inet_hash_nolisten(newsk, NULL);
1331 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1335 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1338 inet_csk_prepare_forced_close(newsk);
1342 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1344 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1346 #ifdef CONFIG_SYN_COOKIES
1347 const struct tcphdr *th = tcp_hdr(skb);
1350 sk = cookie_v4_check(sk, skb);
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
1363 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1367 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1368 struct dst_entry *dst = sk->sk_rx_dst;
1370 sock_rps_save_rxhash(sk, skb);
1371 sk_mark_napi_id(sk, skb);
1373 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1374 !dst->ops->check(dst, 0)) {
1376 sk->sk_rx_dst = NULL;
1379 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1383 if (tcp_checksum_complete(skb))
1386 if (sk->sk_state == TCP_LISTEN) {
1387 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1392 sock_rps_save_rxhash(nsk, skb);
1393 sk_mark_napi_id(nsk, skb);
1394 if (tcp_child_process(sk, nsk, skb)) {
1401 sock_rps_save_rxhash(sk, skb);
1403 if (tcp_rcv_state_process(sk, skb)) {
1410 tcp_v4_send_reset(rsk, skb);
1413 /* Be careful here. If this function gets more complicated and
1414 * gcc suffers from register pressure on the x86, sk (in %ebx)
1415 * might be destroyed here. This current version compiles correctly,
1416 * but you have been warned.
1421 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1422 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1425 EXPORT_SYMBOL(tcp_v4_do_rcv);
1427 void tcp_v4_early_demux(struct sk_buff *skb)
1429 const struct iphdr *iph;
1430 const struct tcphdr *th;
1433 if (skb->pkt_type != PACKET_HOST)
1436 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1442 if (th->doff < sizeof(struct tcphdr) / 4)
1445 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1446 iph->saddr, th->source,
1447 iph->daddr, ntohs(th->dest),
1451 skb->destructor = sock_edemux;
1452 if (sk_fullsock(sk)) {
1453 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1456 dst = dst_check(dst, 0);
1458 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1459 skb_dst_set_noref(skb, dst);
1464 /* Packet is added to VJ-style prequeue for processing in process
1465 * context, if a reader task is waiting. Apparently, this exciting
1466 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see why it failed. 8)8)				--ANK
 */
1471 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1473 struct tcp_sock *tp = tcp_sk(sk);
1475 if (sysctl_tcp_low_latency || !tp->ucopy.task)
1478 if (skb->len <= tcp_hdrlen(skb) &&
1479 skb_queue_len(&tp->ucopy.prequeue) == 0)
1482 /* Before escaping RCU protected region, we need to take care of skb
1483 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
	 * Instead of doing a full sk_rx_dst validity check here, let's perform
	 * an optimistic one.
	 */
1488 if (likely(sk->sk_rx_dst))
1493 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1494 tp->ucopy.memory += skb->truesize;
1495 if (tp->ucopy.memory > sk->sk_rcvbuf) {
1496 struct sk_buff *skb1;
1498 BUG_ON(sock_owned_by_user(sk));
1500 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1501 sk_backlog_rcv(sk, skb1);
1502 NET_INC_STATS_BH(sock_net(sk),
1503 LINUX_MIB_TCPPREQUEUEDROPPED);
1506 tp->ucopy.memory = 0;
1507 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1508 wake_up_interruptible_sync_poll(sk_sleep(sk),
1509 POLLIN | POLLRDNORM | POLLRDBAND);
1510 if (!inet_csk_ack_scheduled(sk))
1511 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1512 (3 * tcp_rto_min(sk)) / 4,
1517 EXPORT_SYMBOL(tcp_prequeue);
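/* Sketch (administrative, not part of this file): the early return above
 * means the prequeue can be disabled system-wide for latency-sensitive
 * setups via the sysctl exported at the top of this file:
 *
 *	echo 1 > /proc/sys/net/ipv4/tcp_low_latency
 */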
1523 int tcp_v4_rcv(struct sk_buff *skb)
1525 const struct iphdr *iph;
1526 const struct tcphdr *th;
1529 struct net *net = dev_net(skb->dev);
1531 if (skb->pkt_type != PACKET_HOST)
1534 /* Count it even if it's bad */
1535 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1537 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1542 if (th->doff < sizeof(struct tcphdr) / 4)
1544 if (!pskb_may_pull(skb, th->doff * 4))
1547 /* An explanation is required here, I think.
1548 * Packet length and doff are validated by header prediction,
	 * provided the case of th->doff == 0 is eliminated.
1550 * So, we defer the checks. */
1552 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
	/* This is tricky: we move the IPCB to its correct location inside
	 * TCP_SKB_CB(); barrier() makes sure the compiler won't play
	 * fool^Waliasing games.
	 */
1560 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1561 sizeof(struct inet_skb_parm));
1564 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1565 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1566 skb->len - th->doff * 4);
1567 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1568 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1569 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1570 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1571 TCP_SKB_CB(skb)->sacked = 0;
1574 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1579 if (sk->sk_state == TCP_TIME_WAIT)
1582 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1583 struct request_sock *req = inet_reqsk(sk);
1584 struct sock *nsk = NULL;
1586 sk = req->rsk_listener;
1587 if (tcp_v4_inbound_md5_hash(sk, skb))
1588 goto discard_and_relse;
1589 if (likely(sk->sk_state == TCP_LISTEN)) {
1590 nsk = tcp_check_req(sk, skb, req, false);
1592 inet_csk_reqsk_queue_drop_and_put(sk, req);
1602 } else if (tcp_child_process(sk, nsk, skb)) {
1603 tcp_v4_send_reset(nsk, skb);
1609 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1610 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1611 goto discard_and_relse;
1614 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1615 goto discard_and_relse;
1617 if (tcp_v4_inbound_md5_hash(sk, skb))
1618 goto discard_and_relse;
1622 if (sk_filter(sk, skb))
1623 goto discard_and_relse;
1627 if (sk->sk_state == TCP_LISTEN) {
1628 ret = tcp_v4_do_rcv(sk, skb);
1629 goto put_and_return;
1632 sk_incoming_cpu_update(sk);
1634 bh_lock_sock_nested(sk);
1635 tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1637 if (!sock_owned_by_user(sk)) {
1638 if (!tcp_prequeue(sk, skb))
1639 ret = tcp_v4_do_rcv(sk, skb);
1640 } else if (unlikely(sk_add_backlog(sk, skb,
1641 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1643 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1644 goto discard_and_relse;
1654 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1657 if (tcp_checksum_complete(skb)) {
1659 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1661 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1663 tcp_v4_send_reset(NULL, skb);
1667 /* Discard frame. */
1676 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1677 inet_twsk_put(inet_twsk(sk));
1681 if (tcp_checksum_complete(skb)) {
1682 inet_twsk_put(inet_twsk(sk));
1685 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1687 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1689 iph->saddr, th->source,
1690 iph->daddr, th->dest,
1693 inet_twsk_deschedule_put(inet_twsk(sk));
1697 /* Fall through to ACK */
1700 tcp_v4_timewait_ack(sk, skb);
1704 case TCP_TW_SUCCESS:;
1709 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1710 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1711 .twsk_unique = tcp_twsk_unique,
1712 .twsk_destructor= tcp_twsk_destructor,
1715 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1717 struct dst_entry *dst = skb_dst(skb);
1721 sk->sk_rx_dst = dst;
1722 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1725 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1727 const struct inet_connection_sock_af_ops ipv4_specific = {
1728 .queue_xmit = ip_queue_xmit,
1729 .send_check = tcp_v4_send_check,
1730 .rebuild_header = inet_sk_rebuild_header,
1731 .sk_rx_dst_set = inet_sk_rx_dst_set,
1732 .conn_request = tcp_v4_conn_request,
1733 .syn_recv_sock = tcp_v4_syn_recv_sock,
1734 .net_header_len = sizeof(struct iphdr),
1735 .setsockopt = ip_setsockopt,
1736 .getsockopt = ip_getsockopt,
1737 .addr2sockaddr = inet_csk_addr2sockaddr,
1738 .sockaddr_len = sizeof(struct sockaddr_in),
1739 .bind_conflict = inet_csk_bind_conflict,
1740 #ifdef CONFIG_COMPAT
1741 .compat_setsockopt = compat_ip_setsockopt,
1742 .compat_getsockopt = compat_ip_getsockopt,
1744 .mtu_reduced = tcp_v4_mtu_reduced,
1746 EXPORT_SYMBOL(ipv4_specific);
1748 #ifdef CONFIG_TCP_MD5SIG
1749 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1750 .md5_lookup = tcp_v4_md5_lookup,
1751 .calc_md5_hash = tcp_v4_md5_hash_skb,
1752 .md5_parse = tcp_v4_parse_md5_keys,
/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
1759 static int tcp_v4_init_sock(struct sock *sk)
1761 struct inet_connection_sock *icsk = inet_csk(sk);
1765 icsk->icsk_af_ops = &ipv4_specific;
1767 #ifdef CONFIG_TCP_MD5SIG
1768 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1774 void tcp_v4_destroy_sock(struct sock *sk)
1776 struct tcp_sock *tp = tcp_sk(sk);
1778 tcp_clear_xmit_timers(sk);
1780 tcp_cleanup_congestion_control(sk);
	/* Clean up the write buffer. */
1783 tcp_write_queue_purge(sk);
1785 /* Cleans up our, hopefully empty, out_of_order_queue. */
1786 __skb_queue_purge(&tp->out_of_order_queue);
1788 #ifdef CONFIG_TCP_MD5SIG
1789 /* Clean up the MD5 key list, if any */
1790 if (tp->md5sig_info) {
1791 tcp_clear_md5_list(sk);
1792 kfree_rcu(tp->md5sig_info, rcu);
1793 tp->md5sig_info = NULL;
	/* Clean the prequeue; it really must be empty */
1798 __skb_queue_purge(&tp->ucopy.prequeue);
1800 /* Clean up a referenced TCP bind bucket. */
1801 if (inet_csk(sk)->icsk_bind_hash)
1804 BUG_ON(tp->fastopen_rsk);
1806 /* If socket is aborted during connect operation */
1807 tcp_free_fastopen_req(tp);
1808 tcp_saved_syn_free(tp);
1810 sk_sockets_allocated_dec(sk);
1811 sock_release_memcg(sk);
1813 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1815 #ifdef CONFIG_PROC_FS
1816 /* Proc filesystem TCP sock list dumping. */
/*
 * Get the next listener socket following cur. If cur is NULL, get the
 * first socket starting from the bucket given in st->bucket; when
 * st->bucket is zero, the very first socket in the hash table is returned.
 */
1823 static void *listening_get_next(struct seq_file *seq, void *cur)
1825 struct inet_connection_sock *icsk;
1826 struct hlist_nulls_node *node;
1827 struct sock *sk = cur;
1828 struct inet_listen_hashbucket *ilb;
1829 struct tcp_iter_state *st = seq->private;
1830 struct net *net = seq_file_net(seq);
1833 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1834 spin_lock_bh(&ilb->lock);
1835 sk = sk_nulls_head(&ilb->head);
1839 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1843 sk = sk_nulls_next(sk);
1845 sk_nulls_for_each_from(sk, node) {
1846 if (!net_eq(sock_net(sk), net))
1848 if (sk->sk_family == st->family) {
1852 icsk = inet_csk(sk);
1854 spin_unlock_bh(&ilb->lock);
1856 if (++st->bucket < INET_LHTABLE_SIZE) {
1857 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1858 spin_lock_bh(&ilb->lock);
1859 sk = sk_nulls_head(&ilb->head);
1867 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1869 struct tcp_iter_state *st = seq->private;
1874 rc = listening_get_next(seq, NULL);
1876 while (rc && *pos) {
1877 rc = listening_get_next(seq, rc);
1883 static inline bool empty_bucket(const struct tcp_iter_state *st)
1885 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
/*
 * Get the first established socket starting from the bucket given in
 * st->bucket. If st->bucket is zero, the very first socket in the hash
 * is returned.
 */
1892 static void *established_get_first(struct seq_file *seq)
1894 struct tcp_iter_state *st = seq->private;
1895 struct net *net = seq_file_net(seq);
1899 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1901 struct hlist_nulls_node *node;
1902 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1904 /* Lockless fast path for the common case of empty buckets */
1905 if (empty_bucket(st))
1909 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1910 if (sk->sk_family != st->family ||
1911 !net_eq(sock_net(sk), net)) {
1917 spin_unlock_bh(lock);
1923 static void *established_get_next(struct seq_file *seq, void *cur)
1925 struct sock *sk = cur;
1926 struct hlist_nulls_node *node;
1927 struct tcp_iter_state *st = seq->private;
1928 struct net *net = seq_file_net(seq);
1933 sk = sk_nulls_next(sk);
1935 sk_nulls_for_each_from(sk, node) {
1936 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1940 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1942 return established_get_first(seq);
1945 static void *established_get_idx(struct seq_file *seq, loff_t pos)
1947 struct tcp_iter_state *st = seq->private;
1951 rc = established_get_first(seq);
1954 rc = established_get_next(seq, rc);
1960 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1963 struct tcp_iter_state *st = seq->private;
1965 st->state = TCP_SEQ_STATE_LISTENING;
1966 rc = listening_get_idx(seq, &pos);
1969 st->state = TCP_SEQ_STATE_ESTABLISHED;
1970 rc = established_get_idx(seq, pos);
1976 static void *tcp_seek_last_pos(struct seq_file *seq)
1978 struct tcp_iter_state *st = seq->private;
1979 int offset = st->offset;
1980 int orig_num = st->num;
1983 switch (st->state) {
1984 case TCP_SEQ_STATE_LISTENING:
1985 if (st->bucket >= INET_LHTABLE_SIZE)
1987 st->state = TCP_SEQ_STATE_LISTENING;
1988 rc = listening_get_next(seq, NULL);
1989 while (offset-- && rc)
1990 rc = listening_get_next(seq, rc);
1994 st->state = TCP_SEQ_STATE_ESTABLISHED;
1996 case TCP_SEQ_STATE_ESTABLISHED:
1997 if (st->bucket > tcp_hashinfo.ehash_mask)
1999 rc = established_get_first(seq);
2000 while (offset-- && rc)
2001 rc = established_get_next(seq, rc);
2009 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2011 struct tcp_iter_state *st = seq->private;
2014 if (*pos && *pos == st->last_pos) {
2015 rc = tcp_seek_last_pos(seq);
2020 st->state = TCP_SEQ_STATE_LISTENING;
2024 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2027 st->last_pos = *pos;
2031 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2033 struct tcp_iter_state *st = seq->private;
2036 if (v == SEQ_START_TOKEN) {
2037 rc = tcp_get_idx(seq, 0);
2041 switch (st->state) {
2042 case TCP_SEQ_STATE_LISTENING:
2043 rc = listening_get_next(seq, v);
2045 st->state = TCP_SEQ_STATE_ESTABLISHED;
2048 rc = established_get_first(seq);
2051 case TCP_SEQ_STATE_ESTABLISHED:
2052 rc = established_get_next(seq, v);
2057 st->last_pos = *pos;
2061 static void tcp_seq_stop(struct seq_file *seq, void *v)
2063 struct tcp_iter_state *st = seq->private;
2065 switch (st->state) {
2066 case TCP_SEQ_STATE_LISTENING:
2067 if (v != SEQ_START_TOKEN)
2068 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2070 case TCP_SEQ_STATE_ESTABLISHED:
2072 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2077 int tcp_seq_open(struct inode *inode, struct file *file)
2079 struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2080 struct tcp_iter_state *s;
2083 err = seq_open_net(inode, file, &afinfo->seq_ops,
2084 sizeof(struct tcp_iter_state));
2088 s = ((struct seq_file *)file->private_data)->private;
2089 s->family = afinfo->family;
2093 EXPORT_SYMBOL(tcp_seq_open);
2095 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2098 struct proc_dir_entry *p;
2100 afinfo->seq_ops.start = tcp_seq_start;
2101 afinfo->seq_ops.next = tcp_seq_next;
2102 afinfo->seq_ops.stop = tcp_seq_stop;
2104 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2105 afinfo->seq_fops, afinfo);
2110 EXPORT_SYMBOL(tcp_proc_register);
2112 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2114 remove_proc_entry(afinfo->name, net->proc_net);
2116 EXPORT_SYMBOL(tcp_proc_unregister);
2118 static void get_openreq4(const struct request_sock *req,
2119 struct seq_file *f, int i)
2121 const struct inet_request_sock *ireq = inet_rsk(req);
2122 long delta = req->rsk_timer.expires - jiffies;
2124 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2125 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2130 ntohs(ireq->ir_rmt_port),
2132 0, 0, /* could print option size, but that is af dependent. */
2133 1, /* timers active (only the expire timer) */
2134 jiffies_delta_to_clock_t(delta),
2136 from_kuid_munged(seq_user_ns(f),
2137 sock_i_uid(req->rsk_listener)),
2138 0, /* non standard timer */
2139 0, /* open_requests have no inode */
2144 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2147 unsigned long timer_expires;
2148 const struct tcp_sock *tp = tcp_sk(sk);
2149 const struct inet_connection_sock *icsk = inet_csk(sk);
2150 const struct inet_sock *inet = inet_sk(sk);
2151 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2152 __be32 dest = inet->inet_daddr;
2153 __be32 src = inet->inet_rcv_saddr;
2154 __u16 destp = ntohs(inet->inet_dport);
2155 __u16 srcp = ntohs(inet->inet_sport);
2158 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2159 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2160 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2162 timer_expires = icsk->icsk_timeout;
2163 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2165 timer_expires = icsk->icsk_timeout;
2166 } else if (timer_pending(&sk->sk_timer)) {
2168 timer_expires = sk->sk_timer.expires;
2171 timer_expires = jiffies;
2174 if (sk->sk_state == TCP_LISTEN)
2175 rx_queue = sk->sk_ack_backlog;
		/* Because we don't lock the socket, we might find a
		 * transient negative value.
		 */
2180 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2182 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2183 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2184 i, src, srcp, dest, destp, sk->sk_state,
2185 tp->write_seq - tp->snd_una,
2188 jiffies_delta_to_clock_t(timer_expires - jiffies),
2189 icsk->icsk_retransmits,
2190 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2191 icsk->icsk_probes_out,
2193 atomic_read(&sk->sk_refcnt), sk,
2194 jiffies_to_clock_t(icsk->icsk_rto),
2195 jiffies_to_clock_t(icsk->icsk_ack.ato),
2196 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2198 sk->sk_state == TCP_LISTEN ?
2199 (fastopenq ? fastopenq->max_qlen : 0) :
2200 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2203 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2204 struct seq_file *f, int i)
2206 long delta = tw->tw_timer.expires - jiffies;
2210 dest = tw->tw_daddr;
2211 src = tw->tw_rcv_saddr;
2212 destp = ntohs(tw->tw_dport);
2213 srcp = ntohs(tw->tw_sport);
2215 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2216 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2217 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2218 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2219 atomic_read(&tw->tw_refcnt), tw);
2224 static int tcp4_seq_show(struct seq_file *seq, void *v)
2226 struct tcp_iter_state *st;
2227 struct sock *sk = v;
2229 seq_setwidth(seq, TMPSZ - 1);
2230 if (v == SEQ_START_TOKEN) {
2231 seq_puts(seq, " sl local_address rem_address st tx_queue "
2232 "rx_queue tr tm->when retrnsmt uid timeout "
2238 if (sk->sk_state == TCP_TIME_WAIT)
2239 get_timewait4_sock(v, seq, st->num);
2240 else if (sk->sk_state == TCP_NEW_SYN_RECV)
2241 get_openreq4(v, seq, st->num);
2243 get_tcp4_sock(v, seq, st->num);
2249 static const struct file_operations tcp_afinfo_seq_fops = {
2250 .owner = THIS_MODULE,
2251 .open = tcp_seq_open,
2253 .llseek = seq_lseek,
2254 .release = seq_release_net
2257 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2260 .seq_fops = &tcp_afinfo_seq_fops,
2262 .show = tcp4_seq_show,
2266 static int __net_init tcp4_proc_init_net(struct net *net)
2268 return tcp_proc_register(net, &tcp4_seq_afinfo);
2271 static void __net_exit tcp4_proc_exit_net(struct net *net)
2273 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2276 static struct pernet_operations tcp4_net_ops = {
2277 .init = tcp4_proc_init_net,
2278 .exit = tcp4_proc_exit_net,
2281 int __init tcp4_proc_init(void)
2283 return register_pernet_subsys(&tcp4_net_ops);
2286 void tcp4_proc_exit(void)
2288 unregister_pernet_subsys(&tcp4_net_ops);
2290 #endif /* CONFIG_PROC_FS */
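/* Userspace sketch of consuming the seq_file output registered above
 * (illustrative only; the per-socket fields are formatted by
 * get_openreq4(), get_tcp4_sock() and get_timewait4_sock()):
 *
 *	FILE *f = fopen("/proc/net/tcp", "r");
 *	char line[256];
 *
 *	while (f && fgets(line, sizeof(line), f))
 *		fputs(line, stdout);
 *	if (f)
 *		fclose(f);
 */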
2292 struct proto tcp_prot = {
2294 .owner = THIS_MODULE,
2296 .connect = tcp_v4_connect,
2297 .disconnect = tcp_disconnect,
2298 .accept = inet_csk_accept,
2300 .init = tcp_v4_init_sock,
2301 .destroy = tcp_v4_destroy_sock,
2302 .shutdown = tcp_shutdown,
2303 .setsockopt = tcp_setsockopt,
2304 .getsockopt = tcp_getsockopt,
2305 .recvmsg = tcp_recvmsg,
2306 .sendmsg = tcp_sendmsg,
2307 .sendpage = tcp_sendpage,
2308 .backlog_rcv = tcp_v4_do_rcv,
2309 .release_cb = tcp_release_cb,
2311 .unhash = inet_unhash,
2312 .get_port = inet_csk_get_port,
2313 .enter_memory_pressure = tcp_enter_memory_pressure,
2314 .stream_memory_free = tcp_stream_memory_free,
2315 .sockets_allocated = &tcp_sockets_allocated,
2316 .orphan_count = &tcp_orphan_count,
2317 .memory_allocated = &tcp_memory_allocated,
2318 .memory_pressure = &tcp_memory_pressure,
2319 .sysctl_mem = sysctl_tcp_mem,
2320 .sysctl_wmem = sysctl_tcp_wmem,
2321 .sysctl_rmem = sysctl_tcp_rmem,
2322 .max_header = MAX_TCP_HEADER,
2323 .obj_size = sizeof(struct tcp_sock),
2324 .slab_flags = SLAB_DESTROY_BY_RCU,
2325 .twsk_prot = &tcp_timewait_sock_ops,
2326 .rsk_prot = &tcp_request_sock_ops,
2327 .h.hashinfo = &tcp_hashinfo,
2328 .no_autobind = true,
2329 #ifdef CONFIG_COMPAT
2330 .compat_setsockopt = compat_tcp_setsockopt,
2331 .compat_getsockopt = compat_tcp_getsockopt,
2333 #ifdef CONFIG_MEMCG_KMEM
2334 .init_cgroup = tcp_init_cgroup,
2335 .destroy_cgroup = tcp_destroy_cgroup,
2336 .proto_cgroup = tcp_proto_cgroup,
2339 EXPORT_SYMBOL(tcp_prot);
2341 static void __net_exit tcp_sk_exit(struct net *net)
2345 for_each_possible_cpu(cpu)
2346 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2347 free_percpu(net->ipv4.tcp_sk);
2350 static int __net_init tcp_sk_init(struct net *net)
2354 net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2355 if (!net->ipv4.tcp_sk)
2358 for_each_possible_cpu(cpu) {
2361 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2365 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2368 net->ipv4.sysctl_tcp_ecn = 2;
2369 net->ipv4.sysctl_tcp_ecn_fallback = 1;
2371 net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2372 net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2373 net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2382 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2384 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2387 static struct pernet_operations __net_initdata tcp_sk_ops = {
2388 .init = tcp_sk_init,
2389 .exit = tcp_sk_exit,
2390 .exit_batch = tcp_sk_exit_batch,
2393 void __init tcp_v4_init(void)
2395 inet_hashinfo_init(&tcp_hashinfo);
2396 if (register_pernet_subsys(&tcp_sk_ops))
2397 panic("Failed to create the TCP control socket.\n");