/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>

#include <net/net_namespace.h>
#include <net/inet_hashtables.h>
#include <net/transp_v6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	return NULL;
}
#endif

struct inet_hashinfo tcp_hashinfo;

static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only the timestamp cache
	   is held not per host but per port pair, and the TW bucket is used
	   as the state holder.

	   If the TW bucket has already been destroyed, we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(tcp_twsk_unique);
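/*
 * Illustrative sketch (not part of the original file): the reuse path above
 * restarts the sequence space just past the old connection, e.g.
 *
 *	old tw_snd_nxt  = 0x1000
 *	new write_seq   = 0x1000 + 65535 + 2 = 0x11001
 *
 * i.e. the new connection begins beyond the largest window the old peer
 * could still have outstanding, so stray segments from the old incarnation
 * cannot be mistaken for new data.
 */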
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	__be32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->inet_sport, usin->sin_port, sk, 1);
	if (tmp < 0) {
		if (tmp == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return tmp;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->inet_saddr)
		inet->inet_saddr = rt->rt_src;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);
		/*
		 * VJ's idea. We save the last timestamp seen from
		 * the destination in the peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it
		 * when trying a new connection.
		 */
		if (peer != NULL &&
		    (u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
			tp->rx_opt.ts_recent = peer->tcp_ts;
		}
	}

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However, we set the state to SYN-SENT and, without releasing the
	 * socket lock, select a source port, enter ourselves into the hash
	 * tables, and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, IPPROTO_TCP,
				inet->inet_sport, inet->inet_dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->u.dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
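/*
 * Illustrative sketch (user-space, not kernel code): tcp_v4_connect() above
 * is what ultimately runs when an application performs connect(2) on an
 * AF_INET stream socket. Address and port here are examples only.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
 *		perror("connect");	// may surface e.g. ENETUNREACH from above
 */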
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the dst entry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember the soft error
	 * for the case that this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
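/*
 * Worked example (not in the original file): if an ICMP "fragmentation
 * needed" quotes a next-hop MTU of 1400 while icsk_pmtu_cookie is 1500,
 * tcp_sync_mss() shrinks the MSS accordingly:
 *
 *	mss = mtu - sizeof(struct iphdr) - sizeof(struct tcphdr)
 *	    = 1400 - 20 - 20 = 1360		(without IP/TCP options)
 *
 * and tcp_simple_retransmit() resends the dropped, now-oversized segments
 * without waiting for the retransmit timer.
 */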
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
					 icsk->icsk_backoff;

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else if (sock_owned_by_user(sk)) {
			/* RTO revert clocked out retransmission,
			 * but socket is locked. Will defer. */
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  HZ/20, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, e.g., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in every dark corner sending random
	 * errors ordered by their masters, even these two messages finally
	 * lose their original sense (even Linux sends invalid PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(len, inet->inet_saddr,
					  inet->inet_daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(len, inet->inet_saddr,
					 inet->inet_daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}
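/*
 * Background note (not in the original file): with CHECKSUM_PARTIAL the
 * stack only seeds th->check with the inverted pseudo-header sum over
 * (saddr, daddr, IPPROTO_TCP, length); the NIC, or the software fallback,
 * completes the sum over the TCP header and payload starting at csum_start
 * and stores the result at csum_start + csum_offset.
 */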
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *	So we build the reply based only on parameters
 *	arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	net = dev_net(skb_dst(skb)->dev);
	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
}
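/*
 * Worked example (not in the original file): for an offending segment with
 * seq = 1000, 40 bytes of payload, no SYN/FIN and no ACK set, the reply
 * built above carries
 *
 *	rep.th.rst     = 1
 *	rep.th.ack     = 1
 *	rep.th.ack_seq = htonl(1000 + 0 + 0 + 40) = htonl(1040)
 *
 * whereas if the offending segment carried an ACK, the RST simply reuses
 * its ack_seq as the sequence number, per RFC 793 reset generation rules.
 */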
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is certainly ugly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;

	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff * skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp);

	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v4_check(skb->len,
					 ireq->loc_addr,
					 ireq->rmt_addr,
					 csum_partial(th, skb->len,
						      skb->csum));

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}

static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v4_send_synack(sk, NULL, req, rvp);
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
#ifdef CONFIG_SYN_COOKIES
static void syn_flood_warning(struct sk_buff *skb)
{
	static unsigned long warntime;

	if (time_after(jiffies, (warntime + HZ * 60))) {
		warntime = jiffies;
		printk(KERN_INFO
		       "possible SYN flooding on port %d. Sending cookies.\n",
		       ntohs(tcp_hdr(skb)->dest));
	}
}
#endif
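/*
 * Usage note (not in the original file): cookie sending is gated by the
 * tcp_syncookies sysctl, toggled from user space with e.g.
 *
 *	# echo 1 > /proc/sys/net/ipv4/tcp_syncookies
 */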
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);
		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
static struct tcp_md5sig_key *
			tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
		return NULL;
	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr)
			return &tp->md5sig_info->keys4[i].base;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
}

EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}
/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
		      u8 *newkey, u8 newkeylen)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp4_md5sig_key *keys;

	key = tcp_v4_md5_do_lookup(sk, addr);
	if (key) {
		/* Pre-existing entry - just update that one. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		struct tcp_md5sig_info *md5sig;

		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		md5sig = tp->md5sig_info;

		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
				tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (md5sig->entries4)
				memcpy(keys, md5sig->keys4,
				       sizeof(*keys) * md5sig->entries4);

			/* Free old key list, and reference new one */
			kfree(md5sig->keys4);
			md5sig->keys4 = keys;
			md5sig->alloced4++;
		}
		md5sig->entries4++;
		md5sig->keys4[md5sig->entries4 - 1].addr	= addr;
		md5sig->keys4[md5sig->entries4 - 1].base.key	= newkey;
		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
	}
	return 0;
}

EXPORT_SYMBOL(tcp_v4_md5_do_add);

static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
				 newkey, newkeylen);
}
int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr) {
			/* Free the key */
			kfree(tp->md5sig_info->keys4[i].base.key);
			tp->md5sig_info->entries4--;

			if (tp->md5sig_info->entries4 == 0) {
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
			} else if (tp->md5sig_info->entries4 != i) {
				/* Need to do some manipulation */
				memmove(&tp->md5sig_info->keys4[i],
					&tp->md5sig_info->keys4[i+1],
					(tp->md5sig_info->entries4 - i) *
					 sizeof(struct tcp4_md5sig_key));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

EXPORT_SYMBOL(tcp_v4_md5_do_del);

static void tcp_v4_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Free each key, then the set of keys,
	 * the crypto element, and then decrement our
	 * hold on the last resort crypto.
	 */
	if (tp->md5sig_info->entries4) {
		int i;
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}
	if (tp->md5sig_info->keys4) {
		kfree(tp->md5sig_info->keys4);
		tp->md5sig_info->keys4 = NULL;
		tp->md5sig_info->alloced4  = 0;
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(*p), sk->sk_allocation);
		if (!p)
			return -EINVAL;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}
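/*
 * Illustrative sketch (user-space, not kernel code): installing an RFC 2385
 * key for a peer via the TCP_MD5SIG socket option, which lands in
 * tcp_v4_parse_md5_keys() above. Peer address and key are examples only.
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	peer->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &peer->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5)) < 0)
 *		perror("setsockopt(TCP_MD5SIG)");
 */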
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			struct sock *sk, struct request_sock *req,
			struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
			       &iph->saddr, ntohs(th->source),
			       &iph->daddr, ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations; they conserve resources, and the peer is
	 * evidently a real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in the syn queue, drop the request. It is better
	 * than clogging the syn queue with openreqs with exponentially
	 * increasing timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
		want_cookie = 0;	/* not our kind of cookie */
#endif
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
		syn_flood_warning(skb);
		req->cookie_ts = tmp_opt.tstamp_ok;
#endif
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save the last timestamp seen
		 * from the destination in the peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting a new connection request.
		 *
		 * If "isn" is not zero, this request hit an alive
		 * timewait bucket, so all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this approach. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations
			 * proven to be alive.
			 * It means that we continue to communicate
			 * with destinations already remembered at
			 * the moment of the synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (tcp_v4_send_synack(sk, dst, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
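/*
 * Background note (not in the original file): two queues govern the path
 * above. The SYN (request) queue holds embryonic connections, sized by
 * net.ipv4.tcp_max_syn_backlog; the accept queue holds established
 * connections not yet accept()ed, sized by the listen() backlog argument:
 *
 *	listen(fd, 128);	// bounds sk_acceptq_is_full() above
 *
 * When the request queue fills and syncookies are enabled, the ISN itself
 * (cookie_v4_init_sequence) encodes the connection state instead of an
 * openreq entry.
 */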
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(newsk, dst);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	newinet->opt	      = ireq->opt;
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newinet->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
					  newkey, key->keylen);
		newsk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}
#endif

	__inet_hash_nolisten(newsk, NULL);
	__inet_inherit_port(sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->flags	 = iph->tos;
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

	if (iph->ttl < inet_sk(sk)->min_ttl)
		goto discard_and_relse;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
/* VJ's idea. Save the last timestamp seen from this destination
 * and hold it at least for the normal timewait interval, to use for
 * duplicate segment detection in subsequent connections, before they
 * enter synchronized state.
 */

int tcp_v4_remember_stamp(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if (!rt || rt->rt_dst != inet->inet_daddr) {
		peer = inet_getpeer(inet->inet_daddr, 1);
		release_it = 1;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);

	if (peer) {
		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			peer->tcp_ts	   = tcptw->tw_ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}

	return 0;
}
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup		= tcp_v4_md5_lookup,
	.calc_md5_hash		= tcp_v4_md5_hash_skb,
	.md5_add		= tcp_v4_md5_add_func,
	.md5_parse		= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_af_ops = &ipv4_specific;
	icsk->icsk_sync_mss = tcp_sync_mss;
#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv4_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	percpu_counter_inc(&tcp_sockets_allocated);

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_v4_clear_md5_list(sk);
		kfree(tp->md5sig_info);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean up the prequeue; it really must be empty. */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}

	percpu_counter_dec(&tcp_sockets_allocated);
}

EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[0];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	void *rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline int empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	for (st->bucket = 0; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for next non empty bucket */
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	void *rc = NULL;
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
static int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family = afinfo->family;
	return 0;
}

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_fops.open		= tcp_seq_open;
	afinfo->seq_fops.read		= seq_read;
	afinfo->seq_fops.llseek		= seq_lseek;
	afinfo->seq_fops.release	= seq_release_net;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     &afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
static void get_openreq4(struct sock *sk, struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * Because we don't lock the socket, we might find a transient
		 * negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
		len);
}
static void get_timewait4_sock(struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}
#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
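/*
 * Usage note (not in the original file): this seq_file backs /proc/net/tcp,
 * so the dump above is what e.g.
 *
 *	$ cat /proc/net/tcp
 *
 * prints: one line per listening, established, open-request or TIME-WAIT
 * socket, with addresses and ports in hex and fields in the header order
 * emitted by tcp4_seq_show().
 */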
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct iphdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
EXPORT_SYMBOL(tcp4_gro_receive);

int tcp4_gro_complete(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
EXPORT_SYMBOL(tcp4_gro_complete);
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
static int __net_init tcp_sk_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
				    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcp_sk_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}

EXPORT_SYMBOL(ipv4_specific);
EXPORT_SYMBOL(tcp_hashinfo);
EXPORT_SYMBOL(tcp_prot);
EXPORT_SYMBOL(tcp_v4_conn_request);
EXPORT_SYMBOL(tcp_v4_connect);
EXPORT_SYMBOL(tcp_v4_do_rcv);
EXPORT_SYMBOL(tcp_v4_remember_stamp);
EXPORT_SYMBOL(tcp_v4_send_check);
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(tcp_proc_register);
EXPORT_SYMBOL(tcp_proc_unregister);
#endif
EXPORT_SYMBOL(sysctl_tcp_low_latency);