net/ipv4/tcp_ipv4.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              Implementation of the Transmission Control Protocol(TCP).
7  *
8  *              IPv4 specific functions
9  *
10  *
11  *              code split from:
12  *              linux/ipv4/tcp.c
13  *              linux/ipv4/tcp_input.c
14  *              linux/ipv4/tcp_output.c
15  *
16  *              See tcp.c for author information
17  *
18  *      This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23
24 /*
25  * Changes:
26  *              David S. Miller :       New socket lookup architecture.
27  *                                      This code is dedicated to John Dyson.
28  *              David S. Miller :       Change semantics of established hash,
29  *                                      half is devoted to TIME_WAIT sockets
30  *                                      and the rest go in the other half.
31  *              Andi Kleen :            Add support for syncookies and fixed
32  *                                      some bugs: ip options weren't passed to
33  *                                      the TCP layer, missed a check for an
34  *                                      ACK bit.
35  *              Andi Kleen :            Implemented fast path mtu discovery.
36  *                                      Fixed many serious bugs in the
37  *                                      request_sock handling and moved
38  *                                      most of it into the af independent code.
39  *                                      Added tail drop and some other bugfixes.
40  *                                      Added new listen semantics.
41  *              Mike McLagan    :       Routing by source
42  *      Juan Jose Ciarlante:            ip_dynaddr bits
43  *              Andi Kleen:             various fixes.
44  *      Vitaly E. Lavrov        :       Transparent proxy revived after year
45  *                                      coma.
46  *      Andi Kleen              :       Fix new listen.
47  *      Andi Kleen              :       Fix accept error reporting.
48  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
49  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
50  *                                      a single port at the same time.
51  */
52
53 #define pr_fmt(fmt) "TCP: " fmt
54
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/netdma.h>
76 #include <net/secure_seq.h>
77 #include <net/tcp_memcontrol.h>
78
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
87
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91
92
93 #ifdef CONFIG_TCP_MD5SIG
94 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
95                                __be32 daddr, __be32 saddr, const struct tcphdr *th);
96 #endif
97
98 struct inet_hashinfo tcp_hashinfo;
99 EXPORT_SYMBOL(tcp_hashinfo);
100
101 static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
102 {
103         return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
104                                           ip_hdr(skb)->saddr,
105                                           tcp_hdr(skb)->dest,
106                                           tcp_hdr(skb)->source);
107 }
108
109 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
110 {
111         const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
112         struct tcp_sock *tp = tcp_sk(sk);
113
114         /* With PAWS, it is safe from the viewpoint
115            of data integrity. Even without PAWS it is safe provided sequence
116            spaces do not overlap, i.e. at data rates <= 80 Mbit/sec.
117
118            Actually, the idea is close to VJ's, except that the timestamp cache
119            is held not per host but per port pair, and the TW bucket is used as
120            the state holder.
121
122            If the TW bucket has already been destroyed we fall back to VJ's
123            scheme and use the initial timestamp retrieved from the peer table.
124          */
125         if (tcptw->tw_ts_recent_stamp &&
126             (twp == NULL || (sysctl_tcp_tw_reuse &&
127                              get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
128                 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
129                 if (tp->write_seq == 0)
130                         tp->write_seq = 1;
131                 tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
132                 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
133                 sock_hold(sktw);
134                 return 1;
135         }
136
137         return 0;
138 }
139 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
140
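/* Illustrative userspace sketch (not kernel code): the reuse path in
 * tcp_twsk_unique() above is gated by the net.ipv4.tcp_tw_reuse sysctl
 * (sysctl_tcp_tw_reuse).  A minimal way to flip it from C, assuming the
 * usual procfs mount point:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/sys/net/ipv4/tcp_tw_reuse", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("1\n", f);	// allow reusing TIME-WAIT ports for new outgoing connections
 *		return fclose(f) ? 1 : 0;
 *	}
 */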
141 /* This will initiate an outgoing connection. */
142 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
143 {
144         struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
145         struct inet_sock *inet = inet_sk(sk);
146         struct tcp_sock *tp = tcp_sk(sk);
147         __be16 orig_sport, orig_dport;
148         __be32 daddr, nexthop;
149         struct flowi4 *fl4;
150         struct rtable *rt;
151         int err;
152         struct ip_options_rcu *inet_opt;
153
154         if (addr_len < sizeof(struct sockaddr_in))
155                 return -EINVAL;
156
157         if (usin->sin_family != AF_INET)
158                 return -EAFNOSUPPORT;
159
160         nexthop = daddr = usin->sin_addr.s_addr;
161         inet_opt = rcu_dereference_protected(inet->inet_opt,
162                                              sock_owned_by_user(sk));
163         if (inet_opt && inet_opt->opt.srr) {
164                 if (!daddr)
165                         return -EINVAL;
166                 nexthop = inet_opt->opt.faddr;
167         }
168
169         orig_sport = inet->inet_sport;
170         orig_dport = usin->sin_port;
171         fl4 = &inet->cork.fl.u.ip4;
172         rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
173                               RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
174                               IPPROTO_TCP,
175                               orig_sport, orig_dport, sk, true);
176         if (IS_ERR(rt)) {
177                 err = PTR_ERR(rt);
178                 if (err == -ENETUNREACH)
179                         IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
180                 return err;
181         }
182
183         if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
184                 ip_rt_put(rt);
185                 return -ENETUNREACH;
186         }
187
188         if (!inet_opt || !inet_opt->opt.srr)
189                 daddr = fl4->daddr;
190
191         if (!inet->inet_saddr)
192                 inet->inet_saddr = fl4->saddr;
193         inet->inet_rcv_saddr = inet->inet_saddr;
194
195         if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
196                 /* Reset inherited state */
197                 tp->rx_opt.ts_recent       = 0;
198                 tp->rx_opt.ts_recent_stamp = 0;
199                 if (likely(!tp->repair))
200                         tp->write_seq      = 0;
201         }
202
203         if (tcp_death_row.sysctl_tw_recycle &&
204             !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
205                 tcp_fetch_timewait_stamp(sk, &rt->dst);
206
207         inet->inet_dport = usin->sin_port;
208         inet->inet_daddr = daddr;
209
210         inet_csk(sk)->icsk_ext_hdr_len = 0;
211         if (inet_opt)
212                 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
213
214         tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
215
216         /* Socket identity is still unknown (sport may be zero).
217          * However, we set the state to SYN-SENT and, without releasing the
218          * socket lock, select a source port, enter ourselves into the hash
219          * tables and complete initialization after this.
220          */
221         tcp_set_state(sk, TCP_SYN_SENT);
222         err = inet_hash_connect(&tcp_death_row, sk);
223         if (err)
224                 goto failure;
225
226         rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
227                                inet->inet_sport, inet->inet_dport, sk);
228         if (IS_ERR(rt)) {
229                 err = PTR_ERR(rt);
230                 rt = NULL;
231                 goto failure;
232         }
233         /* OK, now commit destination to socket.  */
234         sk->sk_gso_type = SKB_GSO_TCPV4;
235         sk_setup_caps(sk, &rt->dst);
236
237         if (!tp->write_seq && likely(!tp->repair))
238                 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
239                                                            inet->inet_daddr,
240                                                            inet->inet_sport,
241                                                            usin->sin_port);
242
243         inet->inet_id = tp->write_seq ^ jiffies;
244
245         err = tcp_connect(sk);
246
247         rt = NULL;
248         if (err)
249                 goto failure;
250
251         return 0;
252
253 failure:
254         /*
255          * This unhashes the socket and releases the local port,
256          * if necessary.
257          */
258         tcp_set_state(sk, TCP_CLOSE);
259         ip_rt_put(rt);
260         sk->sk_route_caps = 0;
261         inet->inet_dport = 0;
262         return err;
263 }
264 EXPORT_SYMBOL(tcp_v4_connect);
265
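/* Illustrative userspace sketch (not kernel code): the checks at the top of
 * tcp_v4_connect() mirror what a caller of connect(2) must supply - an
 * AF_INET sockaddr_in at least sizeof(struct sockaddr_in) long.  Address and
 * port below are hypothetical:
 *
 *	#include <arpa/inet.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int tcp_connect_example(void)
 *	{
 *		struct sockaddr_in sin;
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&sin, 0, sizeof(sin));
 *		sin.sin_family = AF_INET;	// anything else fails with EAFNOSUPPORT
 *		sin.sin_port = htons(80);
 *		inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
 *		if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;	// connected socket
 *	}
 */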
266 /*
267  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
268  * It can be called through tcp_release_cb() if the socket was owned by the user
269  * at the time tcp_v4_err() was called to handle the ICMP message.
270  */
271 void tcp_v4_mtu_reduced(struct sock *sk)
272 {
273         struct dst_entry *dst;
274         struct inet_sock *inet = inet_sk(sk);
275         u32 mtu = tcp_sk(sk)->mtu_info;
276
277         dst = inet_csk_update_pmtu(sk, mtu);
278         if (!dst)
279                 return;
280
281         /* Something is about to be wrong... Remember the soft error
282          * in case this connection is not able to recover.
283          */
284         if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
285                 sk->sk_err_soft = EMSGSIZE;
286
287         mtu = dst_mtu(dst);
288
289         if (inet->pmtudisc != IP_PMTUDISC_DONT &&
290             inet_csk(sk)->icsk_pmtu_cookie > mtu) {
291                 tcp_sync_mss(sk, mtu);
292
293                 /* Resend the TCP packet because it's
294                  * clear that the old packet has been
295                  * dropped. This is the new "fast" path mtu
296                  * discovery.
297                  */
298                 tcp_simple_retransmit(sk);
299         } /* else let the usual retransmit timer handle it */
300 }
301 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
302
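/* Illustrative userspace sketch (not kernel code): the inet->pmtudisc check
 * above reflects the per-socket IP_MTU_DISCOVER setting.  An application can
 * force or disable path MTU discovery, and read back the current path MTU,
 * roughly like this:
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	int set_pmtu_discovery(int fd, int on)
 *	{
 *		int val = on ? IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
 *
 *		return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));
 *	}
 *
 *	int get_path_mtu(int fd)
 *	{
 *		int mtu = 0;
 *		socklen_t len = sizeof(mtu);
 *
 *		if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) < 0)
 *			return -1;
 *		return mtu;	// only meaningful once the socket is connected
 *	}
 */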
303 static void do_redirect(struct sk_buff *skb, struct sock *sk)
304 {
305         struct dst_entry *dst = __sk_dst_check(sk, 0);
306
307         if (dst)
308                 dst->ops->redirect(dst, sk, skb);
309 }
310
311 /*
312  * This routine is called by the ICMP module when it gets some
313  * sort of error condition.  If err < 0 then the socket should
314  * be closed and the error returned to the user.  If err > 0
315  * it's just the icmp type << 8 | icmp code.  After adjustment,
316  * the header points to the first 8 bytes of the tcp header.  We need
317  * to find the appropriate port.
318  *
319  * The locking strategy used here is very "optimistic". When
320  * someone else accesses the socket the ICMP is just dropped
321  * and for some paths there is no check at all.
322  * A more general error queue to queue errors for later handling
323  * is probably better.
324  *
325  */
326
327 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
328 {
329         const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
330         struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
331         struct inet_connection_sock *icsk;
332         struct tcp_sock *tp;
333         struct inet_sock *inet;
334         const int type = icmp_hdr(icmp_skb)->type;
335         const int code = icmp_hdr(icmp_skb)->code;
336         struct sock *sk;
337         struct sk_buff *skb;
338         struct request_sock *req;
339         __u32 seq;
340         __u32 remaining;
341         int err;
342         struct net *net = dev_net(icmp_skb->dev);
343
344         if (icmp_skb->len < (iph->ihl << 2) + 8) {
345                 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
346                 return;
347         }
348
349         sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
350                         iph->saddr, th->source, inet_iif(icmp_skb));
351         if (!sk) {
352                 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
353                 return;
354         }
355         if (sk->sk_state == TCP_TIME_WAIT) {
356                 inet_twsk_put(inet_twsk(sk));
357                 return;
358         }
359
360         bh_lock_sock(sk);
361         /* If too many ICMPs get dropped on busy
362          * servers this needs to be solved differently.
363          * We do take care of the PMTU discovery (RFC1191) special case:
364          * we can receive locally generated ICMP messages while the socket is held.
365          */
366         if (sock_owned_by_user(sk)) {
367                 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
368                         NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
369         }
370         if (sk->sk_state == TCP_CLOSE)
371                 goto out;
372
373         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
374                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
375                 goto out;
376         }
377
378         icsk = inet_csk(sk);
379         tp = tcp_sk(sk);
380         req = tp->fastopen_rsk;
381         seq = ntohl(th->seq);
382         if (sk->sk_state != TCP_LISTEN &&
383             !between(seq, tp->snd_una, tp->snd_nxt) &&
384             (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
385                 /* For a Fast Open socket, allow seq to be snt_isn. */
386                 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
387                 goto out;
388         }
389
390         switch (type) {
391         case ICMP_REDIRECT:
392                 do_redirect(icmp_skb, sk);
393                 goto out;
394         case ICMP_SOURCE_QUENCH:
395                 /* Just silently ignore these. */
396                 goto out;
397         case ICMP_PARAMETERPROB:
398                 err = EPROTO;
399                 break;
400         case ICMP_DEST_UNREACH:
401                 if (code > NR_ICMP_UNREACH)
402                         goto out;
403
404                 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
405                         /* We are not interested in TCP_LISTEN and open_requests
406                          * (SYN-ACKs sent out by Linux are always < 576 bytes so
407                          * they should go through unfragmented).
408                          */
409                         if (sk->sk_state == TCP_LISTEN)
410                                 goto out;
411
412                         tp->mtu_info = info;
413                         if (!sock_owned_by_user(sk)) {
414                                 tcp_v4_mtu_reduced(sk);
415                         } else {
416                                 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
417                                         sock_hold(sk);
418                         }
419                         goto out;
420                 }
421
422                 err = icmp_err_convert[code].errno;
423                 /* check if icmp_skb allows revert of backoff
424                  * (see draft-zimmermann-tcp-lcd) */
425                 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
426                         break;
427                 if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
428                     !icsk->icsk_backoff)
429                         break;
430
431                 /* XXX (TFO) - revisit the following logic for TFO */
432
433                 if (sock_owned_by_user(sk))
434                         break;
435
436                 icsk->icsk_backoff--;
437                 inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
438                         TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
439                 tcp_bound_rto(sk);
440
441                 skb = tcp_write_queue_head(sk);
442                 BUG_ON(!skb);
443
444                 remaining = icsk->icsk_rto - min(icsk->icsk_rto,
445                                 tcp_time_stamp - TCP_SKB_CB(skb)->when);
446
447                 if (remaining) {
448                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
449                                                   remaining, TCP_RTO_MAX);
450                 } else {
451                         /* The RTO revert clocked out the retransmission.
452                          * Will retransmit now. */
453                         tcp_retransmit_timer(sk);
454                 }
455
456                 break;
457         case ICMP_TIME_EXCEEDED:
458                 err = EHOSTUNREACH;
459                 break;
460         default:
461                 goto out;
462         }
463
464         /* XXX (TFO) - if it's a TFO socket and has been accepted, rather
465          * than following the TCP_SYN_RECV case and closing the socket,
466          * we ignore the ICMP error and keep trying like a fully established
467          * socket. Is this the right thing to do?
468          */
469         if (req && req->sk == NULL)
470                 goto out;
471
472         switch (sk->sk_state) {
473                 struct request_sock *req, **prev;
474         case TCP_LISTEN:
475                 if (sock_owned_by_user(sk))
476                         goto out;
477
478                 req = inet_csk_search_req(sk, &prev, th->dest,
479                                           iph->daddr, iph->saddr);
480                 if (!req)
481                         goto out;
482
483                 /* ICMPs are not backlogged, hence we cannot get
484                    an established socket here.
485                  */
486                 WARN_ON(req->sk);
487
488                 if (seq != tcp_rsk(req)->snt_isn) {
489                         NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
490                         goto out;
491                 }
492
493                 /*
494                  * Still in SYN_RECV, just remove it silently.
495                  * There is no good way to pass the error to the newly
496                  * created socket, and POSIX does not want network
497                  * errors returned from accept().
498                  */
499                 inet_csk_reqsk_queue_drop(sk, req, prev);
500                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
501                 goto out;
502
503         case TCP_SYN_SENT:
504         case TCP_SYN_RECV:  /* Normally cannot happen.
505                                It can, e.g., if SYNs crossed,
506                                or with Fast Open.
507                              */
508                 if (!sock_owned_by_user(sk)) {
509                         sk->sk_err = err;
510
511                         sk->sk_error_report(sk);
512
513                         tcp_done(sk);
514                 } else {
515                         sk->sk_err_soft = err;
516                 }
517                 goto out;
518         }
519
520         /* If we've already connected we will keep trying
521          * until we time out, or the user gives up.
522          *
523          * rfc1122 4.2.3.9 allows us to consider as hard errors
524          * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
525          * but it is obsoleted by pmtu discovery).
526          *
527          * Note that in the modern internet, where routing is unreliable
528          * and broken firewalls sit in every dark corner, sending random
529          * errors ordered by their masters, even these two messages finally lose
530          * their original sense (even Linux sends invalid PORT_UNREACHs).
531          *
532          * Now we are in compliance with RFCs.
533          *                                                      --ANK (980905)
534          */
535
536         inet = inet_sk(sk);
537         if (!sock_owned_by_user(sk) && inet->recverr) {
538                 sk->sk_err = err;
539                 sk->sk_error_report(sk);
540         } else  { /* Only an error on timeout */
541                 sk->sk_err_soft = err;
542         }
543
544 out:
545         bh_unlock_sock(sk);
546         sock_put(sk);
547 }
548
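/* Illustrative userspace sketch (not kernel code): the inet->recverr test at
 * the end of tcp_v4_err() corresponds to the IP_RECVERR socket option.  With
 * it enabled, ICMP-induced errors are reported as a hard socket error right
 * away instead of only a soft error; the pending error can then be fetched
 * and cleared with SO_ERROR, e.g. after poll() flags POLLERR:
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	int enable_recverr(int fd)
 *	{
 *		int one = 1;
 *
 *		return setsockopt(fd, IPPROTO_IP, IP_RECVERR, &one, sizeof(one));
 *	}
 *
 *	int fetch_socket_error(int fd)
 *	{
 *		int err = 0;
 *		socklen_t len = sizeof(err);
 *
 *		if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
 *			return -1;
 *		return err;	// e.g. EHOSTUNREACH after an ICMP time-exceeded
 *	}
 */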
549 static void __tcp_v4_send_check(struct sk_buff *skb,
550                                 __be32 saddr, __be32 daddr)
551 {
552         struct tcphdr *th = tcp_hdr(skb);
553
554         if (skb->ip_summed == CHECKSUM_PARTIAL) {
555                 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
556                 skb->csum_start = skb_transport_header(skb) - skb->head;
557                 skb->csum_offset = offsetof(struct tcphdr, check);
558         } else {
559                 th->check = tcp_v4_check(skb->len, saddr, daddr,
560                                          csum_partial(th,
561                                                       th->doff << 2,
562                                                       skb->csum));
563         }
564 }
565
566 /* This routine computes an IPv4 TCP checksum. */
567 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
568 {
569         const struct inet_sock *inet = inet_sk(sk);
570
571         __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
572 }
573 EXPORT_SYMBOL(tcp_v4_send_check);
574
575 int tcp_v4_gso_send_check(struct sk_buff *skb)
576 {
577         const struct iphdr *iph;
578         struct tcphdr *th;
579
580         if (!pskb_may_pull(skb, sizeof(*th)))
581                 return -EINVAL;
582
583         iph = ip_hdr(skb);
584         th = tcp_hdr(skb);
585
586         th->check = 0;
587         skb->ip_summed = CHECKSUM_PARTIAL;
588         __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
589         return 0;
590 }
591
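/* Illustrative sketch (plain C, not kernel code): tcp_v4_check() and the
 * helpers above fold the IPv4 pseudo-header (source address, destination
 * address, zero byte, protocol, TCP length) into the TCP checksum.  Written
 * out by hand, the ones'-complement arithmetic looks roughly like this
 * (hypothetical helper; addresses in host byte order, checksum field of
 * "seg" already zeroed, result still to be stored in network byte order):
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	static uint16_t csum_fold32(uint32_t sum)
 *	{
 *		while (sum >> 16)
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return (uint16_t)~sum;
 *	}
 *
 *	uint16_t tcp4_checksum(uint32_t saddr, uint32_t daddr,
 *			       const uint8_t *seg, size_t len)
 *	{
 *		uint32_t sum = 0;
 *		size_t i;
 *
 *		sum += (saddr >> 16) + (saddr & 0xffff);	// pseudo-header
 *		sum += (daddr >> 16) + (daddr & 0xffff);
 *		sum += 6;					// IPPROTO_TCP
 *		sum += (uint32_t)len;				// TCP header + payload length
 *		for (i = 0; i + 1 < len; i += 2)		// 16-bit words of the segment
 *			sum += ((uint32_t)seg[i] << 8) | seg[i + 1];
 *		if (len & 1)
 *			sum += (uint32_t)seg[len - 1] << 8;	// pad a trailing odd byte
 *		return csum_fold32(sum);
 *	}
 */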
592 /*
593  *      This routine will send an RST to the other tcp.
594  *
595  *      Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
596  *                    for reset.
597  *      Answer: if a packet caused the RST, it is not for a socket
598  *              existing in our system; if it is matched to a socket,
599  *              it is just a duplicate segment or a bug in the other side's TCP.
600  *              So we build the reply based only on the parameters that
601  *              arrived with the segment.
602  *      Exception: precedence violation. We do not implement it in any case.
603  */
604
605 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
606 {
607         const struct tcphdr *th = tcp_hdr(skb);
608         struct {
609                 struct tcphdr th;
610 #ifdef CONFIG_TCP_MD5SIG
611                 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
612 #endif
613         } rep;
614         struct ip_reply_arg arg;
615 #ifdef CONFIG_TCP_MD5SIG
616         struct tcp_md5sig_key *key;
617         const __u8 *hash_location = NULL;
618         unsigned char newhash[16];
619         int genhash;
620         struct sock *sk1 = NULL;
621 #endif
622         struct net *net;
623
624         /* Never send a reset in response to a reset. */
625         if (th->rst)
626                 return;
627
628         if (skb_rtable(skb)->rt_type != RTN_LOCAL)
629                 return;
630
631         /* Swap the send and the receive. */
632         memset(&rep, 0, sizeof(rep));
633         rep.th.dest   = th->source;
634         rep.th.source = th->dest;
635         rep.th.doff   = sizeof(struct tcphdr) / 4;
636         rep.th.rst    = 1;
637
638         if (th->ack) {
639                 rep.th.seq = th->ack_seq;
640         } else {
641                 rep.th.ack = 1;
642                 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
643                                        skb->len - (th->doff << 2));
644         }
645
646         memset(&arg, 0, sizeof(arg));
647         arg.iov[0].iov_base = (unsigned char *)&rep;
648         arg.iov[0].iov_len  = sizeof(rep.th);
649
650 #ifdef CONFIG_TCP_MD5SIG
651         hash_location = tcp_parse_md5sig_option(th);
652         if (!sk && hash_location) {
653                 /*
654                  * active side is lost. Try to find listening socket through
655                  * source port, and then find md5 key through listening socket.
656                  * We do not lose any security here:
657                  * the incoming packet is checked against the md5 hash of the key we find,
658                  * and no RST is generated if the md5 hash doesn't match.
659                  */
660                 sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
661                                              &tcp_hashinfo, ip_hdr(skb)->saddr,
662                                              th->source, ip_hdr(skb)->daddr,
663                                              ntohs(th->source), inet_iif(skb));
664                 /* don't send rst if it can't find key */
665                 if (!sk1)
666                         return;
667                 rcu_read_lock();
668                 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
669                                         &ip_hdr(skb)->saddr, AF_INET);
670                 if (!key)
671                         goto release_sk1;
672
673                 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
674                 if (genhash || memcmp(hash_location, newhash, 16) != 0)
675                         goto release_sk1;
676         } else {
677                 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
678                                              &ip_hdr(skb)->saddr,
679                                              AF_INET) : NULL;
680         }
681
682         if (key) {
683                 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
684                                    (TCPOPT_NOP << 16) |
685                                    (TCPOPT_MD5SIG << 8) |
686                                    TCPOLEN_MD5SIG);
687                 /* Update length and the length the header thinks exists */
688                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
689                 rep.th.doff = arg.iov[0].iov_len / 4;
690
691                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
692                                      key, ip_hdr(skb)->saddr,
693                                      ip_hdr(skb)->daddr, &rep.th);
694         }
695 #endif
696         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
697                                       ip_hdr(skb)->saddr, /* XXX */
698                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
699         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
700         arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
701         /* When the socket is gone, all binding information is lost and
702          * routing might fail in this case. No choice here: if we choose to force
703          * the input interface, we will misroute in case of an asymmetric route.
704          */
705         if (sk)
706                 arg.bound_dev_if = sk->sk_bound_dev_if;
707
708         net = dev_net(skb_dst(skb)->dev);
709         arg.tos = ip_hdr(skb)->tos;
710         ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
711                               ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
712
713         TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
714         TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
715
716 #ifdef CONFIG_TCP_MD5SIG
717 release_sk1:
718         if (sk1) {
719                 rcu_read_unlock();
720                 sock_put(sk1);
721         }
722 #endif
723 }
724
725 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
726    outside socket context, is certainly ugly. What can I do?
727  */
728
729 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
730                             u32 win, u32 tsval, u32 tsecr, int oif,
731                             struct tcp_md5sig_key *key,
732                             int reply_flags, u8 tos)
733 {
734         const struct tcphdr *th = tcp_hdr(skb);
735         struct {
736                 struct tcphdr th;
737                 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
738 #ifdef CONFIG_TCP_MD5SIG
739                            + (TCPOLEN_MD5SIG_ALIGNED >> 2)
740 #endif
741                         ];
742         } rep;
743         struct ip_reply_arg arg;
744         struct net *net = dev_net(skb_dst(skb)->dev);
745
746         memset(&rep.th, 0, sizeof(struct tcphdr));
747         memset(&arg, 0, sizeof(arg));
748
749         arg.iov[0].iov_base = (unsigned char *)&rep;
750         arg.iov[0].iov_len  = sizeof(rep.th);
751         if (tsecr) {
752                 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
753                                    (TCPOPT_TIMESTAMP << 8) |
754                                    TCPOLEN_TIMESTAMP);
755                 rep.opt[1] = htonl(tsval);
756                 rep.opt[2] = htonl(tsecr);
757                 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
758         }
759
760         /* Swap the send and the receive. */
761         rep.th.dest    = th->source;
762         rep.th.source  = th->dest;
763         rep.th.doff    = arg.iov[0].iov_len / 4;
764         rep.th.seq     = htonl(seq);
765         rep.th.ack_seq = htonl(ack);
766         rep.th.ack     = 1;
767         rep.th.window  = htons(win);
768
769 #ifdef CONFIG_TCP_MD5SIG
770         if (key) {
771                 int offset = (tsecr) ? 3 : 0;
772
773                 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
774                                           (TCPOPT_NOP << 16) |
775                                           (TCPOPT_MD5SIG << 8) |
776                                           TCPOLEN_MD5SIG);
777                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
778                 rep.th.doff = arg.iov[0].iov_len/4;
779
780                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
781                                     key, ip_hdr(skb)->saddr,
782                                     ip_hdr(skb)->daddr, &rep.th);
783         }
784 #endif
785         arg.flags = reply_flags;
786         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
787                                       ip_hdr(skb)->saddr, /* XXX */
788                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
789         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
790         if (oif)
791                 arg.bound_dev_if = oif;
792         arg.tos = tos;
793         ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
794                               ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
795
796         TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
797 }
798
799 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
800 {
801         struct inet_timewait_sock *tw = inet_twsk(sk);
802         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
803
804         tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
805                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
806                         tcp_time_stamp + tcptw->tw_ts_offset,
807                         tcptw->tw_ts_recent,
808                         tw->tw_bound_dev_if,
809                         tcp_twsk_md5_key(tcptw),
810                         tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
811                         tw->tw_tos
812                         );
813
814         inet_twsk_put(tw);
815 }
816
817 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
818                                   struct request_sock *req)
819 {
820         /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
821          * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
822          */
823         tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
824                         tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
825                         tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
826                         tcp_time_stamp,
827                         req->ts_recent,
828                         0,
829                         tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
830                                           AF_INET),
831                         inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
832                         ip_hdr(skb)->tos);
833 }
834
835 /*
836  *      Send a SYN-ACK after having received a SYN.
837  *      This still operates on a request_sock only, not on a big
838  *      socket.
839  */
840 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
841                               struct request_sock *req,
842                               u16 queue_mapping,
843                               bool nocache)
844 {
845         const struct inet_request_sock *ireq = inet_rsk(req);
846         struct flowi4 fl4;
847         int err = -1;
848         struct sk_buff *skb;
849
850         /* First, grab a route. */
851         if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
852                 return -1;
853
854         skb = tcp_make_synack(sk, dst, req, NULL);
855
856         if (skb) {
857                 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
858
859                 skb_set_queue_mapping(skb, queue_mapping);
860                 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
861                                             ireq->rmt_addr,
862                                             ireq->opt);
863                 err = net_xmit_eval(err);
864                 if (!tcp_rsk(req)->snt_synack && !err)
865                         tcp_rsk(req)->snt_synack = tcp_time_stamp;
866         }
867
868         return err;
869 }
870
871 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
872 {
873         int res = tcp_v4_send_synack(sk, NULL, req, 0, false);
874
875         if (!res)
876                 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
877         return res;
878 }
879
880 /*
881  *      IPv4 request_sock destructor.
882  */
883 static void tcp_v4_reqsk_destructor(struct request_sock *req)
884 {
885         kfree(inet_rsk(req)->opt);
886 }
887
888 /*
889  * Return true if a syncookie should be sent
890  */
891 bool tcp_syn_flood_action(struct sock *sk,
892                          const struct sk_buff *skb,
893                          const char *proto)
894 {
895         const char *msg = "Dropping request";
896         bool want_cookie = false;
897         struct listen_sock *lopt;
898
899
900
901 #ifdef CONFIG_SYN_COOKIES
902         if (sysctl_tcp_syncookies) {
903                 msg = "Sending cookies";
904                 want_cookie = true;
905                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
906         } else
907 #endif
908                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
909
910         lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
911         if (!lopt->synflood_warned) {
912                 lopt->synflood_warned = 1;
913                 pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
914                         proto, ntohs(tcp_hdr(skb)->dest), msg);
915         }
916         return want_cookie;
917 }
918 EXPORT_SYMBOL(tcp_syn_flood_action);
919
920 /*
921  * Save and compile IPv4 options into the request_sock if needed.
922  */
923 static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
924 {
925         const struct ip_options *opt = &(IPCB(skb)->opt);
926         struct ip_options_rcu *dopt = NULL;
927
928         if (opt && opt->optlen) {
929                 int opt_size = sizeof(*dopt) + opt->optlen;
930
931                 dopt = kmalloc(opt_size, GFP_ATOMIC);
932                 if (dopt) {
933                         if (ip_options_echo(&dopt->opt, skb)) {
934                                 kfree(dopt);
935                                 dopt = NULL;
936                         }
937                 }
938         }
939         return dopt;
940 }
941
942 #ifdef CONFIG_TCP_MD5SIG
943 /*
944  * RFC2385 MD5 checksumming requires a mapping of
945  * IP address->MD5 Key.
946  * We need to maintain these in the sk structure.
947  */
948
949 /* Find the Key structure for an address.  */
950 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
951                                          const union tcp_md5_addr *addr,
952                                          int family)
953 {
954         struct tcp_sock *tp = tcp_sk(sk);
955         struct tcp_md5sig_key *key;
956         unsigned int size = sizeof(struct in_addr);
957         struct tcp_md5sig_info *md5sig;
958
959         /* caller either holds rcu_read_lock() or socket lock */
960         md5sig = rcu_dereference_check(tp->md5sig_info,
961                                        sock_owned_by_user(sk) ||
962                                        lockdep_is_held(&sk->sk_lock.slock));
963         if (!md5sig)
964                 return NULL;
965 #if IS_ENABLED(CONFIG_IPV6)
966         if (family == AF_INET6)
967                 size = sizeof(struct in6_addr);
968 #endif
969         hlist_for_each_entry_rcu(key, &md5sig->head, node) {
970                 if (key->family != family)
971                         continue;
972                 if (!memcmp(&key->addr, addr, size))
973                         return key;
974         }
975         return NULL;
976 }
977 EXPORT_SYMBOL(tcp_md5_do_lookup);
978
979 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
980                                          struct sock *addr_sk)
981 {
982         union tcp_md5_addr *addr;
983
984         addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
985         return tcp_md5_do_lookup(sk, addr, AF_INET);
986 }
987 EXPORT_SYMBOL(tcp_v4_md5_lookup);
988
989 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
990                                                       struct request_sock *req)
991 {
992         union tcp_md5_addr *addr;
993
994         addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
995         return tcp_md5_do_lookup(sk, addr, AF_INET);
996 }
997
998 /* This can be called on a newly created socket, from other files */
999 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1000                    int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
1001 {
1002         /* Add Key to the list */
1003         struct tcp_md5sig_key *key;
1004         struct tcp_sock *tp = tcp_sk(sk);
1005         struct tcp_md5sig_info *md5sig;
1006
1007         key = tcp_md5_do_lookup(sk, addr, family);
1008         if (key) {
1009                 /* Pre-existing entry - just update that one. */
1010                 memcpy(key->key, newkey, newkeylen);
1011                 key->keylen = newkeylen;
1012                 return 0;
1013         }
1014
1015         md5sig = rcu_dereference_protected(tp->md5sig_info,
1016                                            sock_owned_by_user(sk));
1017         if (!md5sig) {
1018                 md5sig = kmalloc(sizeof(*md5sig), gfp);
1019                 if (!md5sig)
1020                         return -ENOMEM;
1021
1022                 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1023                 INIT_HLIST_HEAD(&md5sig->head);
1024                 rcu_assign_pointer(tp->md5sig_info, md5sig);
1025         }
1026
1027         key = sock_kmalloc(sk, sizeof(*key), gfp);
1028         if (!key)
1029                 return -ENOMEM;
1030         if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
1031                 sock_kfree_s(sk, key, sizeof(*key));
1032                 return -ENOMEM;
1033         }
1034
1035         memcpy(key->key, newkey, newkeylen);
1036         key->keylen = newkeylen;
1037         key->family = family;
1038         memcpy(&key->addr, addr,
1039                (family == AF_INET6) ? sizeof(struct in6_addr) :
1040                                       sizeof(struct in_addr));
1041         hlist_add_head_rcu(&key->node, &md5sig->head);
1042         return 0;
1043 }
1044 EXPORT_SYMBOL(tcp_md5_do_add);
1045
1046 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1047 {
1048         struct tcp_sock *tp = tcp_sk(sk);
1049         struct tcp_md5sig_key *key;
1050         struct tcp_md5sig_info *md5sig;
1051
1052         key = tcp_md5_do_lookup(sk, addr, family);
1053         if (!key)
1054                 return -ENOENT;
1055         hlist_del_rcu(&key->node);
1056         atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1057         kfree_rcu(key, rcu);
1058         md5sig = rcu_dereference_protected(tp->md5sig_info,
1059                                            sock_owned_by_user(sk));
1060         if (hlist_empty(&md5sig->head))
1061                 tcp_free_md5sig_pool();
1062         return 0;
1063 }
1064 EXPORT_SYMBOL(tcp_md5_do_del);
1065
1066 static void tcp_clear_md5_list(struct sock *sk)
1067 {
1068         struct tcp_sock *tp = tcp_sk(sk);
1069         struct tcp_md5sig_key *key;
1070         struct hlist_node *n;
1071         struct tcp_md5sig_info *md5sig;
1072
1073         md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1074
1075         if (!hlist_empty(&md5sig->head))
1076                 tcp_free_md5sig_pool();
1077         hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1078                 hlist_del_rcu(&key->node);
1079                 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1080                 kfree_rcu(key, rcu);
1081         }
1082 }
1083
1084 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1085                                  int optlen)
1086 {
1087         struct tcp_md5sig cmd;
1088         struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1089
1090         if (optlen < sizeof(cmd))
1091                 return -EINVAL;
1092
1093         if (copy_from_user(&cmd, optval, sizeof(cmd)))
1094                 return -EFAULT;
1095
1096         if (sin->sin_family != AF_INET)
1097                 return -EINVAL;
1098
1099         if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1100                 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1101                                       AF_INET);
1102
1103         if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1104                 return -EINVAL;
1105
1106         return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1107                               AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1108                               GFP_KERNEL);
1109 }
1110
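/* Illustrative userspace sketch (not kernel code): tcp_v4_parse_md5_keys()
 * above is the receiving end of the TCP_MD5SIG socket option (RFC 2385).
 * A per-peer key can be installed on a socket roughly like this (peer
 * address and key are hypothetical):
 *
 *	#include <arpa/inet.h>
 *	#include <linux/tcp.h>		// struct tcp_md5sig, TCP_MD5SIG
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	int set_tcp_md5_key(int fd, const char *peer_ip,
 *			    const void *key, int keylen)
 *	{
 *		struct tcp_md5sig md5;
 *		struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *		if (keylen > TCP_MD5SIG_MAXKEYLEN)
 *			return -1;
 *		memset(&md5, 0, sizeof(md5));
 *		sin->sin_family = AF_INET;	// tcp_v4_parse_md5_keys() insists on AF_INET
 *		inet_pton(AF_INET, peer_ip, &sin->sin_addr);
 *		md5.tcpm_keylen = keylen;
 *		memcpy(md5.tcpm_key, key, keylen);
 *		// a zero keylen deletes any existing key for this peer
 *		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *	}
 */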
1111 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1112                                         __be32 daddr, __be32 saddr, int nbytes)
1113 {
1114         struct tcp4_pseudohdr *bp;
1115         struct scatterlist sg;
1116
1117         bp = &hp->md5_blk.ip4;
1118
1119         /*
1120          * 1. the TCP pseudo-header (in the order: source IP address,
1121          * destination IP address, zero-padded protocol number, and
1122          * segment length)
1123          */
1124         bp->saddr = saddr;
1125         bp->daddr = daddr;
1126         bp->pad = 0;
1127         bp->protocol = IPPROTO_TCP;
1128         bp->len = cpu_to_be16(nbytes);
1129
1130         sg_init_one(&sg, bp, sizeof(*bp));
1131         return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1132 }
1133
1134 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1135                                __be32 daddr, __be32 saddr, const struct tcphdr *th)
1136 {
1137         struct tcp_md5sig_pool *hp;
1138         struct hash_desc *desc;
1139
1140         hp = tcp_get_md5sig_pool();
1141         if (!hp)
1142                 goto clear_hash_noput;
1143         desc = &hp->md5_desc;
1144
1145         if (crypto_hash_init(desc))
1146                 goto clear_hash;
1147         if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1148                 goto clear_hash;
1149         if (tcp_md5_hash_header(hp, th))
1150                 goto clear_hash;
1151         if (tcp_md5_hash_key(hp, key))
1152                 goto clear_hash;
1153         if (crypto_hash_final(desc, md5_hash))
1154                 goto clear_hash;
1155
1156         tcp_put_md5sig_pool();
1157         return 0;
1158
1159 clear_hash:
1160         tcp_put_md5sig_pool();
1161 clear_hash_noput:
1162         memset(md5_hash, 0, 16);
1163         return 1;
1164 }
1165
1166 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1167                         const struct sock *sk, const struct request_sock *req,
1168                         const struct sk_buff *skb)
1169 {
1170         struct tcp_md5sig_pool *hp;
1171         struct hash_desc *desc;
1172         const struct tcphdr *th = tcp_hdr(skb);
1173         __be32 saddr, daddr;
1174
1175         if (sk) {
1176                 saddr = inet_sk(sk)->inet_saddr;
1177                 daddr = inet_sk(sk)->inet_daddr;
1178         } else if (req) {
1179                 saddr = inet_rsk(req)->loc_addr;
1180                 daddr = inet_rsk(req)->rmt_addr;
1181         } else {
1182                 const struct iphdr *iph = ip_hdr(skb);
1183                 saddr = iph->saddr;
1184                 daddr = iph->daddr;
1185         }
1186
1187         hp = tcp_get_md5sig_pool();
1188         if (!hp)
1189                 goto clear_hash_noput;
1190         desc = &hp->md5_desc;
1191
1192         if (crypto_hash_init(desc))
1193                 goto clear_hash;
1194
1195         if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1196                 goto clear_hash;
1197         if (tcp_md5_hash_header(hp, th))
1198                 goto clear_hash;
1199         if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1200                 goto clear_hash;
1201         if (tcp_md5_hash_key(hp, key))
1202                 goto clear_hash;
1203         if (crypto_hash_final(desc, md5_hash))
1204                 goto clear_hash;
1205
1206         tcp_put_md5sig_pool();
1207         return 0;
1208
1209 clear_hash:
1210         tcp_put_md5sig_pool();
1211 clear_hash_noput:
1212         memset(md5_hash, 0, 16);
1213         return 1;
1214 }
1215 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1216
1217 static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1218 {
1219         /*
1220          * This gets called for each TCP segment that arrives
1221          * so we want to be efficient.
1222          * We have 3 drop cases:
1223          * o No MD5 hash and one expected.
1224          * o MD5 hash and we're not expecting one.
1225          * o MD5 hash and it's wrong.
1226          */
1227         const __u8 *hash_location = NULL;
1228         struct tcp_md5sig_key *hash_expected;
1229         const struct iphdr *iph = ip_hdr(skb);
1230         const struct tcphdr *th = tcp_hdr(skb);
1231         int genhash;
1232         unsigned char newhash[16];
1233
1234         hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1235                                           AF_INET);
1236         hash_location = tcp_parse_md5sig_option(th);
1237
1238         /* We've parsed the options - do we have a hash? */
1239         if (!hash_expected && !hash_location)
1240                 return false;
1241
1242         if (hash_expected && !hash_location) {
1243                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1244                 return true;
1245         }
1246
1247         if (!hash_expected && hash_location) {
1248                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1249                 return true;
1250         }
1251
1252         /* Okay, so this is hash_expected and hash_location -
1253          * so we need to calculate the checksum.
1254          */
1255         genhash = tcp_v4_md5_hash_skb(newhash,
1256                                       hash_expected,
1257                                       NULL, NULL, skb);
1258
1259         if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1260                 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1261                                      &iph->saddr, ntohs(th->source),
1262                                      &iph->daddr, ntohs(th->dest),
1263                                      genhash ? " tcp_v4_calc_md5_hash failed"
1264                                      : "");
1265                 return true;
1266         }
1267         return false;
1268 }
1269
1270 #endif
1271
1272 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1273         .family         =       PF_INET,
1274         .obj_size       =       sizeof(struct tcp_request_sock),
1275         .rtx_syn_ack    =       tcp_v4_rtx_synack,
1276         .send_ack       =       tcp_v4_reqsk_send_ack,
1277         .destructor     =       tcp_v4_reqsk_destructor,
1278         .send_reset     =       tcp_v4_send_reset,
1279         .syn_ack_timeout =      tcp_syn_ack_timeout,
1280 };
1281
1282 #ifdef CONFIG_TCP_MD5SIG
1283 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1284         .md5_lookup     =       tcp_v4_reqsk_md5_lookup,
1285         .calc_md5_hash  =       tcp_v4_md5_hash_skb,
1286 };
1287 #endif
1288
1289 static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
1290                                struct request_sock *req,
1291                                struct tcp_fastopen_cookie *foc,
1292                                struct tcp_fastopen_cookie *valid_foc)
1293 {
1294         bool skip_cookie = false;
1295         struct fastopen_queue *fastopenq;
1296
1297         if (likely(!fastopen_cookie_present(foc))) {
1298                 /* See include/net/tcp.h for the meaning of these knobs */
1299                 if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
1300                     ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
1301                     (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
1302                         skip_cookie = true; /* no cookie to validate */
1303                 else
1304                         return false;
1305         }
1306         fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
1307         /* A FO option is present; bump the counter. */
1308         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
1309
1310         /* Make sure the listener has enabled fastopen, and we don't
1311          * exceed the max # of pending TFO requests allowed before trying
1312          * to validate the cookie, in order to avoid burning CPU cycles
1313          * unnecessarily.
1314          *
1315          * XXX (TFO) - The implication of checking the max_qlen before
1316          * processing a cookie request is that clients can't differentiate
1317          * between qlen overflow causing Fast Open to be disabled
1318          * temporarily vs a server not supporting Fast Open at all.
1319          */
1320         if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
1321             fastopenq == NULL || fastopenq->max_qlen == 0)
1322                 return false;
1323
1324         if (fastopenq->qlen >= fastopenq->max_qlen) {
1325                 struct request_sock *req1;
1326                 spin_lock(&fastopenq->lock);
1327                 req1 = fastopenq->rskq_rst_head;
1328                 if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
1329                         spin_unlock(&fastopenq->lock);
1330                         NET_INC_STATS_BH(sock_net(sk),
1331                             LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
1332                         /* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL */
1333                         foc->len = -1;
1334                         return false;
1335                 }
1336                 fastopenq->rskq_rst_head = req1->dl_next;
1337                 fastopenq->qlen--;
1338                 spin_unlock(&fastopenq->lock);
1339                 reqsk_free(req1);
1340         }
1341         if (skip_cookie) {
1342                 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1343                 return true;
1344         }
1345         if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
1346                 if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
1347                         tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1348                         if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
1349                             memcmp(&foc->val[0], &valid_foc->val[0],
1350                             TCP_FASTOPEN_COOKIE_SIZE) != 0)
1351                                 return false;
1352                         valid_foc->len = -1;
1353                 }
1354                 /* Acknowledge the data received from the peer. */
1355                 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1356                 return true;
1357         } else if (foc->len == 0) { /* Client requesting a cookie */
1358                 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1359                 NET_INC_STATS_BH(sock_net(sk),
1360                     LINUX_MIB_TCPFASTOPENCOOKIEREQD);
1361         } else {
1362                 /* Client sent a cookie with the wrong size. Treat it
1363                  * the same as invalid and return a valid one.
1364                  */
1365                 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1366         }
1367         return false;
1368 }
1369
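/* Illustrative userspace sketch (not kernel code): the cookie checks above
 * are enabled per listening socket with the TCP_FASTOPEN option (its value
 * bounds the pending-TFO queue, cf. max_qlen), while a client carries data
 * in the SYN with MSG_FASTOPEN.  Both are also gated by the bits of the
 * net.ipv4.tcp_fastopen sysctl (TFO_SERVER_ENABLE and friends above):
 *
 *	#include <linux/tcp.h>		// TCP_FASTOPEN
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	#ifndef MSG_FASTOPEN
 *	#define MSG_FASTOPEN	0x20000000
 *	#endif
 *
 *	int enable_tfo_server(int listen_fd, int qlen)
 *	{
 *		return setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN,
 *				  &qlen, sizeof(qlen));
 *	}
 *
 *	ssize_t tfo_client_send(int fd, const void *buf, size_t len,
 *				const struct sockaddr_in *dst)
 *	{
 *		// connect() is implicit: data rides in the SYN when a valid
 *		// cookie is cached; otherwise a cookie is requested first
 *		return sendto(fd, buf, len, MSG_FASTOPEN,
 *			      (const struct sockaddr *)dst, sizeof(*dst));
 *	}
 */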
1370 static int tcp_v4_conn_req_fastopen(struct sock *sk,
1371                                     struct sk_buff *skb,
1372                                     struct sk_buff *skb_synack,
1373                                     struct request_sock *req)
1374 {
1375         struct tcp_sock *tp = tcp_sk(sk);
1376         struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1377         const struct inet_request_sock *ireq = inet_rsk(req);
1378         struct sock *child;
1379         int err;
1380
1381         req->num_retrans = 0;
1382         req->num_timeout = 0;
1383         req->sk = NULL;
1384
1385         child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
1386         if (child == NULL) {
1387                 NET_INC_STATS_BH(sock_net(sk),
1388                                  LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1389                 kfree_skb(skb_synack);
1390                 return -1;
1391         }
1392         err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1393                                     ireq->rmt_addr, ireq->opt);
1394         err = net_xmit_eval(err);
1395         if (!err)
1396                 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1397         /* XXX (TFO) - is it ok to ignore error and continue? */
1398
1399         spin_lock(&queue->fastopenq->lock);
1400         queue->fastopenq->qlen++;
1401         spin_unlock(&queue->fastopenq->lock);
1402
1403         /* Initialize the child socket. We have to fix up some values to
1404          * account for the child being a Fast Open socket created only
1405          * from the bits carried in the SYN packet.
1406          */
1407         tp = tcp_sk(child);
1408
1409         tp->fastopen_rsk = req;
1410         /* Take a hold on the listener sk so that if the listener is being
1411          * closed, the child that has been accepted can live on and still
1412          * access listen_lock.
1413          */
1414         sock_hold(sk);
1415         tcp_rsk(req)->listener = sk;
1416
1417         /* RFC1323: The window in SYN & SYN/ACK segments is never
1418          * scaled. So correct it appropriately.
1419          */
1420         tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
1421
1422         /* Activate the retrans timer so that SYNACK can be retransmitted.
1423          * The request socket is not added to the SYN table of the parent
1424          * because it's been added to the accept queue directly.
1425          */
1426         inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
1427             TCP_TIMEOUT_INIT, TCP_RTO_MAX);
1428
1429         /* Add the child socket directly into the accept queue */
1430         inet_csk_reqsk_queue_add(sk, req, child);
1431
1432         /* Now finish processing the fastopen child socket. */
1433         inet_csk(child)->icsk_af_ops->rebuild_header(child);
1434         tcp_init_congestion_control(child);
1435         tcp_mtup_init(child);
1436         tcp_init_buffer_space(child);
1437         tcp_init_metrics(child);
1438
1439         /* Queue the data carried in the SYN packet. We need to first
1440          * bump skb's refcnt because the caller will attempt to free it.
1441          *
1442          * XXX (TFO) - we honor a zero-payload TFO request for now.
1443          * (Any reason not to?)
1444          */
1445         if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
1446                 /* Don't queue the skb if there is no payload in SYN.
1447                  * XXX (TFO) - How about SYN+FIN?
1448                  */
1449                 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1450         } else {
1451                 skb = skb_get(skb);
1452                 skb_dst_drop(skb);
1453                 __skb_pull(skb, tcp_hdr(skb)->doff * 4);
1454                 skb_set_owner_r(skb, child);
1455                 __skb_queue_tail(&child->sk_receive_queue, skb);
1456                 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1457                 tp->syn_data_acked = 1;
1458         }
1459         sk->sk_data_ready(sk, 0);
1460         bh_unlock_sock(child);
1461         sock_put(child);
1462         WARN_ON(req->sk == NULL);
1463         return 0;
1464 }
1465
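/* Process an incoming SYN on a listening socket: apply SYN-flood/syncookie
 * policy, allocate and initialize a request sock, pick an initial sequence
 * number, run the Fast Open checks, and either send the SYNACK and hash the
 * request into the SYN table or hand off to the Fast Open path above.
 */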
1466 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1467 {
1468         struct tcp_options_received tmp_opt;
1469         struct request_sock *req;
1470         struct inet_request_sock *ireq;
1471         struct tcp_sock *tp = tcp_sk(sk);
1472         struct dst_entry *dst = NULL;
1473         __be32 saddr = ip_hdr(skb)->saddr;
1474         __be32 daddr = ip_hdr(skb)->daddr;
1475         __u32 isn = TCP_SKB_CB(skb)->when;
1476         bool want_cookie = false;
1477         struct flowi4 fl4;
1478         struct tcp_fastopen_cookie foc = { .len = -1 };
1479         struct tcp_fastopen_cookie valid_foc = { .len = -1 };
1480         struct sk_buff *skb_synack;
1481         int do_fastopen;
1482
1483         /* Never answer SYNs sent to broadcast or multicast addresses */
1484         if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1485                 goto drop;
1486
1487         /* TW buckets are converted to open requests without
1488          * limitation: they conserve resources and the peer is
1489          * evidently a real one.
1490          */
1491         if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1492                 want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1493                 if (!want_cookie)
1494                         goto drop;
1495         }
1496
1497         /* Accept backlog is full. If we have already queued enough
1498          * warm entries in the SYN queue, drop the request. That is
1499          * better than clogging the SYN queue with open requests whose
1500          * timeouts increase exponentially.
1501          */
1502         if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
1503                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1504                 goto drop;
1505         }
1506
1507         req = inet_reqsk_alloc(&tcp_request_sock_ops);
1508         if (!req)
1509                 goto drop;
1510
1511 #ifdef CONFIG_TCP_MD5SIG
1512         tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1513 #endif
1514
1515         tcp_clear_options(&tmp_opt);
1516         tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1517         tmp_opt.user_mss  = tp->rx_opt.user_mss;
1518         tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
1519
1520         if (want_cookie && !tmp_opt.saw_tstamp)
1521                 tcp_clear_options(&tmp_opt);
1522
1523         tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1524         tcp_openreq_init(req, &tmp_opt, skb);
1525
1526         ireq = inet_rsk(req);
1527         ireq->loc_addr = daddr;
1528         ireq->rmt_addr = saddr;
1529         ireq->no_srccheck = inet_sk(sk)->transparent;
1530         ireq->opt = tcp_v4_save_options(skb);
1531         ireq->ir_mark = inet_request_mark(sk, skb);
1532
1533         if (security_inet_conn_request(sk, skb, req))
1534                 goto drop_and_free;
1535
1536         if (!want_cookie || tmp_opt.tstamp_ok)
1537                 TCP_ECN_create_request(req, skb, sock_net(sk));
1538
1539         if (want_cookie) {
1540                 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1541                 req->cookie_ts = tmp_opt.tstamp_ok;
1542         } else if (!isn) {
1543                 /* VJ's idea. We save the last timestamp seen from
1544                  * the destination in the peer table when entering
1545                  * TIME-WAIT state, and check against it before
1546                  * accepting a new connection request.
1547                  *
1548                  * If "isn" is not zero, this request hit an alive
1549                  * timewait bucket, so all the necessary checks are
1550                  * made in the function processing timewait state.
1551                  */
1552                 if (tmp_opt.saw_tstamp &&
1553                     tcp_death_row.sysctl_tw_recycle &&
1554                     (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1555                     fl4.daddr == saddr) {
1556                         if (!tcp_peer_is_proven(req, dst, true)) {
1557                                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1558                                 goto drop_and_release;
1559                         }
1560                 }
1561                 /* Kill the following clause, if you dislike this way. */
1562                 else if (!sysctl_tcp_syncookies &&
1563                          (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1564                           (sysctl_max_syn_backlog >> 2)) &&
1565                          !tcp_peer_is_proven(req, dst, false)) {
1566                         /* Without syncookies, the last quarter of
1567                          * the backlog is filled with destinations
1568                          * proven to be alive.
1569                          * It means that we keep communicating with
1570                          * destinations already remembered at the
1571                          * moment the SYN flood began.
1572                          */
1573                         LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1574                                        &saddr, ntohs(tcp_hdr(skb)->source));
1575                         goto drop_and_release;
1576                 }
1577
1578                 isn = tcp_v4_init_sequence(skb);
1579         }
1580         tcp_rsk(req)->snt_isn = isn;
1581
1582         if (dst == NULL) {
1583                 dst = inet_csk_route_req(sk, &fl4, req);
1584                 if (dst == NULL)
1585                         goto drop_and_free;
1586         }
1587         do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
1588
1589         /* We don't call tcp_v4_send_synack() directly because we need
1590          * to make sure a child socket can be created successfully before
1591          * sending back synack!
1592          *
1593          * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
1594          * (or better yet, call tcp_send_synack() in the child context
1595          * directly, but will have to fix bunch of other code first)
1596          * directly, but we will have to fix a bunch of other code first)
1597          * latter to remove its dependency on the current implementation
1598          * of tcp_v4_send_synack()->tcp_select_initial_window().
1599          */
1600         skb_synack = tcp_make_synack(sk, dst, req,
1601             fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
1602
1603         if (skb_synack) {
1604                 __tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
1605                 skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
1606         } else
1607                 goto drop_and_free;
1608
1609         if (likely(!do_fastopen)) {
1610                 int err;
1611                 err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1612                      ireq->rmt_addr, ireq->opt);
1613                 err = net_xmit_eval(err);
1614                 if (err || want_cookie)
1615                         goto drop_and_free;
1616
1617                 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1618                 tcp_rsk(req)->listener = NULL;
1619                 /* Add the request_sock to the SYN table */
1620                 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1621                 if (fastopen_cookie_present(&foc) && foc.len != 0)
1622                         NET_INC_STATS_BH(sock_net(sk),
1623                             LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1624         } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req))
1625                 goto drop_and_free;
1626
1627         return 0;
1628
1629 drop_and_release:
1630         dst_release(dst);
1631 drop_and_free:
1632         reqsk_free(req);
1633 drop:
1634         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1635         return 0;
1636 }
1637 EXPORT_SYMBOL(tcp_v4_conn_request);
1638
1639
1640 /*
1641  * The three way handshake has completed - we got a valid synack -
1642  * now create the new socket.
1643  */
1644 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1645                                   struct request_sock *req,
1646                                   struct dst_entry *dst)
1647 {
1648         struct inet_request_sock *ireq;
1649         struct inet_sock *newinet;
1650         struct tcp_sock *newtp;
1651         struct sock *newsk;
1652 #ifdef CONFIG_TCP_MD5SIG
1653         struct tcp_md5sig_key *key;
1654 #endif
1655         struct ip_options_rcu *inet_opt;
1656
1657         if (sk_acceptq_is_full(sk))
1658                 goto exit_overflow;
1659
1660         newsk = tcp_create_openreq_child(sk, req, skb);
1661         if (!newsk)
1662                 goto exit_nonewsk;
1663
1664         newsk->sk_gso_type = SKB_GSO_TCPV4;
1665         inet_sk_rx_dst_set(newsk, skb);
1666
1667         newtp                 = tcp_sk(newsk);
1668         newinet               = inet_sk(newsk);
1669         ireq                  = inet_rsk(req);
1670         newinet->inet_daddr   = ireq->rmt_addr;
1671         newinet->inet_rcv_saddr = ireq->loc_addr;
1672         newinet->inet_saddr           = ireq->loc_addr;
1673         inet_opt              = ireq->opt;
1674         rcu_assign_pointer(newinet->inet_opt, inet_opt);
1675         ireq->opt             = NULL;
1676         newinet->mc_index     = inet_iif(skb);
1677         newinet->mc_ttl       = ip_hdr(skb)->ttl;
1678         newinet->rcv_tos      = ip_hdr(skb)->tos;
1679         inet_csk(newsk)->icsk_ext_hdr_len = 0;
1680         if (inet_opt)
1681                 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1682         newinet->inet_id = newtp->write_seq ^ jiffies;
1683
1684         if (!dst) {
1685                 dst = inet_csk_route_child_sock(sk, newsk, req);
1686                 if (!dst)
1687                         goto put_and_exit;
1688         } else {
1689                 /* syncookie case : see end of cookie_v4_check() */
1690         }
1691         sk_setup_caps(newsk, dst);
1692
1693         tcp_mtup_init(newsk);
1694         tcp_sync_mss(newsk, dst_mtu(dst));
1695         newtp->advmss = dst_metric_advmss(dst);
1696         if (tcp_sk(sk)->rx_opt.user_mss &&
1697             tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1698                 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1699
1700         tcp_initialize_rcv_mss(newsk);
1701         tcp_synack_rtt_meas(newsk, req);
1702         newtp->total_retrans = req->num_retrans;
1703
1704 #ifdef CONFIG_TCP_MD5SIG
1705         /* Copy over the MD5 key from the original socket */
1706         key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1707                                 AF_INET);
1708         if (key != NULL) {
1709                 /*
1710                  * We're using one, so create a matching key
1711                  * on the newsk structure. If we fail to get
1712                  * memory, then we end up not copying the key
1713                  * across. Shucks.
1714                  */
1715                 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1716                                AF_INET, key->key, key->keylen, GFP_ATOMIC);
1717                 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1718         }
1719 #endif
1720
1721         if (__inet_inherit_port(sk, newsk) < 0)
1722                 goto put_and_exit;
1723         __inet_hash_nolisten(newsk, NULL);
1724
1725         return newsk;
1726
1727 exit_overflow:
1728         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1729 exit_nonewsk:
1730         dst_release(dst);
1731 exit:
1732         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1733         return NULL;
1734 put_and_exit:
1735         inet_csk_prepare_forced_close(newsk);
1736         tcp_done(newsk);
1737         goto exit;
1738 }
1739 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1740
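/* For a segment arriving on a listening socket, find the matching pending
 * request sock or an already established child; otherwise fall back to
 * syncookie validation of the ACK when CONFIG_SYN_COOKIES is enabled.
 */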
1741 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1742 {
1743         struct tcphdr *th = tcp_hdr(skb);
1744         const struct iphdr *iph = ip_hdr(skb);
1745         struct sock *nsk;
1746         struct request_sock **prev;
1747         /* Find possible connection requests. */
1748         struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1749                                                        iph->saddr, iph->daddr);
1750         if (req)
1751                 return tcp_check_req(sk, skb, req, prev, false);
1752
1753         nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1754                         th->source, iph->daddr, th->dest, inet_iif(skb));
1755
1756         if (nsk) {
1757                 if (nsk->sk_state != TCP_TIME_WAIT) {
1758                         bh_lock_sock(nsk);
1759                         return nsk;
1760                 }
1761                 inet_twsk_put(inet_twsk(nsk));
1762                 return NULL;
1763         }
1764
1765 #ifdef CONFIG_SYN_COOKIES
1766         if (!th->syn)
1767                 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1768 #endif
1769         return sk;
1770 }
1771
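/* Validate the TCP checksum on receive: trust CHECKSUM_COMPLETE when the
 * hardware sum verifies, fully check short packets right away, and leave
 * longer packets with a seeded pseudo-header sum to be completed later.
 */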
1772 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1773 {
1774         const struct iphdr *iph = ip_hdr(skb);
1775
1776         if (skb->ip_summed == CHECKSUM_COMPLETE) {
1777                 if (!tcp_v4_check(skb->len, iph->saddr,
1778                                   iph->daddr, skb->csum)) {
1779                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1780                         return 0;
1781                 }
1782         }
1783
1784         skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1785                                        skb->len, IPPROTO_TCP, 0);
1786
1787         if (skb->len <= 76) {
1788                 return __skb_checksum_complete(skb);
1789         }
1790         return 0;
1791 }
1792
1793
1794 /* The socket must have its spinlock held when we get
1795  * here.
1796  *
1797  * We have a potential double-lock case here, so even when
1798  * doing backlog processing we use the BH locking scheme.
1799  * This is because we cannot sleep with the original spinlock
1800  * held.
1801  */
1802 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1803 {
1804         struct sock *rsk;
1805 #ifdef CONFIG_TCP_MD5SIG
1806         /*
1807          * We really want to reject the packet as early as possible
1808          * if:
1809          *  o We're expecting an MD5'd packet and there is no MD5 TCP option
1810          *  o There is an MD5 option and we're not expecting one
1811          */
1812         if (tcp_v4_inbound_md5_hash(sk, skb))
1813                 goto discard;
1814 #endif
1815
1816         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1817                 struct dst_entry *dst = sk->sk_rx_dst;
1818
1819                 sock_rps_save_rxhash(sk, skb);
1820                 if (dst) {
1821                         if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1822                             dst->ops->check(dst, 0) == NULL) {
1823                                 dst_release(dst);
1824                                 sk->sk_rx_dst = NULL;
1825                         }
1826                 }
1827                 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1828                         rsk = sk;
1829                         goto reset;
1830                 }
1831                 return 0;
1832         }
1833
1834         if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1835                 goto csum_err;
1836
1837         if (sk->sk_state == TCP_LISTEN) {
1838                 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1839                 if (!nsk)
1840                         goto discard;
1841
1842                 if (nsk != sk) {
1843                         sock_rps_save_rxhash(nsk, skb);
1844                         if (tcp_child_process(sk, nsk, skb)) {
1845                                 rsk = nsk;
1846                                 goto reset;
1847                         }
1848                         return 0;
1849                 }
1850         } else
1851                 sock_rps_save_rxhash(sk, skb);
1852
1853         if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1854                 rsk = sk;
1855                 goto reset;
1856         }
1857         return 0;
1858
1859 reset:
1860         tcp_v4_send_reset(rsk, skb);
1861 discard:
1862         kfree_skb(skb);
1863         /* Be careful here. If this function gets more complicated and
1864          * gcc suffers from register pressure on the x86, sk (in %ebx)
1865          * might be destroyed here. This current version compiles correctly,
1866          * but you have been warned.
1867          */
1868         return 0;
1869
1870 csum_err:
1871         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1872         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1873         goto discard;
1874 }
1875 EXPORT_SYMBOL(tcp_v4_do_rcv);
1876
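/* Early demux: look up an established socket while still in IP receive
 * processing so the cached rx dst can be attached to the skb and the
 * routing lookup can be skipped on the fast path.
 */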
1877 void tcp_v4_early_demux(struct sk_buff *skb)
1878 {
1879         const struct iphdr *iph;
1880         const struct tcphdr *th;
1881         struct sock *sk;
1882
1883         if (skb->pkt_type != PACKET_HOST)
1884                 return;
1885
1886         if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1887                 return;
1888
1889         iph = ip_hdr(skb);
1890         th = tcp_hdr(skb);
1891
1892         if (th->doff < sizeof(struct tcphdr) / 4)
1893                 return;
1894
1895         sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1896                                        iph->saddr, th->source,
1897                                        iph->daddr, ntohs(th->dest),
1898                                        skb->skb_iif);
1899         if (sk) {
1900                 skb->sk = sk;
1901                 skb->destructor = sock_edemux;
1902                 if (sk->sk_state != TCP_TIME_WAIT) {
1903                         struct dst_entry *dst = sk->sk_rx_dst;
1904
1905                         if (dst)
1906                                 dst = dst_check(dst, 0);
1907                         if (dst &&
1908                             inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1909                                 skb_dst_set_noref(skb, dst);
1910                 }
1911         }
1912 }
1913
1914 /* Packet is added to VJ-style prequeue for processing in process
1915  * context, if a reader task is waiting. Apparently, this exciting
1916  * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1917  * failed somewhere. Latency? Burstiness? Well, at least now we will
1918  * see why it failed. 8)8)                               --ANK
1919  *
1920  */
1921 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1922 {
1923         struct tcp_sock *tp = tcp_sk(sk);
1924
1925         if (sysctl_tcp_low_latency || !tp->ucopy.task)
1926                 return false;
1927
1928         if (skb->len <= tcp_hdrlen(skb) &&
1929             skb_queue_len(&tp->ucopy.prequeue) == 0)
1930                 return false;
1931
1932         skb_dst_force(skb);
1933         __skb_queue_tail(&tp->ucopy.prequeue, skb);
1934         tp->ucopy.memory += skb->truesize;
1935         if (tp->ucopy.memory > sk->sk_rcvbuf) {
1936                 struct sk_buff *skb1;
1937
1938                 BUG_ON(sock_owned_by_user(sk));
1939
1940                 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1941                         sk_backlog_rcv(sk, skb1);
1942                         NET_INC_STATS_BH(sock_net(sk),
1943                                          LINUX_MIB_TCPPREQUEUEDROPPED);
1944                 }
1945
1946                 tp->ucopy.memory = 0;
1947         } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1948                 wake_up_interruptible_sync_poll(sk_sleep(sk),
1949                                            POLLIN | POLLRDNORM | POLLRDBAND);
1950                 if (!inet_csk_ack_scheduled(sk))
1951                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1952                                                   (3 * tcp_rto_min(sk)) / 4,
1953                                                   TCP_RTO_MAX);
1954         }
1955         return true;
1956 }
1957 EXPORT_SYMBOL(tcp_prequeue);
1958
1959 /*
1960  *      From tcp_input.c
1961  */
1962
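/* Main IPv4 TCP receive handler: validate the header and checksum, look up
 * the owning socket (handling TIME_WAIT separately), and deliver via the
 * backlog, prequeue or direct receive path depending on socket ownership.
 */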
1963 int tcp_v4_rcv(struct sk_buff *skb)
1964 {
1965         const struct iphdr *iph;
1966         const struct tcphdr *th;
1967         struct sock *sk;
1968         int ret;
1969         struct net *net = dev_net(skb->dev);
1970
1971         if (skb->pkt_type != PACKET_HOST)
1972                 goto discard_it;
1973
1974         /* Count it even if it's bad */
1975         TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1976
1977         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1978                 goto discard_it;
1979
1980         th = tcp_hdr(skb);
1981
1982         if (th->doff < sizeof(struct tcphdr) / 4)
1983                 goto bad_packet;
1984         if (!pskb_may_pull(skb, th->doff * 4))
1985                 goto discard_it;
1986
1987         /* An explanation is required here, I think.
1988          * Packet length and doff are validated by header prediction,
1989          * provided the case of th->doff == 0 is eliminated.
1990          * So, we defer the checks. */
1991         if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1992                 goto csum_error;
1993
1994         th = tcp_hdr(skb);
1995         iph = ip_hdr(skb);
1996         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1997         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1998                                     skb->len - th->doff * 4);
1999         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
2000         TCP_SKB_CB(skb)->when    = 0;
2001         TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
2002         TCP_SKB_CB(skb)->sacked  = 0;
2003
2004         sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
2005         if (!sk)
2006                 goto no_tcp_socket;
2007
2008 process:
2009         if (sk->sk_state == TCP_TIME_WAIT)
2010                 goto do_time_wait;
2011
2012         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
2013                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
2014                 goto discard_and_relse;
2015         }
2016
2017         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
2018                 goto discard_and_relse;
2019         nf_reset(skb);
2020
2021         if (sk_filter(sk, skb))
2022                 goto discard_and_relse;
2023
2024         skb->dev = NULL;
2025
2026         bh_lock_sock_nested(sk);
2027         ret = 0;
2028         if (!sock_owned_by_user(sk)) {
2029 #ifdef CONFIG_NET_DMA
2030                 struct tcp_sock *tp = tcp_sk(sk);
2031                 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
2032                         tp->ucopy.dma_chan = net_dma_find_channel();
2033                 if (tp->ucopy.dma_chan)
2034                         ret = tcp_v4_do_rcv(sk, skb);
2035                 else
2036 #endif
2037                 {
2038                         if (!tcp_prequeue(sk, skb))
2039                                 ret = tcp_v4_do_rcv(sk, skb);
2040                 }
2041         } else if (unlikely(sk_add_backlog(sk, skb,
2042                                            sk->sk_rcvbuf + sk->sk_sndbuf))) {
2043                 bh_unlock_sock(sk);
2044                 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
2045                 goto discard_and_relse;
2046         }
2047         bh_unlock_sock(sk);
2048
2049         sock_put(sk);
2050
2051         return ret;
2052
2053 no_tcp_socket:
2054         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2055                 goto discard_it;
2056
2057         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
2058 csum_error:
2059                 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
2060 bad_packet:
2061                 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
2062         } else {
2063                 tcp_v4_send_reset(NULL, skb);
2064         }
2065
2066 discard_it:
2067         /* Discard frame. */
2068         kfree_skb(skb);
2069         return 0;
2070
2071 discard_and_relse:
2072         sock_put(sk);
2073         goto discard_it;
2074
2075 do_time_wait:
2076         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
2077                 inet_twsk_put(inet_twsk(sk));
2078                 goto discard_it;
2079         }
2080
2081         if (skb->len < (th->doff << 2)) {
2082                 inet_twsk_put(inet_twsk(sk));
2083                 goto bad_packet;
2084         }
2085         if (tcp_checksum_complete(skb)) {
2086                 inet_twsk_put(inet_twsk(sk));
2087                 goto csum_error;
2088         }
2089         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
2090         case TCP_TW_SYN: {
2091                 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
2092                                                         &tcp_hashinfo,
2093                                                         iph->saddr, th->source,
2094                                                         iph->daddr, th->dest,
2095                                                         inet_iif(skb));
2096                 if (sk2) {
2097                         inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
2098                         inet_twsk_put(inet_twsk(sk));
2099                         sk = sk2;
2100                         goto process;
2101                 }
2102                 /* Fall through to ACK */
2103         }
2104         case TCP_TW_ACK:
2105                 tcp_v4_timewait_ack(sk, skb);
2106                 break;
2107         case TCP_TW_RST:
2108                 goto no_tcp_socket;
2109         case TCP_TW_SUCCESS:;
2110         }
2111         goto discard_it;
2112 }
2113
2114 static struct timewait_sock_ops tcp_timewait_sock_ops = {
2115         .twsk_obj_size  = sizeof(struct tcp_timewait_sock),
2116         .twsk_unique    = tcp_twsk_unique,
2117         .twsk_destructor= tcp_twsk_destructor,
2118 };
2119
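/* Cache the input route and incoming interface index on the socket so the
 * established fast path can reuse them without another route lookup.
 */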
2120 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2121 {
2122         struct dst_entry *dst = skb_dst(skb);
2123
2124         dst_hold(dst);
2125         sk->sk_rx_dst = dst;
2126         inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2127 }
2128 EXPORT_SYMBOL(inet_sk_rx_dst_set);
2129
2130 const struct inet_connection_sock_af_ops ipv4_specific = {
2131         .queue_xmit        = ip_queue_xmit,
2132         .send_check        = tcp_v4_send_check,
2133         .rebuild_header    = inet_sk_rebuild_header,
2134         .sk_rx_dst_set     = inet_sk_rx_dst_set,
2135         .conn_request      = tcp_v4_conn_request,
2136         .syn_recv_sock     = tcp_v4_syn_recv_sock,
2137         .net_header_len    = sizeof(struct iphdr),
2138         .setsockopt        = ip_setsockopt,
2139         .getsockopt        = ip_getsockopt,
2140         .addr2sockaddr     = inet_csk_addr2sockaddr,
2141         .sockaddr_len      = sizeof(struct sockaddr_in),
2142         .bind_conflict     = inet_csk_bind_conflict,
2143 #ifdef CONFIG_COMPAT
2144         .compat_setsockopt = compat_ip_setsockopt,
2145         .compat_getsockopt = compat_ip_getsockopt,
2146 #endif
2147         .mtu_reduced       = tcp_v4_mtu_reduced,
2148 };
2149 EXPORT_SYMBOL(ipv4_specific);
2150
2151 #ifdef CONFIG_TCP_MD5SIG
2152 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2153         .md5_lookup             = tcp_v4_md5_lookup,
2154         .calc_md5_hash          = tcp_v4_md5_hash_skb,
2155         .md5_parse              = tcp_v4_parse_md5_keys,
2156 };
2157 #endif
2158
2159 /* NOTE: A lot of things are set to zero explicitly by the call to
2160  *       sk_alloc(), so they need not be done here.
2161  */
2162 static int tcp_v4_init_sock(struct sock *sk)
2163 {
2164         struct inet_connection_sock *icsk = inet_csk(sk);
2165
2166         tcp_init_sock(sk);
2167
2168         icsk->icsk_af_ops = &ipv4_specific;
2169
2170 #ifdef CONFIG_TCP_MD5SIG
2171         tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2172 #endif
2173
2174         return 0;
2175 }
2176
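/* Release per-socket TCP state when the socket is destroyed: timers,
 * congestion control state, queues, MD5 keys, the bound port and memory
 * accounting.
 */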
2177 void tcp_v4_destroy_sock(struct sock *sk)
2178 {
2179         struct tcp_sock *tp = tcp_sk(sk);
2180
2181         tcp_clear_xmit_timers(sk);
2182
2183         tcp_cleanup_congestion_control(sk);
2184
2185         /* Clean up the write buffer. */
2186         tcp_write_queue_purge(sk);
2187
2188         /* Cleans up our, hopefully empty, out_of_order_queue. */
2189         __skb_queue_purge(&tp->out_of_order_queue);
2190
2191 #ifdef CONFIG_TCP_MD5SIG
2192         /* Clean up the MD5 key list, if any */
2193         if (tp->md5sig_info) {
2194                 tcp_clear_md5_list(sk);
2195                 kfree_rcu(tp->md5sig_info, rcu);
2196                 tp->md5sig_info = NULL;
2197         }
2198 #endif
2199
2200 #ifdef CONFIG_NET_DMA
2201         /* Cleans up our sk_async_wait_queue */
2202         __skb_queue_purge(&sk->sk_async_wait_queue);
2203 #endif
2204
2205         /* Clean the prequeue; it really should be empty. */
2206         __skb_queue_purge(&tp->ucopy.prequeue);
2207
2208         /* Clean up a referenced TCP bind bucket. */
2209         if (inet_csk(sk)->icsk_bind_hash)
2210                 inet_put_port(sk);
2211
2212         BUG_ON(tp->fastopen_rsk != NULL);
2213
2214         /* If socket is aborted during connect operation */
2215         tcp_free_fastopen_req(tp);
2216
2217         sk_sockets_allocated_dec(sk);
2218         sock_release_memcg(sk);
2219 }
2220 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2221
2222 #ifdef CONFIG_PROC_FS
2223 /* Proc filesystem TCP sock list dumping. */
2224
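/* Helpers for walking the TIME_WAIT chain (twchain) of an ehash bucket. */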
2225 static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
2226 {
2227         return hlist_nulls_empty(head) ? NULL :
2228                 list_entry(head->first, struct inet_timewait_sock, tw_node);
2229 }
2230
2231 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
2232 {
2233         return !is_a_nulls(tw->tw_node.next) ?
2234                 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2235 }
2236
2237 /*
2238  * Get the next listener socket following cur.  If cur is NULL, get the first
2239  * starting from bucket given in st->bucket; when st->bucket is zero the
2240  * very first socket in the hash table is returned.
2241  */
2242 static void *listening_get_next(struct seq_file *seq, void *cur)
2243 {
2244         struct inet_connection_sock *icsk;
2245         struct hlist_nulls_node *node;
2246         struct sock *sk = cur;
2247         struct inet_listen_hashbucket *ilb;
2248         struct tcp_iter_state *st = seq->private;
2249         struct net *net = seq_file_net(seq);
2250
2251         if (!sk) {
2252                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2253                 spin_lock_bh(&ilb->lock);
2254                 sk = sk_nulls_head(&ilb->head);
2255                 st->offset = 0;
2256                 goto get_sk;
2257         }
2258         ilb = &tcp_hashinfo.listening_hash[st->bucket];
2259         ++st->num;
2260         ++st->offset;
2261
2262         if (st->state == TCP_SEQ_STATE_OPENREQ) {
2263                 struct request_sock *req = cur;
2264
2265                 icsk = inet_csk(st->syn_wait_sk);
2266                 req = req->dl_next;
2267                 while (1) {
2268                         while (req) {
2269                                 if (req->rsk_ops->family == st->family) {
2270                                         cur = req;
2271                                         goto out;
2272                                 }
2273                                 req = req->dl_next;
2274                         }
2275                         if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2276                                 break;
2277 get_req:
2278                         req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2279                 }
2280                 sk        = sk_nulls_next(st->syn_wait_sk);
2281                 st->state = TCP_SEQ_STATE_LISTENING;
2282                 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2283         } else {
2284                 icsk = inet_csk(sk);
2285                 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2286                 if (reqsk_queue_len(&icsk->icsk_accept_queue))
2287                         goto start_req;
2288                 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2289                 sk = sk_nulls_next(sk);
2290         }
2291 get_sk:
2292         sk_nulls_for_each_from(sk, node) {
2293                 if (!net_eq(sock_net(sk), net))
2294                         continue;
2295                 if (sk->sk_family == st->family) {
2296                         cur = sk;
2297                         goto out;
2298                 }
2299                 icsk = inet_csk(sk);
2300                 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2301                 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2302 start_req:
2303                         st->uid         = sock_i_uid(sk);
2304                         st->syn_wait_sk = sk;
2305                         st->state       = TCP_SEQ_STATE_OPENREQ;
2306                         st->sbucket     = 0;
2307                         goto get_req;
2308                 }
2309                 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2310         }
2311         spin_unlock_bh(&ilb->lock);
2312         st->offset = 0;
2313         if (++st->bucket < INET_LHTABLE_SIZE) {
2314                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2315                 spin_lock_bh(&ilb->lock);
2316                 sk = sk_nulls_head(&ilb->head);
2317                 goto get_sk;
2318         }
2319         cur = NULL;
2320 out:
2321         return cur;
2322 }
2323
2324 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2325 {
2326         struct tcp_iter_state *st = seq->private;
2327         void *rc;
2328
2329         st->bucket = 0;
2330         st->offset = 0;
2331         rc = listening_get_next(seq, NULL);
2332
2333         while (rc && *pos) {
2334                 rc = listening_get_next(seq, rc);
2335                 --*pos;
2336         }
2337         return rc;
2338 }
2339
2340 static inline bool empty_bucket(struct tcp_iter_state *st)
2341 {
2342         return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2343                 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
2344 }
2345
2346 /*
2347  * Get first established socket starting from bucket given in st->bucket.
2348  * If st->bucket is zero, the very first socket in the hash is returned.
2349  */
2350 static void *established_get_first(struct seq_file *seq)
2351 {
2352         struct tcp_iter_state *st = seq->private;
2353         struct net *net = seq_file_net(seq);
2354         void *rc = NULL;
2355
2356         st->offset = 0;
2357         for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2358                 struct sock *sk;
2359                 struct hlist_nulls_node *node;
2360                 struct inet_timewait_sock *tw;
2361                 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2362
2363                 /* Lockless fast path for the common case of empty buckets */
2364                 if (empty_bucket(st))
2365                         continue;
2366
2367                 spin_lock_bh(lock);
2368                 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2369                         if (sk->sk_family != st->family ||
2370                             !net_eq(sock_net(sk), net)) {
2371                                 continue;
2372                         }
2373                         rc = sk;
2374                         goto out;
2375                 }
2376                 st->state = TCP_SEQ_STATE_TIME_WAIT;
2377                 inet_twsk_for_each(tw, node,
2378                                    &tcp_hashinfo.ehash[st->bucket].twchain) {
2379                         if (tw->tw_family != st->family ||
2380                             !net_eq(twsk_net(tw), net)) {
2381                                 continue;
2382                         }
2383                         rc = tw;
2384                         goto out;
2385                 }
2386                 spin_unlock_bh(lock);
2387                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2388         }
2389 out:
2390         return rc;
2391 }
2392
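/* Advance to the next established or TIME_WAIT socket, moving on to the
 * next non-empty ehash bucket when the current chain is exhausted.
 */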
2393 static void *established_get_next(struct seq_file *seq, void *cur)
2394 {
2395         struct sock *sk = cur;
2396         struct inet_timewait_sock *tw;
2397         struct hlist_nulls_node *node;
2398         struct tcp_iter_state *st = seq->private;
2399         struct net *net = seq_file_net(seq);
2400
2401         ++st->num;
2402         ++st->offset;
2403
2404         if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2405                 tw = cur;
2406                 tw = tw_next(tw);
2407 get_tw:
2408                 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2409                         tw = tw_next(tw);
2410                 }
2411                 if (tw) {
2412                         cur = tw;
2413                         goto out;
2414                 }
2415                 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2416                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2417
2418                 /* Look for the next non-empty bucket */
2419                 st->offset = 0;
2420                 while (++st->bucket <= tcp_hashinfo.ehash_mask &&
2421                                 empty_bucket(st))
2422                         ;
2423                 if (st->bucket > tcp_hashinfo.ehash_mask)
2424                         return NULL;
2425
2426                 spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2427                 sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
2428         } else
2429                 sk = sk_nulls_next(sk);
2430
2431         sk_nulls_for_each_from(sk, node) {
2432                 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2433                         goto found;
2434         }
2435
2436         st->state = TCP_SEQ_STATE_TIME_WAIT;
2437         tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2438         goto get_tw;
2439 found:
2440         cur = sk;
2441 out:
2442         return cur;
2443 }
2444
2445 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2446 {
2447         struct tcp_iter_state *st = seq->private;
2448         void *rc;
2449
2450         st->bucket = 0;
2451         rc = established_get_first(seq);
2452
2453         while (rc && pos) {
2454                 rc = established_get_next(seq, rc);
2455                 --pos;
2456         }
2457         return rc;
2458 }
2459
2460 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2461 {
2462         void *rc;
2463         struct tcp_iter_state *st = seq->private;
2464
2465         st->state = TCP_SEQ_STATE_LISTENING;
2466         rc        = listening_get_idx(seq, &pos);
2467
2468         if (!rc) {
2469                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2470                 rc        = established_get_idx(seq, pos);
2471         }
2472
2473         return rc;
2474 }
2475
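/* Resume the /proc iteration at the bucket and in-bucket offset recorded in
 * the iterator state, instead of rescanning from the start of the tables.
 */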
2476 static void *tcp_seek_last_pos(struct seq_file *seq)
2477 {
2478         struct tcp_iter_state *st = seq->private;
2479         int offset = st->offset;
2480         int orig_num = st->num;
2481         void *rc = NULL;
2482
2483         switch (st->state) {
2484         case TCP_SEQ_STATE_OPENREQ:
2485         case TCP_SEQ_STATE_LISTENING:
2486                 if (st->bucket >= INET_LHTABLE_SIZE)
2487                         break;
2488                 st->state = TCP_SEQ_STATE_LISTENING;
2489                 rc = listening_get_next(seq, NULL);
2490                 while (offset-- && rc)
2491                         rc = listening_get_next(seq, rc);
2492                 if (rc)
2493                         break;
2494                 st->bucket = 0;
2495                 /* Fallthrough */
2496         case TCP_SEQ_STATE_ESTABLISHED:
2497         case TCP_SEQ_STATE_TIME_WAIT:
2498                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2499                 if (st->bucket > tcp_hashinfo.ehash_mask)
2500                         break;
2501                 rc = established_get_first(seq);
2502                 while (offset-- && rc)
2503                         rc = established_get_next(seq, rc);
2504         }
2505
2506         st->num = orig_num;
2507
2508         return rc;
2509 }
2510
2511 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2512 {
2513         struct tcp_iter_state *st = seq->private;
2514         void *rc;
2515
2516         if (*pos && *pos == st->last_pos) {
2517                 rc = tcp_seek_last_pos(seq);
2518                 if (rc)
2519                         goto out;
2520         }
2521
2522         st->state = TCP_SEQ_STATE_LISTENING;
2523         st->num = 0;
2524         st->bucket = 0;
2525         st->offset = 0;
2526         rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2527
2528 out:
2529         st->last_pos = *pos;
2530         return rc;
2531 }
2532
2533 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2534 {
2535         struct tcp_iter_state *st = seq->private;
2536         void *rc = NULL;
2537
2538         if (v == SEQ_START_TOKEN) {
2539                 rc = tcp_get_idx(seq, 0);
2540                 goto out;
2541         }
2542
2543         switch (st->state) {
2544         case TCP_SEQ_STATE_OPENREQ:
2545         case TCP_SEQ_STATE_LISTENING:
2546                 rc = listening_get_next(seq, v);
2547                 if (!rc) {
2548                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2549                         st->bucket = 0;
2550                         st->offset = 0;
2551                         rc        = established_get_first(seq);
2552                 }
2553                 break;
2554         case TCP_SEQ_STATE_ESTABLISHED:
2555         case TCP_SEQ_STATE_TIME_WAIT:
2556                 rc = established_get_next(seq, v);
2557                 break;
2558         }
2559 out:
2560         ++*pos;
2561         st->last_pos = *pos;
2562         return rc;
2563 }
2564
2565 static void tcp_seq_stop(struct seq_file *seq, void *v)
2566 {
2567         struct tcp_iter_state *st = seq->private;
2568
2569         switch (st->state) {
2570         case TCP_SEQ_STATE_OPENREQ:
2571                 if (v) {
2572                         struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2573                         read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2574                 }
2575         case TCP_SEQ_STATE_LISTENING:
2576                 if (v != SEQ_START_TOKEN)
2577                         spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2578                 break;
2579         case TCP_SEQ_STATE_TIME_WAIT:
2580         case TCP_SEQ_STATE_ESTABLISHED:
2581                 if (v)
2582                         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2583                 break;
2584         }
2585 }
2586
2587 int tcp_seq_open(struct inode *inode, struct file *file)
2588 {
2589         struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2590         struct tcp_iter_state *s;
2591         int err;
2592
2593         err = seq_open_net(inode, file, &afinfo->seq_ops,
2594                           sizeof(struct tcp_iter_state));
2595         if (err < 0)
2596                 return err;
2597
2598         s = ((struct seq_file *)file->private_data)->private;
2599         s->family               = afinfo->family;
2600         s->last_pos             = 0;
2601         return 0;
2602 }
2603 EXPORT_SYMBOL(tcp_seq_open);
2604
2605 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2606 {
2607         int rc = 0;
2608         struct proc_dir_entry *p;
2609
2610         afinfo->seq_ops.start           = tcp_seq_start;
2611         afinfo->seq_ops.next            = tcp_seq_next;
2612         afinfo->seq_ops.stop            = tcp_seq_stop;
2613
2614         p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2615                              afinfo->seq_fops, afinfo);
2616         if (!p)
2617                 rc = -ENOMEM;
2618         return rc;
2619 }
2620 EXPORT_SYMBOL(tcp_proc_register);
2621
2622 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2623 {
2624         remove_proc_entry(afinfo->name, net->proc_net);
2625 }
2626 EXPORT_SYMBOL(tcp_proc_unregister);
2627
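/* Format one /proc/net/tcp line for a SYN_RECV request sock. */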
2628 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2629                          struct seq_file *f, int i, kuid_t uid, int *len)
2630 {
2631         const struct inet_request_sock *ireq = inet_rsk(req);
2632         long delta = req->expires - jiffies;
2633
2634         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2635                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
2636                 i,
2637                 ireq->loc_addr,
2638                 ntohs(inet_sk(sk)->inet_sport),
2639                 ireq->rmt_addr,
2640                 ntohs(ireq->rmt_port),
2641                 TCP_SYN_RECV,
2642                 0, 0, /* could print option size, but that is af dependent. */
2643                 1,    /* timers active (only the expire timer) */
2644                 jiffies_delta_to_clock_t(delta),
2645                 req->num_timeout,
2646                 from_kuid_munged(seq_user_ns(f), uid),
2647                 0,  /* non standard timer */
2648                 0, /* open_requests have no inode */
2649                 atomic_read(&sk->sk_refcnt),
2650                 req,
2651                 len);
2652 }
2653
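/* Format one /proc/net/tcp line for a full socket, including timer state,
 * queue sizes and congestion information.
 */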
2654 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2655 {
2656         int timer_active;
2657         unsigned long timer_expires;
2658         const struct tcp_sock *tp = tcp_sk(sk);
2659         const struct inet_connection_sock *icsk = inet_csk(sk);
2660         const struct inet_sock *inet = inet_sk(sk);
2661         struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2662         __be32 dest = inet->inet_daddr;
2663         __be32 src = inet->inet_rcv_saddr;
2664         __u16 destp = ntohs(inet->inet_dport);
2665         __u16 srcp = ntohs(inet->inet_sport);
2666         int rx_queue;
2667
2668         if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2669             icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2670             icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2671                 timer_active    = 1;
2672                 timer_expires   = icsk->icsk_timeout;
2673         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2674                 timer_active    = 4;
2675                 timer_expires   = icsk->icsk_timeout;
2676         } else if (timer_pending(&sk->sk_timer)) {
2677                 timer_active    = 2;
2678                 timer_expires   = sk->sk_timer.expires;
2679         } else {
2680                 timer_active    = 0;
2681                 timer_expires = jiffies;
2682         }
2683
2684         if (sk->sk_state == TCP_LISTEN)
2685                 rx_queue = sk->sk_ack_backlog;
2686         else
2687                 /*
2688                  * because we don't lock the socket, we might find a transient negative value
2689                  */
2690                 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2691
2692         seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2693                         "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
2694                 i, src, srcp, dest, destp, sk->sk_state,
2695                 tp->write_seq - tp->snd_una,
2696                 rx_queue,
2697                 timer_active,
2698                 jiffies_delta_to_clock_t(timer_expires - jiffies),
2699                 icsk->icsk_retransmits,
2700                 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2701                 icsk->icsk_probes_out,
2702                 sock_i_ino(sk),
2703                 atomic_read(&sk->sk_refcnt), sk,
2704                 jiffies_to_clock_t(icsk->icsk_rto),
2705                 jiffies_to_clock_t(icsk->icsk_ack.ato),
2706                 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2707                 tp->snd_cwnd,
2708                 sk->sk_state == TCP_LISTEN ?
2709                     (fastopenq ? fastopenq->max_qlen : 0) :
2710                     (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
2711                 len);
2712 }
2713
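/* Format one /proc/net/tcp line for a TIME_WAIT socket. */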
2714 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2715                                struct seq_file *f, int i, int *len)
2716 {
2717         __be32 dest, src;
2718         __u16 destp, srcp;
2719         long delta = tw->tw_ttd - jiffies;
2720
2721         dest  = tw->tw_daddr;
2722         src   = tw->tw_rcv_saddr;
2723         destp = ntohs(tw->tw_dport);
2724         srcp  = ntohs(tw->tw_sport);
2725
2726         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2727                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
2728                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2729                 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2730                 atomic_read(&tw->tw_refcnt), tw, len);
2731 }
2732
2733 #define TMPSZ 150
2734
2735 static int tcp4_seq_show(struct seq_file *seq, void *v)
2736 {
2737         struct tcp_iter_state *st;
2738         int len;
2739
2740         if (v == SEQ_START_TOKEN) {
2741                 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2742                            "  sl  local_address rem_address   st tx_queue "
2743                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2744                            "inode");
2745                 goto out;
2746         }
2747         st = seq->private;
2748
2749         switch (st->state) {
2750         case TCP_SEQ_STATE_LISTENING:
2751         case TCP_SEQ_STATE_ESTABLISHED:
2752                 get_tcp4_sock(v, seq, st->num, &len);
2753                 break;
2754         case TCP_SEQ_STATE_OPENREQ:
2755                 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2756                 break;
2757         case TCP_SEQ_STATE_TIME_WAIT:
2758                 get_timewait4_sock(v, seq, st->num, &len);
2759                 break;
2760         }
2761         seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2762 out:
2763         return 0;
2764 }
2765
2766 static const struct file_operations tcp_afinfo_seq_fops = {
2767         .owner   = THIS_MODULE,
2768         .open    = tcp_seq_open,
2769         .read    = seq_read,
2770         .llseek  = seq_lseek,
2771         .release = seq_release_net
2772 };
2773
2774 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2775         .name           = "tcp",
2776         .family         = AF_INET,
2777         .seq_fops       = &tcp_afinfo_seq_fops,
2778         .seq_ops        = {
2779                 .show           = tcp4_seq_show,
2780         },
2781 };
2782
2783 static int __net_init tcp4_proc_init_net(struct net *net)
2784 {
2785         return tcp_proc_register(net, &tcp4_seq_afinfo);
2786 }
2787
2788 static void __net_exit tcp4_proc_exit_net(struct net *net)
2789 {
2790         tcp_proc_unregister(net, &tcp4_seq_afinfo);
2791 }
2792
2793 static struct pernet_operations tcp4_net_ops = {
2794         .init = tcp4_proc_init_net,
2795         .exit = tcp4_proc_exit_net,
2796 };
2797
2798 int __init tcp4_proc_init(void)
2799 {
2800         return register_pernet_subsys(&tcp4_net_ops);
2801 }
2802
2803 void tcp4_proc_exit(void)
2804 {
2805         unregister_pernet_subsys(&tcp4_net_ops);
2806 }
2807 #endif /* CONFIG_PROC_FS */
2808
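/* GRO receive for IPv4/TCP: verify or compute the checksum over the GRO
 * region before handing the segment to the generic tcp_gro_receive().
 */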
2809 struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2810 {
2811         const struct iphdr *iph = skb_gro_network_header(skb);
2812         __wsum wsum;
2813         __sum16 sum;
2814
2815         switch (skb->ip_summed) {
2816         case CHECKSUM_COMPLETE:
2817                 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2818                                   skb->csum)) {
2819                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2820                         break;
2821                 }
2822 flush:
2823                 NAPI_GRO_CB(skb)->flush = 1;
2824                 return NULL;
2825
2826         case CHECKSUM_NONE:
2827                 wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
2828                                           skb_gro_len(skb), IPPROTO_TCP, 0);
2829                 sum = csum_fold(skb_checksum(skb,
2830                                              skb_gro_offset(skb),
2831                                              skb_gro_len(skb),
2832                                              wsum));
2833                 if (sum)
2834                         goto flush;
2835
2836                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2837                 break;
2838         }
2839
2840         return tcp_gro_receive(head, skb);
2841 }
2842
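/* GRO complete for IPv4/TCP: seed the pseudo-header checksum and mark the
 * merged skb as TCPv4 GSO before the generic completion.
 */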
2843 int tcp4_gro_complete(struct sk_buff *skb)
2844 {
2845         const struct iphdr *iph = ip_hdr(skb);
2846         struct tcphdr *th = tcp_hdr(skb);
2847
2848         th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2849                                   iph->saddr, iph->daddr, 0);
2850         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2851
2852         return tcp_gro_complete(skb);
2853 }
2854
2855 struct proto tcp_prot = {
2856         .name                   = "TCP",
2857         .owner                  = THIS_MODULE,
2858         .close                  = tcp_close,
2859         .connect                = tcp_v4_connect,
2860         .disconnect             = tcp_disconnect,
2861         .accept                 = inet_csk_accept,
2862         .ioctl                  = tcp_ioctl,
2863         .init                   = tcp_v4_init_sock,
2864         .destroy                = tcp_v4_destroy_sock,
2865         .shutdown               = tcp_shutdown,
2866         .setsockopt             = tcp_setsockopt,
2867         .getsockopt             = tcp_getsockopt,
2868         .recvmsg                = tcp_recvmsg,
2869         .sendmsg                = tcp_sendmsg,
2870         .sendpage               = tcp_sendpage,
2871         .backlog_rcv            = tcp_v4_do_rcv,
2872         .release_cb             = tcp_release_cb,
2873         .hash                   = inet_hash,
2874         .unhash                 = inet_unhash,
2875         .get_port               = inet_csk_get_port,
2876         .enter_memory_pressure  = tcp_enter_memory_pressure,
2877         .sockets_allocated      = &tcp_sockets_allocated,
2878         .orphan_count           = &tcp_orphan_count,
2879         .memory_allocated       = &tcp_memory_allocated,
2880         .memory_pressure        = &tcp_memory_pressure,
2881         .sysctl_wmem            = sysctl_tcp_wmem,
2882         .sysctl_rmem            = sysctl_tcp_rmem,
2883         .max_header             = MAX_TCP_HEADER,
2884         .obj_size               = sizeof(struct tcp_sock),
2885         .slab_flags             = SLAB_DESTROY_BY_RCU,
2886         .twsk_prot              = &tcp_timewait_sock_ops,
2887         .rsk_prot               = &tcp_request_sock_ops,
2888         .h.hashinfo             = &tcp_hashinfo,
2889         .no_autobind            = true,
2890 #ifdef CONFIG_COMPAT
2891         .compat_setsockopt      = compat_tcp_setsockopt,
2892         .compat_getsockopt      = compat_tcp_getsockopt,
2893 #endif
2894 #ifdef CONFIG_MEMCG_KMEM
2895         .init_cgroup            = tcp_init_cgroup,
2896         .destroy_cgroup         = tcp_destroy_cgroup,
2897         .proto_cgroup           = tcp_proto_cgroup,
2898 #endif
2899 };
2900 EXPORT_SYMBOL(tcp_prot);
2901
2902 static int __net_init tcp_sk_init(struct net *net)
2903 {
2904         net->ipv4.sysctl_tcp_ecn = 2;
2905         return 0;
2906 }
2907
2908 static void __net_exit tcp_sk_exit(struct net *net)
2909 {
2910 }
2911
2912 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2913 {
2914         inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2915 }
2916
2917 static struct pernet_operations __net_initdata tcp_sk_ops = {
2918        .init       = tcp_sk_init,
2919        .exit       = tcp_sk_exit,
2920        .exit_batch = tcp_sk_exit_batch,
2921 };
2922
2923 void __init tcp_v4_init(void)
2924 {
2925         inet_hashinfo_init(&tcp_hashinfo);
2926         if (register_pernet_subsys(&tcp_sk_ops))
2927                 panic("Failed to create the TCP control socket.\n");
2928 }