/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol(TCP).
 *
 *              IPv4 specific functions
 *
 *
 *              code split from:
 *              linux/ipv4/tcp.c
 *              linux/ipv4/tcp_input.c
 *              linux/ipv4/tcp_output.c
 *
 *              See tcp.c for author information
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *              David S. Miller :       New socket lookup architecture.
 *                                      This code is dedicated to John Dyson.
 *              David S. Miller :       Change semantics of established hash,
 *                                      half is devoted to TIME_WAIT sockets
 *                                      and the rest go in the other half.
 *              Andi Kleen :            Add support for syncookies and fixed
 *                                      some bugs: ip options weren't passed to
 *                                      the TCP layer, missed a check for an
 *                                      ACK bit.
 *              Andi Kleen :            Implemented fast path mtu discovery.
 *                                      Fixed many serious bugs in the
 *                                      request_sock handling and moved
 *                                      most of it into the af independent code.
 *                                      Added tail drop and some other bugfixes.
 *                                      Added new listen semantics.
 *              Mike McLagan    :       Routing by source
 *      Juan Jose Ciarlante:            ip_dynaddr bits
 *              Andi Kleen:             various fixes.
 *      Vitaly E. Lavrov        :       Transparent proxy revived after year
 *                                      coma.
 *      Andi Kleen              :       Fix new listen.
 *      Andi Kleen              :       Fix accept error reporting.
 *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
 *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
 *                                      a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
                               __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
        return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
                                          ip_hdr(skb)->saddr,
                                          tcp_hdr(skb)->dest,
                                          tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
        const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
        struct tcp_sock *tp = tcp_sk(sk);

        /* With PAWS, it is safe from the viewpoint
           of data integrity. Even without PAWS it is safe provided sequence
           spaces do not overlap i.e. at data rates <= 80Mbit/sec.

           Actually, the idea is close to VJ's one, only timestamp cache is
           held not per host, but per port pair and TW bucket is used as state
           holder.

           If TW bucket has been already destroyed we fall back to VJ's scheme
           and use initial timestamp retrieved from peer table.
         */
        if (tcptw->tw_ts_recent_stamp &&
            (!twp || (sysctl_tcp_tw_reuse &&
                             get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
                tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
                if (tp->write_seq == 0)
                        tp->write_seq = 1;
                tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
                tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                sock_hold(sktw);
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
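
/* Reader's note (illustration, not upstream text): with
 * net.ipv4.tcp_tw_reuse enabled, a connect() that collides with a
 * TIME_WAIT bucket whose last timestamp is more than one second old
 * may reuse the four-tuple. write_seq is bumped past tw_snd_nxt by
 * 65535 + 2, so the new connection's sequence space cannot overlap
 * the old one's even if the peer still holds state.
 */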

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
        struct inet_sock *inet = inet_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        __be16 orig_sport, orig_dport;
        __be32 daddr, nexthop;
        struct flowi4 *fl4;
        struct rtable *rt;
        int err;
        struct ip_options_rcu *inet_opt;

        if (addr_len < sizeof(struct sockaddr_in))
                return -EINVAL;

        if (usin->sin_family != AF_INET)
                return -EAFNOSUPPORT;

        nexthop = daddr = usin->sin_addr.s_addr;
        inet_opt = rcu_dereference_protected(inet->inet_opt,
                                             sock_owned_by_user(sk));
        if (inet_opt && inet_opt->opt.srr) {
                if (!daddr)
                        return -EINVAL;
                nexthop = inet_opt->opt.faddr;
        }

        orig_sport = inet->inet_sport;
        orig_dport = usin->sin_port;
        fl4 = &inet->cork.fl.u.ip4;
        rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
                              RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
                              IPPROTO_TCP,
                              orig_sport, orig_dport, sk);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                if (err == -ENETUNREACH)
                        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
                return err;
        }

        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                ip_rt_put(rt);
                return -ENETUNREACH;
        }

        if (!inet_opt || !inet_opt->opt.srr)
                daddr = fl4->daddr;

        if (!inet->inet_saddr)
                inet->inet_saddr = fl4->saddr;
        sk_rcv_saddr_set(sk, inet->inet_saddr);

        if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
                /* Reset inherited state */
                tp->rx_opt.ts_recent       = 0;
                tp->rx_opt.ts_recent_stamp = 0;
                if (likely(!tp->repair))
                        tp->write_seq      = 0;
        }

        if (tcp_death_row.sysctl_tw_recycle &&
            !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
                tcp_fetch_timewait_stamp(sk, &rt->dst);

        inet->inet_dport = usin->sin_port;
        sk_daddr_set(sk, daddr);

        inet_csk(sk)->icsk_ext_hdr_len = 0;
        if (inet_opt)
                inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

        tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

        /* Socket identity is still unknown (sport may be zero).
         * However we set state to SYN-SENT and, without releasing the
         * socket lock, select a source port, enter ourselves into the
         * hash tables and complete initialization after this.
         */
        tcp_set_state(sk, TCP_SYN_SENT);
        err = inet_hash_connect(&tcp_death_row, sk);
        if (err)
                goto failure;

        sk_set_txhash(sk);

        rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
                               inet->inet_sport, inet->inet_dport, sk);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                rt = NULL;
                goto failure;
        }
        /* OK, now commit destination to socket.  */
        sk->sk_gso_type = SKB_GSO_TCPV4;
        sk_setup_caps(sk, &rt->dst);

        if (!tp->write_seq && likely(!tp->repair))
                tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
                                                           inet->inet_daddr,
                                                           inet->inet_sport,
                                                           usin->sin_port);

        inet->inet_id = tp->write_seq ^ jiffies;

        err = tcp_connect(sk);

        rt = NULL;
        if (err)
                goto failure;

        return 0;

failure:
        /*
         * This unhashes the socket and releases the local port,
         * if necessary.
         */
        tcp_set_state(sk, TCP_CLOSE);
        ip_rt_put(rt);
        sk->sk_route_caps = 0;
        inet->inet_dport = 0;
        return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
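
/* Caller's-eye sketch (assumption: ordinary Linux userspace; this
 * snippet is illustrative and not part of this file). A blocking
 * connect() reaches tcp_v4_connect() via inet_stream_connect():
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 * The addr_len and sin_family checks at the top of tcp_v4_connect()
 * are what a caller sees as EINVAL and EAFNOSUPPORT.
 */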

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
        struct dst_entry *dst;
        struct inet_sock *inet = inet_sk(sk);
        u32 mtu = tcp_sk(sk)->mtu_info;

        dst = inet_csk_update_pmtu(sk, mtu);
        if (!dst)
                return;

        /* Something is about to go wrong... Remember the soft error
         * for the case where this connection will not be able to recover.
         */
        if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
                sk->sk_err_soft = EMSGSIZE;

        mtu = dst_mtu(dst);

        if (inet->pmtudisc != IP_PMTUDISC_DONT &&
            ip_sk_accept_pmtu(sk) &&
            inet_csk(sk)->icsk_pmtu_cookie > mtu) {
                tcp_sync_mss(sk, mtu);

                /* Resend the TCP packet because it's
                 * clear that the old packet has been
                 * dropped. This is the new "fast" path mtu
                 * discovery.
                 */
                tcp_simple_retransmit(sk);
        } /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
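
/* Note (reader's summary): when the socket is owned by the user,
 * tcp_v4_err() does not call this directly; it records the MTU in
 * tp->mtu_info, sets TCP_MTU_REDUCED_DEFERRED in tsq_flags and takes a
 * reference, and tcp_release_cb() invokes tcp_v4_mtu_reduced() once
 * the socket lock is released (see the ICMP_FRAG_NEEDED handling below).
 */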

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_check(sk, 0);

        if (dst)
                dst->ops->redirect(dst, sk, skb);
}


/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq)
{
        struct request_sock *req = inet_reqsk(sk);
        struct net *net = sock_net(sk);

        /* ICMPs are not backlogged, hence we cannot get
         * an established socket here.
         */
        WARN_ON(req->sk);

        if (seq != tcp_rsk(req)->snt_isn) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
        } else {
                /*
                 * Still in SYN_RECV, just remove it silently.
                 * There is no good way to pass the error to the newly
                 * created socket, and POSIX does not want network
                 * errors returned from accept().
                 */
                inet_csk_reqsk_queue_drop(req->rsk_listener, req);
                NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
        }
        reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */
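
/* Worked example of the convention above (reader's note): a
 * Destination Unreachable / Port Unreachable ICMP (type 3, code 3)
 * arrives encoded as (3 << 8) | 3 = 0x0303, and
 * icmp_err_convert[ICMP_PORT_UNREACH].errno maps it to ECONNREFUSED,
 * which is what the application ultimately observes.
 */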

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
        const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
        struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
        struct inet_connection_sock *icsk;
        struct tcp_sock *tp;
        struct inet_sock *inet;
        const int type = icmp_hdr(icmp_skb)->type;
        const int code = icmp_hdr(icmp_skb)->code;
        struct sock *sk;
        struct sk_buff *skb;
        struct request_sock *fastopen;
        __u32 seq, snd_una;
        __u32 remaining;
        int err;
        struct net *net = dev_net(icmp_skb->dev);

        sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
                                       th->dest, iph->saddr, ntohs(th->source),
                                       inet_iif(icmp_skb));
        if (!sk) {
                ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return;
        }
        if (sk->sk_state == TCP_TIME_WAIT) {
                inet_twsk_put(inet_twsk(sk));
                return;
        }
        seq = ntohl(th->seq);
        if (sk->sk_state == TCP_NEW_SYN_RECV)
                return tcp_req_err(sk, seq);

        bh_lock_sock(sk);
        /* If too many ICMPs get dropped on busy
         * servers this needs to be solved differently.
         * We do take care of PMTU discovery (RFC1191) special case:
         * we can receive locally generated ICMP messages while socket is held.
         */
        if (sock_owned_by_user(sk)) {
                if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
                        NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
        }
        if (sk->sk_state == TCP_CLOSE)
                goto out;

        if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
                NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
                goto out;
        }

        icsk = inet_csk(sk);
        tp = tcp_sk(sk);
        /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
        fastopen = tp->fastopen_rsk;
        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, snd_una, tp->snd_nxt)) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        switch (type) {
        case ICMP_REDIRECT:
                do_redirect(icmp_skb, sk);
                goto out;
        case ICMP_SOURCE_QUENCH:
                /* Just silently ignore these. */
                goto out;
        case ICMP_PARAMETERPROB:
                err = EPROTO;
                break;
        case ICMP_DEST_UNREACH:
                if (code > NR_ICMP_UNREACH)
                        goto out;

                if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
                        /* We are not interested in TCP_LISTEN and open_requests
                         * (SYN-ACKs sent out by Linux are always < 576 bytes,
                         * so they should go through unfragmented).
                         */
                        if (sk->sk_state == TCP_LISTEN)
                                goto out;

                        tp->mtu_info = info;
                        if (!sock_owned_by_user(sk)) {
                                tcp_v4_mtu_reduced(sk);
                        } else {
                                if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
                                        sock_hold(sk);
                        }
                        goto out;
                }

                err = icmp_err_convert[code].errno;
                /* check if icmp_skb allows revert of backoff
                 * (see draft-zimmermann-tcp-lcd) */
                if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
                        break;
                if (seq != tp->snd_una || !icsk->icsk_retransmits ||
                    !icsk->icsk_backoff || fastopen)
                        break;

                if (sock_owned_by_user(sk))
                        break;

                icsk->icsk_backoff--;
                icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
                                               TCP_TIMEOUT_INIT;
                icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

                skb = tcp_write_queue_head(sk);
                BUG_ON(!skb);

                remaining = icsk->icsk_rto -
                            min(icsk->icsk_rto,
                                tcp_time_stamp - tcp_skb_timestamp(skb));

                if (remaining) {
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                                  remaining, TCP_RTO_MAX);
                } else {
                        /* RTO revert clocked out retransmission.
                         * Will retransmit now */
                        tcp_retransmit_timer(sk);
                }

                break;
        case ICMP_TIME_EXCEEDED:
                err = EHOSTUNREACH;
                break;
        default:
                goto out;
        }

        switch (sk->sk_state) {
        case TCP_SYN_SENT:
        case TCP_SYN_RECV:
                /* Only in fast or simultaneous open. If a fast open socket
                 * is already accepted it is treated as a connected one below.
                 */
                if (fastopen && !fastopen->sk)
                        break;

                if (!sock_owned_by_user(sk)) {
                        sk->sk_err = err;

                        sk->sk_error_report(sk);

                        tcp_done(sk);
                } else {
                        sk->sk_err_soft = err;
                }
                goto out;
        }
        /* If we've already connected we will keep trying
         * until we time out, or the user gives up.
         *
         * rfc1122 4.2.3.9 allows us to consider as hard errors
         * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
         * but it is obsoleted by pmtu discovery).
         *
         * Note, that in modern internet, where routing is unreliable
         * and in each dark corner broken firewalls sit, sending random
         * errors ordered by their masters even these two messages finally lose
         * their original sense (even Linux sends invalid PORT_UNREACHs)
         *
         * Now we are in compliance with RFCs.
         *                                                      --ANK (980905)
         */

        inet = inet_sk(sk);
        if (!sock_owned_by_user(sk) && inet->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else  { /* Only an error on timeout */
                sk->sk_err_soft = err;
        }

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
        struct tcphdr *th = tcp_hdr(skb);

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
                skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct tcphdr, check);
        } else {
                th->check = tcp_v4_check(skb->len, saddr, daddr,
                                         csum_partial(th,
                                                      th->doff << 2,
                                                      skb->csum));
        }
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
        const struct inet_sock *inet = inet_sk(sk);

        __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
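
/* Reader's note on the CHECKSUM_PARTIAL branch above (illustrative,
 * not upstream text): th->check is seeded with the folded complement
 * of the pseudo-header sum, and csum_start/csum_offset tell the device
 * where the TCP header begins and where to store the result. The
 * hardware (or skb_checksum_help() as a software fallback) computes
 * the one's-complement sum from csum_start to the end of the packet
 * and writes the folded result at csum_start + csum_offset, i.e.
 * straight into th->check, yielding the correct TCP checksum.
 */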

/*
 *      This routine will send an RST to the other tcp.
 *
 *      Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 *                    for the reset?
 *      Answer: if a packet caused the RST, it is not for a socket
 *              existing in our system; if it is matched to a socket,
 *              it is just a duplicate segment or a bug in the other
 *              side's TCP. So we build the reply based only on the
 *              parameters that arrived with the segment.
 *      Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
        const struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
                __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
        } rep;
        struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *key;
        const __u8 *hash_location = NULL;
        unsigned char newhash[16];
        int genhash;
        struct sock *sk1 = NULL;
#endif
        struct net *net;

        /* Never send a reset in response to a reset. */
        if (th->rst)
                return;

        /* If sk not NULL, it means we did a successful lookup and incoming
         * route had to be correct. prequeue might have dropped our dst.
         */
        if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
                return;

        /* Swap the send and the receive. */
        memset(&rep, 0, sizeof(rep));
        rep.th.dest   = th->source;
        rep.th.source = th->dest;
        rep.th.doff   = sizeof(struct tcphdr) / 4;
        rep.th.rst    = 1;

        if (th->ack) {
                rep.th.seq = th->ack_seq;
        } else {
                rep.th.ack = 1;
                rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
                                       skb->len - (th->doff << 2));
        }

        memset(&arg, 0, sizeof(arg));
        arg.iov[0].iov_base = (unsigned char *)&rep;
        arg.iov[0].iov_len  = sizeof(rep.th);

        net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
        hash_location = tcp_parse_md5sig_option(th);
        if (!sk && hash_location) {
                /*
                 * The active side is lost. Try to find the listening socket
                 * through the source port, and then find the md5 key through
                 * the listening socket. We do not lose security here:
                 * the incoming packet is checked with the md5 hash of the
                 * key we find; no RST is generated if the md5 hash doesn't match.
                 */
                sk1 = __inet_lookup_listener(net,
                                             &tcp_hashinfo, ip_hdr(skb)->saddr,
                                             th->source, ip_hdr(skb)->daddr,
                                             ntohs(th->source), inet_iif(skb));
                /* don't send rst if it can't find key */
                if (!sk1)
                        return;
                rcu_read_lock();
                key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
                                        &ip_hdr(skb)->saddr, AF_INET);
                if (!key)
                        goto release_sk1;

                genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
                if (genhash || memcmp(hash_location, newhash, 16) != 0)
                        goto release_sk1;
        } else {
                key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
                                             &ip_hdr(skb)->saddr,
                                             AF_INET) : NULL;
        }

        if (key) {
                rep.opt[0] = htonl((TCPOPT_NOP << 24) |
                                   (TCPOPT_NOP << 16) |
                                   (TCPOPT_MD5SIG << 8) |
                                   TCPOLEN_MD5SIG);
                /* Update length and the length the header thinks exists */
                arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
                rep.th.doff = arg.iov[0].iov_len / 4;

                tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
                                     key, ip_hdr(skb)->saddr,
                                     ip_hdr(skb)->daddr, &rep.th);
        }
#endif
        arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
                                      ip_hdr(skb)->saddr, /* XXX */
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
        arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
        /* When socket is gone, all binding information is lost.
         * routing might fail in this case. No choice here, if we choose to force
         * input interface, we will misroute in case of asymmetric route.
         */
        if (sk)
                arg.bound_dev_if = sk->sk_bound_dev_if;

        arg.tos = ip_hdr(skb)->tos;
        ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
                              skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len);

        TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
        TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
        if (sk1) {
                rcu_read_unlock();
                sock_put(sk1);
        }
#endif
}

/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
                            u32 win, u32 tsval, u32 tsecr, int oif,
                            struct tcp_md5sig_key *key,
                            int reply_flags, u8 tos)
{
        const struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
                __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
                           + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
                        ];
        } rep;
        struct ip_reply_arg arg;
        struct net *net = dev_net(skb_dst(skb)->dev);

        memset(&rep.th, 0, sizeof(struct tcphdr));
        memset(&arg, 0, sizeof(arg));

        arg.iov[0].iov_base = (unsigned char *)&rep;
        arg.iov[0].iov_len  = sizeof(rep.th);
        if (tsecr) {
                rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                   (TCPOPT_TIMESTAMP << 8) |
                                   TCPOLEN_TIMESTAMP);
                rep.opt[1] = htonl(tsval);
                rep.opt[2] = htonl(tsecr);
                arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
        }

        /* Swap the send and the receive. */
        rep.th.dest    = th->source;
        rep.th.source  = th->dest;
        rep.th.doff    = arg.iov[0].iov_len / 4;
        rep.th.seq     = htonl(seq);
        rep.th.ack_seq = htonl(ack);
        rep.th.ack     = 1;
        rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
        if (key) {
                int offset = (tsecr) ? 3 : 0;

                rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
                                          (TCPOPT_NOP << 16) |
                                          (TCPOPT_MD5SIG << 8) |
                                          TCPOLEN_MD5SIG);
                arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
                rep.th.doff = arg.iov[0].iov_len/4;

                tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
                                    key, ip_hdr(skb)->saddr,
                                    ip_hdr(skb)->daddr, &rep.th);
        }
#endif
        arg.flags = reply_flags;
        arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
                                      ip_hdr(skb)->saddr, /* XXX */
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
        if (oif)
                arg.bound_dev_if = oif;
        arg.tos = tos;
        ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
                              skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len);

        TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
        struct inet_timewait_sock *tw = inet_twsk(sk);
        struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

        tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
                        tcp_time_stamp + tcptw->tw_ts_offset,
                        tcptw->tw_ts_recent,
                        tw->tw_bound_dev_if,
                        tcp_twsk_md5_key(tcptw),
                        tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
                        tw->tw_tos
                        );

        inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
{
        /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
         * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
         */
        tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
                        tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
                        tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
                        tcp_time_stamp,
                        req->ts_recent,
                        0,
                        tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
                                          AF_INET),
                        inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
                        ip_hdr(skb)->tos);
}

/*
 *      Send a SYN-ACK after having received a SYN.
 *      This still operates on a request_sock only, not on a big
 *      socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
                              struct flowi *fl,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc,
                              bool attach_req)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct flowi4 fl4;
        int err = -1;
        struct sk_buff *skb;

        /* First, grab a route. */
        if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
                return -1;

        skb = tcp_make_synack(sk, dst, req, foc, attach_req);

        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
                                            ireq->opt);
                err = net_xmit_eval(err);
        }

        return err;
}

/*
 *      IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
        kfree(inet_rsk(req)->opt);
}


#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

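/* Userspace sketch (assumption: Linux built with CONFIG_TCP_MD5SIG;
 * this snippet is illustrative and not part of this file). A peer key
 * is installed with the TCP_MD5SIG socket option, which
 * tcp_v4_parse_md5_keys() below unpacks into tcp_md5_do_add():
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	peer->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &peer->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */
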
/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
                                         const union tcp_md5_addr *addr,
                                         int family)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;
        unsigned int size = sizeof(struct in_addr);
        const struct tcp_md5sig_info *md5sig;

        /* caller either holds rcu_read_lock() or socket lock */
        md5sig = rcu_dereference_check(tp->md5sig_info,
                                       sock_owned_by_user(sk) ||
                                       lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
        if (!md5sig)
                return NULL;
#if IS_ENABLED(CONFIG_IPV6)
        if (family == AF_INET6)
                size = sizeof(struct in6_addr);
#endif
        hlist_for_each_entry_rcu(key, &md5sig->head, node) {
                if (key->family != family)
                        continue;
                if (!memcmp(&key->addr, addr, size))
                        return key;
        }
        return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
                                         const struct sock *addr_sk)
{
        const union tcp_md5_addr *addr;

        addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
        return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
                   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
        /* Add Key to the list */
        struct tcp_md5sig_key *key;
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_info *md5sig;

        key = tcp_md5_do_lookup(sk, addr, family);
        if (key) {
                /* Pre-existing entry - just update that one. */
                memcpy(key->key, newkey, newkeylen);
                key->keylen = newkeylen;
                return 0;
        }

        md5sig = rcu_dereference_protected(tp->md5sig_info,
                                           sock_owned_by_user(sk));
        if (!md5sig) {
                md5sig = kmalloc(sizeof(*md5sig), gfp);
                if (!md5sig)
                        return -ENOMEM;

                sk_nocaps_add(sk, NETIF_F_GSO_MASK);
                INIT_HLIST_HEAD(&md5sig->head);
                rcu_assign_pointer(tp->md5sig_info, md5sig);
        }

        key = sock_kmalloc(sk, sizeof(*key), gfp);
        if (!key)
                return -ENOMEM;
        if (!tcp_alloc_md5sig_pool()) {
                sock_kfree_s(sk, key, sizeof(*key));
                return -ENOMEM;
        }

        memcpy(key->key, newkey, newkeylen);
        key->keylen = newkeylen;
        key->family = family;
        memcpy(&key->addr, addr,
               (family == AF_INET6) ? sizeof(struct in6_addr) :
                                      sizeof(struct in_addr));
        hlist_add_head_rcu(&key->node, &md5sig->head);
        return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
        struct tcp_md5sig_key *key;

        key = tcp_md5_do_lookup(sk, addr, family);
        if (!key)
                return -ENOENT;
        hlist_del_rcu(&key->node);
        atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
        kfree_rcu(key, rcu);
        return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;
        struct hlist_node *n;
        struct tcp_md5sig_info *md5sig;

        md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

        hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
                hlist_del_rcu(&key->node);
                atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
                kfree_rcu(key, rcu);
        }
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
                                 int optlen)
{
        struct tcp_md5sig cmd;
        struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

        if (optlen < sizeof(cmd))
                return -EINVAL;

        if (copy_from_user(&cmd, optval, sizeof(cmd)))
                return -EFAULT;

        if (sin->sin_family != AF_INET)
                return -EINVAL;

        if (!cmd.tcpm_keylen)
                return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
                                      AF_INET);

        if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
                return -EINVAL;

        return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
                              AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
                              GFP_KERNEL);
}

static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
                                        __be32 daddr, __be32 saddr, int nbytes)
{
        struct tcp4_pseudohdr *bp;
        struct scatterlist sg;

        bp = &hp->md5_blk.ip4;

        /*
         * 1. the TCP pseudo-header (in the order: source IP address,
         * destination IP address, zero-padded protocol number, and
         * segment length)
         */
        bp->saddr = saddr;
        bp->daddr = daddr;
        bp->pad = 0;
        bp->protocol = IPPROTO_TCP;
        bp->len = cpu_to_be16(nbytes);

        sg_init_one(&sg, bp, sizeof(*bp));
        return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
                               __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
        struct tcp_md5sig_pool *hp;
        struct hash_desc *desc;

        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
        desc = &hp->md5_desc;

        if (crypto_hash_init(desc))
                goto clear_hash;
        if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
                goto clear_hash;
        if (tcp_md5_hash_header(hp, th))
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
        if (crypto_hash_final(desc, md5_hash))
                goto clear_hash;

        tcp_put_md5sig_pool();
        return 0;

clear_hash:
        tcp_put_md5sig_pool();
clear_hash_noput:
        memset(md5_hash, 0, 16);
        return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
                        const struct sock *sk,
                        const struct sk_buff *skb)
{
        struct tcp_md5sig_pool *hp;
        struct hash_desc *desc;
        const struct tcphdr *th = tcp_hdr(skb);
        __be32 saddr, daddr;

        if (sk) { /* valid for establish/request sockets */
                saddr = sk->sk_rcv_saddr;
                daddr = sk->sk_daddr;
        } else {
                const struct iphdr *iph = ip_hdr(skb);
                saddr = iph->saddr;
                daddr = iph->daddr;
        }

        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
        desc = &hp->md5_desc;

        if (crypto_hash_init(desc))
                goto clear_hash;

        if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
                goto clear_hash;
        if (tcp_md5_hash_header(hp, th))
                goto clear_hash;
        if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
        if (crypto_hash_final(desc, md5_hash))
                goto clear_hash;

        tcp_put_md5sig_pool();
        return 0;

clear_hash:
        tcp_put_md5sig_pool();
clear_hash_noput:
        memset(md5_hash, 0, 16);
        return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
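
/* For reference (reader's summary): the RFC2385 digest computed above
 * covers, in order, the IPv4 pseudo-header, the TCP header with its
 * checksum field zeroed (options excluded), the segment payload, and
 * finally the key itself; that is exactly the sequence of
 * tcp_v4_md5_hash_pseudoheader(), tcp_md5_hash_header(),
 * tcp_md5_hash_skb_data() and tcp_md5_hash_key() calls above.
 */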

#endif

/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
                                    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
        /*
         * This gets called for each TCP segment that arrives
         * so we want to be efficient.
         * We have 3 drop cases:
         * o No MD5 hash and one expected.
         * o MD5 hash and we're not expecting one.
         * o MD5 hash and it's wrong.
         */
        const __u8 *hash_location = NULL;
        struct tcp_md5sig_key *hash_expected;
        const struct iphdr *iph = ip_hdr(skb);
        const struct tcphdr *th = tcp_hdr(skb);
        int genhash;
        unsigned char newhash[16];

        hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
                                          AF_INET);
        hash_location = tcp_parse_md5sig_option(th);

        /* We've parsed the options - do we have a hash? */
        if (!hash_expected && !hash_location)
                return false;

        if (hash_expected && !hash_location) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
                return true;
        }

        if (!hash_expected && hash_location) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
                return true;
        }

        /* Okay, so this is hash_expected and hash_location -
         * so we need to calculate the checksum.
         */
        genhash = tcp_v4_md5_hash_skb(newhash,
                                      hash_expected,
                                      NULL, skb);

        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
                net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
                                     &iph->saddr, ntohs(th->source),
                                     &iph->daddr, ntohs(th->dest),
                                     genhash ? " tcp_v4_calc_md5_hash failed"
                                     : "");
                return true;
        }
        return false;
#endif
        return false;
}

static void tcp_v4_init_req(struct request_sock *req,
                            const struct sock *sk_listener,
                            struct sk_buff *skb)
{
        struct inet_request_sock *ireq = inet_rsk(req);

        sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
        sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
        ireq->no_srccheck = inet_sk(sk_listener)->transparent;
        ireq->opt = tcp_v4_save_options(skb);
}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
                                          struct flowi *fl,
                                          const struct request_sock *req,
                                          bool *strict)
{
        struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

        if (strict) {
                if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
                        *strict = true;
                else
                        *strict = false;
        }

        return dst;
}

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
        .family         =       PF_INET,
        .obj_size       =       sizeof(struct tcp_request_sock),
        .rtx_syn_ack    =       tcp_rtx_synack,
        .send_ack       =       tcp_v4_reqsk_send_ack,
        .destructor     =       tcp_v4_reqsk_destructor,
        .send_reset     =       tcp_v4_send_reset,
        .syn_ack_timeout =      tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
        .mss_clamp      =       TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
        .req_md5_lookup =       tcp_v4_md5_lookup,
        .calc_md5_hash  =       tcp_v4_md5_hash_skb,
#endif
        .init_req       =       tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
        .cookie_init_seq =      cookie_v4_init_sequence,
#endif
        .route_req      =       tcp_v4_route_req,
        .init_seq       =       tcp_v4_init_sequence,
        .send_synack    =       tcp_v4_send_synack,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
        /* Never answer SYNs sent to broadcast or multicast */
        if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
                goto drop;

        return tcp_conn_request(&tcp_request_sock_ops,
                                &tcp_request_sock_ipv4_ops, sk, skb);

drop:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);


/*
 * The three way handshake has completed - we got a valid ACK -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req,
                                  struct dst_entry *dst)
{
        struct inet_request_sock *ireq;
        struct inet_sock *newinet;
        struct tcp_sock *newtp;
        struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *key;
#endif
        struct ip_options_rcu *inet_opt;

        if (sk_acceptq_is_full(sk))
                goto exit_overflow;

        newsk = tcp_create_openreq_child(sk, req, skb);
        if (!newsk)
                goto exit_nonewsk;

        newsk->sk_gso_type = SKB_GSO_TCPV4;
        inet_sk_rx_dst_set(newsk, skb);

        newtp                 = tcp_sk(newsk);
        newinet               = inet_sk(newsk);
        ireq                  = inet_rsk(req);
        sk_daddr_set(newsk, ireq->ir_rmt_addr);
        sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
        newinet->inet_saddr           = ireq->ir_loc_addr;
        inet_opt              = ireq->opt;
        rcu_assign_pointer(newinet->inet_opt, inet_opt);
        ireq->opt             = NULL;
        newinet->mc_index     = inet_iif(skb);
        newinet->mc_ttl       = ip_hdr(skb)->ttl;
        newinet->rcv_tos      = ip_hdr(skb)->tos;
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (inet_opt)
                inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
        newinet->inet_id = newtp->write_seq ^ jiffies;

        if (!dst) {
                dst = inet_csk_route_child_sock(sk, newsk, req);
                if (!dst)
                        goto put_and_exit;
        } else {
                /* syncookie case : see end of cookie_v4_check() */
        }
        sk_setup_caps(newsk, dst);

        tcp_ca_openreq_child(newsk, dst);

        tcp_sync_mss(newsk, dst_mtu(dst));
        newtp->advmss = dst_metric_advmss(dst);
        if (tcp_sk(sk)->rx_opt.user_mss &&
            tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
                newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

        tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
        /* Copy over the MD5 key from the original socket */
        key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
                                AF_INET);
        if (key) {
                /*
                 * We're using one, so create a matching key
                 * on the newsk structure. If we fail to get
                 * memory, then we end up not copying the key
                 * across. Shucks.
                 */
                tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
                               AF_INET, key->key, key->keylen, GFP_ATOMIC);
                sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
        }
#endif

        if (__inet_inherit_port(sk, newsk) < 0)
                goto put_and_exit;
        __inet_hash_nolisten(newsk, NULL);

        return newsk;

exit_overflow:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
        dst_release(dst);
exit:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return NULL;
put_and_exit:
        inet_csk_prepare_forced_close(newsk);
        tcp_done(newsk);
        goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
        const struct tcphdr *th = tcp_hdr(skb);

        if (!th->syn)
                sk = cookie_v4_check(sk, skb);
#endif
        return sk;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct sock *rsk;

        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                struct dst_entry *dst = sk->sk_rx_dst;

                sock_rps_save_rxhash(sk, skb);
                sk_mark_napi_id(sk, skb);
                if (dst) {
                        if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
                            !dst->ops->check(dst, 0)) {
                                dst_release(dst);
                                sk->sk_rx_dst = NULL;
                        }
                }
                tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
                return 0;
        }

        if (tcp_checksum_complete(skb))
                goto csum_err;

        if (sk->sk_state == TCP_LISTEN) {
                struct sock *nsk = tcp_v4_cookie_check(sk, skb);

                if (!nsk)
                        goto discard;
                if (nsk != sk) {
                        sock_rps_save_rxhash(nsk, skb);
                        sk_mark_napi_id(nsk, skb);
                        if (tcp_child_process(sk, nsk, skb)) {
                                rsk = nsk;
                                goto reset;
                        }
                        return 0;
                }
        } else
                sock_rps_save_rxhash(sk, skb);

        if (tcp_rcv_state_process(sk, skb)) {
                rsk = sk;
                goto reset;
        }
        return 0;

reset:
        tcp_v4_send_reset(rsk, skb);
discard:
        kfree_skb(skb);
        /* Be careful here. If this function gets more complicated and
         * gcc suffers from register pressure on the x86, sk (in %ebx)
         * might be destroyed here. This current version compiles correctly,
         * but you have been warned.
         */
        return 0;

csum_err:
        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
        goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
1426
1427 void tcp_v4_early_demux(struct sk_buff *skb)
1428 {
1429         const struct iphdr *iph;
1430         const struct tcphdr *th;
1431         struct sock *sk;
1432
1433         if (skb->pkt_type != PACKET_HOST)
1434                 return;
1435
1436         if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1437                 return;
1438
1439         iph = ip_hdr(skb);
1440         th = tcp_hdr(skb);
1441
1442         if (th->doff < sizeof(struct tcphdr) / 4)
1443                 return;
1444
1445         sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1446                                        iph->saddr, th->source,
1447                                        iph->daddr, ntohs(th->dest),
1448                                        skb->skb_iif);
1449         if (sk) {
1450                 skb->sk = sk;
1451                 skb->destructor = sock_edemux;
1452                 if (sk_fullsock(sk)) {
1453                         struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1454
1455                         if (dst)
1456                                 dst = dst_check(dst, 0);
1457                         if (dst &&
1458                             inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1459                                 skb_dst_set_noref(skb, dst);
1460                 }
1461         }
1462 }
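/* How this gets invoked (a sketch, assuming the usual IPv4 wiring): the input
 * path consults the net_protocol table before the routing decision, and
 * tcp_v4_early_demux() is registered there in net/ipv4/af_inet.c, roughly:
 *
 *	static const struct net_protocol tcp_protocol = {
 *		.early_demux	= tcp_v4_early_demux,
 *		.handler	= tcp_v4_rcv,
 *		...
 *	};
 */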
1463
1464 /* The packet is added to the VJ-style prequeue for processing in
1465  * process context, if a reader task is waiting. Apparently, this
1466  * exciting idea (VJ's mail "Re: query about TCP header on tcp-ip"
1467  * of 07 Sep 93) failed somewhere. Latency? Burstiness? Well, at
1468  * least now we will see why it failed. 8)8)              --ANK
1469  *
1470  */
1471 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1472 {
1473         struct tcp_sock *tp = tcp_sk(sk);
1474
1475         if (sysctl_tcp_low_latency || !tp->ucopy.task)
1476                 return false;
1477
1478         if (skb->len <= tcp_hdrlen(skb) &&
1479             skb_queue_len(&tp->ucopy.prequeue) == 0)
1480                 return false;
1481
1482         /* Before escaping the RCU-protected region, we need to take care
1483          * of the skb dst. The prequeue is only enabled for established
1484          * sockets, and for those we might need the skb dst only to set
1485          * sk->sk_rx_dst. Instead of doing a full sk_rx_dst validity check
1486          * here, let's perform an optimistic one.
1487          */
1488         if (likely(sk->sk_rx_dst))
1489                 skb_dst_drop(skb);
1490         else
1491                 skb_dst_force(skb);
1492
1493         __skb_queue_tail(&tp->ucopy.prequeue, skb);
1494         tp->ucopy.memory += skb->truesize;
1495         if (tp->ucopy.memory > sk->sk_rcvbuf) {
1496                 struct sk_buff *skb1;
1497
1498                 BUG_ON(sock_owned_by_user(sk));
1499
1500                 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1501                         sk_backlog_rcv(sk, skb1);
1502                         NET_INC_STATS_BH(sock_net(sk),
1503                                          LINUX_MIB_TCPPREQUEUEDROPPED);
1504                 }
1505
1506                 tp->ucopy.memory = 0;
1507         } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1508                 wake_up_interruptible_sync_poll(sk_sleep(sk),
1509                                            POLLIN | POLLRDNORM | POLLRDBAND);
1510                 if (!inet_csk_ack_scheduled(sk))
1511                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1512                                                   (3 * tcp_rto_min(sk)) / 4,
1513                                                   TCP_RTO_MAX);
1514         }
1515         return true;
1516 }
1517 EXPORT_SYMBOL(tcp_prequeue);
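/* Tuning note (hedged): the sysctl_tcp_low_latency test above means
 * prequeueing can be disabled system-wide from userspace, e.g.:
 *
 *	echo 1 > /proc/sys/net/ipv4/tcp_low_latency
 *
 * tp->ucopy.task is non-NULL only while a reader is blocked in tcp_recvmsg(),
 * so with no waiting reader the segment takes the ordinary
 * backlog/receive-queue path instead.
 */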
1518
1519 /*
1520  *      From tcp_input.c
1521  */
1522
1523 int tcp_v4_rcv(struct sk_buff *skb)
1524 {
1525         const struct iphdr *iph;
1526         const struct tcphdr *th;
1527         struct sock *sk;
1528         int ret;
1529         struct net *net = dev_net(skb->dev);
1530
1531         if (skb->pkt_type != PACKET_HOST)
1532                 goto discard_it;
1533
1534         /* Count it even if it's bad */
1535         TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1536
1537         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1538                 goto discard_it;
1539
1540         th = tcp_hdr(skb);
1541
1542         if (th->doff < sizeof(struct tcphdr) / 4)
1543                 goto bad_packet;
1544         if (!pskb_may_pull(skb, th->doff * 4))
1545                 goto discard_it;
1546
1547         /* An explanation is required here, I think.
1548          * Packet length and doff are validated by header prediction,
1549          * provided the case of th->doff == 0 is eliminated.
1550          * So, we defer those checks. */
1551
1552         if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1553                 goto csum_error;
1554
1555         th = tcp_hdr(skb);
1556         iph = ip_hdr(skb);
1557         /* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB();
1558          * barrier() makes sure the compiler won't play fool^Waliasing games.
1559          */
1560         memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1561                 sizeof(struct inet_skb_parm));
1562         barrier();
1563
1564         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1565         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1566                                     skb->len - th->doff * 4);
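	/* Note: SYN and FIN each occupy one unit of sequence space; that is
	 * why th->syn and th->fin are summed into end_seq along with the
	 * payload length above.
	 */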
1567         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1568         TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1569         TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1570         TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1571         TCP_SKB_CB(skb)->sacked  = 0;
1572
1573 lookup:
1574         sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1575         if (!sk)
1576                 goto no_tcp_socket;
1577
1578 process:
1579         if (sk->sk_state == TCP_TIME_WAIT)
1580                 goto do_time_wait;
1581
1582         if (sk->sk_state == TCP_NEW_SYN_RECV) {
1583                 struct request_sock *req = inet_reqsk(sk);
1584                 struct sock *nsk = NULL;
1585
1586                 sk = req->rsk_listener;
1587                 if (tcp_v4_inbound_md5_hash(sk, skb))
1588                         nsk = NULL; /* drop below; releases req, not sk */
1589                 else if (likely(sk->sk_state == TCP_LISTEN)) {
1590                         nsk = tcp_check_req(sk, skb, req, false);
1591                 } else {
1592                         inet_csk_reqsk_queue_drop_and_put(sk, req);
1593                         goto lookup;
1594                 }
1595                 if (!nsk) {
1596                         reqsk_put(req);
1597                         goto discard_it;
1598                 }
1599                 if (nsk == sk) {
1600                         sock_hold(sk);
1601                         reqsk_put(req);
1602                 } else if (tcp_child_process(sk, nsk, skb)) {
1603                         tcp_v4_send_reset(nsk, skb);
1604                         goto discard_it;
1605                 } else {
1606                         return 0;
1607                 }
1608         }
1609         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1610                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1611                 goto discard_and_relse;
1612         }
1613
1614         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1615                 goto discard_and_relse;
1616
1617         if (tcp_v4_inbound_md5_hash(sk, skb))
1618                 goto discard_and_relse;
1619
1620         nf_reset(skb);
1621
1622         if (sk_filter(sk, skb))
1623                 goto discard_and_relse;
1624
1625         skb->dev = NULL;
1626
1627         if (sk->sk_state == TCP_LISTEN) {
1628                 ret = tcp_v4_do_rcv(sk, skb);
1629                 goto put_and_return;
1630         }
1631
1632         sk_incoming_cpu_update(sk);
1633
1634         bh_lock_sock_nested(sk);
1635         tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1636         ret = 0;
1637         if (!sock_owned_by_user(sk)) {
1638                 if (!tcp_prequeue(sk, skb))
1639                         ret = tcp_v4_do_rcv(sk, skb);
1640         } else if (unlikely(sk_add_backlog(sk, skb,
1641                                            sk->sk_rcvbuf + sk->sk_sndbuf))) {
1642                 bh_unlock_sock(sk);
1643                 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1644                 goto discard_and_relse;
1645         }
1646         bh_unlock_sock(sk);
1647
1648 put_and_return:
1649         sock_put(sk);
1650
1651         return ret;
1652
1653 no_tcp_socket:
1654         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1655                 goto discard_it;
1656
1657         if (tcp_checksum_complete(skb)) {
1658 csum_error:
1659                 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1660 bad_packet:
1661                 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1662         } else {
1663                 tcp_v4_send_reset(NULL, skb);
1664         }
1665
1666 discard_it:
1667         /* Discard frame. */
1668         kfree_skb(skb);
1669         return 0;
1670
1671 discard_and_relse:
1672         sock_put(sk);
1673         goto discard_it;
1674
1675 do_time_wait:
1676         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1677                 inet_twsk_put(inet_twsk(sk));
1678                 goto discard_it;
1679         }
1680
1681         if (tcp_checksum_complete(skb)) {
1682                 inet_twsk_put(inet_twsk(sk));
1683                 goto csum_error;
1684         }
1685         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1686         case TCP_TW_SYN: {
1687                 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1688                                                         &tcp_hashinfo,
1689                                                         iph->saddr, th->source,
1690                                                         iph->daddr, th->dest,
1691                                                         inet_iif(skb));
1692                 if (sk2) {
1693                         inet_twsk_deschedule_put(inet_twsk(sk));
1694                         sk = sk2;
1695                         goto process;
1696                 }
1697                 /* Fall through to ACK */
1698         }
1699         case TCP_TW_ACK:
1700                 tcp_v4_timewait_ack(sk, skb);
1701                 break;
1702         case TCP_TW_RST:
1703                 goto no_tcp_socket;
1704         case TCP_TW_SUCCESS:;
1705         }
1706         goto discard_it;
1707 }
1708
1709 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1710         .twsk_obj_size  = sizeof(struct tcp_timewait_sock),
1711         .twsk_unique    = tcp_twsk_unique,
1712         .twsk_destructor= tcp_twsk_destructor,
1713 };
1714
1715 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1716 {
1717         struct dst_entry *dst = skb_dst(skb);
1718
1719         if (dst) {
1720                 dst_hold(dst);
1721                 sk->sk_rx_dst = dst;
1722                 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1723         }
1724 }
1725 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1726
1727 const struct inet_connection_sock_af_ops ipv4_specific = {
1728         .queue_xmit        = ip_queue_xmit,
1729         .send_check        = tcp_v4_send_check,
1730         .rebuild_header    = inet_sk_rebuild_header,
1731         .sk_rx_dst_set     = inet_sk_rx_dst_set,
1732         .conn_request      = tcp_v4_conn_request,
1733         .syn_recv_sock     = tcp_v4_syn_recv_sock,
1734         .net_header_len    = sizeof(struct iphdr),
1735         .setsockopt        = ip_setsockopt,
1736         .getsockopt        = ip_getsockopt,
1737         .addr2sockaddr     = inet_csk_addr2sockaddr,
1738         .sockaddr_len      = sizeof(struct sockaddr_in),
1739         .bind_conflict     = inet_csk_bind_conflict,
1740 #ifdef CONFIG_COMPAT
1741         .compat_setsockopt = compat_ip_setsockopt,
1742         .compat_getsockopt = compat_ip_getsockopt,
1743 #endif
1744         .mtu_reduced       = tcp_v4_mtu_reduced,
1745 };
1746 EXPORT_SYMBOL(ipv4_specific);
1747
1748 #ifdef CONFIG_TCP_MD5SIG
1749 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1750         .md5_lookup             = tcp_v4_md5_lookup,
1751         .calc_md5_hash          = tcp_v4_md5_hash_skb,
1752         .md5_parse              = tcp_v4_parse_md5_keys,
1753 };
1754 #endif
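/* Userspace-facing sketch (hedged): tcp_v4_parse_md5_keys() above services
 * the TCP_MD5SIG socket option; configuring a key for a peer looks roughly
 * like the following (peer is an illustrative struct sockaddr_in):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *
 *	memcpy(&md5.tcpm_addr, &peer, sizeof(peer));
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */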
1755
1756 /* NOTE: A lot of things are set to zero explicitly by the call to
1757  *       sk_alloc(), so they need not be done here.
1758  */
1759 static int tcp_v4_init_sock(struct sock *sk)
1760 {
1761         struct inet_connection_sock *icsk = inet_csk(sk);
1762
1763         tcp_init_sock(sk);
1764
1765         icsk->icsk_af_ops = &ipv4_specific;
1766
1767 #ifdef CONFIG_TCP_MD5SIG
1768         tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1769 #endif
1770
1771         return 0;
1772 }
1773
1774 void tcp_v4_destroy_sock(struct sock *sk)
1775 {
1776         struct tcp_sock *tp = tcp_sk(sk);
1777
1778         tcp_clear_xmit_timers(sk);
1779
1780         tcp_cleanup_congestion_control(sk);
1781
1782         /* Clean up the write buffer. */
1783         tcp_write_queue_purge(sk);
1784
1785         /* Cleans up our, hopefully empty, out_of_order_queue. */
1786         __skb_queue_purge(&tp->out_of_order_queue);
1787
1788 #ifdef CONFIG_TCP_MD5SIG
1789         /* Clean up the MD5 key list, if any */
1790         if (tp->md5sig_info) {
1791                 tcp_clear_md5_list(sk);
1792                 kfree_rcu(tp->md5sig_info, rcu);
1793                 tp->md5sig_info = NULL;
1794         }
1795 #endif
1796
1797         /* Clean the prequeue; it really should be empty */
1798         __skb_queue_purge(&tp->ucopy.prequeue);
1799
1800         /* Clean up a referenced TCP bind bucket. */
1801         if (inet_csk(sk)->icsk_bind_hash)
1802                 inet_put_port(sk);
1803
1804         BUG_ON(tp->fastopen_rsk);
1805
1806         /* If the socket was aborted during a connect operation */
1807         tcp_free_fastopen_req(tp);
1808         tcp_saved_syn_free(tp);
1809
1810         sk_sockets_allocated_dec(sk);
1811         sock_release_memcg(sk);
1812 }
1813 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1814
1815 #ifdef CONFIG_PROC_FS
1816 /* Proc filesystem TCP sock list dumping. */
1817
1818 /*
1819  * Get the next listener socket following cur.  If cur is NULL, get the
1820  * first socket starting from the bucket given in st->bucket; when
1821  * st->bucket is zero, the very first socket in the hash table is returned.
1822  */
1823 static void *listening_get_next(struct seq_file *seq, void *cur)
1824 {
1825         struct inet_connection_sock *icsk;
1826         struct hlist_nulls_node *node;
1827         struct sock *sk = cur;
1828         struct inet_listen_hashbucket *ilb;
1829         struct tcp_iter_state *st = seq->private;
1830         struct net *net = seq_file_net(seq);
1831
1832         if (!sk) {
1833                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1834                 spin_lock_bh(&ilb->lock);
1835                 sk = sk_nulls_head(&ilb->head);
1836                 st->offset = 0;
1837                 goto get_sk;
1838         }
1839         ilb = &tcp_hashinfo.listening_hash[st->bucket];
1840         ++st->num;
1841         ++st->offset;
1842
1843         sk = sk_nulls_next(sk);
1844 get_sk:
1845         sk_nulls_for_each_from(sk, node) {
1846                 if (!net_eq(sock_net(sk), net))
1847                         continue;
1848                 if (sk->sk_family == st->family) {
1849                         cur = sk;
1850                         goto out;
1851                 }
1852                 icsk = inet_csk(sk);
1853         }
1854         spin_unlock_bh(&ilb->lock);
1855         st->offset = 0;
1856         if (++st->bucket < INET_LHTABLE_SIZE) {
1857                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1858                 spin_lock_bh(&ilb->lock);
1859                 sk = sk_nulls_head(&ilb->head);
1860                 goto get_sk;
1861         }
1862         cur = NULL;
1863 out:
1864         return cur;
1865 }
1866
1867 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1868 {
1869         struct tcp_iter_state *st = seq->private;
1870         void *rc;
1871
1872         st->bucket = 0;
1873         st->offset = 0;
1874         rc = listening_get_next(seq, NULL);
1875
1876         while (rc && *pos) {
1877                 rc = listening_get_next(seq, rc);
1878                 --*pos;
1879         }
1880         return rc;
1881 }
1882
1883 static inline bool empty_bucket(const struct tcp_iter_state *st)
1884 {
1885         return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1886 }
1887
1888 /*
1889  * Get the first established socket, starting from the bucket given in
1890  * st->bucket.  If st->bucket is zero, the very first socket in the hash is returned.
1891  */
1892 static void *established_get_first(struct seq_file *seq)
1893 {
1894         struct tcp_iter_state *st = seq->private;
1895         struct net *net = seq_file_net(seq);
1896         void *rc = NULL;
1897
1898         st->offset = 0;
1899         for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1900                 struct sock *sk;
1901                 struct hlist_nulls_node *node;
1902                 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1903
1904                 /* Lockless fast path for the common case of empty buckets */
1905                 if (empty_bucket(st))
1906                         continue;
1907
1908                 spin_lock_bh(lock);
1909                 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1910                         if (sk->sk_family != st->family ||
1911                             !net_eq(sock_net(sk), net)) {
1912                                 continue;
1913                         }
1914                         rc = sk;
1915                         goto out;
1916                 }
1917                 spin_unlock_bh(lock);
1918         }
1919 out:
1920         return rc;
1921 }
1922
1923 static void *established_get_next(struct seq_file *seq, void *cur)
1924 {
1925         struct sock *sk = cur;
1926         struct hlist_nulls_node *node;
1927         struct tcp_iter_state *st = seq->private;
1928         struct net *net = seq_file_net(seq);
1929
1930         ++st->num;
1931         ++st->offset;
1932
1933         sk = sk_nulls_next(sk);
1934
1935         sk_nulls_for_each_from(sk, node) {
1936                 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1937                         return sk;
1938         }
1939
1940         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1941         ++st->bucket;
1942         return established_get_first(seq);
1943 }
1944
1945 static void *established_get_idx(struct seq_file *seq, loff_t pos)
1946 {
1947         struct tcp_iter_state *st = seq->private;
1948         void *rc;
1949
1950         st->bucket = 0;
1951         rc = established_get_first(seq);
1952
1953         while (rc && pos) {
1954                 rc = established_get_next(seq, rc);
1955                 --pos;
1956         }
1957         return rc;
1958 }
1959
1960 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1961 {
1962         void *rc;
1963         struct tcp_iter_state *st = seq->private;
1964
1965         st->state = TCP_SEQ_STATE_LISTENING;
1966         rc        = listening_get_idx(seq, &pos);
1967
1968         if (!rc) {
1969                 st->state = TCP_SEQ_STATE_ESTABLISHED;
1970                 rc        = established_get_idx(seq, pos);
1971         }
1972
1973         return rc;
1974 }
1975
1976 static void *tcp_seek_last_pos(struct seq_file *seq)
1977 {
1978         struct tcp_iter_state *st = seq->private;
1979         int offset = st->offset;
1980         int orig_num = st->num;
1981         void *rc = NULL;
1982
1983         switch (st->state) {
1984         case TCP_SEQ_STATE_LISTENING:
1985                 if (st->bucket >= INET_LHTABLE_SIZE)
1986                         break;
1987                 st->state = TCP_SEQ_STATE_LISTENING;
1988                 rc = listening_get_next(seq, NULL);
1989                 while (offset-- && rc)
1990                         rc = listening_get_next(seq, rc);
1991                 if (rc)
1992                         break;
1993                 st->bucket = 0;
1994                 st->state = TCP_SEQ_STATE_ESTABLISHED;
1995                 /* Fallthrough */
1996         case TCP_SEQ_STATE_ESTABLISHED:
1997                 if (st->bucket > tcp_hashinfo.ehash_mask)
1998                         break;
1999                 rc = established_get_first(seq);
2000                 while (offset-- && rc)
2001                         rc = established_get_next(seq, rc);
2002         }
2003
2004         st->num = orig_num;
2005
2006         return rc;
2007 }
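/* Why the bookkeeping above matters (an illustrative note): the seq_file core
 * calls tcp_seq_start() once per read(2) chunk of /proc/net/tcp with the
 * running position, so st->bucket and st->offset let the iterator resume
 * where the previous chunk stopped instead of rescanning every hash bucket
 * from zero, e.g.:
 *
 *	cat /proc/net/tcp	-- many short reads, but one pass overall
 */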
2008
2009 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2010 {
2011         struct tcp_iter_state *st = seq->private;
2012         void *rc;
2013
2014         if (*pos && *pos == st->last_pos) {
2015                 rc = tcp_seek_last_pos(seq);
2016                 if (rc)
2017                         goto out;
2018         }
2019
2020         st->state = TCP_SEQ_STATE_LISTENING;
2021         st->num = 0;
2022         st->bucket = 0;
2023         st->offset = 0;
2024         rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2025
2026 out:
2027         st->last_pos = *pos;
2028         return rc;
2029 }
2030
2031 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2032 {
2033         struct tcp_iter_state *st = seq->private;
2034         void *rc = NULL;
2035
2036         if (v == SEQ_START_TOKEN) {
2037                 rc = tcp_get_idx(seq, 0);
2038                 goto out;
2039         }
2040
2041         switch (st->state) {
2042         case TCP_SEQ_STATE_LISTENING:
2043                 rc = listening_get_next(seq, v);
2044                 if (!rc) {
2045                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2046                         st->bucket = 0;
2047                         st->offset = 0;
2048                         rc        = established_get_first(seq);
2049                 }
2050                 break;
2051         case TCP_SEQ_STATE_ESTABLISHED:
2052                 rc = established_get_next(seq, v);
2053                 break;
2054         }
2055 out:
2056         ++*pos;
2057         st->last_pos = *pos;
2058         return rc;
2059 }
2060
2061 static void tcp_seq_stop(struct seq_file *seq, void *v)
2062 {
2063         struct tcp_iter_state *st = seq->private;
2064
2065         switch (st->state) {
2066         case TCP_SEQ_STATE_LISTENING:
2067                 if (v != SEQ_START_TOKEN)
2068                         spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2069                 break;
2070         case TCP_SEQ_STATE_ESTABLISHED:
2071                 if (v)
2072                         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2073                 break;
2074         }
2075 }
2076
2077 int tcp_seq_open(struct inode *inode, struct file *file)
2078 {
2079         struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2080         struct tcp_iter_state *s;
2081         int err;
2082
2083         err = seq_open_net(inode, file, &afinfo->seq_ops,
2084                           sizeof(struct tcp_iter_state));
2085         if (err < 0)
2086                 return err;
2087
2088         s = ((struct seq_file *)file->private_data)->private;
2089         s->family               = afinfo->family;
2090         s->last_pos             = 0;
2091         return 0;
2092 }
2093 EXPORT_SYMBOL(tcp_seq_open);
2094
2095 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2096 {
2097         int rc = 0;
2098         struct proc_dir_entry *p;
2099
2100         afinfo->seq_ops.start           = tcp_seq_start;
2101         afinfo->seq_ops.next            = tcp_seq_next;
2102         afinfo->seq_ops.stop            = tcp_seq_stop;
2103
2104         p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2105                              afinfo->seq_fops, afinfo);
2106         if (!p)
2107                 rc = -ENOMEM;
2108         return rc;
2109 }
2110 EXPORT_SYMBOL(tcp_proc_register);
2111
2112 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2113 {
2114         remove_proc_entry(afinfo->name, net->proc_net);
2115 }
2116 EXPORT_SYMBOL(tcp_proc_unregister);
2117
2118 static void get_openreq4(const struct request_sock *req,
2119                          struct seq_file *f, int i)
2120 {
2121         const struct inet_request_sock *ireq = inet_rsk(req);
2122         long delta = req->rsk_timer.expires - jiffies;
2123
2124         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2125                 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2126                 i,
2127                 ireq->ir_loc_addr,
2128                 ireq->ir_num,
2129                 ireq->ir_rmt_addr,
2130                 ntohs(ireq->ir_rmt_port),
2131                 TCP_SYN_RECV,
2132                 0, 0, /* could print option size, but that is af-dependent. */
2133                 1,    /* timers active (only the expire timer) */
2134                 jiffies_delta_to_clock_t(delta),
2135                 req->num_timeout,
2136                 from_kuid_munged(seq_user_ns(f),
2137                                  sock_i_uid(req->rsk_listener)),
2138                 0,  /* non-standard timer */
2139                 0, /* open_requests have no inode */
2140                 0,
2141                 req);
2142 }
2143
2144 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2145 {
2146         int timer_active;
2147         unsigned long timer_expires;
2148         const struct tcp_sock *tp = tcp_sk(sk);
2149         const struct inet_connection_sock *icsk = inet_csk(sk);
2150         const struct inet_sock *inet = inet_sk(sk);
2151         const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2152         __be32 dest = inet->inet_daddr;
2153         __be32 src = inet->inet_rcv_saddr;
2154         __u16 destp = ntohs(inet->inet_dport);
2155         __u16 srcp = ntohs(inet->inet_sport);
2156         int rx_queue;
2157
2158         if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2159             icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2160             icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2161                 timer_active    = 1;
2162                 timer_expires   = icsk->icsk_timeout;
2163         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2164                 timer_active    = 4;
2165                 timer_expires   = icsk->icsk_timeout;
2166         } else if (timer_pending(&sk->sk_timer)) {
2167                 timer_active    = 2;
2168                 timer_expires   = sk->sk_timer.expires;
2169         } else {
2170                 timer_active    = 0;
2171                 timer_expires = jiffies;
2172         }
2173
2174         if (sk->sk_state == TCP_LISTEN)
2175                 rx_queue = sk->sk_ack_backlog;
2176         else
2177                 /*
2178                  * Because we don't lock the socket, we might find a transient negative value.
2179                  */
2180                 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2181
2182         seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2183                         "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2184                 i, src, srcp, dest, destp, sk->sk_state,
2185                 tp->write_seq - tp->snd_una,
2186                 rx_queue,
2187                 timer_active,
2188                 jiffies_delta_to_clock_t(timer_expires - jiffies),
2189                 icsk->icsk_retransmits,
2190                 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2191                 icsk->icsk_probes_out,
2192                 sock_i_ino(sk),
2193                 atomic_read(&sk->sk_refcnt), sk,
2194                 jiffies_to_clock_t(icsk->icsk_rto),
2195                 jiffies_to_clock_t(icsk->icsk_ack.ato),
2196                 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2197                 tp->snd_cwnd,
2198                 sk->sk_state == TCP_LISTEN ?
2199                     (fastopenq ? fastopenq->max_qlen : 0) :
2200                     (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2201 }
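/* Example of one rendered row (illustrative values only; field order as in
 * the seq_printf() above): a socket listening on 127.0.0.1:8080 could appear
 * in /proc/net/tcp as
 *
 *	0: 0100007F:1F90 00000000:0000 0A 00000000:00000000 00:00000000
 *	   00000000  1000 0 12345 1 ffff8800b6d61c00 100 0 0 10 0
 *
 * where 0100007F:1F90 is 127.0.0.1:8080 in hex and state 0A is TCP_LISTEN.
 * (The real output is a single line; it is wrapped here for readability.)
 */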
2202
2203 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2204                                struct seq_file *f, int i)
2205 {
2206         long delta = tw->tw_timer.expires - jiffies;
2207         __be32 dest, src;
2208         __u16 destp, srcp;
2209
2210         dest  = tw->tw_daddr;
2211         src   = tw->tw_rcv_saddr;
2212         destp = ntohs(tw->tw_dport);
2213         srcp  = ntohs(tw->tw_sport);
2214
2215         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2216                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2217                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2218                 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2219                 atomic_read(&tw->tw_refcnt), tw);
2220 }
2221
2222 #define TMPSZ 150
2223
2224 static int tcp4_seq_show(struct seq_file *seq, void *v)
2225 {
2226         struct tcp_iter_state *st;
2227         struct sock *sk = v;
2228
2229         seq_setwidth(seq, TMPSZ - 1);
2230         if (v == SEQ_START_TOKEN) {
2231                 seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2232                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2233                            "inode");
2234                 goto out;
2235         }
2236         st = seq->private;
2237
2238         if (sk->sk_state == TCP_TIME_WAIT)
2239                 get_timewait4_sock(v, seq, st->num);
2240         else if (sk->sk_state == TCP_NEW_SYN_RECV)
2241                 get_openreq4(v, seq, st->num);
2242         else
2243                 get_tcp4_sock(v, seq, st->num);
2244 out:
2245         seq_pad(seq, '\n');
2246         return 0;
2247 }
2248
2249 static const struct file_operations tcp_afinfo_seq_fops = {
2250         .owner   = THIS_MODULE,
2251         .open    = tcp_seq_open,
2252         .read    = seq_read,
2253         .llseek  = seq_lseek,
2254         .release = seq_release_net
2255 };
2256
2257 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2258         .name           = "tcp",
2259         .family         = AF_INET,
2260         .seq_fops       = &tcp_afinfo_seq_fops,
2261         .seq_ops        = {
2262                 .show           = tcp4_seq_show,
2263         },
2264 };
2265
2266 static int __net_init tcp4_proc_init_net(struct net *net)
2267 {
2268         return tcp_proc_register(net, &tcp4_seq_afinfo);
2269 }
2270
2271 static void __net_exit tcp4_proc_exit_net(struct net *net)
2272 {
2273         tcp_proc_unregister(net, &tcp4_seq_afinfo);
2274 }
2275
2276 static struct pernet_operations tcp4_net_ops = {
2277         .init = tcp4_proc_init_net,
2278         .exit = tcp4_proc_exit_net,
2279 };
2280
2281 int __init tcp4_proc_init(void)
2282 {
2283         return register_pernet_subsys(&tcp4_net_ops);
2284 }
2285
2286 void tcp4_proc_exit(void)
2287 {
2288         unregister_pernet_subsys(&tcp4_net_ops);
2289 }
2290 #endif /* CONFIG_PROC_FS */
2291
2292 struct proto tcp_prot = {
2293         .name                   = "TCP",
2294         .owner                  = THIS_MODULE,
2295         .close                  = tcp_close,
2296         .connect                = tcp_v4_connect,
2297         .disconnect             = tcp_disconnect,
2298         .accept                 = inet_csk_accept,
2299         .ioctl                  = tcp_ioctl,
2300         .init                   = tcp_v4_init_sock,
2301         .destroy                = tcp_v4_destroy_sock,
2302         .shutdown               = tcp_shutdown,
2303         .setsockopt             = tcp_setsockopt,
2304         .getsockopt             = tcp_getsockopt,
2305         .recvmsg                = tcp_recvmsg,
2306         .sendmsg                = tcp_sendmsg,
2307         .sendpage               = tcp_sendpage,
2308         .backlog_rcv            = tcp_v4_do_rcv,
2309         .release_cb             = tcp_release_cb,
2310         .hash                   = inet_hash,
2311         .unhash                 = inet_unhash,
2312         .get_port               = inet_csk_get_port,
2313         .enter_memory_pressure  = tcp_enter_memory_pressure,
2314         .stream_memory_free     = tcp_stream_memory_free,
2315         .sockets_allocated      = &tcp_sockets_allocated,
2316         .orphan_count           = &tcp_orphan_count,
2317         .memory_allocated       = &tcp_memory_allocated,
2318         .memory_pressure        = &tcp_memory_pressure,
2319         .sysctl_mem             = sysctl_tcp_mem,
2320         .sysctl_wmem            = sysctl_tcp_wmem,
2321         .sysctl_rmem            = sysctl_tcp_rmem,
2322         .max_header             = MAX_TCP_HEADER,
2323         .obj_size               = sizeof(struct tcp_sock),
2324         .slab_flags             = SLAB_DESTROY_BY_RCU,
2325         .twsk_prot              = &tcp_timewait_sock_ops,
2326         .rsk_prot               = &tcp_request_sock_ops,
2327         .h.hashinfo             = &tcp_hashinfo,
2328         .no_autobind            = true,
2329 #ifdef CONFIG_COMPAT
2330         .compat_setsockopt      = compat_tcp_setsockopt,
2331         .compat_getsockopt      = compat_tcp_getsockopt,
2332 #endif
2333 #ifdef CONFIG_MEMCG_KMEM
2334         .init_cgroup            = tcp_init_cgroup,
2335         .destroy_cgroup         = tcp_destroy_cgroup,
2336         .proto_cgroup           = tcp_proto_cgroup,
2337 #endif
2338 };
2339 EXPORT_SYMBOL(tcp_prot);
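/* Where this lands (a sketch, assuming the standard IPv4 bring-up): at boot,
 * inet_init() in net/ipv4/af_inet.c registers the proto and wires it up to
 * SOCK_STREAM, roughly:
 *
 *	rc = proto_register(&tcp_prot, 1);	-- 1: allocate slab caches
 *	...
 *	inet_register_protosw(...);		-- maps SOCK_STREAM/IPPROTO_TCP
 */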
2340
2341 static void __net_exit tcp_sk_exit(struct net *net)
2342 {
2343         int cpu;
2344
2345         for_each_possible_cpu(cpu)
2346                 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2347         free_percpu(net->ipv4.tcp_sk);
2348 }
2349
2350 static int __net_init tcp_sk_init(struct net *net)
2351 {
2352         int res, cpu;
2353
2354         net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2355         if (!net->ipv4.tcp_sk)
2356                 return -ENOMEM;
2357
2358         for_each_possible_cpu(cpu) {
2359                 struct sock *sk;
2360
2361                 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2362                                            IPPROTO_TCP, net);
2363                 if (res)
2364                         goto fail;
2365                 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2366         }
2367
2368         net->ipv4.sysctl_tcp_ecn = 2;
2369         net->ipv4.sysctl_tcp_ecn_fallback = 1;
2370
2371         net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2372         net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2373         net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2374
2375         return 0;
2376 fail:
2377         tcp_sk_exit(net);
2378
2379         return res;
2380 }
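/* Consumer sketch (hedged): the per-CPU control sockets created above are
 * what the stack transmits RSTs and TIME_WAIT ACKs on without owning a real
 * connection, along the lines of:
 *
 *	struct sock *ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
 *
 *	ip_send_unicast_reply(ctl_sk, ...);	-- see tcp_v4_send_reset()
 */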
2381
2382 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2383 {
2384         inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2385 }
2386
2387 static struct pernet_operations __net_initdata tcp_sk_ops = {
2388        .init       = tcp_sk_init,
2389        .exit       = tcp_sk_exit,
2390        .exit_batch = tcp_sk_exit_batch,
2391 };
2392
2393 void __init tcp_v4_init(void)
2394 {
2395         inet_hashinfo_init(&tcp_hashinfo);
2396         if (register_pernet_subsys(&tcp_sk_ops))
2397                 panic("Failed to create the TCP control socket.\n");
2398 }