/* Imported from the firefly-linux-kernel-4.4.55 tree
 * (merge of linux-linaro-lsk-v4.4 into linux-linaro-lsk-v4.4-android):
 * net/ipv6/tcp_ipv6.c
 */
1 /*
2  *      TCP over IPv6
3  *      Linux INET6 implementation
4  *
5  *      Authors:
6  *      Pedro Roque             <roque@di.fc.ul.pt>
7  *
8  *      Based on:
9  *      linux/net/ipv4/tcp.c
10  *      linux/net/ipv4/tcp_input.c
11  *      linux/net/ipv4/tcp_output.c
12  *
13  *      Fixes:
14  *      Hideaki YOSHIFUJI       :       sin6_scope_id support
15  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
16  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
17  *                                      a single port at the same time.
18  *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
19  *
20  *      This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/tcp_memcontrol.h>
65 #include <net/busy_poll.h>
66
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
69
70 #include <linux/crypto.h>
71 #include <linux/scatterlist.h>
72
73 static void     tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
74 static void     tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
75                                       struct request_sock *req);
76
77 static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78
79 static const struct inet_connection_sock_af_ops ipv6_mapped;
80 static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
/* Stub used when TCP-MD5 support is compiled out: no key ever matches,
 * so the MD5 paths below become no-ops.
 */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
91
92 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
93 {
94         struct dst_entry *dst = skb_dst(skb);
95
96         if (dst && dst_hold_safe(dst)) {
97                 const struct rt6_info *rt = (const struct rt6_info *)dst;
98
99                 sk->sk_rx_dst = dst;
100                 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
101                 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
102         }
103 }
104
105 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
106 {
107         return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
108                                             ipv6_hdr(skb)->saddr.s6_addr32,
109                                             tcp_hdr(skb)->dest,
110                                             tcp_hdr(skb)->source);
111 }
112
/* Active open (connect()) for a TCP/IPv6 socket.
 *
 * Validates the destination, handles the v4-mapped fallback to
 * tcp_v4_connect(), performs the route lookup and source-address
 * selection, binds a local port and finally sends the SYN via
 * tcp_connect().  Returns 0 on success or a negative errno.
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	/* Caller-supplied flow label: if one is set, it must refer to an
	 * existing flow-label lease on this socket.
	 */
	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	/* Reconnecting to a different peer: stale timestamp state and the
	 * old write sequence must not leak into the new connection.
	 */
	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Switch the socket onto the v4-mapped operations before
		 * delegating to the IPv4 connect path ...
		 */
		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			/* ... and restore the native IPv6 ops if it fails. */
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	/* Build the flow description for the route lookup. */
	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sock_i_uid(sk);

	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	/* No source address bound yet: adopt the one route selection chose. */
	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	/* Pick the ISN unless one was carried over (repair mode or an
	 * earlier attempt on the same 4-tuple).
	 */
	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
303
304 static void tcp_v6_mtu_reduced(struct sock *sk)
305 {
306         struct dst_entry *dst;
307
308         if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
309                 return;
310
311         dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
312         if (!dst)
313                 return;
314
315         if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
316                 tcp_sync_mss(sk, dst_mtu(dst));
317                 tcp_simple_retransmit(sk);
318         }
319 }
320
/* ICMPv6 error handler for TCP.
 *
 * Locates the socket the offending segment belonged to and reacts:
 * NDISC redirects update the cached route, PKT_TOOBIG feeds PMTU
 * discovery (deferred via tsq_flags when the socket is owned by user
 * context), and other errors are reported to the socket -- aborting a
 * pending connect in SYN_SENT/SYN_RECV.
 */
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	/* Drop ICMPs whose embedded hop limit is below the socket's
	 * configured minimum (IPV6_MINHOPCOUNT anti-spoofing check).
	 */
	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	/* The quoted sequence number must fall inside the send window,
	 * otherwise the ICMP cannot refer to data we actually sent.
	 */
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}


	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
438
439
/* Build and transmit a SYN-ACK for request @req.
 *
 * Grabs a route if the caller did not pass one, fills in the TCP
 * checksum, optionally reflects the client's flow label (IPV6_FLOWINFO
 * repflow), and sends via ip6_xmit() under RCU (np->opt is
 * RCU-protected).  Returns 0 on success or a negative errno.
 */
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      bool attach_req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, attach_req);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		/* Echo the flow label from the saved SYN if requested. */
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
			       np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}
477
478
/* Drop the SYN skb reference (if any) taken by tcp_v6_init_req(). */
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}
483
484 #ifdef CONFIG_TCP_MD5SIG
/* Find the MD5 key configured on @sk for IPv6 peer @addr, or NULL. */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}
490
/* tcp_sock_af_ops hook: look up the MD5 key for @addr_sk's peer address. */
static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}
496
/* setsockopt(TCP_MD5SIG) handler: add or delete the MD5 key for a peer.
 *
 * A zero tcpm_keylen requests deletion.  v4-mapped peer addresses are
 * stored under AF_INET (using the low 32 bits) so they match traffic
 * taking the mapped-IPv4 path.  Returns 0 or a negative errno.
 */
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		/* Key removal */
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
530
/* Feed the IPv6 pseudo-header (saddr, daddr, length, protocol) for
 * @nbytes of TCP data into the running MD5 hash held in @hp.
 * Returns non-zero on crypto failure.
 */
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
548
/* Compute the TCP-MD5 signature over pseudo-header, TCP header and key
 * only (no payload) -- used for reply segments built without a full skb.
 * Writes 16 bytes into @md5_hash; returns 0 on success, or 1 with the
 * output zeroed on failure.
 */
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	/* th->doff << 2 = header length in bytes covered by the hash */
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
581
/* Compute the TCP-MD5 signature over an entire segment: pseudo-header,
 * TCP header, payload and key.  Addresses come from @sk when given
 * (established/request sockets), otherwise from the skb's IPv6 header.
 * Writes 16 bytes into @md5_hash; returns 0 on success, or 1 with the
 * output zeroed on failure.
 */
static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	/* Hash the payload, skipping the header (th->doff << 2 bytes). */
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
629
630 #endif
631
/* Validate an inbound segment against the socket's MD5 expectations.
 *
 * Returns true when the packet must be dropped: a key is configured
 * but the segment carries no MD5 option, an option is present with no
 * key configured, or the signature does not verify.  Always returns
 * false when CONFIG_TCP_MD5SIG is off.
 */
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}
675
/* Fill the IPv6-specific fields of a freshly minted request sock from
 * the incoming SYN carried in @skb.
 */
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	/* Keep a reference to the SYN skb when per-packet control data
	 * (extension headers, hop limit, flow label, ...) will be needed
	 * later; released by tcp_v6_reqsk_destructor().
	 */
	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
700
701 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
702                                           struct flowi *fl,
703                                           const struct request_sock *req,
704                                           bool *strict)
705 {
706         if (strict)
707                 *strict = true;
708         return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
709 }
710
/* Generic request-sock operations for IPv6 TCP connection requests. */
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
720
/* Address-family-specific helpers plugged into the generic TCP
 * connection-request machinery for IPv6.
 */
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	/* Largest MSS that fits an unfragmented minimum-MTU IPv6 packet */
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
};
736
/* Build and transmit a stateless reply segment (RST when @rst, else a
 * bare ACK) in response to @skb, via the per-netns control socket.
 * Source/destination are the incoming segment's swapped; timestamp and
 * MD5 options are appended when @tsecr / @key are non-zero.
 */
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	/* Size the header for the options we are going to emit. */
	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	/* Options start right after the fixed header. */
	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* Sign with swapped addresses: our saddr is skb's daddr. */
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	/* Link-local destinations need an explicit outgoing interface. */
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass the control socket to ip6_dst_lookup_flow(); the
	 * underlying function uses it only to retrieve the network
	 * namespace for the route lookup.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
835
/* Send a RST in reply to a segment that has no (valid) owning socket.
 * When MD5 signatures are configured, a segment carrying an MD5 option is
 * only answered if the hash verifies against a key we actually hold.
 */
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	/* Never send a reset in reply to a reset (RFC 793). */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			return;

		/* RCU protects the key until the response is built below. */
		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		/* Silently drop (no RST) if the segment's hash is bad. */
		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	/* RFC 793: if the offending segment had an ACK, the RST carries its
	 * ack number as our sequence; otherwise we ACK everything received.
	 */
	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	/* Also reached by straight fall-through on the success path;
	 * sk1 is only non-NULL when the listener-lookup branch ran.
	 */
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
906
907 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
908                             u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
909                             struct tcp_md5sig_key *key, u8 tclass,
910                             u32 label)
911 {
912         tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
913                              tclass, label);
914 }
915
916 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
917 {
918         struct inet_timewait_sock *tw = inet_twsk(sk);
919         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
920
921         tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
922                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
923                         tcp_time_stamp + tcptw->tw_ts_offset,
924                         tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
925                         tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
926
927         inet_twsk_put(tw);
928 }
929
930 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
931                                   struct request_sock *req)
932 {
933         /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
934          * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
935          */
936         tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
937                         tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
938                         tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
939                         tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
940                         tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
941                         0, 0);
942 }
943
944
/* With syncookies enabled, a non-SYN segment hitting a listener may be the
 * ACK that completes a cookie handshake: let cookie_v6_check() decide.
 * Otherwise (or when syncookies are compiled out) return sk unchanged.
 */
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (!tcp_hdr(skb)->syn)
		return cookie_v6_check(sk, skb);
#endif
	return sk;
}
955
956 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
957 {
958         if (skb->protocol == htons(ETH_P_IP))
959                 return tcp_v4_conn_request(sk, skb);
960
961         if (!ipv6_unicast_destination(skb))
962                 goto drop;
963
964         return tcp_conn_request(&tcp6_request_sock_ops,
965                                 &tcp_request_sock_ipv6_ops, sk, skb);
966
967 drop:
968         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
969         return 0; /* don't send reset */
970 }
971
/* Create the child socket for a completed handshake on listener @sk.
 * Handles both the native IPv6 case and the v6-mapped-IPv4 case (where the
 * bulk of the work is delegated to tcp_v4_syn_recv_sock()).  Returns the
 * new socket, or NULL on failure (accept-queue overflow, routing failure,
 * allocation failure, or port-inherit failure).
 */
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		/* Let the IPv4 code build the child, then re-point its
		 * af_ops/backlog handlers at the mapped variants below.
		 */
		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		/* The memcpy above duplicated the listener's pointers;
		 * clear everything the child must not share.
		 */
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt         = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt        = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	/* IPv4 view of an IPv6 socket: dummy loopback addresses. */
	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_atomic(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions)
				skb_set_owner_r(newnp->pktoptions, newsk);
		}
	}

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
1176
1177 /* The socket must have it's spinlock held when we get
1178  * here, unless it is a TCP_LISTEN socket.
1179  *
1180  * We have a potential double-lock case here, so even when
1181  * doing backlog processing we use the BH locking scheme.
1182  * This is because we cannot sleep with the original spinlock
1183  * held.
1184  */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	/* Clone of skb held for the IPV6_PKTOPTIONS latch; owned by this
	 * function until handed to np->pktoptions or freed.
	 */
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			/* Invalidate the cached rx dst if the input device
			 * changed or the route cookie no longer validates.
			 */
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		/* nsk != sk: a syncookie handshake completed and produced
		 * a child socket; feed the segment to it.
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			/* Latch the clone; opt_skb becomes the previously
			 * latched skb (possibly NULL) and is freed below.
			 */
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
1316
1317 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1318                            const struct tcphdr *th)
1319 {
1320         /* This is tricky: we move IP6CB at its correct location into
1321          * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1322          * _decode_session6() uses IP6CB().
1323          * barrier() makes sure compiler won't play aliasing games.
1324          */
1325         memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1326                 sizeof(struct inet6_skb_parm));
1327         barrier();
1328
1329         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1330         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1331                                     skb->len - th->doff*4);
1332         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1333         TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1334         TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1335         TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1336         TCP_SKB_CB(skb)->sacked = 0;
1337 }
1338
1339 static void tcp_v6_restore_cb(struct sk_buff *skb)
1340 {
1341         /* We need to move header back to the beginning if xfrm6_policy_check()
1342          * and tcp_v6_fill_cb() are going to be called again.
1343          */
1344         memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1345                 sizeof(struct inet6_skb_parm));
1346 }
1347
/* Main IPv6 TCP receive entry point (registered as the protocol handler).
 * Validates the header and checksum, looks up the owning socket, and
 * dispatches according to socket state (NEW_SYN_RECV, TIME_WAIT, LISTEN,
 * or an established-path socket with backlog/prequeue handling).
 */
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	/* Data offset smaller than the minimal header is malformed. */
	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	/* Re-read headers: pskb_may_pull() may have reallocated. */
	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
				inet6_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			reqsk_put(req);
			goto discard_it;
		}
		/* Listener went away under us: drop the req and retry the
		 * full lookup rather than processing against a dead parent.
		 */
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	/* Enforce IP_MINTTL-style protection (RFC 5082 GTSM). */
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	/* Listeners are processed without the socket spinlock. */
	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		/* Backlog full: drop under memory pressure. */
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		/* A new SYN may legitimately reuse a TIME-WAIT 4-tuple:
		 * retarget it at a current listener, if one exists.
		 */
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
1529
/* Early demux: before routing, try to match the packet to an established
 * socket so its cached rx dst can be attached and a second lookup avoided.
 * Best-effort only — any validation failure just returns and lets the
 * normal receive path handle the packet.
 */
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			/* Only reuse the cached dst if its cookie still
			 * validates and the input interface matches.
			 */
			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
1567
/* TIME-WAIT socket operations for TCPv6. */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
1573
/* Address-family operations for native TCP-over-IPv6 sockets. */
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header    = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len    = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
1594
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 helpers for native IPv6 sockets. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1602
1603 /*
1604  *      TCP over IPv4 via INET6 API
1605  */
1606 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1607         .queue_xmit        = ip_queue_xmit,
1608         .send_check        = tcp_v4_send_check,
1609         .rebuild_header    = inet_sk_rebuild_header,
1610         .sk_rx_dst_set     = inet_sk_rx_dst_set,
1611         .conn_request      = tcp_v6_conn_request,
1612         .syn_recv_sock     = tcp_v6_syn_recv_sock,
1613         .net_header_len    = sizeof(struct iphdr),
1614         .setsockopt        = ipv6_setsockopt,
1615         .getsockopt        = ipv6_getsockopt,
1616         .addr2sockaddr     = inet6_csk_addr2sockaddr,
1617         .sockaddr_len      = sizeof(struct sockaddr_in6),
1618         .bind_conflict     = inet6_csk_bind_conflict,
1619 #ifdef CONFIG_COMPAT
1620         .compat_setsockopt = compat_ipv6_setsockopt,
1621         .compat_getsockopt = compat_ipv6_getsockopt,
1622 #endif
1623         .mtu_reduced       = tcp_v4_mtu_reduced,
1624 };
1625
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 helpers for v4-mapped sockets (IPv4 hashing, IPv6 key parsing). */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1633
1634 /* NOTE: A lot of things set to zero explicitly by call to
1635  *       sk_alloc() so need not be done here.
1636  */
1637 static int tcp_v6_init_sock(struct sock *sk)
1638 {
1639         struct inet_connection_sock *icsk = inet_csk(sk);
1640
1641         tcp_init_sock(sk);
1642
1643         icsk->icsk_af_ops = &ipv6_specific;
1644
1645 #ifdef CONFIG_TCP_MD5SIG
1646         tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1647 #endif
1648
1649         return 0;
1650 }
1651
/* Tear down: generic TCP cleanup first, then the IPv6-specific state. */
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
1657
1658 #ifdef CONFIG_PROC_FS
1659 /* Proc filesystem TCPv6 sock list dumping. */
/* Emit one /proc/net/tcp6 line for a pending open request (SYN_RECV). */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	/* Remaining time on the request's retransmit timer, clamped at 0. */
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
1691
/*
 * Print one /proc/net/tcp6 row for a full socket.  The socket is NOT
 * locked here, so all reads of its state are best-effort snapshots.
 */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	/* "tr" column encoding: 1 = retransmit timer, 4 = zero-window
	 * probe timer, 2 = sk_timer (keepalive et al.), 0 = none pending.
	 */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	/* Paired with sk_state_store(); gives a consistent state snapshot. */
	state = sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   /* last column: TFO queue limit for listeners, otherwise
		    * ssthresh (-1 while still in initial slow start).
		    */
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
1760
/*
 * Print one /proc/net/tcp6 row for a TIME_WAIT/FIN_WAIT2 mini-socket.
 * Most columns are fixed: timer type 3 with the remaining timewait
 * delta, and zeroed queue/uid/inode fields.
 */
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	/* May be negative if the timer already fired; printed as-is. */
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
1785
1786 static int tcp6_seq_show(struct seq_file *seq, void *v)
1787 {
1788         struct tcp_iter_state *st;
1789         struct sock *sk = v;
1790
1791         if (v == SEQ_START_TOKEN) {
1792                 seq_puts(seq,
1793                          "  sl  "
1794                          "local_address                         "
1795                          "remote_address                        "
1796                          "st tx_queue rx_queue tr tm->when retrnsmt"
1797                          "   uid  timeout inode\n");
1798                 goto out;
1799         }
1800         st = seq->private;
1801
1802         if (sk->sk_state == TCP_TIME_WAIT)
1803                 get_timewait6_sock(seq, v, st->num);
1804         else if (sk->sk_state == TCP_NEW_SYN_RECV)
1805                 get_openreq6(seq, v, st->num);
1806         else
1807                 get_tcp6_sock(seq, v, st->num);
1808 out:
1809         return 0;
1810 }
1811
/* file_operations for /proc/net/tcp6; iteration is driven by the
 * generic tcp_seq_open()/seq_file machinery.
 */
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};
1819
/* Registration record tying the AF_INET6 socket walk to the
 * /proc/net/tcp6 entry and its show callback.
 */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
1828
/* Create /proc/net/tcp6 in @net; returns 0 or a negative errno. */
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}
1833
/* Remove /proc/net/tcp6 from @net. */
void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
1839
/*
 * Clear a recycled socket object while leaving the pinet6 pointer
 * intact: with SLAB_DESTROY_BY_RCU (see tcpv6_prot.slab_flags),
 * concurrent RCU lookups may still dereference the object and must
 * keep seeing a valid pinet6.
 */
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	/* Zero everything after pinet6 up to the end of the object. */
	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}
1850
/* Transport protocol descriptor for AF_INET6 SOCK_STREAM sockets.
 * Shares the generic TCP implementation with IPv4 for most operations;
 * connect/init/destroy/backlog_rcv and clear_sk are the v6 variants.
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	/* Preserves pinet6 across slab reuse; see tcp_v6_clear_sk(). */
	.clear_sk		= tcp_v6_clear_sk,
};
1897
/* IPv6 protocol handler for IPPROTO_TCP: receive path entry points
 * and ICMPv6 error handling.
 */
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
1904
/* socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP) switch entry. */
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
1913
/* Per-namespace init: create the kernel control socket used for
 * sending TCPv6 resets/ACKs on behalf of this netns.
 */
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}
1919
/* Per-namespace teardown: release the control socket. */
static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
1924
/* Batched namespace exit: flush all AF_INET6 timewait sockets that
 * belong to the dying namespaces in one hash-table pass.
 */
static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}
1929
/* Network-namespace lifecycle hooks for TCPv6. */
static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
1935
1936 int __init tcpv6_init(void)
1937 {
1938         int ret;
1939
1940         ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1941         if (ret)
1942                 goto out;
1943
1944         /* register inet6 protocol */
1945         ret = inet6_register_protosw(&tcpv6_protosw);
1946         if (ret)
1947                 goto out_tcpv6_protocol;
1948
1949         ret = register_pernet_subsys(&tcpv6_net_ops);
1950         if (ret)
1951                 goto out_tcpv6_protosw;
1952 out:
1953         return ret;
1954
1955 out_tcpv6_protosw:
1956         inet6_unregister_protosw(&tcpv6_protosw);
1957 out_tcpv6_protocol:
1958         inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1959         goto out;
1960 }
1961
/* Module unload: unregister everything tcpv6_init() set up, in the
 * reverse order of registration.
 */
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}