Merge branch 'tcpflags'
[firefly-linux-kernel-4.4.55.git] / net / ipv6 / tcp_ipv6.c
1 /*
2  *      TCP over IPv6
3  *      Linux INET6 implementation
4  *
5  *      Authors:
6  *      Pedro Roque             <roque@di.fc.ul.pt>
7  *
8  *      Based on:
9  *      linux/net/ipv4/tcp.c
10  *      linux/net/ipv4/tcp_input.c
11  *      linux/net/ipv4/tcp_output.c
12  *
13  *      Fixes:
14  *      Hideaki YOSHIFUJI       :       sin6_scope_id support
15  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
16  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
17  *                                      a single port at the same time.
18  *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
19  *
20  *      This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63 #include <net/inet_common.h>
64 #include <net/secure_seq.h>
65 #include <net/tcp_memcontrol.h>
66 #include <net/busy_poll.h>
67
68 #include <linux/proc_fs.h>
69 #include <linux/seq_file.h>
70
71 #include <linux/crypto.h>
72 #include <linux/scatterlist.h>
73
74 static void     tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
75 static void     tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
76                                       struct request_sock *req);
77
78 static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
79
80 static const struct inet_connection_sock_af_ops ipv6_mapped;
81 static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
/* Stub used when TCP-MD5 support is compiled out: no key ever matches,
 * so every lookup reports "no key configured".
 */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
92
93 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
94 {
95         struct dst_entry *dst = skb_dst(skb);
96
97         if (dst) {
98                 const struct rt6_info *rt = (const struct rt6_info *)dst;
99
100                 dst_hold(dst);
101                 sk->sk_rx_dst = dst;
102                 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
103                 if (rt->rt6i_node)
104                         inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
105         }
106 }
107
108 static void tcp_v6_hash(struct sock *sk)
109 {
110         if (sk->sk_state != TCP_CLOSE) {
111                 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
112                         tcp_prot.hash(sk);
113                         return;
114                 }
115                 local_bh_disable();
116                 __inet6_hash(sk, NULL);
117                 local_bh_enable();
118         }
119 }
120
121 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
122 {
123         return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
124                                             ipv6_hdr(skb)->saddr.s6_addr32,
125                                             tcp_hdr(skb)->dest,
126                                             tcp_hdr(skb)->source);
127 }
128
/* Active (client-side) connect for TCP over IPv6.
 * Validates the destination, handles flow labels and link-local scoping,
 * and transparently delegates v4-mapped destinations to tcp_v4_connect().
 * Returns 0 on success or a negative errno.
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		/* Caller supplied flow info; a non-zero flow label must
		 * refer to a label already leased on this socket.
		 */
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		/* Destination changed: cached peer timestamp state and the
		 * write sequence base no longer apply.
		 */
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	ip6_set_txhash(sk);

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Flip the socket to the v4-mapped op vectors before
		 * delegating to the IPv4 connect path.
		 */
		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			/* Roll the op vectors back so the socket stays a
			 * plain IPv6 socket on failure.
			 */
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &sk->sk_v6_rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		/* Socket was not bound: adopt the source address that
		 * routing selected.
		 */
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
321
322 static void tcp_v6_mtu_reduced(struct sock *sk)
323 {
324         struct dst_entry *dst;
325
326         if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
327                 return;
328
329         dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
330         if (!dst)
331                 return;
332
333         if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
334                 tcp_sync_mss(sk, dst_mtu(dst));
335                 tcp_simple_retransmit(sk);
336         }
337 }
338
/* ICMPv6 error handler for TCP.
 * Locates the socket the quoted segment belongs to (full socket,
 * time-wait, or pending request sock) and either follows a redirect,
 * applies a PMTU update, or surfaces the converted error to the owner.
 */
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		/* Nothing to report on a time-wait socket; drop our ref. */
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* PKT_TOOBIG is still handled below even when user context owns
	 * the socket; other ICMPs are merely counted as lock-dropped.
	 */
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		/* Quoted sequence is outside our send window: stale or
		 * forged ICMP.
		 */
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			/* Deferred to release_sock(); the extra hold keeps
			 * the socket alive until the deferred work runs.
			 */
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	/* Established (or otherwise connected) socket: report the error
	 * hard if allowed, soft otherwise.
	 */
	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
475
476
/* Build and transmit a SYN-ACK for @req, routing it first when no dst was
 * supplied by the caller. Returns net_xmit_eval() of the transmit, or
 * -ENOMEM when route lookup or skb allocation fails.
 */
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		/* Reflect the client's flow label when flow-label
		 * reflection is enabled and the SYN skb was cached.
		 */
		if (np->repflow && (ireq->pktopts != NULL))
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}
511
512
513 static void tcp_v6_reqsk_destructor(struct request_sock *req)
514 {
515         kfree_skb(inet_rsk(req)->pktopts);
516 }
517
518 #ifdef CONFIG_TCP_MD5SIG
519 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
520                                                    const struct in6_addr *addr)
521 {
522         return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
523 }
524
525 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
526                                                 struct sock *addr_sk)
527 {
528         return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
529 }
530
531 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
532                                                       struct request_sock *req)
533 {
534         return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
535 }
536
/* setsockopt(TCP_MD5SIG) handler: add, or (keylen == 0) delete, the MD5
 * key for the peer address in @optval. V4-mapped peers are stored under
 * AF_INET so they match v4 traffic on a dual-stack socket.
 * Returns 0 on success or a negative errno.
 */
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		/* Zero key length means "delete the key for this peer". */
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
570
/* Feed the TCP-over-IPv6 pseudo-header into the running MD5 hash.
 * @nbytes is the TCP length that goes into the pseudo-header's len field.
 * Returns the crypto layer's status (0 on success).
 */
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
588
/* Compute the MD5 signature over pseudo-header + TCP header only (no
 * payload), writing 16 bytes into @md5_hash.
 * Returns 0 on success; 1 on any failure, with @md5_hash zeroed.
 */
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	/* th->doff << 2 = header length in bytes for the pseudo-header. */
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
621
/* Compute the MD5 signature over pseudo-header, TCP header and payload of
 * @skb, writing 16 bytes into @md5_hash. Address selection precedence:
 * full socket, then request sock, then the skb's own IPv6 header.
 * Returns 0 on success; 1 on any failure, with @md5_hash zeroed.
 */
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &sk->sk_v6_daddr;
	} else if (req) {
		saddr = &inet_rsk(req)->ir_v6_loc_addr;
		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	/* Payload starts after the (options-inclusive) TCP header. */
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
672
/* Validate the MD5 option of an incoming segment against the key expected
 * for its source address. Returns 0 to accept the packet, 1 to drop it.
 * Caller holds rcu_read_lock() for the key lookup (see the wrapper below).
 */
static int __tcp_v6_inbound_md5_hash(struct sock *sk,
				     const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	/* Key configured but segment unsigned: drop. */
	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	/* Segment signed but no key configured: drop. */
	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}
714
/* RCU-protected wrapper around __tcp_v6_inbound_md5_hash(); the MD5 key
 * lookup must run under the RCU read lock.
 */
static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	int rc;

	rcu_read_lock();
	rc = __tcp_v6_inbound_md5_hash(sk, skb);
	rcu_read_unlock();

	return rc;
}
725
726 #endif
727
/* Fill the IPv6-specific parts of a fresh request sock from the incoming
 * SYN: peer/local addresses, the interface index, and optionally a cached
 * reference to the SYN skb for later packet-option reporting.
 */
static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	ireq->ir_iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = inet6_iif(skb);

	/* Keep a reference on the SYN skb when the socket requested any
	 * ancillary data that must later be derived from it (skipped for
	 * TIME-WAIT recycled ISNs).
	 */
	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
752
753 static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
754                                           const struct request_sock *req,
755                                           bool *strict)
756 {
757         if (strict)
758                 *strict = true;
759         return inet6_csk_route_req(sk, &fl->u.ip6, req);
760 }
761
/* Request-sock operations used by the IPv6 TCP listener path. */
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
771
/* AF-specific helpers used while an IPv6 connection request is handled. */
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	/* Worst-case MSS: minimum IPv6 MTU minus fixed v6 + TCP headers. */
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
};
788
/* Build and send a bare TCP segment (a RST when @rst, otherwise an ACK)
 * in reply to @skb, with the incoming segment's addresses and ports
 * swapped. Transmits via the per-netns control socket, so it works
 * without a full local socket.
 */
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 tsval, u32 tsecr, int oif,
				 struct tcp_md5sig_key *key, int rst, u8 tclass,
				 u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	/* Size the header for the optional TCP options up front. */
	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	/* A RST carries an ACK only when answering a non-ACK segment. */
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* Header-only signature: this reply has no payload. */
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	/* Strict (e.g. link-local) destinations need an explicit oif. */
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = inet6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup either it is for RST
	 * Underlying function will use this to retrieve the network
	 * namespace
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
887
/* Send a RST in reply to @skb.  @sk may be NULL, e.g. when no matching
 * socket was found for an incoming segment.
 */
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	/* Never answer a RST with another RST. */
	if (th->rst)
		return;

	/* Only reply to unicast destinations. */
	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		/* RCU protects the md5 key list; sk1 holds a reference
		 * from the lookup, dropped at release_sk1.
		 */
		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		/* Verify the segment's own signature before replying. */
		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	/* Choose seq/ack so the peer will accept the RST: echo the
	 * peer's ACK as our sequence, or ACK everything it sent.
	 */
	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
955
956 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
957                             u32 win, u32 tsval, u32 tsecr, int oif,
958                             struct tcp_md5sig_key *key, u8 tclass,
959                             u32 label)
960 {
961         tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, oif, key, 0, tclass,
962                              label);
963 }
964
965 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
966 {
967         struct inet_timewait_sock *tw = inet_twsk(sk);
968         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
969
970         tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
971                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
972                         tcp_time_stamp + tcptw->tw_ts_offset,
973                         tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
974                         tw->tw_tclass, (tw->tw_flowlabel << 12));
975
976         inet_twsk_put(tw);
977 }
978
979 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
980                                   struct request_sock *req)
981 {
982         /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
983          * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
984          */
985         tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
986                         tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
987                         tcp_rsk(req)->rcv_nxt,
988                         req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
989                         tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
990                         0, 0);
991 }
992
993
/* Resolve which socket should handle a segment that arrived on
 * listener @sk: a pending connection request, an already-established
 * child, or (with syncookies) the listener itself after cookie check.
 *
 * Returns the socket to continue on (bh-locked if it is a distinct
 * established child), @sk itself, or NULL to discard the segment.
 */
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	/* The segment may belong to an established connection. */
	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		/* TIME_WAIT match: drop the lookup ref and discard. */
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	/* No request and no established socket: a non-SYN segment may
	 * be the final ACK of a SYN-cookie handshake.
	 */
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
1026
1027 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1028 {
1029         if (skb->protocol == htons(ETH_P_IP))
1030                 return tcp_v4_conn_request(sk, skb);
1031
1032         if (!ipv6_unicast_destination(skb))
1033                 goto drop;
1034
1035         return tcp_conn_request(&tcp6_request_sock_ops,
1036                                 &tcp_request_sock_ipv6_ops, sk, skb);
1037
1038 drop:
1039         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1040         return 0; /* don't send reset */
1041 }
1042
/* Create the child socket for a completed handshake on listener @sk.
 *
 * Two cases: a v4-mapped request is delegated to the IPv4 code and the
 * resulting child is re-dressed with the mapped IPv6 operations; a
 * native IPv6 request builds the child here.  Returns the new socket,
 * or NULL on failure (accept queue full, no route, no memory, or port
 * inheritance failure).
 */
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped: IPv4 peer talking to an AF_INET6 socket.
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		/* Expose the IPv4 addresses as ::ffff:a.b.c.d. */
		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newsk->sk_v6_rcv_saddr = newnp->saddr;

		/* Route all further operations through the IPv4 paths. */
		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		/* Clear pointers copied from the listener by the
		 * memcpy() above; the child must not share them.
		 */
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt         = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	ip6_set_txhash(newsk);

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt        = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	/* MSS bookkeeping: clamp advmss by the route metric and any
	 * user-configured maximum on the listener.
	 */
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
1240
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		/* Invalidate the cached RX dst if the incoming interface
		 * or the route-validity cookie no longer matches.
		 */
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb)) {
			/* Latch the new options; free any previous ones. */
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
1382
/* Main IPv6 TCP receive entry point.  Validates the segment, fills in
 * the skb control block, finds the owning socket and either processes
 * the segment directly, prequeues it, or adds it to the socket backlog
 * when the socket is owned by user context.  Returns 0, or -1 when the
 * handler reported an error.
 */
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	/* doff is the header length in 32-bit words; it must cover at
	 * least the basic header, and the full header must be linear.
	 */
	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	/* Re-load header pointers: they may be stale after the pulls
	 * above.
	 */
	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	/* Drop segments whose hop limit is below the socket's
	 * configured minimum.
	 */
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_mark_napi_id(sk, skb);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		/* Backlog full: count the drop and bail out. */
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		/* Valid segment but no socket: answer with a RST. */
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		/* A new SYN hit a TIME_WAIT socket: if a listener
		 * exists, retire the timewait sock and restart
		 * processing on the listener.
		 */
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
1540
1541 static void tcp_v6_early_demux(struct sk_buff *skb)
1542 {
1543         const struct ipv6hdr *hdr;
1544         const struct tcphdr *th;
1545         struct sock *sk;
1546
1547         if (skb->pkt_type != PACKET_HOST)
1548                 return;
1549
1550         if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1551                 return;
1552
1553         hdr = ipv6_hdr(skb);
1554         th = tcp_hdr(skb);
1555
1556         if (th->doff < sizeof(struct tcphdr) / 4)
1557                 return;
1558
1559         sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1560                                         &hdr->saddr, th->source,
1561                                         &hdr->daddr, ntohs(th->dest),
1562                                         inet6_iif(skb));
1563         if (sk) {
1564                 skb->sk = sk;
1565                 skb->destructor = sock_edemux;
1566                 if (sk->sk_state != TCP_TIME_WAIT) {
1567                         struct dst_entry *dst = sk->sk_rx_dst;
1568
1569                         if (dst)
1570                                 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1571                         if (dst &&
1572                             inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1573                                 skb_dst_set_noref(skb, dst);
1574                 }
1575         }
1576 }
1577
/* TIME_WAIT socket hooks used by the generic TCP/timewait code. */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
1583
/* Connection-level AF operations for native IPv6 TCP sockets. */
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header    = inet6_sk_rebuild_header,
	.sk_rx_dst_set     = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock     = tcp_v6_syn_recv_sock,
	.net_header_len    = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr     = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict     = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
1604
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 signature operations for native IPv6 sockets. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1612
/*
 *	TCP over IPv4 via INET6 API: used for v4-mapped children of
 *	AF_INET6 sockets, so the IPv4 xmit/header paths are used.
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header    = inet_sk_rebuild_header,
	.sk_rx_dst_set     = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock     = tcp_v6_syn_recv_sock,
	.net_header_len    = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr     = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict     = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
1635
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 signature operations for v4-mapped sockets (IPv4 hashing). */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1643
1644 /* NOTE: A lot of things set to zero explicitly by call to
1645  *       sk_alloc() so need not be done here.
1646  */
1647 static int tcp_v6_init_sock(struct sock *sk)
1648 {
1649         struct inet_connection_sock *icsk = inet_csk(sk);
1650
1651         tcp_init_sock(sk);
1652
1653         icsk->icsk_af_ops = &ipv6_specific;
1654
1655 #ifdef CONFIG_TCP_MD5SIG
1656         tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1657 #endif
1658
1659         return 0;
1660 }
1661
/* Tear down a TCPv6 socket: common TCP state first, then the
 * IPv6-specific state.
 */
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
1667
1668 #ifdef CONFIG_PROC_FS
1669 /* Proc filesystem TCPv6 sock list dumping. */
/* Dump one pending connection request (SYN_RECV) as a /proc/net/tcp6
 * line.  @i is the row index, @uid the listener's owner.
 */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
{
	/* Remaining request lifetime in jiffies, clamped at zero. */
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
1700
/* Emit one /proc/net/tcp6 line for a full socket (listening or
 * established).  The format string is user-visible output, so the
 * columns must not be reordered or retyped.
 */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	/* Select the most relevant pending timer: the numeric code and
	 * its expiry feed the "tr tm->when" columns below.
	 */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;	/* yields a zero delta below */
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   /* rx_queue: accept backlog for listeners, else unread bytes */
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   /* Last column: fastopen queue limit for listeners;
		    * otherwise ssthresh, or -1 while still in initial
		    * slow start.
		    */
		   sp->sk_state == TCP_LISTEN ?
			(fastopenq ? fastopenq->max_qlen : 0) :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
1758
/* Emit one /proc/net/tcp6 line for a TIME_WAIT socket.  A timewait
 * sock keeps only minimal state, so most columns are fixed zeros.
 */
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	/* Time until the timewait timer fires, relative to now (signed:
	 * may be negative if already due).
	 */
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
1783
1784 static int tcp6_seq_show(struct seq_file *seq, void *v)
1785 {
1786         struct tcp_iter_state *st;
1787         struct sock *sk = v;
1788
1789         if (v == SEQ_START_TOKEN) {
1790                 seq_puts(seq,
1791                          "  sl  "
1792                          "local_address                         "
1793                          "remote_address                        "
1794                          "st tx_queue rx_queue tr tm->when retrnsmt"
1795                          "   uid  timeout inode\n");
1796                 goto out;
1797         }
1798         st = seq->private;
1799
1800         switch (st->state) {
1801         case TCP_SEQ_STATE_LISTENING:
1802         case TCP_SEQ_STATE_ESTABLISHED:
1803                 if (sk->sk_state == TCP_TIME_WAIT)
1804                         get_timewait6_sock(seq, v, st->num);
1805                 else
1806                         get_tcp6_sock(seq, v, st->num);
1807                 break;
1808         case TCP_SEQ_STATE_OPENREQ:
1809                 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1810                 break;
1811         }
1812 out:
1813         return 0;
1814 }
1815
/* file_operations backing the tcp6 proc entry; iteration itself lives
 * in the shared tcp_seq_* helpers.
 */
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};
1823
/* AF_INET6 descriptor handed to the generic TCP proc machinery; only
 * ->show is AF-specific, the rest of seq_ops is filled in by the
 * shared code.
 */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
1832
/* Register the per-namespace /proc/net/tcp6 entry.  Returns 0 or a
 * negative errno from tcp_proc_register().
 */
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}
1837
/* Remove the per-namespace /proc/net/tcp6 entry. */
void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
1842 #endif
1843
/* Zero a (SLAB_DESTROY_BY_RCU) socket object for reuse while keeping
 * the pinet6 pointer intact: concurrent RCU lookups may still be
 * dereferencing it.  Clears the range before pinet6 and the range
 * after it separately.
 */
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	/* Clear everything that follows pinet6, up to 'size' bytes from
	 * the start of the object.
	 */
	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}
1854
/* Protocol descriptor for IPv6 TCP sockets.  Most handlers are shared
 * with IPv4; the IPv6-specific entries are connect, init, destroy,
 * backlog_rcv, hash and clear_sk.
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};
1901
/* inet6 layer hooks for IPPROTO_TCP: packet input (tcp_v6_rcv),
 * ICMPv6 error handling (tcp_v6_err) and early demux.
 */
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
1908
/* Registers the SOCK_STREAM/IPPROTO_TCP pairing with the AF_INET6
 * socket layer, binding tcpv6_prot to the generic inet6 stream ops.
 */
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
1917
/* Per-namespace setup: create the kernel-internal IPv6 TCP control
 * socket stored in net->ipv6.tcp_sk.
 */
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}
1923
/* Per-namespace teardown: destroy the control socket created in
 * tcpv6_net_init().
 */
static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
1928
/* Batched namespace teardown: purge AF_INET6 timewait sockets in one
 * pass for the whole batch of dying namespaces (net_exit_list is not
 * walked here; the purge helper scans the global hash).
 */
static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}
1933
/* Network-namespace lifecycle hooks for IPv6 TCP. */
static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
1939
1940 int __init tcpv6_init(void)
1941 {
1942         int ret;
1943
1944         ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1945         if (ret)
1946                 goto out;
1947
1948         /* register inet6 protocol */
1949         ret = inet6_register_protosw(&tcpv6_protosw);
1950         if (ret)
1951                 goto out_tcpv6_protocol;
1952
1953         ret = register_pernet_subsys(&tcpv6_net_ops);
1954         if (ret)
1955                 goto out_tcpv6_protosw;
1956 out:
1957         return ret;
1958
1959 out_tcpv6_protosw:
1960         inet6_unregister_protosw(&tcpv6_protosw);
1961 out_tcpv6_protocol:
1962         inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1963         goto out;
1964 }
1965
/* Undo tcpv6_init() registrations, in reverse order. */
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}