/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
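
/* Cache the inbound route on the socket so the established-state fast
 * path can skip a full route lookup. rx_dst_cookie records the FIB
 * node's serial number, so a later dst->ops->check() can detect routing
 * table changes and invalidate the cached entry.
 */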
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	const struct rt6_info *rt = (const struct rt6_info *)dst;

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	if (rt->rt6i_node)
		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
}
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
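
/* The initial sequence number is derived from the connection 4-tuple
 * through a keyed hash (see secure_tcpv6_sequence_number()), so each
 * direction of each connection starts at an unpredictable offset while
 * still advancing monotonically over time.
 */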
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			usin->sin6_addr = flowlabel->dst;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the
			 * indices must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	np->daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = np->daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		np->rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
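
/* For reference, the userspace view of the path above is an ordinary
 * AF_INET6 connect(); a sketch (illustrative only, not part of this
 * file, error handling elided):
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
 *				  .sin6_port = htons(80) };
 *	inet_pton(AF_INET6, "2001:db8::1", &a.sin6_addr);
 *	connect(fd, (struct sockaddr *)&a, sizeof(a));
 *
 * Connecting to a v4-mapped address such as "::ffff:192.0.2.1" instead
 * takes the IPV6_ADDR_MAPPED branch above, which hands the socket over
 * to tcp_v4_connect() and the ipv6_mapped ops.
 */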
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
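
/* Note on the ICMPV6_PKT_TOOBIG path above: when the socket is locked
 * by a user context the PMTU update cannot run immediately, so it is
 * recorded in tp->mtu_info and TCP_MTU_REDUCED_DEFERRED is set; the
 * deferred work then runs from tcp_release_cb() when the lock is
 * released, with sock_hold() keeping the socket alive until then.
 */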
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi6 *fl6,
			      struct request_sock *req,
			      u16 queue_mapping)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, NULL);

	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		fl6->daddr = treq->rmt_addr;
		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
{
	struct flowi6 fl6;
	int res;

	res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0);
	if (!res)
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return res;
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
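
/* For reference, the userspace side of the option parsed above is an
 * ordinary setsockopt() call; a sketch (illustrative only, not part of
 * this file, error handling elided):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A v4-mapped tcpm_addr stores the key under AF_INET, as handled above.
 */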
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
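
/* The pseudo-header hashed above follows RFC 2460 section 8.1: source
 * address, destination address, upper-layer packet length and next
 * header (IPPROTO_TCP), in that order, as laid out in tcp6_pseudohdr.
 */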
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}
#endif
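
/* Per RFC 2385, the digest checked above covers the TCP pseudo-header,
 * the TCP header with its checksum field zeroed (options excluded),
 * the segment payload, and finally the shared key.
 */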
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 tsval, u32 tsecr,
				 struct tcp_md5sig_key *key, int rst, u8 tclass)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
		fl6.flowi6_oif = inet6_iif(skb);
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for a RST;
	 * the underlying function uses it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We are not losing security here:
		 * the incoming packet is checked against the md5 hash using
		 * the key found; no RST is generated if the hash doesn't
		 * match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, key, 1, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
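
/* RST sequence numbers above follow RFC 793: if the offending segment
 * carried an ACK, the reset reuses that acknowledgment number as its
 * sequence number; otherwise the reset acknowledges everything the
 * segment occupied (SYN and FIN each count for one sequence unit).
 */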
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr,
			    struct tcp_md5sig_key *key, u8 tclass)
{
	tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, key, 0, tclass);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
			req->rcv_wnd, tcp_time_stamp, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
}
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	struct flowi6 fl6;
	bool want_cookie = false;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
		goto drop;
	}

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, 0, NULL);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	treq->rmt_addr = ipv6_hdr(skb)->saddr;
	treq->loc_addr = ipv6_hdr(skb)->daddr;
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb, sock_net(sk));

	treq->iif = sk->sk_bound_dev_if;
	inet_rsk(req)->ir_mark = inet_request_mark(sk, skb);

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save the last timestamp seen
		 * from the destination in the peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting a new connection request.
		 *
		 * If "isn" is not zero, this request hit an alive
		 * timewait bucket, so all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations proven
			 * to be alive.
			 * It means that we continue to communicate
			 * with destinations already remembered at
			 * the moment of the synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_release;

	if (tcp_v6_send_synack(sk, dst, &fl6, req,
			       skb_get_queue_mapping(skb)) ||
	    want_cookie)
		goto drop_and_free;

	tcp_rsk(req)->snt_synack = tcp_time_stamp;
	tcp_rsk(req)->listener = NULL;
	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}
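
/* When want_cookie is set, no request_sock survives this function: the
 * SYN-ACK carries all connection state encoded in the sequence number
 * by cookie_v6_init_sequence(), the req is freed via drop_and_free, and
 * the state is reconstructed by cookie_v6_check() when the ACK returns.
 */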
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newnp->rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_tclass  = ipv6_get_dsfield(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with the IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newnp->daddr = treq->rmt_addr;
	newnp->saddr = treq->loc_addr;
	newnp->rcv_saddr = treq->loc_addr;
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	tcp_synack_rtt_meas(newsk, req);
	newtp->total_retrans = req->num_retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
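
/* Checksum strategy: CHECKSUM_COMPLETE packets are verified against the
 * pseudo-header right here; for everything else the pseudo-header sum
 * is seeded into skb->csum, short packets (<= 76 bytes) are verified
 * immediately, and longer ones are left for copy-and-checksum later.
 */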
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and is backlogged.
	   From the backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   can do this without affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   options received in the last packet enqueued
	   by tcp. Feel free to propose a better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which the user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxtclass)
			np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
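
/* Early demux runs from the IPv6 receive path before routing: matching
 * an established socket here lets the stack attach both the socket and
 * its cached, cookie-validated rx dst to the skb, avoiding a second
 * hash lookup and a route lookup later in tcp_v6_rcv().
 */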
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}
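
/* The row printed above deliberately mirrors IPv4's /proc/net/tcp, with
 * each 128-bit address rendered as four 32-bit hex words; "tx_queue" is
 * write_seq - snd_una and "rx_queue" is rcv_nxt - copied_seq (or the
 * accept backlog for listeners), as computed in the seq_printf() call.
 */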
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	long delta = tw->tw_ttd - jiffies;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v6_mtu_reduced,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}