[firefly-linux-kernel-4.4.55.git] net/ipv4/tcp.c
index e8c126a52551802e9ee90fc6052c02fb0d34cd25..dd2a41b999d49ab161dcfda9fc8c7a101f52834b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
 #include <linux/crypto.h>
 #include <linux/time.h>
 #include <linux/slab.h>
-#include <linux/uid_stat.h>
 
 #include <net/icmp.h>
 #include <net/inet_common.h>
 #include <net/tcp.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
-#include <net/ip6_route.h>
-#include <net/ipv6.h>
-#include <net/transp_v6.h>
 #include <net/sock.h>
 
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
+#include <asm/unaligned.h>
 #include <net/busy_poll.h>
 
 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
@@ -786,6 +783,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
                                ret = -EAGAIN;
                                break;
                        }
+                       /* if __tcp_splice_read() got nothing while we have
+                        * an skb in receive queue, we do not want to loop.
+                        * This might happen with URG data.
+                        */
+                       if (!skb_queue_empty(&sk->sk_receive_queue))
+                               break;
                        sk_wait_data(sk, &timeo, NULL);
                        if (signal_pending(current)) {
                                ret = sock_intr_errno(timeo);
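
Note: tcp_splice_read() is the kernel backend for splice(2) on a connected TCP socket; the added check stops the wait loop when __tcp_splice_read() returned nothing even though an skb (for example one carrying urgent data) is already queued. The sketch below is purely illustrative and not part of this patch; the helper name splice_once is made up, and it simply shows the userspace call that ends up in this function.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Pull up to 64 KiB from a connected TCP socket into a fresh pipe
 * without copying the payload through a userspace buffer. */
static int splice_once(int tcp_fd)
{
	int pipefd[2];
	ssize_t n;

	if (pipe(pipefd) < 0)
		return -1;
	n = splice(tcp_fd, NULL, pipefd[1], NULL, 65536, SPLICE_F_MOVE);
	if (n < 0)
		perror("splice");
	close(pipefd[0]);
	close(pipefd[1]);
	return (int)n;
}
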
@@ -942,7 +945,7 @@ new_segment:
 
                i = skb_shinfo(skb)->nr_frags;
                can_coalesce = skb_can_coalesce(skb, i, page, offset);
-               if (!can_coalesce && i >= MAX_SKB_FRAGS) {
+               if (!can_coalesce && i >= sysctl_max_skb_frags) {
                        tcp_mark_push(tp, skb);
                        goto new_segment;
                }
@@ -1215,7 +1218,7 @@ new_segment:
 
                        if (!skb_can_coalesce(skb, i, pfrag->page,
                                              pfrag->offset)) {
-                               if (i == MAX_SKB_FRAGS || !sg) {
+                               if (i >= sysctl_max_skb_frags || !sg) {
                                        tcp_mark_push(tp, skb);
                                        goto new_segment;
                                }
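
Both the sendpage and sendmsg paths now cap the per-skb fragment count with the runtime tunable sysctl_max_skb_frags instead of the compile-time MAX_SKB_FRAGS. A minimal sketch of reading the tunable from userspace, assuming it is exposed at the usual procfs location net.core.max_skb_frags (the helper name read_max_skb_frags is illustrative):

#include <stdio.h>

/* Assumed procfs path of the tunable consulted in the hunks above. */
#define MAX_SKB_FRAGS_PROC "/proc/sys/net/core/max_skb_frags"

static int read_max_skb_frags(void)
{
	FILE *f = fopen(MAX_SKB_FRAGS_PROC, "r");
	int val = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}
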
@@ -1287,10 +1290,6 @@ out:
                tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
 out_nopush:
        release_sock(sk);
-
-       if (copied + copied_syn)
-               uid_stat_tcp_snd(from_kuid(&init_user_ns, current_uid()),
-                                copied + copied_syn);
        return copied + copied_syn;
 
 do_fault:
@@ -1565,8 +1564,6 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
        if (copied > 0) {
                tcp_recv_skb(sk, seq, &offset);
                tcp_cleanup_rbuf(sk, copied);
-               uid_stat_tcp_rcv(from_kuid(&init_user_ns, current_uid()),
-                                copied);
        }
        return copied;
 }
@@ -1900,10 +1897,6 @@ skip_copy:
        tcp_cleanup_rbuf(sk, copied);
 
        release_sock(sk);
-
-       if (copied > 0)
-               uid_stat_tcp_rcv(from_kuid(&init_user_ns, current_uid()),
-                                copied);
        return copied;
 
 out:
@@ -1912,9 +1905,6 @@ out:
 
 recv_urg:
        err = tcp_recv_urg(sk, msg, len, flags);
-       if (err > 0)
-               uid_stat_tcp_rcv(from_kuid(&init_user_ns, current_uid()),
-                                err);
        goto out;
 
 recv_sndq:
@@ -2654,6 +2644,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 now = tcp_time_stamp;
        unsigned int start;
+       u64 rate64;
        u32 rate;
 
        memset(info, 0, sizeof(*info));
@@ -2719,15 +2710,17 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
        info->tcpi_total_retrans = tp->total_retrans;
 
        rate = READ_ONCE(sk->sk_pacing_rate);
-       info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL;
+       rate64 = rate != ~0U ? rate : ~0ULL;
+       put_unaligned(rate64, &info->tcpi_pacing_rate);
 
        rate = READ_ONCE(sk->sk_max_pacing_rate);
-       info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
+       rate64 = rate != ~0U ? rate : ~0ULL;
+       put_unaligned(rate64, &info->tcpi_max_pacing_rate);
 
        do {
                start = u64_stats_fetch_begin_irq(&tp->syncp);
-               info->tcpi_bytes_acked = tp->bytes_acked;
-               info->tcpi_bytes_received = tp->bytes_received;
+               put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked);
+               put_unaligned(tp->bytes_received, &info->tcpi_bytes_received);
        } while (u64_stats_fetch_retry_irq(&tp->syncp, start));
        info->tcpi_segs_out = tp->segs_out;
        info->tcpi_segs_in = tp->segs_in;
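
The 64-bit tcp_info fields are now stored with put_unaligned() because the buffer handed to userspace (for example an inet_diag netlink attribute) need not be 8-byte aligned, and a plain 64-bit store can trap on alignment-strict architectures. A hedged sketch of the consumer side via getsockopt(TCP_INFO), assuming a uapi linux/tcp.h recent enough to carry tcpi_pacing_rate and tcpi_bytes_acked; the helper name dump_tcp_rates is illustrative:

#include <linux/tcp.h>		/* struct tcp_info, TCP_INFO */
#include <netinet/in.h>		/* IPPROTO_TCP */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static int dump_tcp_rates(int tcp_fd)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	memset(&info, 0, sizeof(info));
	if (getsockopt(tcp_fd, IPPROTO_TCP, TCP_INFO, &info, &len) < 0)
		return -1;
	printf("pacing rate : %llu B/s\n",
	       (unsigned long long)info.tcpi_pacing_rate);
	printf("bytes acked : %llu\n",
	       (unsigned long long)info.tcpi_bytes_acked);
	return 0;
}
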
@@ -3097,6 +3090,52 @@ void tcp_done(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(tcp_done);
 
+int tcp_abort(struct sock *sk, int err)
+{
+       if (!sk_fullsock(sk)) {
+               if (sk->sk_state == TCP_NEW_SYN_RECV) {
+                       struct request_sock *req = inet_reqsk(sk);
+
+                       local_bh_disable();
+                       inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
+                                                         req);
+                       local_bh_enable();
+                       return 0;
+               }
+               sock_gen_put(sk);
+               return -EOPNOTSUPP;
+       }
+
+       /* Don't race with userspace socket closes such as tcp_close. */
+       lock_sock(sk);
+
+       if (sk->sk_state == TCP_LISTEN) {
+               tcp_set_state(sk, TCP_CLOSE);
+               inet_csk_listen_stop(sk);
+       }
+
+       /* Don't race with BH socket closes such as inet_csk_listen_stop. */
+       local_bh_disable();
+       bh_lock_sock(sk);
+
+       if (!sock_flag(sk, SOCK_DEAD)) {
+               sk->sk_err = err;
+               /* This barrier is coupled with smp_rmb() in tcp_poll() */
+               smp_wmb();
+               sk->sk_error_report(sk);
+               if (tcp_need_reset(sk->sk_state))
+                       tcp_send_active_reset(sk, GFP_ATOMIC);
+               tcp_done(sk);
+       }
+
+       bh_unlock_sock(sk);
+       local_bh_enable();
+       release_sock(sk);
+       sock_put(sk);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(tcp_abort);
+
 extern struct tcp_congestion_ops tcp_reno;
 
 static __initdata unsigned long thash_entries;
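
tcp_abort() gives the stack a controlled way to kill a socket from outside the normal close path; upstream it is wired to the sock_diag SOCK_DESTROY command, the mechanism the ss utility's kill option is built on. Below is a hedged sketch of issuing such a request, assuming a kernel with CONFIG_INET_DIAG_DESTROY and uapi headers that define SOCK_DESTROY; it omits reading back the netlink ACK, requires CAP_NET_ADMIN, and the helper name destroy_tcp_flow is illustrative.

#include <arpa/inet.h>		/* inet_pton(), htons(), IPPROTO_TCP */
#include <linux/inet_diag.h>	/* struct inet_diag_req_v2, INET_DIAG_NOCOOKIE */
#include <linux/netlink.h>	/* struct nlmsghdr, struct sockaddr_nl */
#include <linux/sock_diag.h>	/* SOCK_DESTROY */
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SOCK_DESTROY
#define SOCK_DESTROY 21		/* assumption: matches the uapi enum value */
#endif

static int destroy_tcp_flow(const char *saddr, int sport,
			    const char *daddr, int dport)
{
	struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct inet_diag_req_v2 req;
	} msg;
	int fd, ret;

	fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_SOCK_DIAG);
	if (fd < 0)
		return -1;

	memset(&msg, 0, sizeof(msg));
	msg.nlh.nlmsg_len = sizeof(msg);
	msg.nlh.nlmsg_type = SOCK_DESTROY;
	msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	msg.req.sdiag_family = AF_INET;
	msg.req.sdiag_protocol = IPPROTO_TCP;
	msg.req.idiag_states = ~0U;		/* match any TCP state */
	msg.req.id.idiag_sport = htons(sport);
	msg.req.id.idiag_dport = htons(dport);
	inet_pton(AF_INET, saddr, msg.req.id.idiag_src);
	inet_pton(AF_INET, daddr, msg.req.id.idiag_dst);
	msg.req.id.idiag_cookie[0] = INET_DIAG_NOCOOKIE;
	msg.req.id.idiag_cookie[1] = INET_DIAG_NOCOOKIE;

	ret = sendto(fd, &msg, sizeof(msg), 0,
		     (struct sockaddr *)&nladdr, sizeof(nladdr));
	close(fd);
	return ret < 0 ? -1 : 0;
}

On a match, the diag layer ends up calling tcp_abort(), which is why the function takes care to handle request socks, listeners, and fullsocks separately before sending the reset and calling tcp_done().
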
@@ -3204,119 +3243,3 @@ void __init tcp_init(void)
        BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
        tcp_tasklet_init();
 }
-
-static int tcp_is_local(struct net *net, __be32 addr) {
-       struct rtable *rt;
-       struct flowi4 fl4 = { .daddr = addr };
-       rt = ip_route_output_key(net, &fl4);
-       if (IS_ERR_OR_NULL(rt))
-               return 0;
-       return rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK);
-}
-
-#if defined(CONFIG_IPV6)
-static int tcp_is_local6(struct net *net, struct in6_addr *addr) {
-       struct rt6_info *rt6 = rt6_lookup(net, addr, addr, 0, 0);
-       return rt6 && rt6->dst.dev && (rt6->dst.dev->flags & IFF_LOOPBACK);
-}
-#endif
-
-/*
- * tcp_nuke_addr - destroy all sockets on the given local address
- * if local address is the unspecified address (0.0.0.0 or ::), destroy all
- * sockets with local addresses that are not configured.
- */
-int tcp_nuke_addr(struct net *net, struct sockaddr *addr)
-{
-       int family = addr->sa_family;
-       unsigned int bucket;
-
-       struct in_addr *in;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-       struct in6_addr *in6 = NULL;
-#endif
-       if (family == AF_INET) {
-               in = &((struct sockaddr_in *)addr)->sin_addr;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-       } else if (family == AF_INET6) {
-               in6 = &((struct sockaddr_in6 *)addr)->sin6_addr;
-#endif
-       } else {
-               return -EAFNOSUPPORT;
-       }
-
-       for (bucket = 0; bucket <= tcp_hashinfo.ehash_mask; bucket++) {
-               struct hlist_nulls_node *node;
-               struct sock *sk;
-               spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
-
-restart:
-               spin_lock_bh(lock);
-               sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
-                       struct inet_sock *inet = inet_sk(sk);
-
-                       if (sk->sk_state == TCP_TIME_WAIT) {
-                               /*
-                                * Sockets that are in TIME_WAIT state are
-                                * instances of lightweight inet_timewait_sock,
-                                * we should simply skip them (or we'll try to
-                                * access non-existing fields and crash).
-                                */
-                               continue;
-                       }
-
-                       if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
-                               continue;
-
-                       if (sock_flag(sk, SOCK_DEAD))
-                               continue;
-
-                       if (family == AF_INET) {
-                               __be32 s4 = inet->inet_rcv_saddr;
-                               if (s4 == LOOPBACK4_IPV6)
-                                       continue;
-
-                               if (in->s_addr != s4 &&
-                                   !(in->s_addr == INADDR_ANY &&
-                                     !tcp_is_local(net, s4)))
-                                       continue;
-                       }
-
-#if defined(CONFIG_IPV6)
-                       if (family == AF_INET6) {
-                               struct in6_addr *s6;
-                               if (!inet->pinet6)
-                                       continue;
-
-                               s6 = &sk->sk_v6_rcv_saddr;
-                               if (ipv6_addr_type(s6) == IPV6_ADDR_MAPPED)
-                                       continue;
-
-                               if (!ipv6_addr_equal(in6, s6) &&
-                                   !(ipv6_addr_equal(in6, &in6addr_any) &&
-                                     !tcp_is_local6(net, s6)))
-                               continue;
-                       }
-#endif
-
-                       sock_hold(sk);
-                       spin_unlock_bh(lock);
-
-                       local_bh_disable();
-                       bh_lock_sock(sk);
-                       sk->sk_err = ETIMEDOUT;
-                       sk->sk_error_report(sk);
-
-                       tcp_done(sk);
-                       bh_unlock_sock(sk);
-                       local_bh_enable();
-                       sock_put(sk);
-
-                       goto restart;
-               }
-               spin_unlock_bh(lock);
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(tcp_nuke_addr);