[firefly-linux-kernel-4.4.55.git] net/ipv4/tcp.c
index 036a76ba2ac293bcf6e33655cbc7508e74f7cf8f..dd2a41b999d49ab161dcfda9fc8c7a101f52834b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -783,6 +783,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
                                ret = -EAGAIN;
                                break;
                        }
+                       /* if __tcp_splice_read() got nothing while we have
+                        * an skb in receive queue, we do not want to loop.
+                        * This might happen with URG data.
+                        */
+                       if (!skb_queue_empty(&sk->sk_receive_queue))
+                               break;
                        sk_wait_data(sk, &timeo, NULL);
                        if (signal_pending(current)) {
                                ret = sock_intr_errno(timeo);
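
Note on the hunk above: __tcp_splice_read() can return 0 even though an skb is sitting in the receive queue (for instance when that skb carries only urgent data). Without the added check the loop falls through to sk_wait_data(), which returns immediately because the queue is not empty, and the task spins forever; breaking out instead lets the caller see a short read.

For context, tcp_splice_read() backs splice(2) on TCP sockets. A minimal userspace sketch of that path (hypothetical descriptors, error handling trimmed) could look like the snippet below; before the fix above, a peer sending only urgent data could leave such a call spinning in the kernel:

    #define _GNU_SOURCE
    #include <sys/types.h>
    #include <fcntl.h>

    /* Move up to len bytes from a connected TCP socket into a pipe
     * without copying the data through userspace.
     */
    static ssize_t drain_socket(int sockfd, int pipe_wr, size_t len)
    {
            return splice(sockfd, NULL, pipe_wr, NULL, len, SPLICE_F_MOVE);
    }
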
@@ -1212,7 +1218,7 @@ new_segment:
 
                        if (!skb_can_coalesce(skb, i, pfrag->page,
                                              pfrag->offset)) {
-                               if (i == sysctl_max_skb_frags || !sg) {
+                               if (i >= sysctl_max_skb_frags || !sg) {
                                        tcp_mark_push(tp, skb);
                                        goto new_segment;
                                }
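
The hunk above relaxes the fragment-limit test from == to >=. sysctl_max_skb_frags is a runtime tunable, so presumably the concern is that the limit can be lowered while an skb under construction already holds more page fragments than the new value; an equality test would then never fire and fragments would keep being appended, while >= still forces tcp_mark_push() and a fresh segment. A toy standalone illustration (not kernel code, values made up) of why the defensive comparison matters:

    #include <stdio.h>

    int main(void)
    {
            int limit = 17;                 /* stands in for sysctl_max_skb_frags */
            int i;

            for (i = 0; i < 32; i++) {
                    if (i == 10)
                            limit = 4;      /* tunable lowered mid-stream */
                    if (i >= limit) {       /* with "==" this never matches */
                            printf("stopped at %d\n", i);
                            break;
                    }
            }
            return 0;
    }
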
@@ -3084,6 +3090,52 @@ void tcp_done(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(tcp_done);
 
+int tcp_abort(struct sock *sk, int err)
+{
+       if (!sk_fullsock(sk)) {
+               if (sk->sk_state == TCP_NEW_SYN_RECV) {
+                       struct request_sock *req = inet_reqsk(sk);
+
+                       local_bh_disable();
+                       inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
+                                                         req);
+                       local_bh_enable();
+                       return 0;
+               }
+               sock_gen_put(sk);
+               return -EOPNOTSUPP;
+       }
+
+       /* Don't race with userspace socket closes such as tcp_close. */
+       lock_sock(sk);
+
+       if (sk->sk_state == TCP_LISTEN) {
+               tcp_set_state(sk, TCP_CLOSE);
+               inet_csk_listen_stop(sk);
+       }
+
+       /* Don't race with BH socket closes such as inet_csk_listen_stop. */
+       local_bh_disable();
+       bh_lock_sock(sk);
+
+       if (!sock_flag(sk, SOCK_DEAD)) {
+               sk->sk_err = err;
+               /* This barrier is coupled with smp_rmb() in tcp_poll() */
+               smp_wmb();
+               sk->sk_error_report(sk);
+               if (tcp_need_reset(sk->sk_state))
+                       tcp_send_active_reset(sk, GFP_ATOMIC);
+               tcp_done(sk);
+       }
+
+       bh_unlock_sock(sk);
+       local_bh_enable();
+       release_sock(sk);
+       sock_put(sk);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(tcp_abort);
+
 extern struct tcp_congestion_ops tcp_reno;
 
 static __initdata unsigned long thash_entries;
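
The new tcp_abort() above handles three cases: request sockets are dropped from their listener's queue, other non-full sockets (time-wait) are released via sock_gen_put() with -EOPNOTSUPP, and full sockets are torn down under both the socket lock and the BH lock so the helper cannot race with tcp_close() or inet_csk_listen_stop(). For a live socket it records the error, wakes any waiter via sk_error_report, sends an active reset when the state calls for one, and finishes with tcp_done(). Note the reference contract: every path releases a reference (inet_csk_reqsk_queue_drop_and_put(), sock_gen_put(), sock_put()), so a caller is expected to hand in a socket it holds.

The caller is not part of this diff; in mainline the helper is reached from the inet_diag SOCK_DESTROY operation (e.g. ss -K from iproute2, gated by CONFIG_INET_DIAG_DESTROY). A hypothetical kernel-side sketch of the reference contract, assuming the tcp_abort() declaration is visible via net/tcp.h:

    #include <linux/errno.h>
    #include <net/sock.h>
    #include <net/tcp.h>

    /* Sketch only: tcp_abort() consumes a reference on every return path,
     * so take one explicitly and do not touch sk afterwards.
     */
    static int example_destroy_sock(struct sock *sk)
    {
            sock_hold(sk);          /* reference handed off to tcp_abort() */
            return tcp_abort(sk, ECONNABORTED);
    }
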