Merge remote-tracking branch 'lsk/v3.10/topic/gator' into linux-linaro-lsk
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5560abfe6d301b21f137866bc19f1a9d69f7567b..56e29f0e230e4a6c06a24d7a5ee08856a644f697 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -686,7 +686,8 @@ static void tcp_tsq_handler(struct sock *sk)
        if ((1 << sk->sk_state) &
            (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
             TCPF_CLOSE_WAIT  | TCPF_LAST_ACK))
-               tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
+               tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
+                              0, GFP_ATOMIC);
 }
 /*
  * One tasklet per cpu tries to send more skbs.
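
The first hunk makes tcp_tsq_handler() pass the socket's current nonagle state to tcp_write_xmit() instead of a hard-coded 0, so a TCP_NODELAY or TCP_CORK setting chosen by the application is still honored when transmission is resumed from the TSQ tasklet. For reference, a minimal userspace sketch (illustration only, not part of the patch) of how that state gets set:

/* Userspace sketch: TCP_NODELAY is what ends up in the socket's nonagle
 * state, the value the hunk above now forwards to tcp_write_xmit().
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int one = 1;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        /* Disable Nagle; the kernel records this in the socket's nonagle field */
        if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)) < 0)
                perror("setsockopt(TCP_NODELAY)");
        close(fd);
        return 0;
}
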
@@ -754,6 +755,17 @@ void tcp_release_cb(struct sock *sk)
        if (flags & (1UL << TCP_TSQ_DEFERRED))
                tcp_tsq_handler(sk);
 
+       /* Here begins the tricky part:
+        * We are called from release_sock() with:
+        * 1) BH disabled
+        * 2) sk_lock.slock spinlock held
+        * 3) socket owned by us (sk->sk_lock.owned == 1)
+        *
+        * But the following code is meant to be called from BH handlers,
+        * so we should keep BH disabled, but release socket ownership early.
+        */
+       sock_release_ownership(sk);
+
        if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
                tcp_write_timer_handler(sk);
                __sock_put(sk);
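
The comment added above spells out the constraint: tcp_release_cb() is invoked from release_sock() with BH disabled and sk_lock.slock held, but with the socket still owned by the process, while the deferred handlers that follow (tcp_write_timer_handler() and the other *_DEFERRED callbacks) were written to run from BH context where the socket is not owned. Dropping ownership first, while keeping BH disabled, lets them run under the conditions they expect. A simplified stand-in for the helper is sketched below; the field names mirror the kernel's, but the structures are cut down and the exact definition of sock_release_ownership() should be treated as an assumption of this note.

/* Cut-down model, not the real kernel definitions: releasing ownership only
 * clears the "owned by process context" flag; the underlying spinlock and the
 * BH-disabled state are left untouched.
 */
struct socket_lock_model {
        int owned;                      /* 1 while a process context owns the socket */
};

struct sock_model {
        struct socket_lock_model sk_lock;
};

static inline void sock_release_ownership_model(struct sock_model *sk)
{
        sk->sk_lock.owned = 0;          /* deferred handlers now see an un-owned socket */
}
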
@@ -1875,7 +1887,15 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 
                if (atomic_read(&sk->sk_wmem_alloc) > limit) {
                        set_bit(TSQ_THROTTLED, &tp->tsq_flags);
-                       break;
+                       /* It is possible TX completion already happened
+                        * before we set TSQ_THROTTLED, so we must
+                        * test the condition again.
+                        * We abuse smp_mb__after_clear_bit() because
+                        * there is no smp_mb__after_set_bit() yet
+                        */
+                       smp_mb__after_clear_bit();
+                       if (atomic_read(&sk->sk_wmem_alloc) > limit)
+                               break;
                }
 
                limit = mss_now;
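
This hunk closes a race in tcp_write_xmit(): a TX completion may free queued data between the first sk_wmem_alloc test and the set_bit(), and since the completion path only reschedules transmission when it already sees TSQ_THROTTLED, the flow could stall with the flag set and nothing left to clear it. Re-testing the condition after a full barrier catches that window; smp_mb__after_clear_bit() is pressed into service because no smp_mb__after_set_bit() existed at this point. Below is a userspace model of the same set-flag / barrier / re-check pattern, written with C11 atomics and purely illustrative names:

/* Userspace model of the pattern above; the names are illustrative and these
 * are not kernel APIs.
 */
#include <stdatomic.h>
#include <stdbool.h>

atomic_uint wmem_alloc;         /* bytes in flight, decremented by "completions" */
atomic_uint throttled;          /* a completion only reschedules us if this is set */

bool should_throttle(unsigned int limit)
{
        if (atomic_load(&wmem_alloc) <= limit)
                return false;

        /* Publish the flag first (a relaxed store mirrors set_bit(), which
         * carries no ordering of its own)...
         */
        atomic_store_explicit(&throttled, 1, memory_order_relaxed);
        /* ...then force a full barrier before re-reading the counter, so a
         * completion that ran in the meantime either saw the flag or is seen
         * by the re-check below.
         */
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load(&wmem_alloc) > limit)
                return true;    /* still over the limit: really stop sending */

        return false;           /* a completion raced with us: keep sending */
}

In C11 a seq_cst store would already provide the ordering; the relaxed store plus an explicit fence is kept only to mirror the kernel's split between set_bit() and a separate memory barrier.
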
@@ -2408,13 +2428,15 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                if (!tp->retrans_stamp)
                        tp->retrans_stamp = TCP_SKB_CB(skb)->when;
 
-               tp->undo_retrans += tcp_skb_pcount(skb);
-
                /* snd_nxt is stored to detect loss of retransmitted segment,
                 * see tcp_input.c tcp_sacktag_write_queue().
                 */
                TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
        }
+
+       if (tp->undo_retrans < 0)
+               tp->undo_retrans = 0;
+       tp->undo_retrans += tcp_skb_pcount(skb);
        return err;
 }
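
The undo_retrans hunk above moves the per-segment accounting out of the block that also records retrans_stamp and ack_seq, so it now runs unconditionally before the return, and it resets a negative counter before adding the new tcp_skb_pcount(). undo_retrans is the counter that DSACK processing in tcp_input.c walks back down to recognize spurious retransmissions, so keeping it from sticking at a negative value matters for that undo logic. A toy model of the bookkeeping, with illustrative names only:

/* Toy model of the accounting, not kernel code: the field and helper names
 * only mirror the ones involved in the hunk.
 */
#include <stdio.h>

static int undo_retrans;

static void dsack_walks_back(int pcount)        /* DSACK reported dup-received data */
{
        undo_retrans -= pcount;
}

static void account_retransmit(int pcount)      /* mirrors the clamp in the hunk */
{
        if (undo_retrans < 0)
                undo_retrans = 0;
        undo_retrans += pcount;
}

int main(void)
{
        dsack_walks_back(2);            /* counter may end up below zero */
        account_retransmit(1);          /* clamped to 0, then counts the new segment */
        printf("undo_retrans=%d\n", undo_retrans);
        return 0;
}
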
 
@@ -2883,7 +2905,12 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
        space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
                MAX_TCP_OPTION_SPACE;
 
-       syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
+       space = min_t(size_t, space, fo->size);
+
+       /* limit to order-0 allocations */
+       space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
+
+       syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
                                   sk->sk_allocation);
        if (syn_data == NULL)
                goto fallback;
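
The final hunk tightens tcp_send_syn_data(), the TCP Fast Open path that copies queued payload into the outgoing SYN: the copy is now capped both at the amount the caller actually queued (fo->size) and at what fits in an order-0 skb allocation, and the new skb reserves MAX_TCP_HEADER of headroom rather than reusing whatever headroom the original SYN happened to have. For context, a minimal userspace sketch of how such payload gets queued in the first place; the address, port and payload here are illustrative, and the client side of net.ipv4.tcp_fastopen must be enabled for the data to actually ride in the SYN:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000         /* older libc headers may lack the define */
#endif

int main(void)
{
        const char payload[] = "GET / HTTP/1.0\r\n\r\n";
        struct sockaddr_in addr;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_port = htons(80);
        addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

        /* Implicit connect: the payload is queued so the kernel can carry
         * (part of) it in the SYN, subject to the caps added by this hunk.
         */
        if (sendto(fd, payload, sizeof(payload) - 1, MSG_FASTOPEN,
                   (struct sockaddr *)&addr, sizeof(addr)) < 0)
                perror("sendto(MSG_FASTOPEN)");
        close(fd);
        return 0;
}
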