Drivers: hv: ring_buffer: use full barriers, switch write path to kvec, enable flow control
[firefly-linux-kernel-4.4.55.git] / drivers / hv / ring_buffer.c
index d6fbb5772b8d62ff371d968bde11779a3a47302b..70a1a9a22f872761aceeaaa56032dd5ec044a729 100644 (file)
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/hyperv.h>
+#include <linux/uio.h>
 
 #include "hyperv_vmbus.h"
 
 void hv_begin_read(struct hv_ring_buffer_info *rbi)
 {
        rbi->ring_buffer->interrupt_mask = 1;
-       smp_mb();
+       mb();
 }
 
 u32 hv_end_read(struct hv_ring_buffer_info *rbi)
@@ -41,7 +42,7 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
        u32 write;
 
        rbi->ring_buffer->interrupt_mask = 0;
-       smp_mb();
+       mb();
 
        /*
         * Now check to see if the ring buffer is still empty.
@@ -71,10 +72,12 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
 
 static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
 {
-       smp_mb();
+       mb();
        if (rbi->ring_buffer->interrupt_mask)
                return false;
 
+       /* check interrupt_mask before read_index */
+       rmb();
        /*
         * This is the only case we need to signal when the
         * ring transitions from being empty to non-empty.
@@ -100,10 +103,9 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
  *    there is room for the producer to send the pending packet.
  */
 
-static bool hv_need_to_signal_on_read(u32 old_rd,
-                                        struct hv_ring_buffer_info *rbi)
+static bool hv_need_to_signal_on_read(u32 prev_write_sz,
+                                     struct hv_ring_buffer_info *rbi)
 {
-       u32 prev_write_sz;
        u32 cur_write_sz;
        u32 r_size;
        u32 write_loc = rbi->ring_buffer->write_index;
@@ -120,10 +122,6 @@ static bool hv_need_to_signal_on_read(u32 old_rd,
        cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
                        read_loc - write_loc;
 
-       prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
-                       old_rd - write_loc;
-
-
        if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
                return true;
 
@@ -358,6 +356,11 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;
 
+       /*
+        * Set the feature bit for enabling flow control.
+        */
+       ring_info->ring_buffer->feature_bits.value = 1;
+
        ring_info->ring_size = buflen;
        ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);
 
@@ -385,23 +388,20 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
  *
  */
 int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
-                   struct scatterlist *sglist, u32 sgcount, bool *signal)
+                   struct kvec *kv_list, u32 kv_count, bool *signal)
 {
        int i = 0;
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 totalbytes_towrite = 0;
 
-       struct scatterlist *sg;
        u32 next_write_location;
        u32 old_write;
        u64 prev_indices = 0;
        unsigned long flags;
 
-       for_each_sg(sglist, sg, sgcount, i)
-       {
-               totalbytes_towrite += sg->length;
-       }
+       for (i = 0; i < kv_count; i++)
+               totalbytes_towrite += kv_list[i].iov_len;
 
        totalbytes_towrite += sizeof(u64);
 
@@ -425,12 +425,11 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 
        old_write = next_write_location;
 
-       for_each_sg(sglist, sg, sgcount, i)
-       {
+       for (i = 0; i < kv_count; i++) {
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                                     next_write_location,
-                                                    sg_virt(sg),
-                                                    sg->length);
+                                                    kv_list[i].iov_base,
+                                                    kv_list[i].iov_len);
        }
 
        /* Set previous packet start */
@@ -442,7 +441,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
                                             sizeof(u64));
 
        /* Issue a full memory barrier before updating the write index */
-       smp_mb();
+       mb();
 
        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);
@@ -513,7 +512,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
        u32 next_read_location = 0;
        u64 prev_indices = 0;
        unsigned long flags;
-       u32 old_read;
 
        if (buflen <= 0)
                return -EINVAL;
@@ -524,8 +522,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
                                &bytes_avail_toread,
                                &bytes_avail_towrite);
 
-       old_read = bytes_avail_toread;
-
        /* Make sure there is something to read */
        if (bytes_avail_toread < buflen) {
                spin_unlock_irqrestore(&inring_info->ring_lock, flags);
@@ -549,14 +545,14 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
        /* Make sure all reads are done before we update the read index since */
        /* the writer may start writing to the read area once the read index */
        /*is updated */
-       smp_mb();
+       mb();
 
        /* Update the read index */
        hv_set_next_read_location(inring_info, next_read_location);
 
        spin_unlock_irqrestore(&inring_info->ring_lock, flags);
 
-       *signal = hv_need_to_signal_on_read(old_read, inring_info);
+       *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
 
        return 0;
 }