[firefly-linux-kernel-4.4.55.git] drivers/hv/ring_buffer.c
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>

#include "hyperv_vmbus.h"

void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	u32 read;
	u32 write;

	rbi->ring_buffer->interrupt_mask = 0;
	mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	hv_get_ringbuffer_availbytes(rbi, &read, &write);

	return read;
}
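
/*
 * Illustrative sketch (not part of this file): the drain loop a
 * consumer is expected to run around hv_begin_read()/hv_end_read() so
 * that a packet arriving in the unmask window is not lost.
 * consume_one() is a hypothetical helper that reads and processes a
 * single packet, returning 0 on success.
 *
 *	do {
 *		hv_begin_read(rbi);
 *		while (consume_one(rbi) == 0)
 *			;
 *	} while (hv_end_read(rbi) != 0);
 */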

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
	mb();
	if (rbi->ring_buffer->interrupt_mask)
		return false;

	/* check interrupt_mask before read_index */
	rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == rbi->ring_buffer->read_index)
		return true;

	return false;
}
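
/*
 * Worked example (illustrative numbers): suppose read_index == 40 and,
 * before our write, write_index == 40 as well, i.e. the ring was empty.
 * old_write (the pre-write write_index) then equals read_index, so the
 * write we just completed made the ring non-empty and the host must be
 * signaled. If old_write != read_index, the ring already held unread
 * data, and by guarantee 2 above the host will keep draining it, so no
 * signal is needed.
 */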

/*
 * To optimize the flow management on the send-side,
 * when the sender is blocked because of lack of
 * sufficient space in the ring buffer, the consumer
 * of the ring buffer can potentially signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz is set to indicate if
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */

static bool hv_need_to_signal_on_read(u32 prev_write_sz,
				      struct hv_ring_buffer_info *rbi)
{
	u32 cur_write_sz;
	u32 r_size;
	u32 write_loc = rbi->ring_buffer->write_index;
	u32 read_loc = rbi->ring_buffer->read_index;
	u32 pending_sz = rbi->ring_buffer->pending_send_sz;

	/*
	 * If the other end is not blocked on write don't bother.
	 */
	if (pending_sz == 0)
		return false;

	r_size = rbi->ring_datasize;
	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
			read_loc - write_loc;

	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
		return true;

	return false;
}
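
/*
 * Worked example (illustrative numbers): with r_size == 4096,
 * write_loc == 3000 and read_loc == 1000, the writable space is
 * 4096 - (3000 - 1000) == 2096 bytes. If the producer declared
 * pending_send_sz == 2000 and prev_write_sz (the writable space
 * before this read) was only 1500, the ring just transitioned from
 * "too full for the pending packet" to "has room", so the producer
 * must be signaled.
 */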

/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 *
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 *
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
		     u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over data (such as a packet
 * descriptor it has already examined) before copying.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				 u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

/*
 *
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 *
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
		    u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}

/*
 *
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}

/*
 *
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 *
 * hv_get_ring_bufferindices()
 *
 * Get the ring buffer indices packed as a u64: the write index is
 * placed in the upper 32 bits (the lower 32 bits are left zero).
 *
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}
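
/*
 * Illustrative example: with write_index == 0x100, this returns
 * 0x0000010000000000. hv_ringbuffer_write() appends this u64 after
 * each packet, so the start offset of the previous packet can later
 * be recovered from the upper 32 bits.
 */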

/*
 *
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy data from the ring buffer to dest.
 * Assume there is enough room. Handles wrap-around in the src case only!!
 *
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else {
		memcpy(dest, ring_buffer + start_read_offset, destlen);
	}

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}

/*
 *
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy from source to the ring buffer.
 * Assume there is enough room. Handles wrap-around in the dest case only!!
 *
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	void				*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else {
		memcpy(ring_buffer + start_write_offset, src, srclen);
	}

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}
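
/*
 * Worked example (illustrative numbers): with ring_buffer_size == 4096,
 * start_write_offset == 4000 and srclen == 200, the copy wraps:
 * frag_len == 96 bytes land at offsets 4000..4095 and the remaining
 * 104 bytes land at offsets 0..103. The returned offset is
 * (4000 + 200) % 4096 == 104.
 */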

/*
 *
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 *
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
			    struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					&bytes_avail_toread,
					&bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/*
 *
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 *
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		   void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/*
	 * Set the feature bit for enabling flow control.
	 */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
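
/*
 * Illustrative usage (not from this file): the buffer handed to
 * hv_ringbuffer_init() must begin with the page-sized struct
 * hv_ring_buffer header followed by the data area, so buflen covers
 * the header plus the usable size. A hypothetical caller:
 *
 *	void *ring = (void *)__get_free_pages(GFP_KERNEL, get_order(len));
 *
 *	if (ring)
 *		ret = hv_ringbuffer_init(&ring_info, ring, len);
 */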

/*
 *
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 *
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/*
 *
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 *
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
		    struct kvec *kv_list, u32 kv_count, bool *signal)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     kv_list[i].iov_base,
						     kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info);
	return 0;
}
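
/*
 * Illustrative usage (not from this file): sending one packet built
 * from a header and a payload. desc, desc_len, payload, payload_len
 * and signal_host() are hypothetical; a real caller would signal the
 * host through the VMBus event mechanism when *signal comes back set.
 *
 *	struct kvec kv[2];
 *	bool signal;
 *	int ret;
 *
 *	kv[0].iov_base = desc;
 *	kv[0].iov_len  = desc_len;
 *	kv[1].iov_base = payload;
 *	kv[1].iov_len  = payload_len;
 *
 *	ret = hv_ringbuffer_write(&outring_info, kv, 2, &signal);
 *	if (ret == 0 && signal)
 *		signal_host();
 */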

/*
 *
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 *
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
		   void *buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);

		return -EAGAIN;
	}

	/* Convert to byte offset */
	next_read_location = hv_get_next_read_location(inring_info);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						buffer,
						buflen,
						next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}

/*
 *
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 *
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		   u32 buflen, u32 offset, bool *signal)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;

	if (buflen <= 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);

		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						buffer,
						buflen,
						next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						&prev_indices,
						sizeof(u64),
						next_read_location);

	/*
	 * Make sure all reads are done before we update the read index
	 * since the writer may start writing to the read area once the
	 * read index is updated.
	 */
	mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	*signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);

	return 0;
}
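
/*
 * Illustrative usage (not from this file): a caller typically peeks at
 * the fixed-size packet descriptor first, then reads the payload with
 * `offset` set to the descriptor size, so the descriptor is skipped
 * and the read index advances past the whole packet. desc, payload
 * and payload_len are hypothetical.
 *
 *	bool signal;
 *	int ret;
 *
 *	ret = hv_ringbuffer_peek(&inring_info, &desc, sizeof(desc));
 *	if (ret == 0)
 *		ret = hv_ringbuffer_read(&inring_info, payload, payload_len,
 *					 sizeof(desc), &signal);
 */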