drivers/iio/industrialio-buffer.c
1 /* The industrial I/O core
2  *
3  * Copyright (c) 2008 Jonathan Cameron
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  *
9  * Handling of buffer allocation / resizing.
10  *
11  *
12  * Things to look at here.
13  * - Better memory allocation techniques?
14  * - Alternative access techniques?
15  */
16 #include <linux/kernel.h>
17 #include <linux/export.h>
18 #include <linux/device.h>
19 #include <linux/fs.h>
20 #include <linux/cdev.h>
21 #include <linux/slab.h>
22 #include <linux/poll.h>
23 #include <linux/sched.h>
24
25 #include <linux/iio/iio.h>
26 #include "iio_core.h"
27 #include <linux/iio/sysfs.h>
28 #include <linux/iio/buffer.h>
29
30 static const char * const iio_endian_prefix[] = {
31         [IIO_BE] = "be",
32         [IIO_LE] = "le",
33 };
34
35 static bool iio_buffer_is_active(struct iio_buffer *buf)
36 {
37         return !list_empty(&buf->buffer_list);
38 }
39
40 static bool iio_buffer_data_available(struct iio_buffer *buf)
41 {
42         return buf->access->data_available(buf);
43 }
44
45 /**
46  * iio_buffer_read_first_n_outer() - chrdev read for buffer access
47  *
48  * This function relies on all buffer implementations having an
49  * iio_buffer as their first element.
50  **/
51 ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
52                                       size_t n, loff_t *f_ps)
53 {
54         struct iio_dev *indio_dev = filp->private_data;
55         struct iio_buffer *rb = indio_dev->buffer;
56         int ret;
57
58         if (!indio_dev->info)
59                 return -ENODEV;
60
61         if (!rb || !rb->access->read_first_n)
62                 return -EINVAL;
63
64         do {
65                 if (!iio_buffer_data_available(rb)) {
66                         if (filp->f_flags & O_NONBLOCK)
67                                 return -EAGAIN;
68
69                         ret = wait_event_interruptible(rb->pollq,
70                                         iio_buffer_data_available(rb) ||
71                                         indio_dev->info == NULL);
72                         if (ret)
73                                 return ret;
74                         if (indio_dev->info == NULL)
75                                 return -ENODEV;
76                 }
77
78                 ret = rb->access->read_first_n(rb, n, buf);
79                 if (ret == 0 && (filp->f_flags & O_NONBLOCK))
80                         ret = -EAGAIN;
81         } while (ret == 0);
82
83         return ret;
84 }
85
86 /**
87  * iio_buffer_poll() - poll the buffer to find out if it has data
88  */
89 unsigned int iio_buffer_poll(struct file *filp,
90                              struct poll_table_struct *wait)
91 {
92         struct iio_dev *indio_dev = filp->private_data;
93         struct iio_buffer *rb = indio_dev->buffer;
94
95         if (!indio_dev->info)
96                 return -ENODEV;
97
98         poll_wait(filp, &rb->pollq, wait);
99         if (iio_buffer_data_available(rb))
100                 return POLLIN | POLLRDNORM;
101         /* need a way of knowing if there may be enough data... */
102         return 0;
103 }
104
105 /**
106  * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
107  * @indio_dev: The IIO device
108  *
109  * Wakes up the event waitqueue used for poll(). Should usually
110  * be called when the device is unregistered.
111  */
112 void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
113 {
114         if (!indio_dev->buffer)
115                 return;
116
117         wake_up(&indio_dev->buffer->pollq);
118 }
119
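/**
 * iio_buffer_init() - initialize an iio_buffer structure
 * @buffer: buffer to be initialized
 *
 * Sets up the demux and buffer lists, the poll waitqueue and the
 * initial reference count. Buffer implementations must call this
 * before the buffer is used with any of the other buffer helpers.
 */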
120 void iio_buffer_init(struct iio_buffer *buffer)
121 {
122         INIT_LIST_HEAD(&buffer->demux_list);
123         INIT_LIST_HEAD(&buffer->buffer_list);
124         init_waitqueue_head(&buffer->pollq);
125         kref_init(&buffer->ref);
126 }
127 EXPORT_SYMBOL(iio_buffer_init);
128
129 static ssize_t iio_show_scan_index(struct device *dev,
130                                    struct device_attribute *attr,
131                                    char *buf)
132 {
133         return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
134 }
135
136 static ssize_t iio_show_fixed_type(struct device *dev,
137                                    struct device_attribute *attr,
138                                    char *buf)
139 {
140         struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
141         u8 type = this_attr->c->scan_type.endianness;
142
143         if (type == IIO_CPU) {
144 #ifdef __LITTLE_ENDIAN
145                 type = IIO_LE;
146 #else
147                 type = IIO_BE;
148 #endif
149         }
150         if (this_attr->c->scan_type.repeat > 1)
151                 return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
152                        iio_endian_prefix[type],
153                        this_attr->c->scan_type.sign,
154                        this_attr->c->scan_type.realbits,
155                        this_attr->c->scan_type.storagebits,
156                        this_attr->c->scan_type.repeat,
157                        this_attr->c->scan_type.shift);
158         else
159                 return sprintf(buf, "%s:%c%d/%d>>%u\n",
160                        iio_endian_prefix[type],
161                        this_attr->c->scan_type.sign,
162                        this_attr->c->scan_type.realbits,
163                        this_attr->c->scan_type.storagebits,
164                        this_attr->c->scan_type.shift);
165 }
166
167 static ssize_t iio_scan_el_show(struct device *dev,
168                                 struct device_attribute *attr,
169                                 char *buf)
170 {
171         int ret;
172         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
173
174         /* Ensure ret is 0 or 1. */
175         ret = !!test_bit(to_iio_dev_attr(attr)->address,
176                        indio_dev->buffer->scan_mask);
177
178         return sprintf(buf, "%d\n", ret);
179 }
180
181 /* Note: NULL is used as the error indicator, since a NULL match makes no sense. */
182 static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
183                                           unsigned int masklength,
184                                           const unsigned long *mask)
185 {
186         if (bitmap_empty(mask, masklength))
187                 return NULL;
188         while (*av_masks) {
189                 if (bitmap_subset(mask, av_masks, masklength))
190                         return av_masks;
191                 av_masks += BITS_TO_LONGS(masklength);
192         }
193         return NULL;
194 }
195
196 static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
197         const unsigned long *mask)
198 {
199         if (!indio_dev->setup_ops->validate_scan_mask)
200                 return true;
201
202         return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
203 }
204
205 /**
206  * iio_scan_mask_set() - set particular bit in the scan mask
207  * @indio_dev: the iio device
208  * @buffer: the buffer whose scan mask we are interested in
209  * @bit: the bit to be set.
210  *
211  * Note that at this point we have no way of knowing what other
212  * buffers might request, hence this code only verifies that the
213  * individual buffer's request is plausible.
214  */
215 static int iio_scan_mask_set(struct iio_dev *indio_dev,
216                       struct iio_buffer *buffer, int bit)
217 {
218         const unsigned long *mask;
219         unsigned long *trialmask;
220
221         trialmask = kmalloc(sizeof(*trialmask)*
222                             BITS_TO_LONGS(indio_dev->masklength),
223                             GFP_KERNEL);
224
225         if (trialmask == NULL)
226                 return -ENOMEM;
227         if (!indio_dev->masklength) {
228                 WARN(1, "Trying to set scanmask prior to registering buffer\n");
229                 goto err_invalid_mask;
230         }
231         bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
232         set_bit(bit, trialmask);
233
234         if (!iio_validate_scan_mask(indio_dev, trialmask))
235                 goto err_invalid_mask;
236
237         if (indio_dev->available_scan_masks) {
238                 mask = iio_scan_mask_match(indio_dev->available_scan_masks,
239                                            indio_dev->masklength,
240                                            trialmask);
241                 if (!mask)
242                         goto err_invalid_mask;
243         }
244         bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
245
246         kfree(trialmask);
247
248         return 0;
249
250 err_invalid_mask:
251         kfree(trialmask);
252         return -EINVAL;
253 }
254
255 static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
256 {
257         clear_bit(bit, buffer->scan_mask);
258         return 0;
259 }
260
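/*
 * Sysfs store callback for the per-channel 'en' attribute in
 * scan_elements. Sets or clears the channel's bit in the buffer's scan
 * mask, rejecting the change with -EBUSY while the buffer is active.
 */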
261 static ssize_t iio_scan_el_store(struct device *dev,
262                                  struct device_attribute *attr,
263                                  const char *buf,
264                                  size_t len)
265 {
266         int ret;
267         bool state;
268         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
269         struct iio_buffer *buffer = indio_dev->buffer;
270         struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
271
272         ret = strtobool(buf, &state);
273         if (ret < 0)
274                 return ret;
275         mutex_lock(&indio_dev->mlock);
276         if (iio_buffer_is_active(indio_dev->buffer)) {
277                 ret = -EBUSY;
278                 goto error_ret;
279         }
280         ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
281         if (ret < 0)
282                 goto error_ret;
283         if (!state && ret) {
284                 ret = iio_scan_mask_clear(buffer, this_attr->address);
285                 if (ret)
286                         goto error_ret;
287         } else if (state && !ret) {
288                 ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
289                 if (ret)
290                         goto error_ret;
291         }
292
293 error_ret:
294         mutex_unlock(&indio_dev->mlock);
295
296         return ret < 0 ? ret : len;
297
298 }
299
300 static ssize_t iio_scan_el_ts_show(struct device *dev,
301                                    struct device_attribute *attr,
302                                    char *buf)
303 {
304         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
305         return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
306 }
307
308 static ssize_t iio_scan_el_ts_store(struct device *dev,
309                                     struct device_attribute *attr,
310                                     const char *buf,
311                                     size_t len)
312 {
313         int ret;
314         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
315         bool state;
316
317         ret = strtobool(buf, &state);
318         if (ret < 0)
319                 return ret;
320
321         mutex_lock(&indio_dev->mlock);
322         if (iio_buffer_is_active(indio_dev->buffer)) {
323                 ret = -EBUSY;
324                 goto error_ret;
325         }
326         indio_dev->buffer->scan_timestamp = state;
327 error_ret:
328         mutex_unlock(&indio_dev->mlock);
329
330         return ret ? ret : len;
331 }
332
333 static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
334                                         const struct iio_chan_spec *chan)
335 {
336         int ret, attrcount = 0;
337         struct iio_buffer *buffer = indio_dev->buffer;
338
339         ret = __iio_add_chan_devattr("index",
340                                      chan,
341                                      &iio_show_scan_index,
342                                      NULL,
343                                      0,
344                                      IIO_SEPARATE,
345                                      &indio_dev->dev,
346                                      &buffer->scan_el_dev_attr_list);
347         if (ret)
348                 return ret;
349         attrcount++;
350         ret = __iio_add_chan_devattr("type",
351                                      chan,
352                                      &iio_show_fixed_type,
353                                      NULL,
354                                      0,
355                                      0,
356                                      &indio_dev->dev,
357                                      &buffer->scan_el_dev_attr_list);
358         if (ret)
359                 return ret;
360         attrcount++;
361         if (chan->type != IIO_TIMESTAMP)
362                 ret = __iio_add_chan_devattr("en",
363                                              chan,
364                                              &iio_scan_el_show,
365                                              &iio_scan_el_store,
366                                              chan->scan_index,
367                                              0,
368                                              &indio_dev->dev,
369                                              &buffer->scan_el_dev_attr_list);
370         else
371                 ret = __iio_add_chan_devattr("en",
372                                              chan,
373                                              &iio_scan_el_ts_show,
374                                              &iio_scan_el_ts_store,
375                                              chan->scan_index,
376                                              0,
377                                              &indio_dev->dev,
378                                              &buffer->scan_el_dev_attr_list);
379         if (ret)
380                 return ret;
381         attrcount++;
382         ret = attrcount;
383         return ret;
384 }
385
386 static ssize_t iio_buffer_read_length(struct device *dev,
387                                       struct device_attribute *attr,
388                                       char *buf)
389 {
390         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
391         struct iio_buffer *buffer = indio_dev->buffer;
392
393         return sprintf(buf, "%d\n", buffer->length);
394 }
395
396 static ssize_t iio_buffer_write_length(struct device *dev,
397                                        struct device_attribute *attr,
398                                        const char *buf, size_t len)
399 {
400         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
401         struct iio_buffer *buffer = indio_dev->buffer;
402         unsigned int val;
403         int ret;
404
405         ret = kstrtouint(buf, 10, &val);
406         if (ret)
407                 return ret;
408
409         if (val == buffer->length)
410                 return len;
411
412         mutex_lock(&indio_dev->mlock);
413         if (iio_buffer_is_active(indio_dev->buffer)) {
414                 ret = -EBUSY;
415         } else {
416                 buffer->access->set_length(buffer, val);
417                 ret = 0;
418         }
419         mutex_unlock(&indio_dev->mlock);
420
421         return ret ? ret : len;
422 }
423
424 static ssize_t iio_buffer_show_enable(struct device *dev,
425                                       struct device_attribute *attr,
426                                       char *buf)
427 {
428         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
429         return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
430 }
431
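/*
 * Compute the number of bytes one demuxed scan occupies for the given
 * scan mask: each enabled channel is naturally aligned to its own
 * storage size, with the timestamp (if requested) placed last.
 */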
432 static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
433                                 const unsigned long *mask, bool timestamp)
434 {
435         const struct iio_chan_spec *ch;
436         unsigned bytes = 0;
437         int length, i;
438
439         /* How much space will the demuxed element take? */
440         for_each_set_bit(i, mask,
441                          indio_dev->masklength) {
442                 ch = iio_find_channel_from_si(indio_dev, i);
443                 if (ch->scan_type.repeat > 1)
444                         length = ch->scan_type.storagebits / 8 *
445                                 ch->scan_type.repeat;
446                 else
447                         length = ch->scan_type.storagebits / 8;
448                 bytes = ALIGN(bytes, length);
449                 bytes += length;
450         }
451         if (timestamp) {
452                 ch = iio_find_channel_from_si(indio_dev,
453                                               indio_dev->scan_index_timestamp);
454                 if (ch->scan_type.repeat > 1)
455                         length = ch->scan_type.storagebits / 8 *
456                                 ch->scan_type.repeat;
457                 else
458                         length = ch->scan_type.storagebits / 8;
459                 bytes = ALIGN(bytes, length);
460                 bytes += length;
461         }
462         return bytes;
463 }
464
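/*
 * Attach a buffer to the device's list of active buffers, taking a
 * reference that is dropped again by iio_buffer_deactivate().
 */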
465 static void iio_buffer_activate(struct iio_dev *indio_dev,
466         struct iio_buffer *buffer)
467 {
468         iio_buffer_get(buffer);
469         list_add(&buffer->buffer_list, &indio_dev->buffer_list);
470 }
471
472 static void iio_buffer_deactivate(struct iio_buffer *buffer)
473 {
474         list_del_init(&buffer->buffer_list);
475         iio_buffer_put(buffer);
476 }
477
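/*
 * Disable every active buffer on the device, running the predisable
 * and postdisable callbacks and dropping the references taken when the
 * buffers were activated. Intended for use by the core when the device
 * goes away.
 */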
478 void iio_disable_all_buffers(struct iio_dev *indio_dev)
479 {
480         struct iio_buffer *buffer, *_buffer;
481
482         if (list_empty(&indio_dev->buffer_list))
483                 return;
484
485         if (indio_dev->setup_ops->predisable)
486                 indio_dev->setup_ops->predisable(indio_dev);
487
488         list_for_each_entry_safe(buffer, _buffer,
489                         &indio_dev->buffer_list, buffer_list)
490                 iio_buffer_deactivate(buffer);
491
492         indio_dev->currentmode = INDIO_DIRECT_MODE;
493         if (indio_dev->setup_ops->postdisable)
494                 indio_dev->setup_ops->postdisable(indio_dev);
495
496         if (indio_dev->available_scan_masks == NULL)
497                 kfree(indio_dev->active_scan_mask);
498 }
499
500 static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
501         struct iio_buffer *buffer)
502 {
503         unsigned int bytes;
504
505         if (!buffer->access->set_bytes_per_datum)
506                 return;
507
508         bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
509                 buffer->scan_timestamp);
510
511         buffer->access->set_bytes_per_datum(buffer, bytes);
512 }
513
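/*
 * Core buffer enable/disable path, called with mlock held. Winds down
 * any currently running buffers, applies the requested insertion or
 * removal, recomputes the compound scan mask, bytes per scan and demux
 * tables, and then re-enables capture through the setup_ops callbacks,
 * rolling back on failure.
 */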
514 static int __iio_update_buffers(struct iio_dev *indio_dev,
515                        struct iio_buffer *insert_buffer,
516                        struct iio_buffer *remove_buffer)
517 {
518         int ret;
519         int success = 0;
520         struct iio_buffer *buffer;
521         unsigned long *compound_mask;
522         const unsigned long *old_mask;
523
524         /* Wind down existing buffers - iff there are any */
525         if (!list_empty(&indio_dev->buffer_list)) {
526                 if (indio_dev->setup_ops->predisable) {
527                         ret = indio_dev->setup_ops->predisable(indio_dev);
528                         if (ret)
529                                 return ret;
530                 }
531                 indio_dev->currentmode = INDIO_DIRECT_MODE;
532                 if (indio_dev->setup_ops->postdisable) {
533                         ret = indio_dev->setup_ops->postdisable(indio_dev);
534                         if (ret)
535                                 return ret;
536                 }
537         }
538         /* Keep a copy of current setup to allow roll back */
539         old_mask = indio_dev->active_scan_mask;
540         if (!indio_dev->available_scan_masks)
541                 indio_dev->active_scan_mask = NULL;
542
543         if (remove_buffer)
544                 iio_buffer_deactivate(remove_buffer);
545         if (insert_buffer)
546                 iio_buffer_activate(indio_dev, insert_buffer);
547
548         /* If no buffers in list, we are done */
549         if (list_empty(&indio_dev->buffer_list)) {
550                 indio_dev->currentmode = INDIO_DIRECT_MODE;
551                 if (indio_dev->available_scan_masks == NULL)
552                         kfree(old_mask);
553                 return 0;
554         }
555
556         /* What scan mask do we actually have? */
557         compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
558                                 sizeof(long), GFP_KERNEL);
559         if (compound_mask == NULL) {
560                 if (indio_dev->available_scan_masks == NULL)
561                         kfree(old_mask);
562                 return -ENOMEM;
563         }
564         indio_dev->scan_timestamp = 0;
565
566         list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
567                 bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
568                           indio_dev->masklength);
569                 indio_dev->scan_timestamp |= buffer->scan_timestamp;
570         }
571         if (indio_dev->available_scan_masks) {
572                 indio_dev->active_scan_mask =
573                         iio_scan_mask_match(indio_dev->available_scan_masks,
574                                             indio_dev->masklength,
575                                             compound_mask);
576                 if (indio_dev->active_scan_mask == NULL) {
577                         /*
578                          * Roll back.
579                          * Note can only occur when adding a buffer.
580                          */
581                         iio_buffer_deactivate(insert_buffer);
582                         if (old_mask) {
583                                 indio_dev->active_scan_mask = old_mask;
584                                 success = -EINVAL;
585                         }
586                         else {
587                                 kfree(compound_mask);
588                                 ret = -EINVAL;
589                                 return ret;
590                         }
591                 }
592         } else {
593                 indio_dev->active_scan_mask = compound_mask;
594         }
595
596         iio_update_demux(indio_dev);
597
598         /* Wind up again */
599         if (indio_dev->setup_ops->preenable) {
600                 ret = indio_dev->setup_ops->preenable(indio_dev);
601                 if (ret) {
602                         printk(KERN_ERR
603                                "Buffer not started: buffer preenable failed (%d)\n", ret);
604                         goto error_remove_inserted;
605                 }
606         }
607         indio_dev->scan_bytes =
608                 iio_compute_scan_bytes(indio_dev,
609                                        indio_dev->active_scan_mask,
610                                        indio_dev->scan_timestamp);
611         list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
612                 iio_buffer_update_bytes_per_datum(indio_dev, buffer);
613                 if (buffer->access->request_update) {
614                         ret = buffer->access->request_update(buffer);
615                         if (ret) {
616                                 printk(KERN_INFO
617                                        "Buffer not started: buffer parameter update failed (%d)\n", ret);
618                                 goto error_run_postdisable;
619                         }
620                 }
621         }
622         if (indio_dev->info->update_scan_mode) {
623                 ret = indio_dev->info
624                         ->update_scan_mode(indio_dev,
625                                            indio_dev->active_scan_mask);
626                 if (ret < 0) {
627                         printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
628                         goto error_run_postdisable;
629                 }
630         }
631         /* Definitely possible for devices to support both of these. */
632         if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
633                 if (!indio_dev->trig) {
634                         printk(KERN_INFO "Buffer not started: no trigger\n");
635                         ret = -EINVAL;
636                         /* Can only occur on first buffer */
637                         goto error_run_postdisable;
638                 }
639                 indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
640         } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
641                 indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
642         } else if (indio_dev->modes & INDIO_BUFFER_SOFTWARE) {
643                 indio_dev->currentmode = INDIO_BUFFER_SOFTWARE;
644         } else { /* Should never be reached */
645                 ret = -EINVAL;
646                 goto error_run_postdisable;
647         }
648
649         if (indio_dev->setup_ops->postenable) {
650                 ret = indio_dev->setup_ops->postenable(indio_dev);
651                 if (ret) {
652                         printk(KERN_INFO
653                                "Buffer not started: postenable failed (%d)\n", ret);
654                         indio_dev->currentmode = INDIO_DIRECT_MODE;
655                         if (indio_dev->setup_ops->postdisable)
656                                 indio_dev->setup_ops->postdisable(indio_dev);
657                         goto error_disable_all_buffers;
658                 }
659         }
660
661         if (indio_dev->available_scan_masks)
662                 kfree(compound_mask);
663         else
664                 kfree(old_mask);
665
666         return success;
667
668 error_disable_all_buffers:
669         indio_dev->currentmode = INDIO_DIRECT_MODE;
670 error_run_postdisable:
671         if (indio_dev->setup_ops->postdisable)
672                 indio_dev->setup_ops->postdisable(indio_dev);
673 error_remove_inserted:
674         if (insert_buffer)
675                 iio_buffer_deactivate(insert_buffer);
676         indio_dev->active_scan_mask = old_mask;
677         kfree(compound_mask);
678         return ret;
679 }
680
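/**
 * iio_update_buffers() - add or remove a buffer from the active set
 * @indio_dev: device the buffers are attached to
 * @insert_buffer: buffer to enable, may be NULL
 * @remove_buffer: buffer to disable, may be NULL
 *
 * Locked wrapper around __iio_update_buffers(). Requests that would
 * not change the current state (e.g. inserting an already active
 * buffer) are ignored.
 */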
681 int iio_update_buffers(struct iio_dev *indio_dev,
682                        struct iio_buffer *insert_buffer,
683                        struct iio_buffer *remove_buffer)
684 {
685         int ret;
686
687         if (insert_buffer == remove_buffer)
688                 return 0;
689
690         mutex_lock(&indio_dev->info_exist_lock);
691         mutex_lock(&indio_dev->mlock);
692
693         if (insert_buffer && iio_buffer_is_active(insert_buffer))
694                 insert_buffer = NULL;
695
696         if (remove_buffer && !iio_buffer_is_active(remove_buffer))
697                 remove_buffer = NULL;
698
699         if (!insert_buffer && !remove_buffer) {
700                 ret = 0;
701                 goto out_unlock;
702         }
703
704         if (indio_dev->info == NULL) {
705                 ret = -ENODEV;
706                 goto out_unlock;
707         }
708
709         ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
710
711 out_unlock:
712         mutex_unlock(&indio_dev->mlock);
713         mutex_unlock(&indio_dev->info_exist_lock);
714
715         return ret;
716 }
717 EXPORT_SYMBOL_GPL(iio_update_buffers);
718
719 static ssize_t iio_buffer_store_enable(struct device *dev,
720                                        struct device_attribute *attr,
721                                        const char *buf,
722                                        size_t len)
723 {
724         int ret;
725         bool requested_state;
726         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
727         bool inlist;
728
729         ret = strtobool(buf, &requested_state);
730         if (ret < 0)
731                 return ret;
732
733         mutex_lock(&indio_dev->mlock);
734
735         /* Find out if it is in the list */
736         inlist = iio_buffer_is_active(indio_dev->buffer);
737         /* Already in desired state */
738         if (inlist == requested_state)
739                 goto done;
740
741         if (requested_state)
742                 ret = __iio_update_buffers(indio_dev,
743                                          indio_dev->buffer, NULL);
744         else
745                 ret = __iio_update_buffers(indio_dev,
746                                          NULL, indio_dev->buffer);
747
748         if (ret < 0)
749                 goto done;
750 done:
751         mutex_unlock(&indio_dev->mlock);
752         return (ret < 0) ? ret : len;
753 }
754
755 static const char * const iio_scan_elements_group_name = "scan_elements";
756
757 static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
758                    iio_buffer_write_length);
759 static struct device_attribute dev_attr_length_ro = __ATTR(length,
760         S_IRUGO, iio_buffer_read_length, NULL);
761 static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
762                    iio_buffer_show_enable, iio_buffer_store_enable);
763
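/*
 * Create the sysfs groups for the device's buffer: the "buffer" group
 * with the length and enable attributes, and the "scan_elements" group
 * with one index/type/en triplet per channel. Also establishes the
 * device mask length and allocates the buffer's scan mask.
 */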
764 int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
765 {
766         struct iio_dev_attr *p;
767         struct attribute **attr;
768         struct iio_buffer *buffer = indio_dev->buffer;
769         int ret, i, attrn, attrcount, attrcount_orig = 0;
770         const struct iio_chan_spec *channels;
771
772         if (!buffer)
773                 return 0;
774
775         attrcount = 0;
776         if (buffer->attrs) {
777                 while (buffer->attrs[attrcount] != NULL)
778                         attrcount++;
779         }
780
781         buffer->buffer_group.name = "buffer";
782         buffer->buffer_group.attrs = kcalloc(attrcount + 3,
783                         sizeof(*buffer->buffer_group.attrs), GFP_KERNEL);
784         if (!buffer->buffer_group.attrs)
785                 return -ENOMEM;
786
787         if (buffer->access->set_length)
788                 buffer->buffer_group.attrs[0] = &dev_attr_length.attr;
789         else
790                 buffer->buffer_group.attrs[0] = &dev_attr_length_ro.attr;
791         buffer->buffer_group.attrs[1] = &dev_attr_enable.attr;
792         if (buffer->attrs)
793                 memcpy(&buffer->buffer_group.attrs[2], buffer->attrs,
794                         sizeof(*buffer->buffer_group.attrs) * attrcount);
795         buffer->buffer_group.attrs[attrcount+2] = NULL;
796
797         indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;
798
799         if (buffer->scan_el_attrs != NULL) {
800                 attr = buffer->scan_el_attrs->attrs;
801                 while (*attr++ != NULL)
802                         attrcount_orig++;
803         }
804         attrcount = attrcount_orig;
805         INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
806         channels = indio_dev->channels;
807         if (channels) {
808                 /* Build the scan-element attributes for each channel */
809                 for (i = 0; i < indio_dev->num_channels; i++) {
810                         if (channels[i].scan_index < 0)
811                                 continue;
812
813                         /* Establish necessary mask length */
814                         if (channels[i].scan_index >
815                             (int)indio_dev->masklength - 1)
816                                 indio_dev->masklength
817                                         = channels[i].scan_index + 1;
818
819                         ret = iio_buffer_add_channel_sysfs(indio_dev,
820                                                          &channels[i]);
821                         if (ret < 0)
822                                 goto error_cleanup_dynamic;
823                         attrcount += ret;
824                         if (channels[i].type == IIO_TIMESTAMP)
825                                 indio_dev->scan_index_timestamp =
826                                         channels[i].scan_index;
827                 }
828                 if (indio_dev->masklength && buffer->scan_mask == NULL) {
829                         buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
830                                                     sizeof(*buffer->scan_mask),
831                                                     GFP_KERNEL);
832                         if (buffer->scan_mask == NULL) {
833                                 ret = -ENOMEM;
834                                 goto error_cleanup_dynamic;
835                         }
836                 }
837         }
838
839         buffer->scan_el_group.name = iio_scan_elements_group_name;
840
841         buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
842                                               sizeof(buffer->scan_el_group.attrs[0]),
843                                               GFP_KERNEL);
844         if (buffer->scan_el_group.attrs == NULL) {
845                 ret = -ENOMEM;
846                 goto error_free_scan_mask;
847         }
848         if (buffer->scan_el_attrs)
849                 memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs->attrs,
850                        sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
851         attrn = attrcount_orig;
852
853         list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
854                 buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
855         indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
856
857         return 0;
858
859 error_free_scan_mask:
860         kfree(buffer->scan_mask);
861 error_cleanup_dynamic:
862         iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
863         kfree(indio_dev->buffer->buffer_group.attrs);
864
865         return ret;
866 }
867
868 void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
869 {
870         if (!indio_dev->buffer)
871                 return;
872
873         kfree(indio_dev->buffer->scan_mask);
874         kfree(indio_dev->buffer->buffer_group.attrs);
875         kfree(indio_dev->buffer->scan_el_group.attrs);
876         iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
877 }
878
879 /**
880  * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
881  * @indio_dev: the iio device
882  * @mask: scan mask to be checked
883  *
884  * Return true if exactly one bit is set in the scan mask, false otherwise. It
885  * can be used for devices where only one channel can be active for sampling at
886  * a time.
887  */
888 bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
889         const unsigned long *mask)
890 {
891         return bitmap_weight(mask, indio_dev->masklength) == 1;
892 }
893 EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
894
895 int iio_scan_mask_query(struct iio_dev *indio_dev,
896                         struct iio_buffer *buffer, int bit)
897 {
898         if (bit > indio_dev->masklength)
899                 return -EINVAL;
900
901         if (!buffer->scan_mask)
902                 return 0;
903
904         /* Ensure return value is 0 or 1. */
905         return !!test_bit(bit, buffer->scan_mask);
906 }
907 EXPORT_SYMBOL_GPL(iio_scan_mask_query);
908
909 /**
910  * struct iio_demux_table - table describing demux memcpy ops
911  * @from:       index to copy from
912  * @to:         index to copy to
913  * @length:     how many bytes to copy
914  * @l:          list head used for management
915  */
916 struct iio_demux_table {
917         unsigned from;
918         unsigned to;
919         unsigned length;
920         struct list_head l;
921 };
922
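/*
 * Apply the buffer's demux table to one incoming scan. When no
 * demuxing is required the original data is returned unchanged,
 * otherwise the selected fields are copied into the bounce buffer.
 */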
923 static const void *iio_demux(struct iio_buffer *buffer,
924                                  const void *datain)
925 {
926         struct iio_demux_table *t;
927
928         if (list_empty(&buffer->demux_list))
929                 return datain;
930         list_for_each_entry(t, &buffer->demux_list, l)
931                 memcpy(buffer->demux_bounce + t->to,
932                        datain + t->from, t->length);
933
934         return buffer->demux_bounce;
935 }
936
937 static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
938 {
939         const void *dataout = iio_demux(buffer, data);
940
941         return buffer->access->store_to(buffer, dataout);
942 }
943
944 static void iio_buffer_demux_free(struct iio_buffer *buffer)
945 {
946         struct iio_demux_table *p, *q;
947         list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
948                 list_del(&p->l);
949                 kfree(p);
950         }
951 }
952
953
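/**
 * iio_push_to_buffers() - push one scan to all attached buffers
 * @indio_dev: iio_dev structure for device
 * @data: one full scan, laid out according to the active scan mask
 *
 * Demuxes the scan for each buffer on the device's buffer list and
 * stores the result via the buffer's store_to() callback. Returns 0 on
 * success or the first error reported by a buffer.
 *
 * A driver typically calls this from its trigger handler once a scan
 * has been assembled, roughly (illustrative sketch, names hypothetical):
 *
 *	iio_push_to_buffers(indio_dev, st->scan_buf);
 *	iio_trigger_notify_done(indio_dev->trig);
 */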
954 int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
955 {
956         int ret;
957         struct iio_buffer *buf;
958
959         list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
960                 ret = iio_push_to_buffer(buf, data);
961                 if (ret < 0)
962                         return ret;
963         }
964
965         return 0;
966 }
967 EXPORT_SYMBOL_GPL(iio_push_to_buffers);
968
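/*
 * Append a copy operation to the buffer's demux table, extending the
 * previous entry instead when the new region is contiguous with it.
 */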
969 static int iio_buffer_add_demux(struct iio_buffer *buffer,
970         struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
971         unsigned int length)
972 {
973
974         if (*p && (*p)->from + (*p)->length == in_loc &&
975                 (*p)->to + (*p)->length == out_loc) {
976                 (*p)->length += length;
977         } else {
978                 *p = kmalloc(sizeof(**p), GFP_KERNEL);
979                 if (*p == NULL)
980                         return -ENOMEM;
981                 (*p)->from = in_loc;
982                 (*p)->to = out_loc;
983                 (*p)->length = length;
984                 list_add_tail(&(*p)->l, &buffer->demux_list);
985         }
986
987         return 0;
988 }
989
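/*
 * Rebuild the demux table for one buffer. If the buffer's scan mask
 * matches the device's active scan mask no demuxing is needed;
 * otherwise build the list of copy operations that extracts the
 * buffer's channels from a full scan and allocate a bounce buffer of
 * the demuxed scan size.
 */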
990 static int iio_buffer_update_demux(struct iio_dev *indio_dev,
991                                    struct iio_buffer *buffer)
992 {
993         const struct iio_chan_spec *ch;
994         int ret, in_ind = -1, out_ind, length;
995         unsigned in_loc = 0, out_loc = 0;
996         struct iio_demux_table *p = NULL;
997
998         /* Clear out any old demux */
999         iio_buffer_demux_free(buffer);
1000         kfree(buffer->demux_bounce);
1001         buffer->demux_bounce = NULL;
1002
1003         /* First work out which scan mode we will actually have */
1004         if (bitmap_equal(indio_dev->active_scan_mask,
1005                          buffer->scan_mask,
1006                          indio_dev->masklength))
1007                 return 0;
1008
1009         /* Now we have the two masks, work from least sig and build up sizes */
1010         for_each_set_bit(out_ind,
1011                          buffer->scan_mask,
1012                          indio_dev->masklength) {
1013                 in_ind = find_next_bit(indio_dev->active_scan_mask,
1014                                        indio_dev->masklength,
1015                                        in_ind + 1);
1016         while (in_ind != out_ind) {
1017                 ch = iio_find_channel_from_si(indio_dev, in_ind);
1018                 if (ch->scan_type.repeat > 1)
1019                         length = ch->scan_type.storagebits / 8 *
1020                                 ch->scan_type.repeat;
1021                 else
1022                         length = ch->scan_type.storagebits / 8;
1023                 /* Make sure we are aligned */
1024                 in_loc = roundup(in_loc, length) + length;
1025                 in_ind = find_next_bit(indio_dev->active_scan_mask,
1026                                        indio_dev->masklength,
1027                                        in_ind + 1);
1028         }
1029                 ch = iio_find_channel_from_si(indio_dev, in_ind);
1030                 if (ch->scan_type.repeat > 1)
1031                         length = ch->scan_type.storagebits / 8 *
1032                                 ch->scan_type.repeat;
1033                 else
1034                         length = ch->scan_type.storagebits / 8;
1035                 out_loc = roundup(out_loc, length);
1036                 in_loc = roundup(in_loc, length);
1037                 ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
1038                 if (ret)
1039                         goto error_clear_mux_table;
1040                 out_loc += length;
1041                 in_loc += length;
1042         }
1043         /* Relies on scan_timestamp being last */
1044         if (buffer->scan_timestamp) {
1045                 ch = iio_find_channel_from_si(indio_dev,
1046                         indio_dev->scan_index_timestamp);
1047                 if (ch->scan_type.repeat > 1)
1048                         length = ch->scan_type.storagebits / 8 *
1049                                 ch->scan_type.repeat;
1050                 else
1051                         length = ch->scan_type.storagebits / 8;
1052                 out_loc = roundup(out_loc, length);
1053                 in_loc = roundup(in_loc, length);
1054                 ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
1055                 if (ret)
1056                         goto error_clear_mux_table;
1057                 out_loc += length;
1058                 in_loc += length;
1059         }
1060         buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
1061         if (buffer->demux_bounce == NULL) {
1062                 ret = -ENOMEM;
1063                 goto error_clear_mux_table;
1064         }
1065         return 0;
1066
1067 error_clear_mux_table:
1068         iio_buffer_demux_free(buffer);
1069
1070         return ret;
1071 }
1072
1073 int iio_update_demux(struct iio_dev *indio_dev)
1074 {
1075         struct iio_buffer *buffer;
1076         int ret;
1077
1078         list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
1079                 ret = iio_buffer_update_demux(indio_dev, buffer);
1080                 if (ret < 0)
1081                         goto error_clear_mux_table;
1082         }
1083         return 0;
1084
1085 error_clear_mux_table:
1086         list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
1087                 iio_buffer_demux_free(buffer);
1088
1089         return ret;
1090 }
1091 EXPORT_SYMBOL_GPL(iio_update_demux);
1092
1093 /**
1094  * iio_buffer_release() - Free a buffer's resources
1095  * @ref: Pointer to the kref embedded in the iio_buffer struct
1096  *
1097  * This function is called when the last reference to the buffer has been
1098  * dropped. It will typically free all resources allocated by the buffer. Do not
1099  * call this function manually, always use iio_buffer_put() when done using a
1100  * buffer.
1101  */
1102 static void iio_buffer_release(struct kref *ref)
1103 {
1104         struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
1105
1106         buffer->access->release(buffer);
1107 }
1108
1109 /**
1110  * iio_buffer_get() - Grab a reference to the buffer
1111  * @buffer: The buffer to grab a reference for, may be NULL
1112  *
1113  * Returns the pointer to the buffer that was passed into the function.
1114  */
1115 struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
1116 {
1117         if (buffer)
1118                 kref_get(&buffer->ref);
1119
1120         return buffer;
1121 }
1122 EXPORT_SYMBOL_GPL(iio_buffer_get);
1123
1124 /**
1125  * iio_buffer_put() - Release the reference to the buffer
1126  * @buffer: The buffer to release the reference for, may be NULL
1127  */
1128 void iio_buffer_put(struct iio_buffer *buffer)
1129 {
1130         if (buffer)
1131                 kref_put(&buffer->ref, iio_buffer_release);
1132 }
1133 EXPORT_SYMBOL_GPL(iio_buffer_put);