/* Industrial I/O event handling
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events
 * @read_lock:		mutex protecting userspace reads of the event fifo
 * @det_events:		list of detected events
 * @dev_attr_list:	list of event interface sysfs attribute
 * @flags:		file operations related flags including busy flag.
 * @group:		event interface sysfs attribute group
 */
struct iio_event_interface {
	wait_queue_head_t	wait;
	struct mutex		read_lock;
	DECLARE_KFIFO(det_events, struct iio_event_data, 16);

	struct list_head	dev_attr_list;
	unsigned long		flags;
	struct attribute_group	group;
};
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	struct iio_event_data ev;
	unsigned long flags;
	int copied;

	/* Does anyone care? */
	spin_lock_irqsave(&ev_int->wait.lock, flags);
	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		ev.id = ev_code;
		ev.timestamp = timestamp;

		copied = kfifo_put(&ev_int->det_events, &ev);
		if (copied != 0)
			wake_up_locked_poll(&ev_int->wait, POLLIN);
	}
	spin_unlock_irqrestore(&ev_int->wait.lock, flags);

	return 0;
}
EXPORT_SYMBOL(iio_push_event);
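/*
 * Example (illustrative only, not part of this file): a driver typically
 * calls iio_push_event() from its interrupt handler. The handler name,
 * channel type, channel number and event direction below are assumptions
 * made up for this sketch; a real driver reports whatever its hardware
 * signalled.
 *
 *	static irqreturn_t example_event_handler(int irq, void *private)
 *	{
 *		struct iio_dev *indio_dev = private;
 *
 *		iio_push_event(indio_dev,
 *			       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
 *						    IIO_EV_TYPE_THRESH,
 *						    IIO_EV_DIR_RISING),
 *			       iio_get_time_ns());
 *		return IRQ_HANDLED;
 *	}
 */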
/**
 * iio_event_poll() - poll the event queue to find out if it has data
 */
static unsigned int iio_event_poll(struct file *filep,
				   struct poll_table_struct *wait)
{
	struct iio_event_interface *ev_int = filep->private_data;
	unsigned int events = 0;

	poll_wait(filep, &ev_int->wait, wait);

	spin_lock_irq(&ev_int->wait.lock);
	if (!kfifo_is_empty(&ev_int->det_events))
		events = POLLIN | POLLRDNORM;
	spin_unlock_irq(&ev_int->wait.lock);

	return events;
}
static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count, loff_t *f_ps)
{
	struct iio_event_interface *ev_int = filep->private_data;
	unsigned int copied;
	int ret;

	if (count < sizeof(struct iio_event_data))
		return -EINVAL;

	if (mutex_lock_interruptible(&ev_int->read_lock))
		return -ERESTARTSYS;

	if (kfifo_is_empty(&ev_int->det_events)) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error_unlock;
		}
		/* Blocking on device; waiting for something to be there */
		ret = wait_event_interruptible(ev_int->wait,
					!kfifo_is_empty(&ev_int->det_events));
		if (ret)
			goto error_unlock;
		/* Single access device so no one else can get the data */
	}

	ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);

error_unlock:
	mutex_unlock(&ev_int->read_lock);

	return ret ? ret : copied;
}
static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_event_interface *ev_int = filep->private_data;

	spin_lock_irq(&ev_int->wait.lock);
	__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
	/*
	 * In order to maintain a clean state for reopening,
	 * clear out any awaiting events. The mask will prevent
	 * any new iio_push_event calls running.
	 */
	kfifo_reset_out(&ev_int->det_events);
	spin_unlock_irq(&ev_int->wait.lock);

	return 0;
}
static const struct file_operations iio_event_chrdev_fileops = {
	.read = iio_event_chrdev_read,
	.poll = iio_event_poll,
	.release = iio_event_chrdev_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};
int iio_event_getfd(struct iio_dev *indio_dev)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	int fd;

	if (ev_int == NULL)
		return -ENODEV;

	spin_lock_irq(&ev_int->wait.lock);
	if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		spin_unlock_irq(&ev_int->wait.lock);
		return -EBUSY;
	}
	spin_unlock_irq(&ev_int->wait.lock);
	fd = anon_inode_getfd("iio:event",
			      &iio_event_chrdev_fileops, ev_int, O_RDONLY);
	if (fd < 0) {
		spin_lock_irq(&ev_int->wait.lock);
		__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
		spin_unlock_irq(&ev_int->wait.lock);
	}
	return fd;
}
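/*
 * Userspace obtains this anonymous descriptor via the IIO_GET_EVENT_FD_IOCTL
 * ioctl on the device's character device and then reads struct
 * iio_event_data records from it. A minimal sketch (the device path is an
 * assumption for illustration):
 *
 *	int devfd = open("/dev/iio:device0", O_RDONLY);
 *	int evfd;
 *	struct iio_event_data ev;
 *
 *	if (ioctl(devfd, IIO_GET_EVENT_FD_IOCTL, &evfd) == 0 &&
 *	    read(evfd, &ev, sizeof(ev)) == sizeof(ev))
 *		printf("event id 0x%llx at %lld\n",
 *		       (unsigned long long)ev.id, (long long)ev.timestamp);
 */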
static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
};

static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};
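/*
 * These strings form the "<type>_<direction>" part of the event attribute
 * names built below; combined with the channel name generated by
 * __iio_add_chan_devattr() this gives sysfs files such as
 * "in_voltage0_thresh_rising_en" (example name for illustration).
 */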
static ssize_t iio_ev_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	bool val;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = indio_dev->info->write_event_config(indio_dev,
						  this_attr->address, val);
	return (ret < 0) ? ret : len;
}
static ssize_t iio_ev_state_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val = indio_dev->info->read_event_config(indio_dev,
						     this_attr->address);

	if (val < 0)
		return val;
	else
		return sprintf(buf, "%d\n", val);
}
static ssize_t iio_ev_value_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, ret;

	ret = indio_dev->info->read_event_value(indio_dev,
						this_attr->address, &val);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", val);
}
static ssize_t iio_ev_value_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, ret;

	if (!indio_dev->info->write_event_value)
		return -EINVAL;

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		return ret;

	ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
						 val);
	if (ret < 0)
		return ret;

	return len;
}
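/*
 * The four callbacks above back the per-event sysfs files created by
 * iio_device_add_event_sysfs() below, e.g. (illustrative names):
 *
 *	echo 1 > events/in_voltage0_thresh_rising_en
 *	cat events/in_voltage0_thresh_rising_value
 *
 * The "_en" file enables or reports event detection through
 * write_event_config()/read_event_config(), while the "_value" file accesses
 * the threshold via read_event_value()/write_event_value().
 */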
static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
				      struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	u64 mask = 0;
	char *postfix;

	if (!chan->event_mask)
		return 0;

	for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
		postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		if (chan->modified)
			mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
						  i/IIO_EV_DIR_MAX,
						  i%IIO_EV_DIR_MAX);
		else if (chan->differential)
			mask = IIO_EVENT_CODE(chan->type, 0, 0,
					      i%IIO_EV_DIR_MAX,
					      i/IIO_EV_DIR_MAX, 0,
					      chan->channel, chan->channel2);
		else
			mask = IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
						    i/IIO_EV_DIR_MAX,
						    i%IIO_EV_DIR_MAX);

		ret = __iio_add_chan_devattr(postfix, chan,
					     &iio_ev_state_show,
					     iio_ev_state_store, mask, 0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;

		postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		ret = __iio_add_chan_devattr(postfix, chan,
					     iio_ev_value_show,
					     iio_ev_value_store, mask, 0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
	}
	ret = attrcount;
error_ret:
	return ret;
}
static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	list_for_each_entry_safe(p, n,
				 &indio_dev->event_interface->
				 dev_attr_list, l) {
		kfree(p->dev_attr.attr.name);
		kfree(p);
	}
}
static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
	int j, ret, attrcount = 0;

	/* Dynamically created from the channels array */
	for (j = 0; j < indio_dev->num_channels; j++) {
		ret = iio_device_add_event_sysfs(indio_dev,
						 &indio_dev->channels[j]);
		if (ret < 0)
			goto error_ret;
		attrcount += ret;
	}
	ret = attrcount;
error_ret:
	return ret;
}
static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
	int j;

	for (j = 0; j < indio_dev->num_channels; j++)
		if (indio_dev->channels[j].event_mask != 0)
			return true;
	return false;
}
static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
	INIT_KFIFO(ev_int->det_events);
	init_waitqueue_head(&ev_int->wait);
	mutex_init(&ev_int->read_lock);
}
static const char *iio_event_group_name = "events";
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	indio_dev->event_interface =
		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (indio_dev->event_interface == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);

	iio_setup_ev_int(indio_dev->event_interface);
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	indio_dev->event_interface->group.name = iio_event_group_name;
	indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
		sizeof(indio_dev->event_interface->group.attrs[0]),
		GFP_KERNEL);
	if (indio_dev->event_interface->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	if (indio_dev->info->event_attrs)
		memcpy(indio_dev->event_interface->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(indio_dev->event_interface->group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p,
			    &indio_dev->event_interface->dev_attr_list,
			    l)
		indio_dev->event_interface->group.attrs[attrn++] =
			&p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->event_interface->group;

	return 0;

error_free_setup_event_lines:
	__iio_remove_event_config_attrs(indio_dev);
	mutex_destroy(&indio_dev->event_interface->read_lock);
	kfree(indio_dev->event_interface);
error_ret:
	return ret;
}
void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
	if (indio_dev->event_interface == NULL)
		return;
	__iio_remove_event_config_attrs(indio_dev);
	kfree(indio_dev->event_interface->group.attrs);
	mutex_destroy(&indio_dev->event_interface->read_lock);
	kfree(indio_dev->event_interface);
}