2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
3 * Licensed under the GPL
6 #include <linux/slab.h>
8 #include <linux/tty_flip.h>
#ifdef CONFIG_NOCONFIG_CHAN
/*
 * Stub init hook used when a channel backend is configured out of the
 * kernel: it only reports the misconfiguration via printk.
 * NOTE(review): the rest of the body is elided from this excerpt.
 */
static void *not_configged_init(char *str, int device,
				const struct chan_opts *opts)
	printk(KERN_ERR "Using a channel type which is configured out of "
/* Stub open hook for a configured-out channel backend: logs an error. */
static int not_configged_open(int input, int output, int primary, void *data,
	printk(KERN_ERR "Using a channel type which is configured out of "
/* Stub close hook for a configured-out channel backend: logs an error. */
static void not_configged_close(int fd, void *data)
	printk(KERN_ERR "Using a channel type which is configured out of "
/* Stub read hook for a configured-out channel backend: logs an error. */
static int not_configged_read(int fd, char *c_out, void *data)
	printk(KERN_ERR "Using a channel type which is configured out of "
/* Stub write hook for a configured-out channel backend: logs an error. */
static int not_configged_write(int fd, const char *buf, int len, void *data)
	printk(KERN_ERR "Using a channel type which is configured out of "
/* Stub console_write hook for a configured-out backend: logs an error. */
static int not_configged_console_write(int fd, const char *buf, int len)
	printk(KERN_ERR "Using a channel type which is configured out of "
/* Stub window_size hook for a configured-out backend: logs an error. */
static int not_configged_window_size(int fd, void *data, unsigned short *rows,
	printk(KERN_ERR "Using a channel type which is configured out of "
/* Stub free hook for a configured-out channel backend: logs an error. */
static void not_configged_free(void *data)
	printk(KERN_ERR "Using a channel type which is configured out of "
/*
 * Fallback ops vector installed for any channel backend that was
 * configured out of this kernel; every hook just logs an error.
 * NOTE(review): the closing "};" is elided from this excerpt.
 */
static const struct chan_ops not_configged_ops = {
	.init = not_configged_init,
	.open = not_configged_open,
	.close = not_configged_close,
	.read = not_configged_read,
	.write = not_configged_write,
	.console_write = not_configged_console_write,
	.window_size = not_configged_window_size,
	.free = not_configged_free,
#endif /* CONFIG_NOCONFIG_CHAN */
/*
 * Deliver one input byte to the tty.  With IXON set (and IXOFF clear)
 * in non-raw mode, STOP/START characters are intercepted for software
 * flow control instead of being queued; everything else is pushed into
 * the flip buffer with TTY_NORMAL flags.
 */
static void tty_receive_char(struct tty_struct *tty, char ch)
	if (I_IXON(tty) && !I_IXOFF(tty) && !tty->raw) {
		if (ch == STOP_CHAR(tty)) {
		else if (ch == START_CHAR(tty)) {
	tty_insert_flip_char(tty, ch, TTY_NORMAL);
/*
 * Open a single channel through its backend's open hook (a missing hook
 * is not an error path visible here) and switch the resulting fd to
 * non-blocking via os_set_fd_block(); if that fails, the fd is closed
 * again through ops->close.
 */
static int open_one_chan(struct chan *chan)
	if (chan->ops->open == NULL)
	else fd = (*chan->ops->open)(chan->input, chan->output, chan->primary,
				     chan->data, &chan->dev);
	err = os_set_fd_block(fd, 0);
		(*chan->ops->close)(fd, chan->data);
/* Open every channel on the list through open_one_chan(). */
static int open_chan(struct list_head *chans)
	struct list_head *ele;
	list_for_each(ele, chans) {
		chan = list_entry(ele, struct chan, list);
		ret = open_one_chan(chan);
/*
 * Register for window-size change events on a primary channel whose
 * backend provides a winch hook; no-op otherwise.
 */
void chan_enable_winch(struct chan *chan, struct tty_struct *tty)
	if (chan && chan->primary && chan->ops->winch)
		register_winch(chan->fd, tty);
/*
 * Delayed-work callback: re-run input delivery for a line (see
 * chan_interrupt) unless the line is currently throttled.
 */
static void line_timer_cb(struct work_struct *work)
	struct line *line = container_of(work, struct line, task.work);
	if (!line->throttled)
		chan_interrupt(line, line->tty, line->driver->read_irq);
/*
 * Bring up all of a line's channels: initialize the line's delayed-work
 * retry timer, open each channel, and attach read/write IRQ handling
 * via line_setup_irq().
 */
int enable_chan(struct line *line)
	struct list_head *ele;
	INIT_DELAYED_WORK(&line->task, line_timer_cb);
	list_for_each(ele, &line->chan_list) {
		chan = list_entry(ele, struct chan, list);
		err = open_one_chan(chan);
		err = line_setup_irq(chan->fd, chan->input, chan->output, line,
/* Items are added in IRQ context, when free_irq can't be called, and
 * removed in process context, when it can.
 * This handles interrupt sources which disappear, and which need to
 * be permanently disabled. This is discovered in IRQ context, but
 * the freeing of the IRQ must be done later.
 */
static DEFINE_SPINLOCK(irqs_to_free_lock);	/* protects irqs_to_free */
static LIST_HEAD(irqs_to_free);		/* chans whose IRQs await freeing */
	/*
	 * NOTE(review): this is the interior of the deferred-IRQ reaper
	 * (the function header is not visible in this excerpt).  It
	 * splices irqs_to_free onto a private list under the lock, then
	 * frees each queued channel's read/write IRQs in process context.
	 */
	struct list_head *ele;
	spin_lock_irqsave(&irqs_to_free_lock, flags);
	list_splice_init(&irqs_to_free, &list);
	spin_unlock_irqrestore(&irqs_to_free_lock, flags);
	list_for_each(ele, &list) {
		chan = list_entry(ele, struct chan, free_list);
		if (chan->input && chan->enabled)
			free_irq(chan->line->driver->read_irq, chan);
		if (chan->output && chan->enabled)
			free_irq(chan->line->driver->write_irq, chan);
/*
 * Tear down one channel.  With delay_free_irq, the chan is queued on
 * irqs_to_free so its IRQs can be freed later in process context
 * (making this safe to call from IRQ context); otherwise the enabled
 * read/write IRQs are freed immediately.  Finally the backend's close
 * hook, if any, is invoked on the fd.
 */
static void close_one_chan(struct chan *chan, int delay_free_irq)
	if (delay_free_irq) {
		spin_lock_irqsave(&irqs_to_free_lock, flags);
		list_add(&chan->free_list, &irqs_to_free);
		spin_unlock_irqrestore(&irqs_to_free_lock, flags);
	if (chan->input && chan->enabled)
		free_irq(chan->line->driver->read_irq, chan);
	if (chan->output && chan->enabled)
		free_irq(chan->line->driver->write_irq, chan);
	if (chan->ops->close != NULL)
		(*chan->ops->close)(chan->fd, chan->data);
/* Close every channel of a line, without deferring IRQ freeing. */
void close_chan(struct line *line)
	/* Close in reverse order as open in case more than one of them
	 * refers to the same device and they save and restore that device's
	 * state. Then, the first one opened will have the original state,
	 * so it must be the last closed.
	 */
	list_for_each_entry_reverse(chan, &line->chan_list, list) {
		close_one_chan(chan, 0);
/* Stop IRQ-driven polling of an enabled channel's fd. */
void deactivate_chan(struct chan *chan, int irq)
	if (chan && chan->enabled)
		deactivate_fd(chan->fd, irq);
/* Resume IRQ-driven polling of an enabled channel's fd. */
void reactivate_chan(struct chan *chan, int irq)
	if (chan && chan->enabled)
		reactivate_fd(chan->fd, irq);
/*
 * Write len bytes to the channel through its write hook.  On -EAGAIN
 * or a short write, the write IRQ's fd is reactivated so the remainder
 * can be retried when the fd becomes writable again.
 */
int write_chan(struct chan *chan, const char *buf, int len,
	if (len == 0 || !chan || !chan->ops->write)
	n = chan->ops->write(chan->fd, buf, len, chan->data);
	if ((ret == -EAGAIN) || ((ret >= 0) && (ret < len)))
		reactivate_fd(chan->fd, write_irq);
/* Console output path: write via the backend's console_write hook. */
int console_write_chan(struct chan *chan, const char *buf, int len)
	if (!chan || !chan->ops->console_write)
	n = chan->ops->console_write(chan->fd, buf, len);
/* Open a line's channels for use as a console and announce the device. */
int console_open_chan(struct line *line, struct console *co)
	err = open_chan(&line->chan_list);
	printk(KERN_INFO "Console initialized on /dev/%s%d\n", co->name,
/*
 * Query the terminal window size from the line's primary input channel,
 * falling back to the primary output channel; channels whose backend
 * lacks a window_size hook are skipped.
 */
int chan_window_size(struct line *line, unsigned short *rows_out,
		     unsigned short *cols_out)
	chan = line->chan_in;
	if (chan && chan->primary) {
		if (chan->ops->window_size == NULL)
		return chan->ops->window_size(chan->fd, chan->data,
	chan = line->chan_out;
	if (chan && chan->primary) {
		if (chan->ops->window_size == NULL)
		return chan->ops->window_size(chan->fd, chan->data,
/*
 * Unlink a channel from its list, close it (immediate IRQ freeing),
 * release backend-private data via ops->free, and stop SIGIO handling
 * on a primary output fd.
 */
static void free_one_chan(struct chan *chan)
	list_del(&chan->list);
	close_one_chan(chan, 0);
	if (chan->ops->free != NULL)
		(*chan->ops->free)(chan->data);
	if (chan->primary && chan->output)
		ignore_sigio_fd(chan->fd);
/* Free every channel on the list; _safe walk since entries are removed. */
static void free_chan(struct list_head *chans)
	struct list_head *ele, *next;
	list_for_each_safe(ele, next, chans) {
		chan = list_entry(ele, struct chan, list);
/*
 * Render one channel's configuration into str via CONFIG_CHUNK:
 * "none" when there is no channel, the bare backend type when dev is
 * unset, otherwise "type:dev".
 */
static int one_chan_config_string(struct chan *chan, char *str, int size,
		CONFIG_CHUNK(str, size, n, "none", 1);
	CONFIG_CHUNK(str, size, n, chan->ops->type, 0);
	if (chan->dev == NULL) {
		CONFIG_CHUNK(str, size, n, "", 1);
	CONFIG_CHUNK(str, size, n, ":", 0);
	CONFIG_CHUNK(str, size, n, chan->dev, 0);
/*
 * Render an input/output channel pair into str as "in,out"
 * (presumably collapsing to a single spec when both ends are the same
 * channel — the branch is elided here; verify upstream).
 */
static int chan_pair_config_string(struct chan *in, struct chan *out,
				   char *str, int size, char **error_out)
	n = one_chan_config_string(in, str, size, error_out);
		CONFIG_CHUNK(str, size, n, "", 1);
	CONFIG_CHUNK(str, size, n, ",", 1);
	n = one_chan_config_string(out, str, size, error_out);
	CONFIG_CHUNK(str, size, n, "", 1);
/*
 * Report a line's configuration string; non-primary channels are
 * treated as absent before delegating to chan_pair_config_string().
 */
int chan_config_string(struct line *line, char *str, int size,
	struct chan *in = line->chan_in, *out = line->chan_out;
	if (in && !in->primary)
	if (out && !out->primary)
	return chan_pair_config_string(in, out, str, size, error_out);
426 const struct chan_ops *ops;
429 static const struct chan_type chan_table[] = {
432 #ifdef CONFIG_NULL_CHAN
433 { "null", &null_ops },
435 { "null", ¬_configged_ops },
438 #ifdef CONFIG_PORT_CHAN
439 { "port", &port_ops },
441 { "port", ¬_configged_ops },
444 #ifdef CONFIG_PTY_CHAN
448 { "pty", ¬_configged_ops },
449 { "pts", ¬_configged_ops },
452 #ifdef CONFIG_TTY_CHAN
455 { "tty", ¬_configged_ops },
458 #ifdef CONFIG_XTERM_CHAN
459 { "xterm", &xterm_ops },
461 { "xterm", ¬_configged_ops },
/*
 * Parse one channel description: match str's prefix against the
 * chan_table keys, hand the remainder of the string to the chosen
 * backend's init hook, and allocate (GFP_ATOMIC — may run in atomic
 * context) a struct chan describing the result.  On failure *error_out
 * is set to a static message naming the reason.
 */
static struct chan *parse_chan(struct line *line, char *str, int device,
			       const struct chan_opts *opts, char **error_out)
	const struct chan_type *entry;
	const struct chan_ops *ops;
	for(i = 0; i < ARRAY_SIZE(chan_table); i++) {
		entry = &chan_table[i];
		if (!strncmp(str, entry->key, strlen(entry->key))) {
			/* Backend-specific config follows the key. */
			str += strlen(entry->key);
	*error_out = "No match for configured backends";
	data = (*ops->init)(str, device, opts);
		*error_out = "Configuration failed";
	chan = kmalloc(sizeof(*chan), GFP_ATOMIC);
		*error_out = "Memory allocation failed";
	*chan = ((struct chan) { .list = LIST_HEAD_INIT(chan->list),
				 LIST_HEAD_INIT(chan->free_list),
/*
 * Parse a line's configuration string into its channel list.  A comma
 * splits the string into separate input and output specs; without one,
 * a single channel serves both directions (chan_in == chan_out).  Any
 * previously-built channel list is reset first.
 */
int parse_chan_pair(char *str, struct line *line, int device,
		    const struct chan_opts *opts, char **error_out)
	struct list_head *chans = &line->chan_list;
	if (!list_empty(chans)) {
		line->chan_in = line->chan_out = NULL;
	INIT_LIST_HEAD(chans);
	/* "in,out" form: distinct input and output channels. */
	out = strchr(str, ',');
	new = parse_chan(line, in, device, opts, error_out);
	list_add(&new->list, chans);
	new = parse_chan(line, out, device, opts, error_out);
	list_add(&new->list, chans);
	line->chan_out = new;
	/* Single spec: one channel used for both directions. */
	new = parse_chan(line, str, device, opts, error_out);
	list_add(&new->list, chans);
	line->chan_in = line->chan_out = new;
/*
 * Input delivery for a line: read bytes from the input channel and
 * feed them to the tty via tty_receive_char().  If the tty flip buffer
 * has no room, retry later through the line's delayed work.  On
 * channel failure, both directions are closed with deferred IRQ
 * freeing (close_one_chan(..., 1)), since this may run in IRQ context.
 * Any buffered characters are pushed to the tty at the end.
 */
void chan_interrupt(struct line *line, struct tty_struct *tty, int irq)
	struct chan *chan = line->chan_in;
	if (!chan || !chan->ops->read)
	if (tty && !tty_buffer_request_room(tty, 1)) {
		schedule_delayed_work(&line->task, 1);
	err = chan->ops->read(chan->fd, &c, chan->data);
		tty_receive_char(tty, c);
	reactivate_fd(chan->fd, irq);
	if (line->chan_out != chan)
		close_one_chan(line->chan_out, 1);
	close_one_chan(chan, 1);
	tty_flip_buffer_push(tty);