drivers/gpio/gpio-omap.c
1 /*
2  * Support functions for OMAP GPIO
3  *
4  * Copyright (C) 2003-2005 Nokia Corporation
5  * Written by Juha Yrjölä <juha.yrjola@nokia.com>
6  *
7  * Copyright (C) 2009 Texas Instruments
8  * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  */
14
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/interrupt.h>
18 #include <linux/syscore_ops.h>
19 #include <linux/err.h>
20 #include <linux/clk.h>
21 #include <linux/io.h>
22 #include <linux/device.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/pm.h>
25 #include <linux/of.h>
26 #include <linux/of_device.h>
27 #include <linux/gpio.h>
28 #include <linux/bitops.h>
29 #include <linux/platform_data/gpio-omap.h>
30
31 #define OFF_MODE        1
32
33 static LIST_HEAD(omap_gpio_list);
34
35 struct gpio_regs {
36         u32 irqenable1;
37         u32 irqenable2;
38         u32 wake_en;
39         u32 ctrl;
40         u32 oe;
41         u32 leveldetect0;
42         u32 leveldetect1;
43         u32 risingdetect;
44         u32 fallingdetect;
45         u32 dataout;
46         u32 debounce;
47         u32 debounce_en;
48 };
49
50 struct gpio_bank {
51         struct list_head node;
52         void __iomem *base;
53         u16 irq;
54         u32 non_wakeup_gpios;
55         u32 enabled_non_wakeup_gpios;
56         struct gpio_regs context;
57         u32 saved_datain;
58         u32 level_mask;
59         u32 toggle_mask;
60         spinlock_t lock;
61         struct gpio_chip chip;
62         struct clk *dbck;
63         u32 mod_usage;
64         u32 irq_usage;
65         u32 dbck_enable_mask;
66         bool dbck_enabled;
67         struct device *dev;
68         bool is_mpuio;
69         bool dbck_flag;
70         bool loses_context;
71         bool context_valid;
72         int stride;
73         u32 width;
74         int context_loss_count;
75         int power_mode;
76         bool workaround_enabled;
77
78         void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
79         int (*get_context_loss_count)(struct device *dev);
80
81         struct omap_gpio_reg_offs *regs;
82 };
83
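/*
 * Usage-tracking helpers: GPIO_INDEX() converts a global GPIO number into an
 * offset within @bank, BANK_USED() checks whether any line of the bank is in
 * use as a GPIO or as an IRQ, and LINE_USED() tests one line in such a mask.
 */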
84 #define GPIO_INDEX(bank, gpio) (gpio % bank->width)
85 #define GPIO_MOD_CTRL_BIT       BIT(0)
86
87 #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
88 #define LINE_USED(line, offset) (line & (BIT(offset)))
89
90 static void omap_gpio_unmask_irq(struct irq_data *d);
91
92 static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d)
93 {
94         struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
95         return container_of(chip, struct gpio_bank, chip);
96 }
97
98 static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio,
99                                     int is_input)
100 {
101         void __iomem *reg = bank->base;
102         u32 l;
103
104         reg += bank->regs->direction;
105         l = readl_relaxed(reg);
106         if (is_input)
107                 l |= BIT(gpio);
108         else
109                 l &= ~(BIT(gpio));
110         writel_relaxed(l, reg);
111         bank->context.oe = l;
112 }
113
114
115 /* set data out value using dedicated set/clear register */
116 static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, unsigned offset,
117                                       int enable)
118 {
119         void __iomem *reg = bank->base;
120         u32 l = BIT(offset);
121
122         if (enable) {
123                 reg += bank->regs->set_dataout;
124                 bank->context.dataout |= l;
125         } else {
126                 reg += bank->regs->clr_dataout;
127                 bank->context.dataout &= ~l;
128         }
129
130         writel_relaxed(l, reg);
131 }
132
133 /* set data out value using mask register */
134 static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, unsigned offset,
135                                        int enable)
136 {
137         void __iomem *reg = bank->base + bank->regs->dataout;
138         u32 gpio_bit = BIT(offset);
139         u32 l;
140
141         l = readl_relaxed(reg);
142         if (enable)
143                 l |= gpio_bit;
144         else
145                 l &= ~gpio_bit;
146         writel_relaxed(l, reg);
147         bank->context.dataout = l;
148 }
149
150 static int omap_get_gpio_datain(struct gpio_bank *bank, int offset)
151 {
152         void __iomem *reg = bank->base + bank->regs->datain;
153
154         return (readl_relaxed(reg) & (BIT(offset))) != 0;
155 }
156
157 static int omap_get_gpio_dataout(struct gpio_bank *bank, int offset)
158 {
159         void __iomem *reg = bank->base + bank->regs->dataout;
160
161         return (readl_relaxed(reg) & (BIT(offset))) != 0;
162 }
163
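/* Read-modify-write helper: set or clear the @mask bits in the register at @base + @reg. */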
164 static inline void omap_gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
165 {
166         int l = readl_relaxed(base + reg);
167
168         if (set)
169                 l |= mask;
170         else
171                 l &= ~mask;
172
173         writel_relaxed(l, base + reg);
174 }
175
176 static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
177 {
178         if (bank->dbck_enable_mask && !bank->dbck_enabled) {
179                 clk_prepare_enable(bank->dbck);
180                 bank->dbck_enabled = true;
181
182                 writel_relaxed(bank->dbck_enable_mask,
183                              bank->base + bank->regs->debounce_en);
184         }
185 }
186
187 static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
188 {
189         if (bank->dbck_enable_mask && bank->dbck_enabled) {
190                 /*
191                  * Disable debounce before cutting its clock. If debounce is
192                  * enabled but the clock is not, the GPIO module seems unable
193                  * to detect events and generate interrupts, at least on OMAP3.
194                  */
195                 writel_relaxed(0, bank->base + bank->regs->debounce_en);
196
197                 clk_disable_unprepare(bank->dbck);
198                 bank->dbck_enabled = false;
199         }
200 }
201
202 /**
203  * omap2_set_gpio_debounce - set the debounce time for a gpio
204  * @bank: the gpio bank we're acting upon
205  * @offset: the gpio number on this @bank
206  * @debounce: debounce time to use
207  *
208  * OMAP's debounce time is in 31us steps so we need
209  * to convert and round up to the closest unit.
210  */
211 static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
212                                     unsigned debounce)
213 {
214         void __iomem            *reg;
215         u32                     val;
216         u32                     l;
217
218         if (!bank->dbck_flag)
219                 return;
220
221         if (debounce < 32)
222                 debounce = 0x01;
223         else if (debounce > 7936)
224                 debounce = 0xff;
225         else
226                 debounce = (debounce / 0x1f) - 1;
227
228         l = BIT(offset);
229
230         clk_prepare_enable(bank->dbck);
231         reg = bank->base + bank->regs->debounce;
232         writel_relaxed(debounce, reg);
233
234         reg = bank->base + bank->regs->debounce_en;
235         val = readl_relaxed(reg);
236
237         if (debounce)
238                 val |= l;
239         else
240                 val &= ~l;
241         bank->dbck_enable_mask = val;
242
243         writel_relaxed(val, reg);
244         clk_disable_unprepare(bank->dbck);
245         /*
246          * Enable debounce clock per module.
247          * This call is mandatory because in omap_gpio_request() when
248          * *_runtime_get_sync() is called, omap_gpio_dbck_enable() within
249          * the runtime callback fails to turn on dbck because dbck_enable_mask
250          * used within omap_gpio_dbck_enable() is still not initialized at
251          * that point. Therefore we have to enable dbck here.
252          */
253         omap_gpio_dbck_enable(bank);
254         if (bank->dbck_enable_mask) {
255                 bank->context.debounce = debounce;
256                 bank->context.debounce_en = val;
257         }
258 }
259
260 /**
261  * omap_clear_gpio_debounce - clear debounce settings for a gpio
262  * @bank: the gpio bank we're acting upon
263  * @offset: the gpio number on this @bank
264  *
265  * If a gpio is using debounce, clear its debounce enable bit. If it is the
266  * only gpio in this bank using debounce, also clear the debounce time and
267  * disable the debounce clock, since no other gpio in the bank needs it
268  * any more.
269  */
270 static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
271 {
272         u32 gpio_bit = BIT(offset);
273
274         if (!bank->dbck_flag)
275                 return;
276
277         if (!(bank->dbck_enable_mask & gpio_bit))
278                 return;
279
280         bank->dbck_enable_mask &= ~gpio_bit;
281         bank->context.debounce_en &= ~gpio_bit;
282         writel_relaxed(bank->context.debounce_en,
283                      bank->base + bank->regs->debounce_en);
284
285         if (!bank->dbck_enable_mask) {
286                 bank->context.debounce = 0;
287                 writel_relaxed(bank->context.debounce, bank->base +
288                              bank->regs->debounce);
289                 clk_disable_unprepare(bank->dbck);
290                 bank->dbck_enabled = false;
291         }
292 }
293
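/*
 * Program level/edge detection for one GPIO according to @trigger, mirror the
 * new settings into bank->context, and refresh the cached level_mask used by
 * the IRQ handler.
 */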
294 static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
295                                                 unsigned trigger)
296 {
297         void __iomem *base = bank->base;
298         u32 gpio_bit = BIT(gpio);
299
300         omap_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
301                       trigger & IRQ_TYPE_LEVEL_LOW);
302         omap_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
303                       trigger & IRQ_TYPE_LEVEL_HIGH);
304         omap_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
305                       trigger & IRQ_TYPE_EDGE_RISING);
306         omap_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
307                       trigger & IRQ_TYPE_EDGE_FALLING);
308
309         bank->context.leveldetect0 =
310                         readl_relaxed(bank->base + bank->regs->leveldetect0);
311         bank->context.leveldetect1 =
312                         readl_relaxed(bank->base + bank->regs->leveldetect1);
313         bank->context.risingdetect =
314                         readl_relaxed(bank->base + bank->regs->risingdetect);
315         bank->context.fallingdetect =
316                         readl_relaxed(bank->base + bank->regs->fallingdetect);
317
318         if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
319                 omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
320                 bank->context.wake_en =
321                         readl_relaxed(bank->base + bank->regs->wkup_en);
322         }
323
324         /* This part always needs to be executed for OMAP{34xx, 44xx} */
325         if (!bank->regs->irqctrl) {
326                 /* On omap24xx, proceed only when a valid GPIO bit is set */
327                 if (bank->non_wakeup_gpios) {
328                         if (!(bank->non_wakeup_gpios & gpio_bit))
329                                 goto exit;
330                 }
331
332                 /*
333                  * Log the edge gpio and manually trigger the IRQ
334                  * after resume if the input level changes
335                  * to avoid irq lost during PER RET/OFF mode
336                  * Applies for omap2 non-wakeup gpio and all omap3 gpios
337                  */
338                 if (trigger & IRQ_TYPE_EDGE_BOTH)
339                         bank->enabled_non_wakeup_gpios |= gpio_bit;
340                 else
341                         bank->enabled_non_wakeup_gpios &= ~gpio_bit;
342         }
343
344 exit:
345         bank->level_mask =
346                 readl_relaxed(bank->base + bank->regs->leveldetect0) |
347                 readl_relaxed(bank->base + bank->regs->leveldetect1);
348 }
349
350 #ifdef CONFIG_ARCH_OMAP1
351 /*
352  * This only applies to chips that can't do both rising and falling edge
353  * detection at once.  For all other chips, this function is a noop.
354  */
355 static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
356 {
357         void __iomem *reg = bank->base;
358         u32 l = 0;
359
360         if (!bank->regs->irqctrl)
361                 return;
362
363         reg += bank->regs->irqctrl;
364
365         l = readl_relaxed(reg);
366         if ((l >> gpio) & 1)
367                 l &= ~(BIT(gpio));
368         else
369                 l |= BIT(gpio);
370
371         writel_relaxed(l, reg);
372 }
373 #else
374 static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
375 #endif
376
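/*
 * Set the trigger type for one GPIO IRQ, using whichever trigger-control
 * registers this bank provides: leveldetect/wkup_en, irqctrl, or
 * edgectrl1/edgectrl2.
 */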
377 static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio,
378                                     unsigned trigger)
379 {
380         void __iomem *reg = bank->base;
381         void __iomem *base = bank->base;
382         u32 l = 0;
383
384         if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
385                 omap_set_gpio_trigger(bank, gpio, trigger);
386         } else if (bank->regs->irqctrl) {
387                 reg += bank->regs->irqctrl;
388
389                 l = readl_relaxed(reg);
390                 if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
391                         bank->toggle_mask |= BIT(gpio);
392                 if (trigger & IRQ_TYPE_EDGE_RISING)
393                         l |= BIT(gpio);
394                 else if (trigger & IRQ_TYPE_EDGE_FALLING)
395                         l &= ~(BIT(gpio));
396                 else
397                         return -EINVAL;
398
399                 writel_relaxed(l, reg);
400         } else if (bank->regs->edgectrl1) {
401                 if (gpio & 0x08)
402                         reg += bank->regs->edgectrl2;
403                 else
404                         reg += bank->regs->edgectrl1;
405
406                 gpio &= 0x07;
407                 l = readl_relaxed(reg);
408                 l &= ~(3 << (gpio << 1));
409                 if (trigger & IRQ_TYPE_EDGE_RISING)
410                         l |= 2 << (gpio << 1);
411                 if (trigger & IRQ_TYPE_EDGE_FALLING)
412                         l |= BIT(gpio << 1);
413
414                 /* Enable wake-up during idle for dynamic tick */
415                 omap_gpio_rmw(base, bank->regs->wkup_en, BIT(gpio), trigger);
416                 bank->context.wake_en =
417                         readl_relaxed(bank->base + bank->regs->wkup_en);
418                 writel_relaxed(l, reg);
419         }
420         return 0;
421 }
422
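/*
 * Enable the GPIO module for @offset: claim the pin for the MPU where a
 * pinctrl register exists, and ungate the module clocks via the ctrl register
 * when the first line of the bank is taken into use.
 */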
423 static void omap_enable_gpio_module(struct gpio_bank *bank, unsigned offset)
424 {
425         if (bank->regs->pinctrl) {
426                 void __iomem *reg = bank->base + bank->regs->pinctrl;
427
428                 /* Claim the pin for MPU */
429                 writel_relaxed(readl_relaxed(reg) | (BIT(offset)), reg);
430         }
431
432         if (bank->regs->ctrl && !BANK_USED(bank)) {
433                 void __iomem *reg = bank->base + bank->regs->ctrl;
434                 u32 ctrl;
435
436                 ctrl = readl_relaxed(reg);
437                 /* Module is enabled, clocks are not gated */
438                 ctrl &= ~GPIO_MOD_CTRL_BIT;
439                 writel_relaxed(ctrl, reg);
440                 bank->context.ctrl = ctrl;
441         }
442 }
443
444 static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset)
445 {
446         void __iomem *base = bank->base;
447
448         if (bank->regs->wkup_en &&
449             !LINE_USED(bank->mod_usage, offset) &&
450             !LINE_USED(bank->irq_usage, offset)) {
451                 /* Disable wake-up during idle for dynamic tick */
452                 omap_gpio_rmw(base, bank->regs->wkup_en, BIT(offset), 0);
453                 bank->context.wake_en =
454                         readl_relaxed(bank->base + bank->regs->wkup_en);
455         }
456
457         if (bank->regs->ctrl && !BANK_USED(bank)) {
458                 void __iomem *reg = bank->base + bank->regs->ctrl;
459                 u32 ctrl;
460
461                 ctrl = readl_relaxed(reg);
462                 /* Module is disabled, clocks are gated */
463                 ctrl |= GPIO_MOD_CTRL_BIT;
464                 writel_relaxed(ctrl, reg);
465                 bank->context.ctrl = ctrl;
466         }
467 }
468
469 static int omap_gpio_is_input(struct gpio_bank *bank, unsigned offset)
470 {
471         void __iomem *reg = bank->base + bank->regs->direction;
472
473         return readl_relaxed(reg) & BIT(offset);
474 }
475
476 static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned offset)
477 {
478         if (!LINE_USED(bank->mod_usage, offset)) {
479                 omap_enable_gpio_module(bank, offset);
480                 omap_set_gpio_direction(bank, offset, 1);
481         }
482         bank->irq_usage |= BIT(offset);
483 }
484
485 static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
486 {
487         struct gpio_bank *bank = omap_irq_data_get_bank(d);
488         int retval;
489         unsigned long flags;
490         unsigned offset = d->hwirq;
491
492         if (!BANK_USED(bank))
493                 pm_runtime_get_sync(bank->dev);
494
495 #ifdef CONFIG_ARCH_OMAP1
496         if (d->irq > IH_MPUIO_BASE) {
497                 unsigned gpio = 0;
498                 gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
499                 offset = GPIO_INDEX(bank, gpio);
500         }
501 #endif
502
503         if (type & ~IRQ_TYPE_SENSE_MASK)
504                 return -EINVAL;
505
506         if (!bank->regs->leveldetect0 &&
507                 (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
508                 return -EINVAL;
509
510         spin_lock_irqsave(&bank->lock, flags);
511         retval = omap_set_gpio_triggering(bank, offset, type);
512         omap_gpio_init_irq(bank, offset);
513         if (!omap_gpio_is_input(bank, offset)) {
514                 spin_unlock_irqrestore(&bank->lock, flags);
515                 return -EINVAL;
516         }
517         spin_unlock_irqrestore(&bank->lock, flags);
518
519         if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
520                 __irq_set_handler_locked(d->irq, handle_level_irq);
521         else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
522                 __irq_set_handler_locked(d->irq, handle_edge_irq);
523
524         return retval;
525 }
526
527 static void omap_clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
528 {
529         void __iomem *reg = bank->base;
530
531         reg += bank->regs->irqstatus;
532         writel_relaxed(gpio_mask, reg);
533
534         /* Workaround for clearing DSP GPIO interrupts to allow retention */
535         if (bank->regs->irqstatus2) {
536                 reg = bank->base + bank->regs->irqstatus2;
537                 writel_relaxed(gpio_mask, reg);
538         }
539
540         /* Flush posted write for the irq status to avoid spurious interrupts */
541         readl_relaxed(reg);
542 }
543
544 static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank,
545                                              unsigned offset)
546 {
547         omap_clear_gpio_irqbank(bank, BIT(offset));
548 }
549
550 static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank)
551 {
552         void __iomem *reg = bank->base;
553         u32 l;
554         u32 mask = (BIT(bank->width)) - 1;
555
556         reg += bank->regs->irqenable;
557         l = readl_relaxed(reg);
558         if (bank->regs->irqenable_inv)
559                 l = ~l;
560         l &= mask;
561         return l;
562 }
563
564 static void omap_enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
565 {
566         void __iomem *reg = bank->base;
567         u32 l;
568
569         if (bank->regs->set_irqenable) {
570                 reg += bank->regs->set_irqenable;
571                 l = gpio_mask;
572                 bank->context.irqenable1 |= gpio_mask;
573         } else {
574                 reg += bank->regs->irqenable;
575                 l = readl_relaxed(reg);
576                 if (bank->regs->irqenable_inv)
577                         l &= ~gpio_mask;
578                 else
579                         l |= gpio_mask;
580                 bank->context.irqenable1 = l;
581         }
582
583         writel_relaxed(l, reg);
584 }
585
586 static void omap_disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
587 {
588         void __iomem *reg = bank->base;
589         u32 l;
590
591         if (bank->regs->clr_irqenable) {
592                 reg += bank->regs->clr_irqenable;
593                 l = gpio_mask;
594                 bank->context.irqenable1 &= ~gpio_mask;
595         } else {
596                 reg += bank->regs->irqenable;
597                 l = readl_relaxed(reg);
598                 if (bank->regs->irqenable_inv)
599                         l |= gpio_mask;
600                 else
601                         l &= ~gpio_mask;
602                 bank->context.irqenable1 = l;
603         }
604
605         writel_relaxed(l, reg);
606 }
607
608 static inline void omap_set_gpio_irqenable(struct gpio_bank *bank,
609                                            unsigned offset, int enable)
610 {
611         if (enable)
612                 omap_enable_gpio_irqbank(bank, BIT(offset));
613         else
614                 omap_disable_gpio_irqbank(bank, BIT(offset));
615 }
616
617 /*
618  * Note that ENAWAKEUP needs to be enabled in the GPIO_SYSCONFIG register.
619  * 1510 does not seem to have a wake-up register. If JTAG is connected
620  * to the target, the system will always wake up on GPIO events. While
621  * the system is running, all registered GPIO interrupts need to have
622  * wake-up enabled. When the system is suspended, only selected GPIO
623  * interrupts need to have wake-up enabled.
624  */
625 static int omap_set_gpio_wakeup(struct gpio_bank *bank, unsigned offset,
626                                 int enable)
627 {
628         u32 gpio_bit = BIT(offset);
629         unsigned long flags;
630
631         if (bank->non_wakeup_gpios & gpio_bit) {
632                 dev_err(bank->dev,
633                         "Unable to modify wakeup on non-wakeup GPIO%d\n",
634                         offset);
635                 return -EINVAL;
636         }
637
638         spin_lock_irqsave(&bank->lock, flags);
639         if (enable)
640                 bank->context.wake_en |= gpio_bit;
641         else
642                 bank->context.wake_en &= ~gpio_bit;
643
644         writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en);
645         spin_unlock_irqrestore(&bank->lock, flags);
646
647         return 0;
648 }
649
650 static void omap_reset_gpio(struct gpio_bank *bank, unsigned offset)
651 {
652         omap_set_gpio_direction(bank, offset, 1);
653         omap_set_gpio_irqenable(bank, offset, 0);
654         omap_clear_gpio_irqstatus(bank, offset);
655         omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
656         omap_clear_gpio_debounce(bank, offset);
657 }
658
659 /* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
660 static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
661 {
662         struct gpio_bank *bank = omap_irq_data_get_bank(d);
663         unsigned offset = d->hwirq;
664
665         return omap_set_gpio_wakeup(bank, offset, enable);
666 }
667
668 static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
669 {
670         struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
671         unsigned long flags;
672
673         /*
674          * If this is the first gpio_request for the bank,
675          * enable the bank module.
676          */
677         if (!BANK_USED(bank))
678                 pm_runtime_get_sync(bank->dev);
679
680         spin_lock_irqsave(&bank->lock, flags);
681         /* Set trigger to none. You need to enable the desired trigger with
682          * request_irq() or set_irq_type(). Only do this if the IRQ line has
683          * not already been requested.
684          */
685         if (!LINE_USED(bank->irq_usage, offset)) {
686                 omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
687                 omap_enable_gpio_module(bank, offset);
688         }
689         bank->mod_usage |= BIT(offset);
690         spin_unlock_irqrestore(&bank->lock, flags);
691
692         return 0;
693 }
694
695 static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
696 {
697         struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
698         unsigned long flags;
699
700         spin_lock_irqsave(&bank->lock, flags);
701         bank->mod_usage &= ~(BIT(offset));
702         omap_disable_gpio_module(bank, offset);
703         omap_reset_gpio(bank, offset);
704         spin_unlock_irqrestore(&bank->lock, flags);
705
706         /*
707          * If this is the last gpio to be freed in the bank,
708          * disable the bank module.
709          */
710         if (!BANK_USED(bank))
711                 pm_runtime_put(bank->dev);
712 }
713
714 /*
715  * We need to unmask the GPIO bank interrupt as soon as possible to
716  * avoid missing GPIO interrupts for other lines in the bank.
717  * Then we need to mask-read-clear-unmask the triggered GPIO lines
718  * in the bank to avoid missing nested interrupts for a GPIO line.
719  * If we wait to unmask individual GPIO lines in the bank after the
720  * line's interrupt handler has been run, we may miss some nested
721  * interrupts.
722  */
723 static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
724 {
725         void __iomem *isr_reg = NULL;
726         u32 isr;
727         unsigned int bit;
728         struct gpio_bank *bank;
729         int unmasked = 0;
730         struct irq_chip *irqchip = irq_desc_get_chip(desc);
731         struct gpio_chip *chip = irq_get_handler_data(irq);
732
733         chained_irq_enter(irqchip, desc);
734
735         bank = container_of(chip, struct gpio_bank, chip);
736         isr_reg = bank->base + bank->regs->irqstatus;
737         pm_runtime_get_sync(bank->dev);
738
739         if (WARN_ON(!isr_reg))
740                 goto exit;
741
742         while (1) {
743                 u32 isr_saved, level_mask = 0;
744                 u32 enabled;
745
746                 enabled = omap_get_gpio_irqbank_mask(bank);
747                 isr_saved = isr = readl_relaxed(isr_reg) & enabled;
748
749                 if (bank->level_mask)
750                         level_mask = bank->level_mask & enabled;
751
752                 /* clear edge sensitive interrupts before handler(s) are
753                 called so that we don't miss any interrupt that occurs while
754                 executing them */
755                 omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
756                 omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
757                 omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
758
759                 /* if only edge sensitive GPIO pin interrupts are
760                 configured, we can unmask the GPIO bank interrupt immediately */
761                 if (!level_mask && !unmasked) {
762                         unmasked = 1;
763                         chained_irq_exit(irqchip, desc);
764                 }
765
766                 if (!isr)
767                         break;
768
769                 while (isr) {
770                         bit = __ffs(isr);
771                         isr &= ~(BIT(bit));
772
773                         /*
774                          * Some chips can't respond to both rising and falling
775                          * at the same time.  If this irq was requested with
776                          * both flags, we need to flip the ICR data for the IRQ
777                          * to respond to the IRQ for the opposite direction.
778                          * This will be indicated in the bank toggle_mask.
779                          */
780                         if (bank->toggle_mask & (BIT(bit)))
781                                 omap_toggle_gpio_edge_triggering(bank, bit);
782
783                         generic_handle_irq(irq_find_mapping(bank->chip.irqdomain,
784                                                             bit));
785                 }
786         }
787         /* if the bank has any level sensitive GPIO pin interrupts
788         configured, we must unmask the bank interrupt only after the
789         handler(s) have been executed in order to avoid a spurious bank
790         interrupt */
791 exit:
792         if (!unmasked)
793                 chained_irq_exit(irqchip, desc);
794         pm_runtime_put(bank->dev);
795 }
796
797 static unsigned int omap_gpio_irq_startup(struct irq_data *d)
798 {
799         struct gpio_bank *bank = omap_irq_data_get_bank(d);
800         unsigned long flags;
801         unsigned offset = d->hwirq;
802
803         if (!BANK_USED(bank))
804                 pm_runtime_get_sync(bank->dev);
805
806         spin_lock_irqsave(&bank->lock, flags);
807         omap_gpio_init_irq(bank, offset);
808         spin_unlock_irqrestore(&bank->lock, flags);
809         omap_gpio_unmask_irq(d);
810
811         return 0;
812 }
813
814 static void omap_gpio_irq_shutdown(struct irq_data *d)
815 {
816         struct gpio_bank *bank = omap_irq_data_get_bank(d);
817         unsigned long flags;
818         unsigned offset = d->hwirq;
819
820         spin_lock_irqsave(&bank->lock, flags);
821         bank->irq_usage &= ~(BIT(offset));
822         omap_disable_gpio_module(bank, offset);
823         omap_reset_gpio(bank, offset);
824         spin_unlock_irqrestore(&bank->lock, flags);
825
826         /*
827          * If this is the last IRQ to be freed in the bank,
828          * disable the bank module.
829          */
830         if (!BANK_USED(bank))
831                 pm_runtime_put(bank->dev);
832 }
833
834 static void omap_gpio_ack_irq(struct irq_data *d)
835 {
836         struct gpio_bank *bank = omap_irq_data_get_bank(d);
837         unsigned offset = d->hwirq;
838
839         omap_clear_gpio_irqstatus(bank, offset);
840 }
841
842 static void omap_gpio_mask_irq(struct irq_data *d)
843 {
844         struct gpio_bank *bank = omap_irq_data_get_bank(d);
845         unsigned offset = d->hwirq;
846         unsigned long flags;
847
848         spin_lock_irqsave(&bank->lock, flags);
849         omap_set_gpio_irqenable(bank, offset, 0);
850         omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
851         spin_unlock_irqrestore(&bank->lock, flags);
852 }
853
854 static void omap_gpio_unmask_irq(struct irq_data *d)
855 {
856         struct gpio_bank *bank = omap_irq_data_get_bank(d);
857         unsigned offset = d->hwirq;
858         u32 trigger = irqd_get_trigger_type(d);
859         unsigned long flags;
860
861         spin_lock_irqsave(&bank->lock, flags);
862         if (trigger)
863                 omap_set_gpio_triggering(bank, offset, trigger);
864
865         /* For level-triggered GPIOs, the clearing must be done after
866          * the HW source is cleared, thus after the handler has run */
867         if (bank->level_mask & BIT(offset)) {
868                 omap_set_gpio_irqenable(bank, offset, 0);
869                 omap_clear_gpio_irqstatus(bank, offset);
870         }
871
872         omap_set_gpio_irqenable(bank, offset, 1);
873         spin_unlock_irqrestore(&bank->lock, flags);
874 }
875
876 /*---------------------------------------------------------------------*/
877
878 static int omap_mpuio_suspend_noirq(struct device *dev)
879 {
880         struct platform_device *pdev = to_platform_device(dev);
881         struct gpio_bank        *bank = platform_get_drvdata(pdev);
882         void __iomem            *mask_reg = bank->base +
883                                         OMAP_MPUIO_GPIO_MASKIT / bank->stride;
884         unsigned long           flags;
885
886         spin_lock_irqsave(&bank->lock, flags);
887         writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
888         spin_unlock_irqrestore(&bank->lock, flags);
889
890         return 0;
891 }
892
893 static int omap_mpuio_resume_noirq(struct device *dev)
894 {
895         struct platform_device *pdev = to_platform_device(dev);
896         struct gpio_bank        *bank = platform_get_drvdata(pdev);
897         void __iomem            *mask_reg = bank->base +
898                                         OMAP_MPUIO_GPIO_MASKIT / bank->stride;
899         unsigned long           flags;
900
901         spin_lock_irqsave(&bank->lock, flags);
902         writel_relaxed(bank->context.wake_en, mask_reg);
903         spin_unlock_irqrestore(&bank->lock, flags);
904
905         return 0;
906 }
907
908 static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
909         .suspend_noirq = omap_mpuio_suspend_noirq,
910         .resume_noirq = omap_mpuio_resume_noirq,
911 };
912
913 /* use a platform_driver for this, solely to get the noirq suspend/resume hooks. */
914 static struct platform_driver omap_mpuio_driver = {
915         .driver         = {
916                 .name   = "mpuio",
917                 .pm     = &omap_mpuio_dev_pm_ops,
918         },
919 };
920
921 static struct platform_device omap_mpuio_device = {
922         .name           = "mpuio",
923         .id             = -1,
924         .dev = {
925                 .driver = &omap_mpuio_driver.driver,
926         }
927         /* could list the /proc/iomem resources */
928 };
929
930 static inline void omap_mpuio_init(struct gpio_bank *bank)
931 {
932         platform_set_drvdata(&omap_mpuio_device, bank);
933
934         if (platform_driver_register(&omap_mpuio_driver) == 0)
935                 (void) platform_device_register(&omap_mpuio_device);
936 }
937
938 /*---------------------------------------------------------------------*/
939
940 static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
941 {
942         struct gpio_bank *bank;
943         unsigned long flags;
944         void __iomem *reg;
945         int dir;
946
947         bank = container_of(chip, struct gpio_bank, chip);
948         reg = bank->base + bank->regs->direction;
949         spin_lock_irqsave(&bank->lock, flags);
950         dir = !!(readl_relaxed(reg) & BIT(offset));
951         spin_unlock_irqrestore(&bank->lock, flags);
952         return dir;
953 }
954
955 static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
956 {
957         struct gpio_bank *bank;
958         unsigned long flags;
959
960         bank = container_of(chip, struct gpio_bank, chip);
961         spin_lock_irqsave(&bank->lock, flags);
962         omap_set_gpio_direction(bank, offset, 1);
963         spin_unlock_irqrestore(&bank->lock, flags);
964         return 0;
965 }
966
967 static int omap_gpio_get(struct gpio_chip *chip, unsigned offset)
968 {
969         struct gpio_bank *bank;
970
971         bank = container_of(chip, struct gpio_bank, chip);
972
973         if (omap_gpio_is_input(bank, offset))
974                 return omap_get_gpio_datain(bank, offset);
975         else
976                 return omap_get_gpio_dataout(bank, offset);
977 }
978
979 static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
980 {
981         struct gpio_bank *bank;
982         unsigned long flags;
983
984         bank = container_of(chip, struct gpio_bank, chip);
985         spin_lock_irqsave(&bank->lock, flags);
986         bank->set_dataout(bank, offset, value);
987         omap_set_gpio_direction(bank, offset, 0);
988         spin_unlock_irqrestore(&bank->lock, flags);
989         return 0;
990 }
991
992 static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
993                               unsigned debounce)
994 {
995         struct gpio_bank *bank;
996         unsigned long flags;
997
998         bank = container_of(chip, struct gpio_bank, chip);
999
1000         spin_lock_irqsave(&bank->lock, flags);
1001         omap2_set_gpio_debounce(bank, offset, debounce);
1002         spin_unlock_irqrestore(&bank->lock, flags);
1003
1004         return 0;
1005 }
1006
1007 static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
1008 {
1009         struct gpio_bank *bank;
1010         unsigned long flags;
1011
1012         bank = container_of(chip, struct gpio_bank, chip);
1013         spin_lock_irqsave(&bank->lock, flags);
1014         bank->set_dataout(bank, offset, value);
1015         spin_unlock_irqrestore(&bank->lock, flags);
1016 }
1017
1018 /*---------------------------------------------------------------------*/
1019
1020 static void __init omap_gpio_show_rev(struct gpio_bank *bank)
1021 {
1022         static bool called;
1023         u32 rev;
1024
1025         if (called || bank->regs->revision == USHRT_MAX)
1026                 return;
1027
1028         rev = readw_relaxed(bank->base + bank->regs->revision);
1029         pr_info("OMAP GPIO hardware version %d.%d\n",
1030                 (rev >> 4) & 0x0f, rev & 0x0f);
1031
1032         called = true;
1033 }
1034
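/*
 * One-time hardware init for a bank: mask all interrupts, clear pending
 * status, disable debounce, save the reset value of the direction register in
 * the context and look up the optional debounce clock ("dbclk").
 */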
1035 static void omap_gpio_mod_init(struct gpio_bank *bank)
1036 {
1037         void __iomem *base = bank->base;
1038         u32 l = 0xffffffff;
1039
1040         if (bank->width == 16)
1041                 l = 0xffff;
1042
1043         if (bank->is_mpuio) {
1044                 writel_relaxed(l, bank->base + bank->regs->irqenable);
1045                 return;
1046         }
1047
1048         omap_gpio_rmw(base, bank->regs->irqenable, l,
1049                       bank->regs->irqenable_inv);
1050         omap_gpio_rmw(base, bank->regs->irqstatus, l,
1051                       !bank->regs->irqenable_inv);
1052         if (bank->regs->debounce_en)
1053                 writel_relaxed(0, base + bank->regs->debounce_en);
1054
1055         /* Save OE default value (0xffffffff) in the context */
1056         bank->context.oe = readl_relaxed(bank->base + bank->regs->direction);
1057          /* Initialize interface clk ungated, module enabled */
1058         if (bank->regs->ctrl)
1059                 writel_relaxed(0, base + bank->regs->ctrl);
1060
1061         bank->dbck = clk_get(bank->dev, "dbclk");
1062         if (IS_ERR(bank->dbck))
1063                 dev_err(bank->dev, "Could not get gpio dbck\n");
1064 }
1065
1066 static void
1067 omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
1068                     unsigned int num)
1069 {
1070         struct irq_chip_generic *gc;
1071         struct irq_chip_type *ct;
1072
1073         gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
1074                                     handle_simple_irq);
1075         if (!gc) {
1076                 dev_err(bank->dev, "Memory alloc failed for gc\n");
1077                 return;
1078         }
1079
1080         ct = gc->chip_types;
1081
1082         /* NOTE: No ack required, reading IRQ status clears it. */
1083         ct->chip.irq_mask = irq_gc_mask_set_bit;
1084         ct->chip.irq_unmask = irq_gc_mask_clr_bit;
1085         ct->chip.irq_set_type = omap_gpio_irq_type;
1086
1087         if (bank->regs->wkup_en)
1088                 ct->chip.irq_set_wake = omap_gpio_wake_enable;
1089
1090         ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
1091         irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
1092                                IRQ_NOREQUEST | IRQ_NOPROBE, 0);
1093 }
1094
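/*
 * Register the gpio_chip for this bank and hook up its irqchip; bank
 * interrupts are then demultiplexed by the chained omap_gpio_irq_handler().
 */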
1095 static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
1096 {
1097         int j;
1098         static int gpio;
1099         int irq_base = 0;
1100         int ret;
1101
1102         /*
1103          * REVISIT eventually switch from OMAP-specific gpio structs
1104          * over to the generic ones
1105          */
1106         bank->chip.request = omap_gpio_request;
1107         bank->chip.free = omap_gpio_free;
1108         bank->chip.get_direction = omap_gpio_get_direction;
1109         bank->chip.direction_input = omap_gpio_input;
1110         bank->chip.get = omap_gpio_get;
1111         bank->chip.direction_output = omap_gpio_output;
1112         bank->chip.set_debounce = omap_gpio_debounce;
1113         bank->chip.set = omap_gpio_set;
1114         if (bank->is_mpuio) {
1115                 bank->chip.label = "mpuio";
1116                 if (bank->regs->wkup_en)
1117                         bank->chip.dev = &omap_mpuio_device.dev;
1118                 bank->chip.base = OMAP_MPUIO(0);
1119         } else {
1120                 bank->chip.label = "gpio";
1121                 bank->chip.base = gpio;
1122                 gpio += bank->width;
1123         }
1124         bank->chip.ngpio = bank->width;
1125
1126         ret = gpiochip_add(&bank->chip);
1127         if (ret) {
1128                 dev_err(bank->dev, "Could not register gpio chip %d\n", ret);
1129                 return ret;
1130         }
1131
1132 #ifdef CONFIG_ARCH_OMAP1
1133         /*
1134          * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
1135          * irq_alloc_descs() since a base IRQ offset will no longer be needed.
1136          */
1137         irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
1138         if (irq_base < 0) {
1139                 dev_err(bank->dev, "Couldn't allocate IRQ numbers\n");
1140                 return -ENODEV;
1141         }
1142 #endif
1143
1144         ret = gpiochip_irqchip_add(&bank->chip, irqc,
1145                                    irq_base, omap_gpio_irq_handler,
1146                                    IRQ_TYPE_NONE);
1147
1148         if (ret) {
1149                 dev_err(bank->dev, "Couldn't add irqchip to gpiochip %d\n", ret);
1150                 gpiochip_remove(&bank->chip);
1151                 return -ENODEV;
1152         }
1153
1154         gpiochip_set_chained_irqchip(&bank->chip, irqc,
1155                                      bank->irq, omap_gpio_irq_handler);
1156
1157         for (j = 0; j < bank->width; j++) {
1158                 int irq = irq_find_mapping(bank->chip.irqdomain, j);
1159                 if (bank->is_mpuio) {
1160                         omap_mpuio_alloc_gc(bank, irq, bank->width);
1161                         irq_set_chip_and_handler(irq, NULL, NULL);
1162                         set_irq_flags(irq, 0);
1163                 }
1164         }
1165
1166         return 0;
1167 }
1168
1169 static const struct of_device_id omap_gpio_match[];
1170
1171 static int omap_gpio_probe(struct platform_device *pdev)
1172 {
1173         struct device *dev = &pdev->dev;
1174         struct device_node *node = dev->of_node;
1175         const struct of_device_id *match;
1176         const struct omap_gpio_platform_data *pdata;
1177         struct resource *res;
1178         struct gpio_bank *bank;
1179         struct irq_chip *irqc;
1180         int ret;
1181
1182         match = of_match_device(of_match_ptr(omap_gpio_match), dev);
1183
1184         pdata = match ? match->data : dev_get_platdata(dev);
1185         if (!pdata)
1186                 return -EINVAL;
1187
1188         bank = devm_kzalloc(dev, sizeof(struct gpio_bank), GFP_KERNEL);
1189         if (!bank) {
1190                 dev_err(dev, "Memory alloc failed\n");
1191                 return -ENOMEM;
1192         }
1193
1194         irqc = devm_kzalloc(dev, sizeof(*irqc), GFP_KERNEL);
1195         if (!irqc)
1196                 return -ENOMEM;
1197
1198         irqc->irq_startup = omap_gpio_irq_startup,
1199         irqc->irq_shutdown = omap_gpio_irq_shutdown,
1200         irqc->irq_ack = omap_gpio_ack_irq,
1201         irqc->irq_mask = omap_gpio_mask_irq,
1202         irqc->irq_unmask = omap_gpio_unmask_irq,
1203         irqc->irq_set_type = omap_gpio_irq_type,
1204         irqc->irq_set_wake = omap_gpio_wake_enable,
1205         irqc->name = dev_name(&pdev->dev);
1206
1207         res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1208         if (unlikely(!res)) {
1209                 dev_err(dev, "Invalid IRQ resource\n");
1210                 return -ENODEV;
1211         }
1212
1213         bank->irq = res->start;
1214         bank->dev = dev;
1215         bank->chip.dev = dev;
1216         bank->dbck_flag = pdata->dbck_flag;
1217         bank->stride = pdata->bank_stride;
1218         bank->width = pdata->bank_width;
1219         bank->is_mpuio = pdata->is_mpuio;
1220         bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
1221         bank->regs = pdata->regs;
1222 #ifdef CONFIG_OF_GPIO
1223         bank->chip.of_node = of_node_get(node);
1224 #endif
1225         if (node) {
1226                 if (!of_property_read_bool(node, "ti,gpio-always-on"))
1227                         bank->loses_context = true;
1228         } else {
1229                 bank->loses_context = pdata->loses_context;
1230
1231                 if (bank->loses_context)
1232                         bank->get_context_loss_count =
1233                                 pdata->get_context_loss_count;
1234         }
1235
1236         if (bank->regs->set_dataout && bank->regs->clr_dataout)
1237                 bank->set_dataout = omap_set_gpio_dataout_reg;
1238         else
1239                 bank->set_dataout = omap_set_gpio_dataout_mask;
1240
1241         spin_lock_init(&bank->lock);
1242
1243         /* Static mapping, never released */
1244         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1245         bank->base = devm_ioremap_resource(dev, res);
1246         if (IS_ERR(bank->base)) {
1247                 irq_domain_remove(bank->chip.irqdomain);
1248                 return PTR_ERR(bank->base);
1249         }
1250
1251         platform_set_drvdata(pdev, bank);
1252
1253         pm_runtime_enable(bank->dev);
1254         pm_runtime_irq_safe(bank->dev);
1255         pm_runtime_get_sync(bank->dev);
1256
1257         if (bank->is_mpuio)
1258                 omap_mpuio_init(bank);
1259
1260         omap_gpio_mod_init(bank);
1261
1262         ret = omap_gpio_chip_init(bank, irqc);
1263         if (ret)
1264                 return ret;
1265
1266         omap_gpio_show_rev(bank);
1267
1268         pm_runtime_put(bank->dev);
1269
1270         list_add_tail(&bank->node, &omap_gpio_list);
1271
1272         return 0;
1273 }
1274
1275 #ifdef CONFIG_ARCH_OMAP2PLUS
1276
1277 #if defined(CONFIG_PM)
1278 static void omap_gpio_restore_context(struct gpio_bank *bank);
1279
1280 static int omap_gpio_runtime_suspend(struct device *dev)
1281 {
1282         struct platform_device *pdev = to_platform_device(dev);
1283         struct gpio_bank *bank = platform_get_drvdata(pdev);
1284         u32 l1 = 0, l2 = 0;
1285         unsigned long flags;
1286         u32 wake_low, wake_hi;
1287
1288         spin_lock_irqsave(&bank->lock, flags);
1289
1290         /*
1291          * Only edges can generate a wakeup event to the PRCM.
1292          *
1293          * Therefore, ensure any wake-up capable GPIOs have
1294          * edge-detection enabled before going idle to ensure a wakeup
1295          * to the PRCM is generated on a GPIO transition. (c.f. 34xx
1296          * NDA TRM 25.5.3.1)
1297          *
1298          * The normal values will be restored upon ->runtime_resume()
1299          * by writing back the values saved in bank->context.
1300          */
1301         wake_low = bank->context.leveldetect0 & bank->context.wake_en;
1302         if (wake_low)
1303                 writel_relaxed(wake_low | bank->context.fallingdetect,
1304                              bank->base + bank->regs->fallingdetect);
1305         wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
1306         if (wake_hi)
1307                 writel_relaxed(wake_hi | bank->context.risingdetect,
1308                              bank->base + bank->regs->risingdetect);
1309
1310         if (!bank->enabled_non_wakeup_gpios)
1311                 goto update_gpio_context_count;
1312
1313         if (bank->power_mode != OFF_MODE) {
1314                 bank->power_mode = 0;
1315                 goto update_gpio_context_count;
1316         }
1317         /*
1318          * If going to OFF, remove triggering for all
1319          * non-wakeup GPIOs.  Otherwise spurious IRQs will be
1320          * generated.  See OMAP2420 Errata item 1.101.
1321          */
1322         bank->saved_datain = readl_relaxed(bank->base +
1323                                                 bank->regs->datain);
1324         l1 = bank->context.fallingdetect;
1325         l2 = bank->context.risingdetect;
1326
1327         l1 &= ~bank->enabled_non_wakeup_gpios;
1328         l2 &= ~bank->enabled_non_wakeup_gpios;
1329
1330         writel_relaxed(l1, bank->base + bank->regs->fallingdetect);
1331         writel_relaxed(l2, bank->base + bank->regs->risingdetect);
1332
1333         bank->workaround_enabled = true;
1334
1335 update_gpio_context_count:
1336         if (bank->get_context_loss_count)
1337                 bank->context_loss_count =
1338                                 bank->get_context_loss_count(bank->dev);
1339
1340         omap_gpio_dbck_disable(bank);
1341         spin_unlock_irqrestore(&bank->lock, flags);
1342
1343         return 0;
1344 }
1345
1346 static void omap_gpio_init_context(struct gpio_bank *p);
1347
1348 static int omap_gpio_runtime_resume(struct device *dev)
1349 {
1350         struct platform_device *pdev = to_platform_device(dev);
1351         struct gpio_bank *bank = platform_get_drvdata(pdev);
1352         u32 l = 0, gen, gen0, gen1;
1353         unsigned long flags;
1354         int c;
1355
1356         spin_lock_irqsave(&bank->lock, flags);
1357
1358         /*
1359          * On the first resume during the probe, the context has not
1360          * been initialised and so initialise it now. Also initialise
1361          * the context loss count.
1362          */
1363         if (bank->loses_context && !bank->context_valid) {
1364                 omap_gpio_init_context(bank);
1365
1366                 if (bank->get_context_loss_count)
1367                         bank->context_loss_count =
1368                                 bank->get_context_loss_count(bank->dev);
1369         }
1370
1371         omap_gpio_dbck_enable(bank);
1372
1373         /*
1374          * In ->runtime_suspend(), level-triggered, wakeup-enabled
1375          * GPIOs were set to edge trigger also in order to be able to
1376          * generate a PRCM wakeup.  Here we restore the
1377          * pre-runtime_suspend() values for edge triggering.
1378          */
1379         writel_relaxed(bank->context.fallingdetect,
1380                      bank->base + bank->regs->fallingdetect);
1381         writel_relaxed(bank->context.risingdetect,
1382                      bank->base + bank->regs->risingdetect);
1383
1384         if (bank->loses_context) {
1385                 if (!bank->get_context_loss_count) {
1386                         omap_gpio_restore_context(bank);
1387                 } else {
1388                         c = bank->get_context_loss_count(bank->dev);
1389                         if (c != bank->context_loss_count) {
1390                                 omap_gpio_restore_context(bank);
1391                         } else {
1392                                 spin_unlock_irqrestore(&bank->lock, flags);
1393                                 return 0;
1394                         }
1395                 }
1396         }
1397
1398         if (!bank->workaround_enabled) {
1399                 spin_unlock_irqrestore(&bank->lock, flags);
1400                 return 0;
1401         }
1402
1403         l = readl_relaxed(bank->base + bank->regs->datain);
1404
1405         /*
1406          * Check if any of the non-wakeup interrupt GPIOs have changed
1407          * state.  If so, generate an IRQ by software.  This is
1408          * horribly racy, but it's the best we can do to work around
1409          * this silicon bug.
1410          */
1411         l ^= bank->saved_datain;
1412         l &= bank->enabled_non_wakeup_gpios;
1413
1414         /*
1415          * No need to generate IRQs for the rising edge for gpio IRQs
1416          * configured with falling edge only; and vice versa.
1417          */
1418         gen0 = l & bank->context.fallingdetect;
1419         gen0 &= bank->saved_datain;
1420
1421         gen1 = l & bank->context.risingdetect;
1422         gen1 &= ~(bank->saved_datain);
1423
1424         /* FIXME: Consider GPIO IRQs with level detections properly! */
1425         gen = l & (~(bank->context.fallingdetect) &
1426                                          ~(bank->context.risingdetect));
1427         /* Consider all GPIO IRQs needed to be updated */
1428         gen |= gen0 | gen1;
1429
1430         if (gen) {
1431                 u32 old0, old1;
1432
1433                 old0 = readl_relaxed(bank->base + bank->regs->leveldetect0);
1434                 old1 = readl_relaxed(bank->base + bank->regs->leveldetect1);
1435
1436                 if (!bank->regs->irqstatus_raw0) {
1437                         writel_relaxed(old0 | gen, bank->base +
1438                                                 bank->regs->leveldetect0);
1439                         writel_relaxed(old1 | gen, bank->base +
1440                                                 bank->regs->leveldetect1);
1441                 }
1442
1443                 if (bank->regs->irqstatus_raw0) {
1444                         writel_relaxed(old0 | l, bank->base +
1445                                                 bank->regs->leveldetect0);
1446                         writel_relaxed(old1 | l, bank->base +
1447                                                 bank->regs->leveldetect1);
1448                 }
1449                 writel_relaxed(old0, bank->base + bank->regs->leveldetect0);
1450                 writel_relaxed(old1, bank->base + bank->regs->leveldetect1);
1451         }
1452
1453         bank->workaround_enabled = false;
1454         spin_unlock_irqrestore(&bank->lock, flags);
1455
1456         return 0;
1457 }
1458 #endif /* CONFIG_PM */
1459
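/*
 * Drop the runtime PM reference of every used, context-losing bank so it can
 * enter the requested power mode before the SoC goes idle; the matching
 * resume helper below takes the reference back.
 */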
1460 void omap2_gpio_prepare_for_idle(int pwr_mode)
1461 {
1462         struct gpio_bank *bank;
1463
1464         list_for_each_entry(bank, &omap_gpio_list, node) {
1465                 if (!BANK_USED(bank) || !bank->loses_context)
1466                         continue;
1467
1468                 bank->power_mode = pwr_mode;
1469
1470                 pm_runtime_put_sync_suspend(bank->dev);
1471         }
1472 }
1473
1474 void omap2_gpio_resume_after_idle(void)
1475 {
1476         struct gpio_bank *bank;
1477
1478         list_for_each_entry(bank, &omap_gpio_list, node) {
1479                 if (!BANK_USED(bank) || !bank->loses_context)
1480                         continue;
1481
1482                 pm_runtime_get_sync(bank->dev);
1483         }
1484 }
1485
1486 #if defined(CONFIG_PM)
1487 static void omap_gpio_init_context(struct gpio_bank *p)
1488 {
1489         struct omap_gpio_reg_offs *regs = p->regs;
1490         void __iomem *base = p->base;
1491
1492         p->context.ctrl         = readl_relaxed(base + regs->ctrl);
1493         p->context.oe           = readl_relaxed(base + regs->direction);
1494         p->context.wake_en      = readl_relaxed(base + regs->wkup_en);
1495         p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0);
1496         p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1);
1497         p->context.risingdetect = readl_relaxed(base + regs->risingdetect);
1498         p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
1499         p->context.irqenable1   = readl_relaxed(base + regs->irqenable);
1500         p->context.irqenable2   = readl_relaxed(base + regs->irqenable2);
1501
1502         if (regs->set_dataout && p->regs->clr_dataout)
1503                 p->context.dataout = readl_relaxed(base + regs->set_dataout);
1504         else
1505                 p->context.dataout = readl_relaxed(base + regs->dataout);
1506
1507         p->context_valid = true;
1508 }
1509
1510 static void omap_gpio_restore_context(struct gpio_bank *bank)
1511 {
1512         writel_relaxed(bank->context.wake_en,
1513                                 bank->base + bank->regs->wkup_en);
1514         writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl);
1515         writel_relaxed(bank->context.leveldetect0,
1516                                 bank->base + bank->regs->leveldetect0);
1517         writel_relaxed(bank->context.leveldetect1,
1518                                 bank->base + bank->regs->leveldetect1);
1519         writel_relaxed(bank->context.risingdetect,
1520                                 bank->base + bank->regs->risingdetect);
1521         writel_relaxed(bank->context.fallingdetect,
1522                                 bank->base + bank->regs->fallingdetect);
1523         if (bank->regs->set_dataout && bank->regs->clr_dataout)
1524                 writel_relaxed(bank->context.dataout,
1525                                 bank->base + bank->regs->set_dataout);
1526         else
1527                 writel_relaxed(bank->context.dataout,
1528                                 bank->base + bank->regs->dataout);
1529         writel_relaxed(bank->context.oe, bank->base + bank->regs->direction);
1530
1531         if (bank->dbck_enable_mask) {
1532                 writel_relaxed(bank->context.debounce, bank->base +
1533                                         bank->regs->debounce);
1534                 writel_relaxed(bank->context.debounce_en,
1535                                         bank->base + bank->regs->debounce_en);
1536         }
1537
1538         writel_relaxed(bank->context.irqenable1,
1539                                 bank->base + bank->regs->irqenable);
1540         writel_relaxed(bank->context.irqenable2,
1541                                 bank->base + bank->regs->irqenable2);
1542 }
1543 #endif /* CONFIG_PM */
1544 #else
1545 #define omap_gpio_runtime_suspend NULL
1546 #define omap_gpio_runtime_resume NULL
1547 static inline void omap_gpio_init_context(struct gpio_bank *p) {}
1548 #endif
1549
1550 static const struct dev_pm_ops gpio_pm_ops = {
1551         SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
1552                                                                         NULL)
1553 };
1554
1555 #if defined(CONFIG_OF)
1556 static struct omap_gpio_reg_offs omap2_gpio_regs = {
1557         .revision =             OMAP24XX_GPIO_REVISION,
1558         .direction =            OMAP24XX_GPIO_OE,
1559         .datain =               OMAP24XX_GPIO_DATAIN,
1560         .dataout =              OMAP24XX_GPIO_DATAOUT,
1561         .set_dataout =          OMAP24XX_GPIO_SETDATAOUT,
1562         .clr_dataout =          OMAP24XX_GPIO_CLEARDATAOUT,
1563         .irqstatus =            OMAP24XX_GPIO_IRQSTATUS1,
1564         .irqstatus2 =           OMAP24XX_GPIO_IRQSTATUS2,
1565         .irqenable =            OMAP24XX_GPIO_IRQENABLE1,
1566         .irqenable2 =           OMAP24XX_GPIO_IRQENABLE2,
1567         .set_irqenable =        OMAP24XX_GPIO_SETIRQENABLE1,
1568         .clr_irqenable =        OMAP24XX_GPIO_CLEARIRQENABLE1,
1569         .debounce =             OMAP24XX_GPIO_DEBOUNCE_VAL,
1570         .debounce_en =          OMAP24XX_GPIO_DEBOUNCE_EN,
1571         .ctrl =                 OMAP24XX_GPIO_CTRL,
1572         .wkup_en =              OMAP24XX_GPIO_WAKE_EN,
1573         .leveldetect0 =         OMAP24XX_GPIO_LEVELDETECT0,
1574         .leveldetect1 =         OMAP24XX_GPIO_LEVELDETECT1,
1575         .risingdetect =         OMAP24XX_GPIO_RISINGDETECT,
1576         .fallingdetect =        OMAP24XX_GPIO_FALLINGDETECT,
1577 };
1578
1579 static struct omap_gpio_reg_offs omap4_gpio_regs = {
1580         .revision =             OMAP4_GPIO_REVISION,
1581         .direction =            OMAP4_GPIO_OE,
1582         .datain =               OMAP4_GPIO_DATAIN,
1583         .dataout =              OMAP4_GPIO_DATAOUT,
1584         .set_dataout =          OMAP4_GPIO_SETDATAOUT,
1585         .clr_dataout =          OMAP4_GPIO_CLEARDATAOUT,
1586         .irqstatus =            OMAP4_GPIO_IRQSTATUS0,
1587         .irqstatus2 =           OMAP4_GPIO_IRQSTATUS1,
1588         .irqenable =            OMAP4_GPIO_IRQSTATUSSET0,
1589         .irqenable2 =           OMAP4_GPIO_IRQSTATUSSET1,
1590         .set_irqenable =        OMAP4_GPIO_IRQSTATUSSET0,
1591         .clr_irqenable =        OMAP4_GPIO_IRQSTATUSCLR0,
1592         .debounce =             OMAP4_GPIO_DEBOUNCINGTIME,
1593         .debounce_en =          OMAP4_GPIO_DEBOUNCENABLE,
1594         .ctrl =                 OMAP4_GPIO_CTRL,
1595         .wkup_en =              OMAP4_GPIO_IRQWAKEN0,
1596         .leveldetect0 =         OMAP4_GPIO_LEVELDETECT0,
1597         .leveldetect1 =         OMAP4_GPIO_LEVELDETECT1,
1598         .risingdetect =         OMAP4_GPIO_RISINGDETECT,
1599         .fallingdetect =        OMAP4_GPIO_FALLINGDETECT,
1600 };
1601
1602 static const struct omap_gpio_platform_data omap2_pdata = {
1603         .regs = &omap2_gpio_regs,
1604         .bank_width = 32,
1605         .dbck_flag = false,
1606 };
1607
1608 static const struct omap_gpio_platform_data omap3_pdata = {
1609         .regs = &omap2_gpio_regs,
1610         .bank_width = 32,
1611         .dbck_flag = true,
1612 };
1613
1614 static const struct omap_gpio_platform_data omap4_pdata = {
1615         .regs = &omap4_gpio_regs,
1616         .bank_width = 32,
1617         .dbck_flag = true,
1618 };
1619
1620 static const struct of_device_id omap_gpio_match[] = {
1621         {
1622                 .compatible = "ti,omap4-gpio",
1623                 .data = &omap4_pdata,
1624         },
1625         {
1626                 .compatible = "ti,omap3-gpio",
1627                 .data = &omap3_pdata,
1628         },
1629         {
1630                 .compatible = "ti,omap2-gpio",
1631                 .data = &omap2_pdata,
1632         },
1633         { },
1634 };
1635 MODULE_DEVICE_TABLE(of, omap_gpio_match);
1636 #endif
1637
1638 static struct platform_driver omap_gpio_driver = {
1639         .probe          = omap_gpio_probe,
1640         .driver         = {
1641                 .name   = "omap_gpio",
1642                 .pm     = &gpio_pm_ops,
1643                 .of_match_table = of_match_ptr(omap_gpio_match),
1644         },
1645 };
1646
1647 /*
1648  * The gpio driver needs to be registered before the machine_init
1649  * functions access the gpio APIs.
1650  * Hence omap_gpio_drv_reg() is a postcore_initcall.
1651  */
1652 static int __init omap_gpio_drv_reg(void)
1653 {
1654         return platform_driver_register(&omap_gpio_driver);
1655 }
1656 postcore_initcall(omap_gpio_drv_reg);