drivers/gpio/gpio-omap.c
1 /*
2  * Support functions for OMAP GPIO
3  *
4  * Copyright (C) 2003-2005 Nokia Corporation
5  * Written by Juha Yrjölä <juha.yrjola@nokia.com>
6  *
7  * Copyright (C) 2009 Texas Instruments
8  * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  */
14
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/interrupt.h>
18 #include <linux/syscore_ops.h>
19 #include <linux/err.h>
20 #include <linux/clk.h>
21 #include <linux/io.h>
22 #include <linux/device.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/pm.h>
25 #include <linux/of.h>
26 #include <linux/of_device.h>
27 #include <linux/gpio.h>
28 #include <linux/bitops.h>
29 #include <linux/platform_data/gpio-omap.h>
30
31 #define OFF_MODE        1
32
33 static LIST_HEAD(omap_gpio_list);
34
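/*
 * Cached copy of the bank's configuration registers. The context is kept
 * up to date as registers are written and is replayed by
 * omap_gpio_restore_context() after the bank has lost its state, e.g.
 * across an off-mode transition.
 */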
35 struct gpio_regs {
36         u32 irqenable1;
37         u32 irqenable2;
38         u32 wake_en;
39         u32 ctrl;
40         u32 oe;
41         u32 leveldetect0;
42         u32 leveldetect1;
43         u32 risingdetect;
44         u32 fallingdetect;
45         u32 dataout;
46         u32 debounce;
47         u32 debounce_en;
48 };
49
50 struct gpio_bank {
51         struct list_head node;
52         void __iomem *base;
53         u16 irq;
54         u32 non_wakeup_gpios;
55         u32 enabled_non_wakeup_gpios;
56         struct gpio_regs context;
57         u32 saved_datain;
58         u32 level_mask;
59         u32 toggle_mask;
60         spinlock_t lock;
61         struct gpio_chip chip;
62         struct clk *dbck;
63         u32 mod_usage;
64         u32 irq_usage;
65         u32 dbck_enable_mask;
66         bool dbck_enabled;
67         struct device *dev;
68         bool is_mpuio;
69         bool dbck_flag;
70         bool loses_context;
71         bool context_valid;
72         int stride;
73         u32 width;
74         int context_loss_count;
75         int power_mode;
76         bool workaround_enabled;
77
78         void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
79         int (*get_context_loss_count)(struct device *dev);
80
81         struct omap_gpio_reg_offs *regs;
82 };
83
84 #define GPIO_MOD_CTRL_BIT       BIT(0)
85
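/*
 * mod_usage and irq_usage are per-bank bitmaps of lines claimed as GPIOs
 * and as IRQs respectively. BANK_USED() is true while any line in the bank
 * is in use and gates the pm_runtime_get/put calls; LINE_USED(map, offset)
 * tests one line, e.g. LINE_USED(bank->mod_usage, 3) checks whether GPIO 3
 * of the bank has been requested.
 */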
86 #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
87 #define LINE_USED(line, offset) (line & (BIT(offset)))
88
89 static void omap_gpio_unmask_irq(struct irq_data *d);
90
91 static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d)
92 {
93         struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
94         return container_of(chip, struct gpio_bank, chip);
95 }
96
97 static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio,
98                                     int is_input)
99 {
100         void __iomem *reg = bank->base;
101         u32 l;
102
103         reg += bank->regs->direction;
104         l = readl_relaxed(reg);
105         if (is_input)
106                 l |= BIT(gpio);
107         else
108                 l &= ~(BIT(gpio));
109         writel_relaxed(l, reg);
110         bank->context.oe = l;
111 }
112
113
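/*
 * Two strategies exist for driving the output value: banks with dedicated
 * set/clear registers need only a single posted write per update, while
 * older banks require a read-modify-write of the dataout register. The
 * probe code selects one of the two helpers below via bank->set_dataout,
 * depending on whether the register layout provides set_dataout and
 * clr_dataout offsets.
 */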
114 /* set data out value using dedicated set/clear registers */
115 static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, unsigned offset,
116                                       int enable)
117 {
118         void __iomem *reg = bank->base;
119         u32 l = BIT(offset);
120
121         if (enable) {
122                 reg += bank->regs->set_dataout;
123                 bank->context.dataout |= l;
124         } else {
125                 reg += bank->regs->clr_dataout;
126                 bank->context.dataout &= ~l;
127         }
128
129         writel_relaxed(l, reg);
130 }
131
132 /* set data out value using mask register */
133 static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, unsigned offset,
134                                        int enable)
135 {
136         void __iomem *reg = bank->base + bank->regs->dataout;
137         u32 gpio_bit = BIT(offset);
138         u32 l;
139
140         l = readl_relaxed(reg);
141         if (enable)
142                 l |= gpio_bit;
143         else
144                 l &= ~gpio_bit;
145         writel_relaxed(l, reg);
146         bank->context.dataout = l;
147 }
148
149 static int omap_get_gpio_datain(struct gpio_bank *bank, int offset)
150 {
151         void __iomem *reg = bank->base + bank->regs->datain;
152
153         return (readl_relaxed(reg) & (BIT(offset))) != 0;
154 }
155
156 static int omap_get_gpio_dataout(struct gpio_bank *bank, int offset)
157 {
158         void __iomem *reg = bank->base + bank->regs->dataout;
159
160         return (readl_relaxed(reg) & (BIT(offset))) != 0;
161 }
162
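/*
 * Read-modify-write helper: sets (set == true) or clears (set == false)
 * the bits in @mask within the register at @base + @reg.
 */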
163 static inline void omap_gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
164 {
165         int l = readl_relaxed(base + reg);
166
167         if (set)
168                 l |= mask;
169         else
170                 l &= ~mask;
171
172         writel_relaxed(l, base + reg);
173 }
174
175 static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
176 {
177         if (bank->dbck_enable_mask && !bank->dbck_enabled) {
178                 clk_prepare_enable(bank->dbck);
179                 bank->dbck_enabled = true;
180
181                 writel_relaxed(bank->dbck_enable_mask,
182                              bank->base + bank->regs->debounce_en);
183         }
184 }
185
186 static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
187 {
188         if (bank->dbck_enable_mask && bank->dbck_enabled) {
189                 /*
190                  * Disable debounce before cutting its clock. If debounce is
191                  * enabled but the clock is not, the GPIO module seems unable
192                  * to detect events and generate interrupts, at least on OMAP3.
193                  */
194                 writel_relaxed(0, bank->base + bank->regs->debounce_en);
195
196                 clk_disable_unprepare(bank->dbck);
197                 bank->dbck_enabled = false;
198         }
199 }
200
201 /**
202  * omap2_set_gpio_debounce - set the low-level gpio debounce time
203  * @bank: the gpio bank we're acting upon
204  * @offset: the gpio number on this @bank
205  * @debounce: debounce time to use
206  *
207  * OMAP's debounce time is in 31us steps so we need
208  * to convert and round up to the closest unit.
209  */
210 static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
211                                     unsigned debounce)
212 {
213         void __iomem            *reg;
214         u32                     val;
215         u32                     l;
216
217         if (!bank->dbck_flag)
218                 return;
219
220         if (debounce < 32)
221                 debounce = 0x01;
222         else if (debounce > 7936)
223                 debounce = 0xff;
224         else
225                 debounce = (debounce / 0x1f) - 1;
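	/*
	 * Worked example (a sketch; assumes the TRM rule that a register
	 * value of n gives a debounce time of (n + 1) * 31 us): requesting
	 * 100 us yields 100 / 0x1f - 1 = 2, which the hardware applies as
	 * roughly (2 + 1) * 31 = 93 us.
	 */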
226
227         l = BIT(offset);
228
229         clk_prepare_enable(bank->dbck);
230         reg = bank->base + bank->regs->debounce;
231         writel_relaxed(debounce, reg);
232
233         reg = bank->base + bank->regs->debounce_en;
234         val = readl_relaxed(reg);
235
236         if (debounce)
237                 val |= l;
238         else
239                 val &= ~l;
240         bank->dbck_enable_mask = val;
241
242         writel_relaxed(val, reg);
243         clk_disable_unprepare(bank->dbck);
244         /*
245          * Enable the debounce clock per module.
246          * This call is mandatory: when omap_gpio_request() calls
247          * *_runtime_get_sync(), omap_gpio_dbck_enable() in the runtime
248          * callback fails to turn on dbck, because the dbck_enable_mask
249          * it relies on has not been initialized yet at that point.
250          * Therefore we have to enable dbck here.
251          */
252         omap_gpio_dbck_enable(bank);
253         if (bank->dbck_enable_mask) {
254                 bank->context.debounce = debounce;
255                 bank->context.debounce_en = val;
256         }
257 }
258
259 /**
260  * omap_clear_gpio_debounce - clear debounce settings for a gpio
261  * @bank: the gpio bank we're acting upon
262  * @offset: the gpio number on this @bank
263  *
264  * If a gpio is using debounce, clear its debounce enable bit. If it is the
265  * only gpio in this bank using debounce, also clear the debounce time and
266  * disable the debounce clock, since no line in the bank needs it any
267  * longer.
268  */
269 static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
270 {
271         u32 gpio_bit = BIT(offset);
272
273         if (!bank->dbck_flag)
274                 return;
275
276         if (!(bank->dbck_enable_mask & gpio_bit))
277                 return;
278
279         bank->dbck_enable_mask &= ~gpio_bit;
280         bank->context.debounce_en &= ~gpio_bit;
281         writel_relaxed(bank->context.debounce_en,
282                      bank->base + bank->regs->debounce_en);
283
284         if (!bank->dbck_enable_mask) {
285                 bank->context.debounce = 0;
286                 writel_relaxed(bank->context.debounce, bank->base +
287                              bank->regs->debounce);
288                 clk_disable_unprepare(bank->dbck);
289                 bank->dbck_enabled = false;
290         }
291 }
292
293 static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
294                                                 unsigned trigger)
295 {
296         void __iomem *base = bank->base;
297         u32 gpio_bit = BIT(gpio);
298
299         omap_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
300                       trigger & IRQ_TYPE_LEVEL_LOW);
301         omap_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
302                       trigger & IRQ_TYPE_LEVEL_HIGH);
303         omap_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
304                       trigger & IRQ_TYPE_EDGE_RISING);
305         omap_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
306                       trigger & IRQ_TYPE_EDGE_FALLING);
307
308         bank->context.leveldetect0 =
309                         readl_relaxed(bank->base + bank->regs->leveldetect0);
310         bank->context.leveldetect1 =
311                         readl_relaxed(bank->base + bank->regs->leveldetect1);
312         bank->context.risingdetect =
313                         readl_relaxed(bank->base + bank->regs->risingdetect);
314         bank->context.fallingdetect =
315                         readl_relaxed(bank->base + bank->regs->fallingdetect);
316
317         if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
318                 omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
319                 bank->context.wake_en =
320                         readl_relaxed(bank->base + bank->regs->wkup_en);
321         }
322
323         /* This part must always be executed for OMAP34xx and OMAP44xx */
324         if (!bank->regs->irqctrl) {
325                 /* On omap24xx proceed only when valid GPIO bit is set */
326                 if (bank->non_wakeup_gpios) {
327                         if (!(bank->non_wakeup_gpios & gpio_bit))
328                                 goto exit;
329                 }
330
331                 /*
332                  * Log the edge gpio and manually trigger the IRQ
333                  * after resume if the input level changes, to avoid
334                  * losing IRQs while the PER domain is in RET/OFF mode.
335                  * Applies to omap2 non-wakeup gpios and all omap3 gpios.
336                  */
337                 if (trigger & IRQ_TYPE_EDGE_BOTH)
338                         bank->enabled_non_wakeup_gpios |= gpio_bit;
339                 else
340                         bank->enabled_non_wakeup_gpios &= ~gpio_bit;
341         }
342
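	/*
	 * Cache which lines are level-triggered; the bank IRQ handler and
	 * the unmask path use level_mask to treat level interrupts
	 * differently from edge interrupts.
	 */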
343 exit:
344         bank->level_mask =
345                 readl_relaxed(bank->base + bank->regs->leveldetect0) |
346                 readl_relaxed(bank->base + bank->regs->leveldetect1);
347 }
348
349 #ifdef CONFIG_ARCH_OMAP1
350 /*
351  * This only applies to chips that can't do both rising and falling edge
352  * detection at once.  For all other chips, this function is a noop.
353  */
354 static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
355 {
356         void __iomem *reg = bank->base;
357         u32 l = 0;
358
359         if (!bank->regs->irqctrl)
360                 return;
361
362         reg += bank->regs->irqctrl;
363
364         l = readl_relaxed(reg);
365         if ((l >> gpio) & 1)
366                 l &= ~(BIT(gpio));
367         else
368                 l |= BIT(gpio);
369
370         writel_relaxed(l, reg);
371 }
372 #else
373 static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
374 #endif
375
376 static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio,
377                                     unsigned trigger)
378 {
379         void __iomem *reg = bank->base;
380         void __iomem *base = bank->base;
381         u32 l = 0;
382
383         if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
384                 omap_set_gpio_trigger(bank, gpio, trigger);
385         } else if (bank->regs->irqctrl) {
386                 reg += bank->regs->irqctrl;
387
388                 l = readl_relaxed(reg);
389                 if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
390                         bank->toggle_mask |= BIT(gpio);
391                 if (trigger & IRQ_TYPE_EDGE_RISING)
392                         l |= BIT(gpio);
393                 else if (trigger & IRQ_TYPE_EDGE_FALLING)
394                         l &= ~(BIT(gpio));
395                 else
396                         return -EINVAL;
397
398                 writel_relaxed(l, reg);
399         } else if (bank->regs->edgectrl1) {
400                 if (gpio & 0x08)
401                         reg += bank->regs->edgectrl2;
402                 else
403                         reg += bank->regs->edgectrl1;
404
405                 gpio &= 0x07;
406                 l = readl_relaxed(reg);
407                 l &= ~(3 << (gpio << 1));
408                 if (trigger & IRQ_TYPE_EDGE_RISING)
409                         l |= 2 << (gpio << 1);
410                 if (trigger & IRQ_TYPE_EDGE_FALLING)
411                         l |= BIT(gpio << 1);
412
413                 /* Enable wake-up during idle for dynamic tick */
414                 omap_gpio_rmw(base, bank->regs->wkup_en, BIT(gpio), trigger);
415                 bank->context.wake_en =
416                         readl_relaxed(bank->base + bank->regs->wkup_en);
417                 writel_relaxed(l, reg);
418         }
419         return 0;
420 }
421
422 static void omap_enable_gpio_module(struct gpio_bank *bank, unsigned offset)
423 {
424         if (bank->regs->pinctrl) {
425                 void __iomem *reg = bank->base + bank->regs->pinctrl;
426
427                 /* Claim the pin for MPU */
428                 writel_relaxed(readl_relaxed(reg) | (BIT(offset)), reg);
429         }
430
431         if (bank->regs->ctrl && !BANK_USED(bank)) {
432                 void __iomem *reg = bank->base + bank->regs->ctrl;
433                 u32 ctrl;
434
435                 ctrl = readl_relaxed(reg);
436                 /* Module is enabled, clocks are not gated */
437                 ctrl &= ~GPIO_MOD_CTRL_BIT;
438                 writel_relaxed(ctrl, reg);
439                 bank->context.ctrl = ctrl;
440         }
441 }
442
443 static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset)
444 {
445         void __iomem *base = bank->base;
446
447         if (bank->regs->wkup_en &&
448             !LINE_USED(bank->mod_usage, offset) &&
449             !LINE_USED(bank->irq_usage, offset)) {
450                 /* Disable wake-up during idle for dynamic tick */
451                 omap_gpio_rmw(base, bank->regs->wkup_en, BIT(offset), 0);
452                 bank->context.wake_en =
453                         readl_relaxed(bank->base + bank->regs->wkup_en);
454         }
455
456         if (bank->regs->ctrl && !BANK_USED(bank)) {
457                 void __iomem *reg = bank->base + bank->regs->ctrl;
458                 u32 ctrl;
459
460                 ctrl = readl_relaxed(reg);
461                 /* Module is disabled, clocks are gated */
462                 ctrl |= GPIO_MOD_CTRL_BIT;
463                 writel_relaxed(ctrl, reg);
464                 bank->context.ctrl = ctrl;
465         }
466 }
467
468 static int omap_gpio_is_input(struct gpio_bank *bank, unsigned offset)
469 {
470         void __iomem *reg = bank->base + bank->regs->direction;
471
472         return readl_relaxed(reg) & BIT(offset);
473 }
474
475 static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned offset)
476 {
477         if (!LINE_USED(bank->mod_usage, offset)) {
478                 omap_enable_gpio_module(bank, offset);
479                 omap_set_gpio_direction(bank, offset, 1);
480         }
481         bank->irq_usage |= BIT(offset);
482 }
483
484 static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
485 {
486         struct gpio_bank *bank = omap_irq_data_get_bank(d);
487         int retval;
488         unsigned long flags;
489         unsigned offset = d->hwirq;
490
491         if (type & ~IRQ_TYPE_SENSE_MASK)
492                 return -EINVAL;
493
494         if (!bank->regs->leveldetect0 &&
495                 (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
496                 return -EINVAL;
497
498         if (!BANK_USED(bank))
499                 pm_runtime_get_sync(bank->dev);
500
501         spin_lock_irqsave(&bank->lock, flags);
502         retval = omap_set_gpio_triggering(bank, offset, type);
503         if (retval)
504                 goto error;
505         omap_gpio_init_irq(bank, offset);
506         if (!omap_gpio_is_input(bank, offset)) {
507                 spin_unlock_irqrestore(&bank->lock, flags);
508                 retval = -EINVAL;
509                 goto error;
510         }
511         spin_unlock_irqrestore(&bank->lock, flags);
512
513         if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
514                 __irq_set_handler_locked(d->irq, handle_level_irq);
515         else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
516                 __irq_set_handler_locked(d->irq, handle_edge_irq);
517
518         return 0;
519
520 error:
521         if (!BANK_USED(bank))
522                 pm_runtime_put(bank->dev);
523         return retval;
524 }
525
526 static void omap_clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
527 {
528         void __iomem *reg = bank->base;
529
530         reg += bank->regs->irqstatus;
531         writel_relaxed(gpio_mask, reg);
532
533         /* Workaround for clearing DSP GPIO interrupts to allow retention */
534         if (bank->regs->irqstatus2) {
535                 reg = bank->base + bank->regs->irqstatus2;
536                 writel_relaxed(gpio_mask, reg);
537         }
538
539         /* Flush posted write for the irq status to avoid spurious interrupts */
540         readl_relaxed(reg);
541 }
542
543 static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank,
544                                              unsigned offset)
545 {
546         omap_clear_gpio_irqbank(bank, BIT(offset));
547 }
548
549 static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank)
550 {
551         void __iomem *reg = bank->base;
552         u32 l;
553         u32 mask = (BIT(bank->width)) - 1;
554
555         reg += bank->regs->irqenable;
556         l = readl_relaxed(reg);
557         if (bank->regs->irqenable_inv)
558                 l = ~l;
559         l &= mask;
560         return l;
561 }
562
563 static void omap_enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
564 {
565         void __iomem *reg = bank->base;
566         u32 l;
567
568         if (bank->regs->set_irqenable) {
569                 reg += bank->regs->set_irqenable;
570                 l = gpio_mask;
571                 bank->context.irqenable1 |= gpio_mask;
572         } else {
573                 reg += bank->regs->irqenable;
574                 l = readl_relaxed(reg);
575                 if (bank->regs->irqenable_inv)
576                         l &= ~gpio_mask;
577                 else
578                         l |= gpio_mask;
579                 bank->context.irqenable1 = l;
580         }
581
582         writel_relaxed(l, reg);
583 }
584
585 static void omap_disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
586 {
587         void __iomem *reg = bank->base;
588         u32 l;
589
590         if (bank->regs->clr_irqenable) {
591                 reg += bank->regs->clr_irqenable;
592                 l = gpio_mask;
593                 bank->context.irqenable1 &= ~gpio_mask;
594         } else {
595                 reg += bank->regs->irqenable;
596                 l = readl_relaxed(reg);
597                 if (bank->regs->irqenable_inv)
598                         l |= gpio_mask;
599                 else
600                         l &= ~gpio_mask;
601                 bank->context.irqenable1 = l;
602         }
603
604         writel_relaxed(l, reg);
605 }
606
607 static inline void omap_set_gpio_irqenable(struct gpio_bank *bank,
608                                            unsigned offset, int enable)
609 {
610         if (enable)
611                 omap_enable_gpio_irqbank(bank, BIT(offset));
612         else
613                 omap_disable_gpio_irqbank(bank, BIT(offset));
614 }
615
616 /*
617  * Note that ENAWAKEUP needs to be enabled in the GPIO_SYSCONFIG register.
618  * The 1510 does not seem to have a wake-up register. If JTAG is connected
619  * to the target, the system will always wake up on GPIO events. While the
620  * system is running, all registered GPIO interrupts need to have wake-up
621  * enabled. When the system is suspended, only selected GPIO interrupts need
622  * to have wake-up enabled.
623  */
624 static int omap_set_gpio_wakeup(struct gpio_bank *bank, unsigned offset,
625                                 int enable)
626 {
627         u32 gpio_bit = BIT(offset);
628         unsigned long flags;
629
630         if (bank->non_wakeup_gpios & gpio_bit) {
631                 dev_err(bank->dev,
632                         "Unable to modify wakeup on non-wakeup GPIO%d\n",
633                         offset);
634                 return -EINVAL;
635         }
636
637         spin_lock_irqsave(&bank->lock, flags);
638         if (enable)
639                 bank->context.wake_en |= gpio_bit;
640         else
641                 bank->context.wake_en &= ~gpio_bit;
642
643         writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en);
644         spin_unlock_irqrestore(&bank->lock, flags);
645
646         return 0;
647 }
648
649 /* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
650 static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
651 {
652         struct gpio_bank *bank = omap_irq_data_get_bank(d);
653         unsigned offset = d->hwirq;
654
655         return omap_set_gpio_wakeup(bank, offset, enable);
656 }
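/*
 * Minimal sketch of how a consumer typically ends up here (names and flags
 * are illustrative, error handling omitted):
 *
 *	irq = gpio_to_irq(gpio);
 *	request_irq(irq, my_handler, IRQF_TRIGGER_FALLING, "my-wake", dev);
 *	enable_irq_wake(irq);  -> reaches omap_gpio_wake_enable() via .irq_set_wake
 */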
657
658 static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
659 {
660         struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
661         unsigned long flags;
662
663         /*
664          * If this is the first gpio_request for the bank,
665          * enable the bank module.
666          */
667         if (!BANK_USED(bank))
668                 pm_runtime_get_sync(bank->dev);
669
670         spin_lock_irqsave(&bank->lock, flags);
671         /* Set trigger to none. You need to enable the desired trigger with
672          * request_irq() or set_irq_type(). Only do this if the IRQ line has
673          * not already been requested.
674          */
675         if (!LINE_USED(bank->irq_usage, offset)) {
676                 omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
677                 omap_enable_gpio_module(bank, offset);
678         }
679         bank->mod_usage |= BIT(offset);
680         spin_unlock_irqrestore(&bank->lock, flags);
681
682         return 0;
683 }
684
685 static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
686 {
687         struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
688         unsigned long flags;
689
690         spin_lock_irqsave(&bank->lock, flags);
691         bank->mod_usage &= ~(BIT(offset));
692         if (!LINE_USED(bank->irq_usage, offset)) {
693                 omap_set_gpio_direction(bank, offset, 1);
694                 omap_clear_gpio_debounce(bank, offset);
695         }
696         omap_disable_gpio_module(bank, offset);
697         spin_unlock_irqrestore(&bank->lock, flags);
698
699         /*
700          * If this is the last gpio to be freed in the bank,
701          * disable the bank module.
702          */
703         if (!BANK_USED(bank))
704                 pm_runtime_put(bank->dev);
705 }
706
707 /*
708  * We need to unmask the GPIO bank interrupt as soon as possible to
709  * avoid missing GPIO interrupts for other lines in the bank.
710  * Then we need to mask-read-clear-unmask the triggered GPIO lines
711  * in the bank to avoid missing nested interrupts for a GPIO line.
712  * If we wait to unmask individual GPIO lines in the bank after the
713  * line's interrupt handler has been run, we may miss some nested
714  * interrupts.
715  */
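/*
 * This runs as the chained handler for the bank's summary interrupt line;
 * it is wired up in omap_gpio_chip_init() via gpiochip_set_chained_irqchip()
 * and demultiplexes per-line interrupts through the bank's irqdomain.
 */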
716 static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
717 {
718         void __iomem *isr_reg = NULL;
719         u32 isr;
720         unsigned int bit;
721         struct gpio_bank *bank;
722         int unmasked = 0;
723         struct irq_chip *irqchip = irq_desc_get_chip(desc);
724         struct gpio_chip *chip = irq_get_handler_data(irq);
725
726         chained_irq_enter(irqchip, desc);
727
728         bank = container_of(chip, struct gpio_bank, chip);
729         isr_reg = bank->base + bank->regs->irqstatus;
730         pm_runtime_get_sync(bank->dev);
731
732         if (WARN_ON(!isr_reg))
733                 goto exit;
734
735         while (1) {
736                 u32 isr_saved, level_mask = 0;
737                 u32 enabled;
738
739                 enabled = omap_get_gpio_irqbank_mask(bank);
740                 isr_saved = isr = readl_relaxed(isr_reg) & enabled;
741
742                 if (bank->level_mask)
743                         level_mask = bank->level_mask & enabled;
744
745                 /* Clear edge-sensitive interrupts before the handler(s) are
746                  * called so that we don't miss any interrupts that occur
747                  * while they are executing. */
748                 omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
749                 omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
750                 omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
751
752                 /* If only edge-sensitive GPIO pin interrupts are configured,
753                  * we can unmask the GPIO bank interrupt immediately. */
754                 if (!level_mask && !unmasked) {
755                         unmasked = 1;
756                         chained_irq_exit(irqchip, desc);
757                 }
758
759                 if (!isr)
760                         break;
761
762                 while (isr) {
763                         bit = __ffs(isr);
764                         isr &= ~(BIT(bit));
765
766                         /*
767                          * Some chips can't respond to both rising and falling
768                          * at the same time.  If this irq was requested with
769                          * both flags, we need to flip the ICR data for the IRQ
770                          * to respond to the IRQ for the opposite direction.
771                          * This will be indicated in the bank toggle_mask.
772                          */
773                         if (bank->toggle_mask & (BIT(bit)))
774                                 omap_toggle_gpio_edge_triggering(bank, bit);
775
776                         generic_handle_irq(irq_find_mapping(bank->chip.irqdomain,
777                                                             bit));
778                 }
779         }
780         /* If the bank has any level-sensitive GPIO pin interrupts
781          * configured, we must unmask the bank interrupt only after the
782          * handler(s) have been executed, in order to avoid a spurious
783          * bank interrupt. */
784 exit:
785         if (!unmasked)
786                 chained_irq_exit(irqchip, desc);
787         pm_runtime_put(bank->dev);
788 }
789
790 static unsigned int omap_gpio_irq_startup(struct irq_data *d)
791 {
792         struct gpio_bank *bank = omap_irq_data_get_bank(d);
793         unsigned long flags;
794         unsigned offset = d->hwirq;
795
796         if (!BANK_USED(bank))
797                 pm_runtime_get_sync(bank->dev);
798
799         spin_lock_irqsave(&bank->lock, flags);
800         omap_gpio_init_irq(bank, offset);
801         spin_unlock_irqrestore(&bank->lock, flags);
802         omap_gpio_unmask_irq(d);
803
804         return 0;
805 }
806
807 static void omap_gpio_irq_shutdown(struct irq_data *d)
808 {
809         struct gpio_bank *bank = omap_irq_data_get_bank(d);
810         unsigned long flags;
811         unsigned offset = d->hwirq;
812
813         spin_lock_irqsave(&bank->lock, flags);
814         bank->irq_usage &= ~(BIT(offset));
815         omap_set_gpio_irqenable(bank, offset, 0);
816         omap_clear_gpio_irqstatus(bank, offset);
817         omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
818         if (!LINE_USED(bank->mod_usage, offset))
819                 omap_clear_gpio_debounce(bank, offset);
820         omap_disable_gpio_module(bank, offset);
821         spin_unlock_irqrestore(&bank->lock, flags);
822
823         /*
824          * If this is the last IRQ to be freed in the bank,
825          * disable the bank module.
826          */
827         if (!BANK_USED(bank))
828                 pm_runtime_put(bank->dev);
829 }
830
831 static void omap_gpio_ack_irq(struct irq_data *d)
832 {
833         struct gpio_bank *bank = omap_irq_data_get_bank(d);
834         unsigned offset = d->hwirq;
835
836         omap_clear_gpio_irqstatus(bank, offset);
837 }
838
839 static void omap_gpio_mask_irq(struct irq_data *d)
840 {
841         struct gpio_bank *bank = omap_irq_data_get_bank(d);
842         unsigned offset = d->hwirq;
843         unsigned long flags;
844
845         spin_lock_irqsave(&bank->lock, flags);
846         omap_set_gpio_irqenable(bank, offset, 0);
847         omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
848         spin_unlock_irqrestore(&bank->lock, flags);
849 }
850
851 static void omap_gpio_unmask_irq(struct irq_data *d)
852 {
853         struct gpio_bank *bank = omap_irq_data_get_bank(d);
854         unsigned offset = d->hwirq;
855         u32 trigger = irqd_get_trigger_type(d);
856         unsigned long flags;
857
858         spin_lock_irqsave(&bank->lock, flags);
859         if (trigger)
860                 omap_set_gpio_triggering(bank, offset, trigger);
861
862         /* For level-triggered GPIOs, the clearing must be done after
863          * the HW source is cleared, thus after the handler has run */
864         if (bank->level_mask & BIT(offset)) {
865                 omap_set_gpio_irqenable(bank, offset, 0);
866                 omap_clear_gpio_irqstatus(bank, offset);
867         }
868
869         omap_set_gpio_irqenable(bank, offset, 1);
870         spin_unlock_irqrestore(&bank->lock, flags);
871 }
872
873 /*---------------------------------------------------------------------*/
874
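/*
 * System sleep hooks for the OMAP1 MPUIO bank: around suspend/resume the
 * MPUIO interrupt mask register is reprogrammed from the saved wake-enable
 * bits (bank->context.wake_en) so that only wake-enabled lines can wake the
 * system while it is suspended.
 */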
875 static int omap_mpuio_suspend_noirq(struct device *dev)
876 {
877         struct platform_device *pdev = to_platform_device(dev);
878         struct gpio_bank        *bank = platform_get_drvdata(pdev);
879         void __iomem            *mask_reg = bank->base +
880                                         OMAP_MPUIO_GPIO_MASKIT / bank->stride;
881         unsigned long           flags;
882
883         spin_lock_irqsave(&bank->lock, flags);
884         writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
885         spin_unlock_irqrestore(&bank->lock, flags);
886
887         return 0;
888 }
889
890 static int omap_mpuio_resume_noirq(struct device *dev)
891 {
892         struct platform_device *pdev = to_platform_device(dev);
893         struct gpio_bank        *bank = platform_get_drvdata(pdev);
894         void __iomem            *mask_reg = bank->base +
895                                         OMAP_MPUIO_GPIO_MASKIT / bank->stride;
896         unsigned long           flags;
897
898         spin_lock_irqsave(&bank->lock, flags);
899         writel_relaxed(bank->context.wake_en, mask_reg);
900         spin_unlock_irqrestore(&bank->lock, flags);
901
902         return 0;
903 }
904
905 static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
906         .suspend_noirq = omap_mpuio_suspend_noirq,
907         .resume_noirq = omap_mpuio_resume_noirq,
908 };
909
910 /* Use a platform_driver for the MPUIO bank so it gets the PM callbacks above. */
911 static struct platform_driver omap_mpuio_driver = {
912         .driver         = {
913                 .name   = "mpuio",
914                 .pm     = &omap_mpuio_dev_pm_ops,
915         },
916 };
917
918 static struct platform_device omap_mpuio_device = {
919         .name           = "mpuio",
920         .id             = -1,
921         .dev = {
922                 .driver = &omap_mpuio_driver.driver,
923         }
924         /* could list the /proc/iomem resources */
925 };
926
927 static inline void omap_mpuio_init(struct gpio_bank *bank)
928 {
929         platform_set_drvdata(&omap_mpuio_device, bank);
930
931         if (platform_driver_register(&omap_mpuio_driver) == 0)
932                 (void) platform_device_register(&omap_mpuio_device);
933 }
934
935 /*---------------------------------------------------------------------*/
936
937 static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
938 {
939         struct gpio_bank *bank;
940         unsigned long flags;
941         void __iomem *reg;
942         int dir;
943
944         bank = container_of(chip, struct gpio_bank, chip);
945         reg = bank->base + bank->regs->direction;
946         spin_lock_irqsave(&bank->lock, flags);
947         dir = !!(readl_relaxed(reg) & BIT(offset));
948         spin_unlock_irqrestore(&bank->lock, flags);
949         return dir;
950 }
951
952 static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
953 {
954         struct gpio_bank *bank;
955         unsigned long flags;
956
957         bank = container_of(chip, struct gpio_bank, chip);
958         spin_lock_irqsave(&bank->lock, flags);
959         omap_set_gpio_direction(bank, offset, 1);
960         spin_unlock_irqrestore(&bank->lock, flags);
961         return 0;
962 }
963
964 static int omap_gpio_get(struct gpio_chip *chip, unsigned offset)
965 {
966         struct gpio_bank *bank;
967
968         bank = container_of(chip, struct gpio_bank, chip);
969
970         if (omap_gpio_is_input(bank, offset))
971                 return omap_get_gpio_datain(bank, offset);
972         else
973                 return omap_get_gpio_dataout(bank, offset);
974 }
975
976 static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
977 {
978         struct gpio_bank *bank;
979         unsigned long flags;
980
981         bank = container_of(chip, struct gpio_bank, chip);
982         spin_lock_irqsave(&bank->lock, flags);
983         bank->set_dataout(bank, offset, value);
984         omap_set_gpio_direction(bank, offset, 0);
985         spin_unlock_irqrestore(&bank->lock, flags);
986         return 0;
987 }
988
989 static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
990                               unsigned debounce)
991 {
992         struct gpio_bank *bank;
993         unsigned long flags;
994
995         bank = container_of(chip, struct gpio_bank, chip);
996
997         spin_lock_irqsave(&bank->lock, flags);
998         omap2_set_gpio_debounce(bank, offset, debounce);
999         spin_unlock_irqrestore(&bank->lock, flags);
1000
1001         return 0;
1002 }
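/*
 * This is the gpio_chip .set_debounce hook; consumers normally reach it
 * through gpiolib, roughly like the sketch below (values illustrative,
 * debounce time in microseconds):
 *
 *	gpio_request(gpio, "keypad");
 *	gpio_direction_input(gpio);
 *	gpio_set_debounce(gpio, 50);
 */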
1003
1004 static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
1005 {
1006         struct gpio_bank *bank;
1007         unsigned long flags;
1008
1009         bank = container_of(chip, struct gpio_bank, chip);
1010         spin_lock_irqsave(&bank->lock, flags);
1011         bank->set_dataout(bank, offset, value);
1012         spin_unlock_irqrestore(&bank->lock, flags);
1013 }
1014
1015 /*---------------------------------------------------------------------*/
1016
1017 static void __init omap_gpio_show_rev(struct gpio_bank *bank)
1018 {
1019         static bool called;
1020         u32 rev;
1021
1022         if (called || bank->regs->revision == USHRT_MAX)
1023                 return;
1024
1025         rev = readw_relaxed(bank->base + bank->regs->revision);
1026         pr_info("OMAP GPIO hardware version %d.%d\n",
1027                 (rev >> 4) & 0x0f, rev & 0x0f);
1028
1029         called = true;
1030 }
1031
1032 static void omap_gpio_mod_init(struct gpio_bank *bank)
1033 {
1034         void __iomem *base = bank->base;
1035         u32 l = 0xffffffff;
1036
1037         if (bank->width == 16)
1038                 l = 0xffff;
1039
1040         if (bank->is_mpuio) {
1041                 writel_relaxed(l, bank->base + bank->regs->irqenable);
1042                 return;
1043         }
1044
1045         omap_gpio_rmw(base, bank->regs->irqenable, l,
1046                       bank->regs->irqenable_inv);
1047         omap_gpio_rmw(base, bank->regs->irqstatus, l,
1048                       !bank->regs->irqenable_inv);
1049         if (bank->regs->debounce_en)
1050                 writel_relaxed(0, base + bank->regs->debounce_en);
1051
1052         /* Save OE default value (0xffffffff) in the context */
1053         bank->context.oe = readl_relaxed(bank->base + bank->regs->direction);
1054          /* Initialize interface clk ungated, module enabled */
1055         if (bank->regs->ctrl)
1056                 writel_relaxed(0, base + bank->regs->ctrl);
1057
1058         bank->dbck = clk_get(bank->dev, "dbclk");
1059         if (IS_ERR(bank->dbck))
1060                 dev_err(bank->dev, "Could not get gpio dbck\n");
1061 }
1062
1063 static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
1064 {
1065         static int gpio;
1066         int irq_base = 0;
1067         int ret;
1068
1069         /*
1070          * REVISIT eventually switch from OMAP-specific gpio structs
1071          * over to the generic ones
1072          */
1073         bank->chip.request = omap_gpio_request;
1074         bank->chip.free = omap_gpio_free;
1075         bank->chip.get_direction = omap_gpio_get_direction;
1076         bank->chip.direction_input = omap_gpio_input;
1077         bank->chip.get = omap_gpio_get;
1078         bank->chip.direction_output = omap_gpio_output;
1079         bank->chip.set_debounce = omap_gpio_debounce;
1080         bank->chip.set = omap_gpio_set;
1081         if (bank->is_mpuio) {
1082                 bank->chip.label = "mpuio";
1083                 if (bank->regs->wkup_en)
1084                         bank->chip.dev = &omap_mpuio_device.dev;
1085                 bank->chip.base = OMAP_MPUIO(0);
1086         } else {
1087                 bank->chip.label = "gpio";
1088                 bank->chip.base = gpio;
1089                 gpio += bank->width;
1090         }
1091         bank->chip.ngpio = bank->width;
1092
1093         ret = gpiochip_add(&bank->chip);
1094         if (ret) {
1095                 dev_err(bank->dev, "Could not register gpio chip %d\n", ret);
1096                 return ret;
1097         }
1098
1099 #ifdef CONFIG_ARCH_OMAP1
1100         /*
1101          * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
1102          * irq_alloc_descs() since a base IRQ offset will no longer be needed.
1103          */
1104         irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
1105         if (irq_base < 0) {
1106                 dev_err(bank->dev, "Couldn't allocate IRQ numbers\n");
1107                 return -ENODEV;
1108         }
1109 #endif
1110
1111         /* MPUIO is a bit different, reading IRQ status clears it */
1112         if (bank->is_mpuio) {
1113                 irqc->irq_ack = dummy_irq_chip.irq_ack;
1114                 irqc->irq_mask = irq_gc_mask_set_bit;
1115                 irqc->irq_unmask = irq_gc_mask_clr_bit;
1116                 if (!bank->regs->wkup_en)
1117                         irqc->irq_set_wake = NULL;
1118         }
1119
1120         ret = gpiochip_irqchip_add(&bank->chip, irqc,
1121                                    irq_base, omap_gpio_irq_handler,
1122                                    IRQ_TYPE_NONE);
1123
1124         if (ret) {
1125                 dev_err(bank->dev, "Couldn't add irqchip to gpiochip %d\n", ret);
1126                 gpiochip_remove(&bank->chip);
1127                 return -ENODEV;
1128         }
1129
1130         gpiochip_set_chained_irqchip(&bank->chip, irqc,
1131                                      bank->irq, omap_gpio_irq_handler);
1132
1133         return 0;
1134 }
1135
1136 static const struct of_device_id omap_gpio_match[];
1137
1138 static int omap_gpio_probe(struct platform_device *pdev)
1139 {
1140         struct device *dev = &pdev->dev;
1141         struct device_node *node = dev->of_node;
1142         const struct of_device_id *match;
1143         const struct omap_gpio_platform_data *pdata;
1144         struct resource *res;
1145         struct gpio_bank *bank;
1146         struct irq_chip *irqc;
1147         int ret;
1148
1149         match = of_match_device(of_match_ptr(omap_gpio_match), dev);
1150
1151         pdata = match ? match->data : dev_get_platdata(dev);
1152         if (!pdata)
1153                 return -EINVAL;
1154
1155         bank = devm_kzalloc(dev, sizeof(struct gpio_bank), GFP_KERNEL);
1156         if (!bank) {
1157                 dev_err(dev, "Memory alloc failed\n");
1158                 return -ENOMEM;
1159         }
1160
1161         irqc = devm_kzalloc(dev, sizeof(*irqc), GFP_KERNEL);
1162         if (!irqc)
1163                 return -ENOMEM;
1164
1165         irqc->irq_startup = omap_gpio_irq_startup;
1166         irqc->irq_shutdown = omap_gpio_irq_shutdown;
1167         irqc->irq_ack = omap_gpio_ack_irq;
1168         irqc->irq_mask = omap_gpio_mask_irq;
1169         irqc->irq_unmask = omap_gpio_unmask_irq;
1170         irqc->irq_set_type = omap_gpio_irq_type;
1171         irqc->irq_set_wake = omap_gpio_wake_enable;
1172         irqc->name = dev_name(&pdev->dev);
1173
1174         res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1175         if (unlikely(!res)) {
1176                 dev_err(dev, "Invalid IRQ resource\n");
1177                 return -ENODEV;
1178         }
1179
1180         bank->irq = res->start;
1181         bank->dev = dev;
1182         bank->chip.dev = dev;
1183         bank->dbck_flag = pdata->dbck_flag;
1184         bank->stride = pdata->bank_stride;
1185         bank->width = pdata->bank_width;
1186         bank->is_mpuio = pdata->is_mpuio;
1187         bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
1188         bank->regs = pdata->regs;
1189 #ifdef CONFIG_OF_GPIO
1190         bank->chip.of_node = of_node_get(node);
1191 #endif
1192         if (node) {
1193                 if (!of_property_read_bool(node, "ti,gpio-always-on"))
1194                         bank->loses_context = true;
1195         } else {
1196                 bank->loses_context = pdata->loses_context;
1197
1198                 if (bank->loses_context)
1199                         bank->get_context_loss_count =
1200                                 pdata->get_context_loss_count;
1201         }
1202
1203         if (bank->regs->set_dataout && bank->regs->clr_dataout)
1204                 bank->set_dataout = omap_set_gpio_dataout_reg;
1205         else
1206                 bank->set_dataout = omap_set_gpio_dataout_mask;
1207
1208         spin_lock_init(&bank->lock);
1209
1210         /* Static mapping, never released */
1211         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1212         bank->base = devm_ioremap_resource(dev, res);
1213         if (IS_ERR(bank->base)) {
1214                 /* No irqdomain has been set up yet, so nothing to undo here */
1215                 return PTR_ERR(bank->base);
1216         }
1217
1218         platform_set_drvdata(pdev, bank);
1219
1220         pm_runtime_enable(bank->dev);
1221         pm_runtime_irq_safe(bank->dev);
1222         pm_runtime_get_sync(bank->dev);
1223
1224         if (bank->is_mpuio)
1225                 omap_mpuio_init(bank);
1226
1227         omap_gpio_mod_init(bank);
1228
1229         ret = omap_gpio_chip_init(bank, irqc);
1230         if (ret)
1231                 return ret;
1232
1233         omap_gpio_show_rev(bank);
1234
1235         pm_runtime_put(bank->dev);
1236
1237         list_add_tail(&bank->node, &omap_gpio_list);
1238
1239         return 0;
1240 }
1241
1242 static int omap_gpio_remove(struct platform_device *pdev)
1243 {
1244         struct gpio_bank *bank = platform_get_drvdata(pdev);
1245
1246         list_del(&bank->node);
1247         gpiochip_remove(&bank->chip);
1248         pm_runtime_disable(bank->dev);
1249
1250         return 0;
1251 }
1252
1253 #ifdef CONFIG_ARCH_OMAP2PLUS
1254
1255 #if defined(CONFIG_PM)
1256 static void omap_gpio_restore_context(struct gpio_bank *bank);
1257
1258 static int omap_gpio_runtime_suspend(struct device *dev)
1259 {
1260         struct platform_device *pdev = to_platform_device(dev);
1261         struct gpio_bank *bank = platform_get_drvdata(pdev);
1262         u32 l1 = 0, l2 = 0;
1263         unsigned long flags;
1264         u32 wake_low, wake_hi;
1265
1266         spin_lock_irqsave(&bank->lock, flags);
1267
1268         /*
1269          * Only edges can generate a wakeup event to the PRCM.
1270          *
1271          * Therefore, ensure any wake-up capable GPIOs have
1272          * edge-detection enabled before going idle to ensure a wakeup
1273          * to the PRCM is generated on a GPIO transition. (c.f. 34xx
1274          * NDA TRM 25.5.3.1)
1275          *
1276          * The normal values will be restored upon ->runtime_resume()
1277          * by writing back the values saved in bank->context.
1278          */
1279         wake_low = bank->context.leveldetect0 & bank->context.wake_en;
1280         if (wake_low)
1281                 writel_relaxed(wake_low | bank->context.fallingdetect,
1282                              bank->base + bank->regs->fallingdetect);
1283         wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
1284         if (wake_hi)
1285                 writel_relaxed(wake_hi | bank->context.risingdetect,
1286                              bank->base + bank->regs->risingdetect);
1287
1288         if (!bank->enabled_non_wakeup_gpios)
1289                 goto update_gpio_context_count;
1290
1291         if (bank->power_mode != OFF_MODE) {
1292                 bank->power_mode = 0;
1293                 goto update_gpio_context_count;
1294         }
1295         /*
1296          * If going to OFF, remove triggering for all
1297          * non-wakeup GPIOs.  Otherwise spurious IRQs will be
1298          * generated.  See OMAP2420 Errata item 1.101.
1299          */
1300         bank->saved_datain = readl_relaxed(bank->base +
1301                                                 bank->regs->datain);
1302         l1 = bank->context.fallingdetect;
1303         l2 = bank->context.risingdetect;
1304
1305         l1 &= ~bank->enabled_non_wakeup_gpios;
1306         l2 &= ~bank->enabled_non_wakeup_gpios;
1307
1308         writel_relaxed(l1, bank->base + bank->regs->fallingdetect);
1309         writel_relaxed(l2, bank->base + bank->regs->risingdetect);
1310
1311         bank->workaround_enabled = true;
1312
1313 update_gpio_context_count:
1314         if (bank->get_context_loss_count)
1315                 bank->context_loss_count =
1316                                 bank->get_context_loss_count(bank->dev);
1317
1318         omap_gpio_dbck_disable(bank);
1319         spin_unlock_irqrestore(&bank->lock, flags);
1320
1321         return 0;
1322 }
1323
1324 static void omap_gpio_init_context(struct gpio_bank *p);
1325
1326 static int omap_gpio_runtime_resume(struct device *dev)
1327 {
1328         struct platform_device *pdev = to_platform_device(dev);
1329         struct gpio_bank *bank = platform_get_drvdata(pdev);
1330         u32 l = 0, gen, gen0, gen1;
1331         unsigned long flags;
1332         int c;
1333
1334         spin_lock_irqsave(&bank->lock, flags);
1335
1336         /*
1337          * On the first resume during the probe, the context has not
1338          * been initialised and so initialise it now. Also initialise
1339          * the context loss count.
1340          */
1341         if (bank->loses_context && !bank->context_valid) {
1342                 omap_gpio_init_context(bank);
1343
1344                 if (bank->get_context_loss_count)
1345                         bank->context_loss_count =
1346                                 bank->get_context_loss_count(bank->dev);
1347         }
1348
1349         omap_gpio_dbck_enable(bank);
1350
1351         /*
1352          * In ->runtime_suspend(), level-triggered, wakeup-enabled
1353          * GPIOs were set to edge trigger also in order to be able to
1354          * generate a PRCM wakeup.  Here we restore the
1355          * pre-runtime_suspend() values for edge triggering.
1356          */
1357         writel_relaxed(bank->context.fallingdetect,
1358                      bank->base + bank->regs->fallingdetect);
1359         writel_relaxed(bank->context.risingdetect,
1360                      bank->base + bank->regs->risingdetect);
1361
1362         if (bank->loses_context) {
1363                 if (!bank->get_context_loss_count) {
1364                         omap_gpio_restore_context(bank);
1365                 } else {
1366                         c = bank->get_context_loss_count(bank->dev);
1367                         if (c != bank->context_loss_count) {
1368                                 omap_gpio_restore_context(bank);
1369                         } else {
1370                                 spin_unlock_irqrestore(&bank->lock, flags);
1371                                 return 0;
1372                         }
1373                 }
1374         }
1375
1376         if (!bank->workaround_enabled) {
1377                 spin_unlock_irqrestore(&bank->lock, flags);
1378                 return 0;
1379         }
1380
1381         l = readl_relaxed(bank->base + bank->regs->datain);
1382
1383         /*
1384          * Check if any of the non-wakeup interrupt GPIOs have changed
1385          * state.  If so, generate an IRQ by software.  This is
1386          * horribly racy, but it's the best we can do to work around
1387          * this silicon bug.
1388          */
1389         l ^= bank->saved_datain;
1390         l &= bank->enabled_non_wakeup_gpios;
1391
1392         /*
1393          * No need to generate IRQs for the rising edge for gpio IRQs
1394          * configured with falling edge only; and vice versa.
1395          */
1396         gen0 = l & bank->context.fallingdetect;
1397         gen0 &= bank->saved_datain;
1398
1399         gen1 = l & bank->context.risingdetect;
1400         gen1 &= ~(bank->saved_datain);
1401
1402         /* FIXME: Consider GPIO IRQs with level detections properly! */
1403         gen = l & (~(bank->context.fallingdetect) &
1404                                          ~(bank->context.risingdetect));
1405         /* Consider all GPIO IRQs that need to be updated */
1406         gen |= gen0 | gen1;
1407
1408         if (gen) {
1409                 u32 old0, old1;
1410
1411                 old0 = readl_relaxed(bank->base + bank->regs->leveldetect0);
1412                 old1 = readl_relaxed(bank->base + bank->regs->leveldetect1);
1413
1414                 if (!bank->regs->irqstatus_raw0) {
1415                         writel_relaxed(old0 | gen, bank->base +
1416                                                 bank->regs->leveldetect0);
1417                         writel_relaxed(old1 | gen, bank->base +
1418                                                 bank->regs->leveldetect1);
1419                 }
1420
1421                 if (bank->regs->irqstatus_raw0) {
1422                         writel_relaxed(old0 | l, bank->base +
1423                                                 bank->regs->leveldetect0);
1424                         writel_relaxed(old1 | l, bank->base +
1425                                                 bank->regs->leveldetect1);
1426                 }
1427                 writel_relaxed(old0, bank->base + bank->regs->leveldetect0);
1428                 writel_relaxed(old1, bank->base + bank->regs->leveldetect1);
1429         }
1430
1431         bank->workaround_enabled = false;
1432         spin_unlock_irqrestore(&bank->lock, flags);
1433
1434         return 0;
1435 }
1436 #endif /* CONFIG_PM */
1437
1438 #if IS_BUILTIN(CONFIG_GPIO_OMAP)
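/*
 * Hooks called from the OMAP power management code around deep idle: banks
 * that are in use and lose context get their runtime PM state bounced so
 * that omap_gpio_runtime_suspend/resume() can apply the idle workarounds
 * and save/restore the register context.
 */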
1439 void omap2_gpio_prepare_for_idle(int pwr_mode)
1440 {
1441         struct gpio_bank *bank;
1442
1443         list_for_each_entry(bank, &omap_gpio_list, node) {
1444                 if (!BANK_USED(bank) || !bank->loses_context)
1445                         continue;
1446
1447                 bank->power_mode = pwr_mode;
1448
1449                 pm_runtime_put_sync_suspend(bank->dev);
1450         }
1451 }
1452
1453 void omap2_gpio_resume_after_idle(void)
1454 {
1455         struct gpio_bank *bank;
1456
1457         list_for_each_entry(bank, &omap_gpio_list, node) {
1458                 if (!BANK_USED(bank) || !bank->loses_context)
1459                         continue;
1460
1461                 pm_runtime_get_sync(bank->dev);
1462         }
1463 }
1464 #endif
1465
1466 #if defined(CONFIG_PM)
1467 static void omap_gpio_init_context(struct gpio_bank *p)
1468 {
1469         struct omap_gpio_reg_offs *regs = p->regs;
1470         void __iomem *base = p->base;
1471
1472         p->context.ctrl         = readl_relaxed(base + regs->ctrl);
1473         p->context.oe           = readl_relaxed(base + regs->direction);
1474         p->context.wake_en      = readl_relaxed(base + regs->wkup_en);
1475         p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0);
1476         p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1);
1477         p->context.risingdetect = readl_relaxed(base + regs->risingdetect);
1478         p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
1479         p->context.irqenable1   = readl_relaxed(base + regs->irqenable);
1480         p->context.irqenable2   = readl_relaxed(base + regs->irqenable2);
1481
1482         if (regs->set_dataout && p->regs->clr_dataout)
1483                 p->context.dataout = readl_relaxed(base + regs->set_dataout);
1484         else
1485                 p->context.dataout = readl_relaxed(base + regs->dataout);
1486
1487         p->context_valid = true;
1488 }
1489
1490 static void omap_gpio_restore_context(struct gpio_bank *bank)
1491 {
1492         writel_relaxed(bank->context.wake_en,
1493                                 bank->base + bank->regs->wkup_en);
1494         writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl);
1495         writel_relaxed(bank->context.leveldetect0,
1496                                 bank->base + bank->regs->leveldetect0);
1497         writel_relaxed(bank->context.leveldetect1,
1498                                 bank->base + bank->regs->leveldetect1);
1499         writel_relaxed(bank->context.risingdetect,
1500                                 bank->base + bank->regs->risingdetect);
1501         writel_relaxed(bank->context.fallingdetect,
1502                                 bank->base + bank->regs->fallingdetect);
1503         if (bank->regs->set_dataout && bank->regs->clr_dataout)
1504                 writel_relaxed(bank->context.dataout,
1505                                 bank->base + bank->regs->set_dataout);
1506         else
1507                 writel_relaxed(bank->context.dataout,
1508                                 bank->base + bank->regs->dataout);
1509         writel_relaxed(bank->context.oe, bank->base + bank->regs->direction);
1510
1511         if (bank->dbck_enable_mask) {
1512                 writel_relaxed(bank->context.debounce, bank->base +
1513                                         bank->regs->debounce);
1514                 writel_relaxed(bank->context.debounce_en,
1515                                         bank->base + bank->regs->debounce_en);
1516         }
1517
1518         writel_relaxed(bank->context.irqenable1,
1519                                 bank->base + bank->regs->irqenable);
1520         writel_relaxed(bank->context.irqenable2,
1521                                 bank->base + bank->regs->irqenable2);
1522 }
1523 #endif /* CONFIG_PM */
1524 #else
1525 #define omap_gpio_runtime_suspend NULL
1526 #define omap_gpio_runtime_resume NULL
1527 static inline void omap_gpio_init_context(struct gpio_bank *p) {}
1528 #endif
1529
1530 static const struct dev_pm_ops gpio_pm_ops = {
1531         SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
1532                                                                         NULL)
1533 };
1534
1535 #if defined(CONFIG_OF)
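/*
 * Per-SoC register offset tables and platform data, matched against the DT
 * compatible strings in omap_gpio_match[] below. OMAP2 and OMAP3 share a
 * register layout (OMAP3 additionally has the debounce clock), while OMAP4
 * and later use the relocated IRQSTATUS/IRQSTATUSSET register scheme.
 */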
1536 static struct omap_gpio_reg_offs omap2_gpio_regs = {
1537         .revision =             OMAP24XX_GPIO_REVISION,
1538         .direction =            OMAP24XX_GPIO_OE,
1539         .datain =               OMAP24XX_GPIO_DATAIN,
1540         .dataout =              OMAP24XX_GPIO_DATAOUT,
1541         .set_dataout =          OMAP24XX_GPIO_SETDATAOUT,
1542         .clr_dataout =          OMAP24XX_GPIO_CLEARDATAOUT,
1543         .irqstatus =            OMAP24XX_GPIO_IRQSTATUS1,
1544         .irqstatus2 =           OMAP24XX_GPIO_IRQSTATUS2,
1545         .irqenable =            OMAP24XX_GPIO_IRQENABLE1,
1546         .irqenable2 =           OMAP24XX_GPIO_IRQENABLE2,
1547         .set_irqenable =        OMAP24XX_GPIO_SETIRQENABLE1,
1548         .clr_irqenable =        OMAP24XX_GPIO_CLEARIRQENABLE1,
1549         .debounce =             OMAP24XX_GPIO_DEBOUNCE_VAL,
1550         .debounce_en =          OMAP24XX_GPIO_DEBOUNCE_EN,
1551         .ctrl =                 OMAP24XX_GPIO_CTRL,
1552         .wkup_en =              OMAP24XX_GPIO_WAKE_EN,
1553         .leveldetect0 =         OMAP24XX_GPIO_LEVELDETECT0,
1554         .leveldetect1 =         OMAP24XX_GPIO_LEVELDETECT1,
1555         .risingdetect =         OMAP24XX_GPIO_RISINGDETECT,
1556         .fallingdetect =        OMAP24XX_GPIO_FALLINGDETECT,
1557 };
1558
1559 static struct omap_gpio_reg_offs omap4_gpio_regs = {
1560         .revision =             OMAP4_GPIO_REVISION,
1561         .direction =            OMAP4_GPIO_OE,
1562         .datain =               OMAP4_GPIO_DATAIN,
1563         .dataout =              OMAP4_GPIO_DATAOUT,
1564         .set_dataout =          OMAP4_GPIO_SETDATAOUT,
1565         .clr_dataout =          OMAP4_GPIO_CLEARDATAOUT,
1566         .irqstatus =            OMAP4_GPIO_IRQSTATUS0,
1567         .irqstatus2 =           OMAP4_GPIO_IRQSTATUS1,
1568         .irqenable =            OMAP4_GPIO_IRQSTATUSSET0,
1569         .irqenable2 =           OMAP4_GPIO_IRQSTATUSSET1,
1570         .set_irqenable =        OMAP4_GPIO_IRQSTATUSSET0,
1571         .clr_irqenable =        OMAP4_GPIO_IRQSTATUSCLR0,
1572         .debounce =             OMAP4_GPIO_DEBOUNCINGTIME,
1573         .debounce_en =          OMAP4_GPIO_DEBOUNCENABLE,
1574         .ctrl =                 OMAP4_GPIO_CTRL,
1575         .wkup_en =              OMAP4_GPIO_IRQWAKEN0,
1576         .leveldetect0 =         OMAP4_GPIO_LEVELDETECT0,
1577         .leveldetect1 =         OMAP4_GPIO_LEVELDETECT1,
1578         .risingdetect =         OMAP4_GPIO_RISINGDETECT,
1579         .fallingdetect =        OMAP4_GPIO_FALLINGDETECT,
1580 };
1581
1582 static const struct omap_gpio_platform_data omap2_pdata = {
1583         .regs = &omap2_gpio_regs,
1584         .bank_width = 32,
1585         .dbck_flag = false,
1586 };
1587
1588 static const struct omap_gpio_platform_data omap3_pdata = {
1589         .regs = &omap2_gpio_regs,
1590         .bank_width = 32,
1591         .dbck_flag = true,
1592 };
1593
1594 static const struct omap_gpio_platform_data omap4_pdata = {
1595         .regs = &omap4_gpio_regs,
1596         .bank_width = 32,
1597         .dbck_flag = true,
1598 };
1599
1600 static const struct of_device_id omap_gpio_match[] = {
1601         {
1602                 .compatible = "ti,omap4-gpio",
1603                 .data = &omap4_pdata,
1604         },
1605         {
1606                 .compatible = "ti,omap3-gpio",
1607                 .data = &omap3_pdata,
1608         },
1609         {
1610                 .compatible = "ti,omap2-gpio",
1611                 .data = &omap2_pdata,
1612         },
1613         { },
1614 };
1615 MODULE_DEVICE_TABLE(of, omap_gpio_match);
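/*
 * For reference, a hypothetical DT node this driver would bind to
 * (addresses, interrupt numbers and the hwmod name are illustrative only):
 *
 *	gpio1: gpio@4a310000 {
 *		compatible = "ti,omap4-gpio";
 *		reg = <0x4a310000 0x200>;
 *		interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
 *		ti,hwmods = "gpio1";
 *		gpio-controller;
 *		#gpio-cells = <2>;
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *	};
 */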
1616 #endif
1617
1618 static struct platform_driver omap_gpio_driver = {
1619         .probe          = omap_gpio_probe,
1620         .remove         = omap_gpio_remove,
1621         .driver         = {
1622                 .name   = "omap_gpio",
1623                 .pm     = &gpio_pm_ops,
1624                 .of_match_table = of_match_ptr(omap_gpio_match),
1625         },
1626 };
1627
1628 /*
1629  * The gpio driver needs to be registered before the machine_init
1630  * functions access the gpio APIs; hence omap_gpio_drv_reg() is a
1631  * postcore_initcall.
1632  */
1633 static int __init omap_gpio_drv_reg(void)
1634 {
1635         return platform_driver_register(&omap_gpio_driver);
1636 }
1637 postcore_initcall(omap_gpio_drv_reg);
1638
1639 static void __exit omap_gpio_exit(void)
1640 {
1641         platform_driver_unregister(&omap_gpio_driver);
1642 }
1643 module_exit(omap_gpio_exit);
1644
1645 MODULE_DESCRIPTION("omap gpio driver");
1646 MODULE_ALIAS("platform:gpio-omap");
1647 MODULE_LICENSE("GPL v2");