Merge branch 'samsung/soc' into next/soc2
[firefly-linux-kernel-4.4.55.git] / arch / arm / mach-exynos / mct.c
1 /* linux/arch/arm/mach-exynos4/mct.c
2  *
3  * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4  *              http://www.samsung.com
5  *
6  * EXYNOS4 MCT(Multi-Core Timer) support
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11 */
12
13 #include <linux/sched.h>
14 #include <linux/interrupt.h>
15 #include <linux/irq.h>
16 #include <linux/err.h>
17 #include <linux/clk.h>
18 #include <linux/clockchips.h>
19 #include <linux/platform_device.h>
20 #include <linux/delay.h>
21 #include <linux/percpu.h>
22
23 #include <asm/hardware/gic.h>
24
25 #include <plat/cpu.h>
26
27 #include <mach/map.h>
28 #include <mach/irqs.h>
29 #include <mach/regs-mct.h>
30 #include <asm/mach/time.h>
31
/* Value loaded into each local timer's TCNTB; the local event clock
 * effectively runs at clk_rate / (TICK_BASE_CNT + 1) (see
 * exynos4_mct_tick_init()). */
#define TICK_BASE_CNT	1

/* How the local-timer interrupts are wired: dedicated SPI lines per
 * cpu (EXYNOS4210) or one shared per-cpu PPI (later SoCs). */
enum {
	MCT_INT_SPI,
	MCT_INT_PPI
};

static unsigned long clk_rate;		/* MCT input clock rate in Hz, from the "xtal" clk */
static unsigned int mct_int_type;	/* MCT_INT_SPI or MCT_INT_PPI, set at timer init */

/* Per-cpu state for one local (per-core) MCT timer channel. */
struct mct_clock_event_device {
	struct clock_event_device *evt;	/* clockevent registered for this cpu */
	void __iomem *base;		/* MMIO base of this cpu's L timer block */
	char name[10];			/* "mct_tickN" */
};
47
/*
 * Write an MCT register and, for registers that have one, poll the
 * matching write-status (WSTAT) bit until the value has crossed into
 * the timer clock domain, then ack the status bit.  Registers without
 * a WSTAT bit return right after the raw write.  Panics if the status
 * bit never appears (timer block hung / not clocked).
 */
static void exynos4_mct_write(unsigned int value, void *addr)
{
	void __iomem *stat_addr;
	u32 mask;
	u32 i;

	__raw_writel(value, addr);

	if (likely(addr >= EXYNOS4_MCT_L_BASE(0))) {
		/* Local (per-cpu) timer register: split the address into
		 * channel base + register offset to find the WSTAT bit. */
		u32 base = (u32) addr & EXYNOS4_MCT_L_MASK;
		switch ((u32) addr & ~EXYNOS4_MCT_L_MASK) {
		case (u32) MCT_L_TCON_OFFSET:
			stat_addr = (void __iomem *) base + MCT_L_WSTAT_OFFSET;
			mask = 1 << 3;		/* L_TCON write status */
			break;
		case (u32) MCT_L_ICNTB_OFFSET:
			stat_addr = (void __iomem *) base + MCT_L_WSTAT_OFFSET;
			mask = 1 << 1;		/* L_ICNTB write status */
			break;
		case (u32) MCT_L_TCNTB_OFFSET:
			stat_addr = (void __iomem *) base + MCT_L_WSTAT_OFFSET;
			mask = 1 << 0;		/* L_TCNTB write status */
			break;
		default:
			/* No WSTAT bit for this register (e.g. INT_ENB). */
			return;
		}
	} else {
		/* Global timer registers have fixed addresses. */
		switch ((u32) addr) {
		case (u32) EXYNOS4_MCT_G_TCON:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 16;		/* G_TCON write status */
			break;
		case (u32) EXYNOS4_MCT_G_COMP0_L:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 0;		/* G_COMP0_L write status */
			break;
		case (u32) EXYNOS4_MCT_G_COMP0_U:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 1;		/* G_COMP0_U write status */
			break;
		case (u32) EXYNOS4_MCT_G_COMP0_ADD_INCR:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 2;		/* G_COMP0_ADD_INCR w status */
			break;
		case (u32) EXYNOS4_MCT_G_CNT_L:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 0;		/* G_CNT_L write status */
			break;
		case (u32) EXYNOS4_MCT_G_CNT_U:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 1;		/* G_CNT_U write status */
			break;
		default:
			return;
		}
	}

	/* Wait maximum 1 ms until written values are applied */
	for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
		if (__raw_readl(stat_addr) & mask) {
			/* Write-1-to-clear the status bit and finish. */
			__raw_writel(mask, stat_addr);
			return;
		}

	panic("MCT hangs after writing %d (addr:0x%08x)\n", value, (u32)addr);
}
114
115 /* Clocksource handling */
116 static void exynos4_mct_frc_start(u32 hi, u32 lo)
117 {
118         u32 reg;
119
120         exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
121         exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);
122
123         reg = __raw_readl(EXYNOS4_MCT_G_TCON);
124         reg |= MCT_G_TCON_START;
125         exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
126 }
127
128 static cycle_t exynos4_frc_read(struct clocksource *cs)
129 {
130         unsigned int lo, hi;
131         u32 hi2 = __raw_readl(EXYNOS4_MCT_G_CNT_U);
132
133         do {
134                 hi = hi2;
135                 lo = __raw_readl(EXYNOS4_MCT_G_CNT_L);
136                 hi2 = __raw_readl(EXYNOS4_MCT_G_CNT_U);
137         } while (hi != hi2);
138
139         return ((cycle_t)hi << 32) | lo;
140 }
141
/* Counter state is lost across suspend; restart the FRC from zero. */
static void exynos4_frc_resume(struct clocksource *cs)
{
	exynos4_mct_frc_start(0, 0);
}
146
/*
 * Clocksource backed by the MCT 64-bit global free-running counter.
 * NOTE(review): not declared 'static' -- presumably referenced from
 * another file; confirm before narrowing linkage.
 */
struct clocksource mct_frc = {
	.name		= "mct-frc",
	.rating		= 400,
	.read		= exynos4_frc_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= exynos4_frc_resume,
};
155
/* Start the free-running counter at 0 and register it as a clocksource. */
static void __init exynos4_clocksource_init(void)
{
	exynos4_mct_frc_start(0, 0);

	if (clocksource_register_hz(&mct_frc, clk_rate))
		panic("%s: can't register clocksource\n", mct_frc.name);
}
163
164 static void exynos4_mct_comp0_stop(void)
165 {
166         unsigned int tcon;
167
168         tcon = __raw_readl(EXYNOS4_MCT_G_TCON);
169         tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);
170
171         exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
172         exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
173 }
174
/*
 * Arm global comparator 0 to fire 'cycles' FRC ticks from now.  In
 * periodic mode, ADD_INCR makes the hardware re-arm itself by 'cycles'
 * after every match.  The interrupt is unmasked before the comparator
 * is enabled so the first match cannot be lost.
 */
static void exynos4_mct_comp0_start(enum clock_event_mode mode,
				    unsigned long cycles)
{
	unsigned int tcon;
	cycle_t comp_cycle;

	tcon = __raw_readl(EXYNOS4_MCT_G_TCON);

	if (mode == CLOCK_EVT_MODE_PERIODIC) {
		tcon |= MCT_G_TCON_COMP0_AUTO_INC;
		exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
	}

	/* Absolute match value = current FRC count + requested delta. */
	comp_cycle = exynos4_frc_read(&mct_frc) + cycles;
	exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
	exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);

	tcon |= MCT_G_TCON_COMP0_ENABLE;
	exynos4_mct_write(tcon , EXYNOS4_MCT_G_TCON);
}
197
/* clockevent set_next_event hook: (re)arm comparator 0 for 'cycles'.
 * Always succeeds, hence the unconditional 0 return. */
static int exynos4_comp_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	exynos4_mct_comp0_start(evt->mode, cycles);

	return 0;
}
205
206 static void exynos4_comp_set_mode(enum clock_event_mode mode,
207                                   struct clock_event_device *evt)
208 {
209         unsigned long cycles_per_jiffy;
210         exynos4_mct_comp0_stop();
211
212         switch (mode) {
213         case CLOCK_EVT_MODE_PERIODIC:
214                 cycles_per_jiffy =
215                         (((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
216                 exynos4_mct_comp0_start(mode, cycles_per_jiffy);
217                 break;
218
219         case CLOCK_EVT_MODE_ONESHOT:
220         case CLOCK_EVT_MODE_UNUSED:
221         case CLOCK_EVT_MODE_SHUTDOWN:
222         case CLOCK_EVT_MODE_RESUME:
223                 break;
224         }
225 }
226
/* Global-comparator clockevent; rating 250 so the per-cpu local tick
 * (rating 450) is preferred when CONFIG_LOCAL_TIMERS is available. */
static struct clock_event_device mct_comp_device = {
	.name		= "mct-comp",
	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 250,
	.set_next_event	= exynos4_comp_set_next_event,
	.set_mode	= exynos4_comp_set_mode,
};
234
/* IRQ handler for the global comparator: ack the interrupt status
 * first, then dispatch to the clockevent core. */
static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	/* Write-1-to-clear the comparator 0 interrupt status. */
	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
245
/* irqaction for the global comparator line; dev_id carries the
 * clockevent so the ISR can dispatch without globals. */
static struct irqaction mct_comp_event_irq = {
	.name		= "mct_comp_irq",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= exynos4_mct_comp_isr,
	.dev_id		= &mct_comp_device,
};
252
253 static void exynos4_clockevent_init(void)
254 {
255         clockevents_calc_mult_shift(&mct_comp_device, clk_rate, 5);
256         mct_comp_device.max_delta_ns =
257                 clockevent_delta2ns(0xffffffff, &mct_comp_device);
258         mct_comp_device.min_delta_ns =
259                 clockevent_delta2ns(0xf, &mct_comp_device);
260         mct_comp_device.cpumask = cpumask_of(0);
261         clockevents_register_device(&mct_comp_device);
262
263         if (soc_is_exynos5250())
264                 setup_irq(EXYNOS5_IRQ_MCT_G0, &mct_comp_event_irq);
265         else
266                 setup_irq(EXYNOS4_IRQ_MCT_G0, &mct_comp_event_irq);
267 }
268
269 #ifdef CONFIG_LOCAL_TIMERS
270
/* Per-cpu local-tick state; handed to the ISR via dev_id. */
static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
272
273 /* Clock event handling */
274 static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
275 {
276         unsigned long tmp;
277         unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
278         void __iomem *addr = mevt->base + MCT_L_TCON_OFFSET;
279
280         tmp = __raw_readl(addr);
281         if (tmp & mask) {
282                 tmp &= ~mask;
283                 exynos4_mct_write(tmp, addr);
284         }
285 }
286
/*
 * Program this cpu's local timer to interrupt after 'cycles' local
 * ticks: stop it, reload the interrupt count buffer, unmask the tick
 * interrupt, then restart in interval (auto-reload) mode.
 */
static void exynos4_mct_tick_start(unsigned long cycles,
				   struct mct_clock_event_device *mevt)
{
	unsigned long tmp;

	exynos4_mct_tick_stop(mevt);

	/* Bit 31 requests the hardware to latch the new ICNTB value. */
	tmp = (1 << 31) | cycles;	/* MCT_L_UPDATE_ICNTB */

	/* update interrupt count buffer */
	exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);

	/* enable MCT tick interrupt */
	exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);

	tmp = __raw_readl(mevt->base + MCT_L_TCON_OFFSET);
	tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
	       MCT_L_TCON_INTERVAL_MODE;
	exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}
307
308 static int exynos4_tick_set_next_event(unsigned long cycles,
309                                        struct clock_event_device *evt)
310 {
311         struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
312
313         exynos4_mct_tick_start(cycles, mevt);
314
315         return 0;
316 }
317
318 static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
319                                          struct clock_event_device *evt)
320 {
321         struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
322         unsigned long cycles_per_jiffy;
323
324         exynos4_mct_tick_stop(mevt);
325
326         switch (mode) {
327         case CLOCK_EVT_MODE_PERIODIC:
328                 cycles_per_jiffy =
329                         (((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
330                 exynos4_mct_tick_start(cycles_per_jiffy, mevt);
331                 break;
332
333         case CLOCK_EVT_MODE_ONESHOT:
334         case CLOCK_EVT_MODE_UNUSED:
335         case CLOCK_EVT_MODE_SHUTDOWN:
336         case CLOCK_EVT_MODE_RESUME:
337                 break;
338         }
339 }
340
341 static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
342 {
343         struct clock_event_device *evt = mevt->evt;
344
345         /*
346          * This is for supporting oneshot mode.
347          * Mct would generate interrupt periodically
348          * without explicit stopping.
349          */
350         if (evt->mode != CLOCK_EVT_MODE_PERIODIC)
351                 exynos4_mct_tick_stop(mevt);
352
353         /* Clear the MCT tick interrupt */
354         if (__raw_readl(mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
355                 exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
356                 return 1;
357         } else {
358                 return 0;
359         }
360 }
361
/* Local-tick IRQ handler (both SPI and PPI wiring): clear/ack the
 * hardware interrupt, then dispatch to the clockevent core. */
static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
	struct mct_clock_event_device *mevt = dev_id;
	struct clock_event_device *evt = mevt->evt;

	exynos4_mct_tick_clear(mevt);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
373
/* irqactions for the SPI-wired local ticks (one per cpu); dev_id is
 * filled in with the cpu's percpu state in exynos4_mct_tick_init(). */
static struct irqaction mct_tick0_event_irq = {
	.name		= "mct_tick0_irq",
	.flags		= IRQF_TIMER | IRQF_NOBALANCING,
	.handler	= exynos4_mct_tick_isr,
};

static struct irqaction mct_tick1_event_irq = {
	.name		= "mct_tick1_irq",
	.flags		= IRQF_TIMER | IRQF_NOBALANCING,
	.handler	= exynos4_mct_tick_isr,
};
385
/*
 * Per-cpu bring-up of the local MCT tick: fill in this cpu's percpu
 * mct_clock_event_device, register the clockevent (rating 450, so it
 * outranks the global comparator), and wire the interrupt -- dedicated
 * SPI lines per cpu when mct_int_type is MCT_INT_SPI, a shared PPI
 * otherwise.  Must run on the cpu being set up (uses smp_processor_id()).
 */
static void exynos4_mct_tick_init(struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;
	unsigned int cpu = smp_processor_id();

	mevt = this_cpu_ptr(&percpu_mct_tick);
	mevt->evt = evt;

	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
	sprintf(mevt->name, "mct_tick%d", cpu);

	evt->name = mevt->name;
	evt->cpumask = cpumask_of(cpu);
	evt->set_next_event = exynos4_tick_set_next_event;
	evt->set_mode = exynos4_tick_set_mode;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 450;

	/* The local tick advances once per (TICK_BASE_CNT + 1) input clocks. */
	clockevents_calc_mult_shift(evt, clk_rate / (TICK_BASE_CNT + 1), 5);
	evt->max_delta_ns =
		clockevent_delta2ns(0x7fffffff, evt);
	evt->min_delta_ns =
		clockevent_delta2ns(0xf, evt);

	clockevents_register_device(evt);

	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);

	if (mct_int_type == MCT_INT_SPI) {
		/* NOTE(review): only cpus 0 and 1 are handled on the SPI
		 * path -- presumably fine for dual-core EXYNOS4210;
		 * confirm before reuse on SoCs with more cores. */
		if (cpu == 0) {
			mct_tick0_event_irq.dev_id = mevt;
			evt->irq = EXYNOS4_IRQ_MCT_L0;
			setup_irq(EXYNOS4_IRQ_MCT_L0, &mct_tick0_event_irq);
		} else {
			mct_tick1_event_irq.dev_id = mevt;
			evt->irq = EXYNOS4_IRQ_MCT_L1;
			setup_irq(EXYNOS4_IRQ_MCT_L1, &mct_tick1_event_irq);
			irq_set_affinity(EXYNOS4_IRQ_MCT_L1, cpumask_of(1));
		}
	} else {
		/* PPI was requested in exynos4_timer_resources(); just
		 * enable it on this cpu. */
		enable_percpu_irq(EXYNOS_IRQ_MCT_LOCALTIMER, 0);
	}
}
429
430 /* Setup the local clock events for a CPU */
/* Setup the local clock events for a CPU */
int __cpuinit local_timer_setup(struct clock_event_device *evt)
{
	exynos4_mct_tick_init(evt);

	return 0;	/* always succeeds */
}
437
438 void local_timer_stop(struct clock_event_device *evt)
439 {
440         unsigned int cpu = smp_processor_id();
441         evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
442         if (mct_int_type == MCT_INT_SPI)
443                 if (cpu == 0)
444                         remove_irq(evt->irq, &mct_tick0_event_irq);
445                 else
446                         remove_irq(evt->irq, &mct_tick1_event_irq);
447         else
448                 disable_percpu_irq(EXYNOS_IRQ_MCT_LOCALTIMER);
449 }
450 #endif /* CONFIG_LOCAL_TIMERS */
451
452 static void __init exynos4_timer_resources(void)
453 {
454         struct clk *mct_clk;
455         mct_clk = clk_get(NULL, "xtal");
456
457         clk_rate = clk_get_rate(mct_clk);
458
459 #ifdef CONFIG_LOCAL_TIMERS
460         if (mct_int_type == MCT_INT_PPI) {
461                 int err;
462
463                 err = request_percpu_irq(EXYNOS_IRQ_MCT_LOCALTIMER,
464                                          exynos4_mct_tick_isr, "MCT",
465                                          &percpu_mct_tick);
466                 WARN(err, "MCT: can't request IRQ %d (%d)\n",
467                      EXYNOS_IRQ_MCT_LOCALTIMER, err);
468         }
469 #endif /* CONFIG_LOCAL_TIMERS */
470 }
471
472 static void __init exynos4_timer_init(void)
473 {
474         if (soc_is_exynos4210())
475                 mct_int_type = MCT_INT_SPI;
476         else
477                 mct_int_type = MCT_INT_PPI;
478
479         exynos4_timer_resources();
480         exynos4_clocksource_init();
481         exynos4_clockevent_init();
482 }
483
/* Machine-level sys_timer descriptor; referenced by the board/mach
 * definitions to start the MCT at boot. */
struct sys_timer exynos4_timer = {
	.init		= exynos4_timer_init,
};