/*
 * Copyright (C) 2013-2014 ROCKCHIP, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
9 #include <linux/init.h>
10 #include <linux/time.h>
11 #include <linux/interrupt.h>
12 #include <linux/irq.h>
13 #include <linux/clk.h>
14 #include <linux/clockchips.h>
15 #include <linux/delay.h>
16 #include <linux/percpu.h>
18 #include <linux/of_irq.h>
19 #include <linux/of_address.h>
21 #ifdef CONFIG_LOCAL_TIMERS
22 #include <asm/localtimer.h>
25 #include <asm/sched_clock.h>
/* Name used for the clocksource, the clockevent devices and their irqactions. */
#define TIMER_NAME "rk_timer"

/* Register map of one timer channel (64-bit counter split across two words). */
#define TIMER_LOAD_COUNT0	0x00	/* reload value, low 32 bits */
#define TIMER_LOAD_COUNT1	0x04	/* reload value, high 32 bits */
#define TIMER_CURRENT_VALUE0	0x08	/* current count, low 32 bits */
#define TIMER_CURRENT_VALUE1	0x0c	/* current count, high 32 bits */
#define TIMER_CONTROL_REG	0x10
#define TIMER_INT_STATUS	0x18	/* write 1 to acknowledge */

/* TIMER_CONTROL_REG bit fields. */
#define TIMER_DISABLE			(0 << 0)
#define TIMER_ENABLE			(1 << 0)
#define TIMER_MODE_FREE_RUNNING		(0 << 1)	/* wraps and reloads */
#define TIMER_MODE_USER_DEFINED_COUNT	(1 << 1)	/* one-shot count-down */
#define TIMER_INT_MASK			(0 << 2)
#define TIMER_INT_UNMASK		(1 << 2)
59 struct clock_event_device ce;
67 static struct cs_timer cs_timer;
68 static DEFINE_PER_CPU(struct ce_timer, ce_timer);
69 static struct bc_timer bc_timer;
71 static inline void rk_timer_disable(void __iomem *base)
73 writel_relaxed(TIMER_DISABLE, base + TIMER_CONTROL_REG);
77 static inline void rk_timer_enable(void __iomem *base, u32 flags)
79 writel_relaxed(TIMER_ENABLE | flags, base + TIMER_CONTROL_REG);
83 static inline u32 rk_timer_read_current_value(void __iomem *base)
85 return readl_relaxed(base + TIMER_CURRENT_VALUE0);
88 static inline u64 rk_timer_read_current_value64(void __iomem *base)
93 upper = readl_relaxed(base + TIMER_CURRENT_VALUE1);
94 lower = readl_relaxed(base + TIMER_CURRENT_VALUE0);
95 } while (upper != readl_relaxed(base + TIMER_CURRENT_VALUE1));
97 return ((u64) upper << 32) + lower;
100 static inline int rk_timer_do_set_next_event(unsigned long cycles, void __iomem *base)
102 rk_timer_disable(base);
103 writel_relaxed(cycles, base + TIMER_LOAD_COUNT0);
104 writel_relaxed(0, base + TIMER_LOAD_COUNT1);
106 rk_timer_enable(base, TIMER_MODE_USER_DEFINED_COUNT | TIMER_INT_UNMASK);
110 static int rk_timer_set_next_event(unsigned long cycles, struct clock_event_device *ce)
112 return rk_timer_do_set_next_event(cycles, __get_cpu_var(ce_timer).base);
115 static int rk_timer_broadcast_set_next_event(unsigned long cycles, struct clock_event_device *ce)
117 return rk_timer_do_set_next_event(cycles, bc_timer.base);
120 static inline void rk_timer_do_set_mode(enum clock_event_mode mode, void __iomem *base)
123 case CLOCK_EVT_MODE_PERIODIC:
124 rk_timer_disable(base);
125 writel_relaxed(24000000 / HZ - 1, base + TIMER_LOAD_COUNT0);
127 rk_timer_enable(base, TIMER_MODE_FREE_RUNNING | TIMER_INT_UNMASK);
128 case CLOCK_EVT_MODE_RESUME:
129 case CLOCK_EVT_MODE_ONESHOT:
131 case CLOCK_EVT_MODE_UNUSED:
132 case CLOCK_EVT_MODE_SHUTDOWN:
133 rk_timer_disable(base);
138 static void rk_timer_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
140 rk_timer_do_set_mode(mode, __get_cpu_var(ce_timer).base);
143 static void rk_timer_broadcast_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
145 rk_timer_do_set_mode(mode, bc_timer.base);
148 static inline irqreturn_t rk_timer_interrupt(void __iomem *base, struct clock_event_device *ce)
150 /* clear interrupt */
151 writel_relaxed(1, base + TIMER_INT_STATUS);
152 if (ce->mode == CLOCK_EVT_MODE_ONESHOT) {
153 writel_relaxed(TIMER_DISABLE, base + TIMER_CONTROL_REG);
157 ce->event_handler(ce);
162 static irqreturn_t rk_timer_clockevent_interrupt(int irq, void *dev_id)
164 return rk_timer_interrupt(__get_cpu_var(ce_timer).base, dev_id);
167 static irqreturn_t rk_timer_broadcast_interrupt(int irq, void *dev_id)
169 return rk_timer_interrupt(bc_timer.base, dev_id);
172 static __cpuinit int rk_timer_init_clockevent(struct clock_event_device *ce, unsigned int cpu)
174 struct ce_timer *timer = &per_cpu(ce_timer, cpu);
175 struct irqaction *irq = &timer->irq;
176 void __iomem *base = timer->base;
181 ce->name = timer->name;
182 ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
183 ce->set_next_event = rk_timer_set_next_event;
184 ce->set_mode = rk_timer_set_mode;
186 ce->cpumask = cpumask_of(cpu);
188 writel_relaxed(1, base + TIMER_INT_STATUS);
189 rk_timer_disable(base);
192 irq_set_affinity(irq->irq, cpumask_of(cpu));
193 setup_irq(irq->irq, irq);
195 clockevents_config_and_register(ce, 24000000, 0xF, 0x7FFFFFFF);
200 static __init void rk_timer_init_broadcast(struct device_node *np)
202 struct bc_timer *timer = &bc_timer;
203 struct irqaction *irq = &timer->irq;
204 struct clock_event_device *ce = &timer->ce;
207 base = of_iomap(np, 0);
212 snprintf(timer->name, sizeof(timer->name), TIMER_NAME);
213 irq->irq = irq_of_parse_and_map(np, 0);
214 irq->name = timer->name;
215 irq->flags = IRQF_TIMER | IRQF_NOBALANCING;
216 irq->handler = rk_timer_broadcast_interrupt;
218 ce->name = timer->name;
219 ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
220 ce->set_next_event = rk_timer_broadcast_set_next_event;
221 ce->set_mode = rk_timer_broadcast_set_mode;
223 ce->cpumask = cpumask_of(0);
226 writel_relaxed(1, base + TIMER_INT_STATUS);
227 rk_timer_disable(base);
230 setup_irq(irq->irq, irq);
232 clockevents_config_and_register(ce, 24000000, 0xF, 0xFFFFFFFF);
235 #ifdef CONFIG_LOCAL_TIMERS
236 static int __cpuinit rk_local_timer_setup(struct clock_event_device *ce)
239 return rk_timer_init_clockevent(ce, smp_processor_id());
242 static void rk_local_timer_stop(struct clock_event_device *ce)
244 ce->set_mode(CLOCK_EVT_MODE_UNUSED, ce);
245 remove_irq(ce->irq, &per_cpu(ce_timer, smp_processor_id()).irq);
248 static struct local_timer_ops rk_local_timer_ops __cpuinitdata = {
249 .setup = rk_local_timer_setup,
250 .stop = rk_local_timer_stop,
253 static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
255 static int __init rk_timer_init_percpu_clockevent(unsigned int cpu)
257 struct clock_event_device *ce = &per_cpu(percpu_clockevent, cpu);
260 return rk_timer_init_clockevent(ce, cpu);
264 static cycle_t rk_timer_read(struct clocksource *cs)
266 return ~rk_timer_read_current_value(cs_timer.base);
269 static cycle_t rk_timer_read_up(struct clocksource *cs)
271 return rk_timer_read_current_value(cs_timer.base);
274 static struct clocksource rk_timer_clocksource = {
277 .read = rk_timer_read,
278 .mask = CLOCKSOURCE_MASK(32),
279 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
282 static void __init rk_timer_init_clocksource(struct device_node *np)
285 struct clocksource *cs = &rk_timer_clocksource;
287 base = of_iomap(np, 0);
288 cs_timer.base = base;
290 rk_timer_disable(base);
291 writel_relaxed(0xFFFFFFFF, base + TIMER_LOAD_COUNT0);
292 writel_relaxed(0xFFFFFFFF, base + TIMER_LOAD_COUNT1);
294 rk_timer_enable(base, TIMER_MODE_FREE_RUNNING | TIMER_INT_MASK);
295 clocksource_register_hz(cs, 24000000);
299 static u32 rockchip_read_sched_clock(void)
301 return ~rk_timer_read_current_value(cs_timer.base);
304 static u32 rockchip_read_sched_clock_up(void)
306 return rk_timer_read_current_value(cs_timer.base);
310 static void __init rk_timer_init_ce_timer(struct device_node *np, unsigned int cpu)
312 struct ce_timer *timer = &per_cpu(ce_timer, cpu);
313 struct irqaction *irq = &timer->irq;
315 timer->base = of_iomap(np, 0);
316 snprintf(timer->name, sizeof(timer->name), TIMER_NAME "%d", cpu);
317 irq->irq = irq_of_parse_and_map(np, 0);
318 irq->name = timer->name;
319 irq->flags = IRQF_TIMER | IRQF_NOBALANCING;
320 irq->handler = rk_timer_clockevent_interrupt;
324 static struct delay_timer rk_delay_timer = {
325 .read_current_timer = (unsigned long (*)(void))rockchip_read_sched_clock,
330 static void __init rk_timer_init(struct device_node *np)
333 if (of_property_read_u32(np, "rockchip,percpu", &val) == 0) {
334 #ifdef CONFIG_LOCAL_TIMERS
335 local_timer_register(&rk_local_timer_ops);
338 rk_timer_init_ce_timer(np, val);
339 #ifndef CONFIG_LOCAL_TIMERS
340 rk_timer_init_percpu_clockevent(val);
342 } else if (of_property_read_u32(np, "rockchip,clocksource", &val) == 0 && val) {
344 of_property_read_u32(np, "rockchip,count-up", &count_up);
346 rk_timer_clocksource.read = rk_timer_read_up;
348 rk_delay_timer.read_current_timer = (unsigned long (*)(void))rockchip_read_sched_clock_up;
351 rk_timer_init_clocksource(np);
355 setup_sched_clock(rockchip_read_sched_clock_up, 32, 24000000);
357 setup_sched_clock(rockchip_read_sched_clock, 32, 24000000);
358 register_current_timer_delay(&rk_delay_timer);
361 } else if (of_property_read_u32(np, "rockchip,broadcast", &val) == 0 && val) {
362 rk_timer_init_broadcast(np);
365 CLOCKSOURCE_OF_DECLARE(rk_timer, "rockchip,timer", rk_timer_init);