/*
 * Copyright (C) 2013-2014 ROCKCHIP, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
9 #include <linux/init.h>
10 #include <linux/time.h>
11 #include <linux/interrupt.h>
12 #include <linux/irq.h>
13 #include <linux/clk.h>
14 #include <linux/clockchips.h>
15 #include <linux/delay.h>
16 #include <linux/percpu.h>
18 #include <linux/of_irq.h>
19 #include <linux/of_address.h>
21 #ifdef CONFIG_LOCAL_TIMERS
22 #include <asm/localtimer.h>
25 #include <asm/sched_clock.h>
/* Base name used for timer, irqaction and clocksource identifiers. */
#define TIMER_NAME			"rk_timer"

/* Register offsets of one Rockchip timer instance. */
#define TIMER_LOAD_COUNT0		0x00
#define TIMER_LOAD_COUNT1		0x04
#define TIMER_CURRENT_VALUE0		0x08
#define TIMER_CURRENT_VALUE1		0x0c
#define TIMER_CONTROL_REG		0x10
#define TIMER_INT_STATUS		0x18

/* TIMER_CONTROL_REG bit fields. */
#define TIMER_DISABLE			(0 << 0)
#define TIMER_ENABLE			(1 << 0)
#define TIMER_MODE_FREE_RUNNING		(0 << 1)
#define TIMER_MODE_USER_DEFINED_COUNT	(1 << 1)
#define TIMER_INT_MASK			(0 << 2)
#define TIMER_INT_UNMASK		(1 << 2)
59 struct clock_event_device ce;
67 static struct cs_timer cs_timer;
68 static DEFINE_PER_CPU(struct ce_timer, ce_timer);
69 static struct bc_timer bc_timer;
71 static inline void rk_timer_disable(void __iomem *base)
73 writel_relaxed(TIMER_DISABLE, base + TIMER_CONTROL_REG);
77 static inline void rk_timer_enable(void __iomem *base, u32 flags)
79 writel_relaxed(TIMER_ENABLE | flags, base + TIMER_CONTROL_REG);
83 static inline u32 rk_timer_read_current_value(void __iomem *base)
85 return readl_relaxed(base + TIMER_CURRENT_VALUE0);
88 static inline u64 rk_timer_read_current_value64(void __iomem *base)
93 upper = readl_relaxed(base + TIMER_CURRENT_VALUE1);
94 lower = readl_relaxed(base + TIMER_CURRENT_VALUE0);
95 } while (upper != readl_relaxed(base + TIMER_CURRENT_VALUE1));
97 return ((u64) upper << 32) + lower;
100 static inline int rk_timer_do_set_next_event(unsigned long cycles, void __iomem *base)
102 rk_timer_disable(base);
103 writel_relaxed(cycles, base + TIMER_LOAD_COUNT0);
104 writel_relaxed(0, base + TIMER_LOAD_COUNT1);
106 rk_timer_enable(base, TIMER_MODE_USER_DEFINED_COUNT | TIMER_INT_UNMASK);
110 #ifdef CONFIG_LOCAL_TIMERS
111 static int rk_timer_set_next_event(unsigned long cycles, struct clock_event_device *ce)
113 return rk_timer_do_set_next_event(cycles, __get_cpu_var(ce_timer).base);
117 static int rk_timer_broadcast_set_next_event(unsigned long cycles, struct clock_event_device *ce)
119 return rk_timer_do_set_next_event(cycles, bc_timer.base);
122 static inline void rk_timer_do_set_mode(enum clock_event_mode mode, void __iomem *base)
125 case CLOCK_EVT_MODE_PERIODIC:
126 rk_timer_disable(base);
127 writel_relaxed(24000000 / HZ - 1, base + TIMER_LOAD_COUNT0);
129 rk_timer_enable(base, TIMER_MODE_FREE_RUNNING | TIMER_INT_UNMASK);
130 case CLOCK_EVT_MODE_RESUME:
131 case CLOCK_EVT_MODE_ONESHOT:
133 case CLOCK_EVT_MODE_UNUSED:
134 case CLOCK_EVT_MODE_SHUTDOWN:
135 rk_timer_disable(base);
140 #ifdef CONFIG_LOCAL_TIMERS
141 static void rk_timer_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
143 rk_timer_do_set_mode(mode, __get_cpu_var(ce_timer).base);
147 static void rk_timer_broadcast_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
149 rk_timer_do_set_mode(mode, bc_timer.base);
152 static inline irqreturn_t rk_timer_interrupt(void __iomem *base, struct clock_event_device *ce)
154 /* clear interrupt */
155 writel_relaxed(1, base + TIMER_INT_STATUS);
156 if (ce->mode == CLOCK_EVT_MODE_ONESHOT) {
157 writel_relaxed(TIMER_DISABLE, base + TIMER_CONTROL_REG);
161 ce->event_handler(ce);
166 static irqreturn_t rk_timer_clockevent_interrupt(int irq, void *dev_id)
168 return rk_timer_interrupt(__get_cpu_var(ce_timer).base, dev_id);
171 static irqreturn_t rk_timer_broadcast_interrupt(int irq, void *dev_id)
173 return rk_timer_interrupt(bc_timer.base, dev_id);
176 #ifdef CONFIG_LOCAL_TIMERS
177 static __cpuinit int rk_timer_init_clockevent(struct clock_event_device *ce, unsigned int cpu)
179 struct ce_timer *timer = &per_cpu(ce_timer, cpu);
180 struct irqaction *irq = &timer->irq;
181 void __iomem *base = timer->base;
186 ce->name = timer->name;
187 ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
188 ce->set_next_event = rk_timer_set_next_event;
189 ce->set_mode = rk_timer_set_mode;
191 ce->cpumask = cpumask_of(cpu);
193 writel_relaxed(1, base + TIMER_INT_STATUS);
194 rk_timer_disable(base);
197 irq_set_affinity(irq->irq, cpumask_of(cpu));
198 setup_irq(irq->irq, irq);
200 clockevents_config_and_register(ce, 24000000, 0xF, 0xFFFFFFFF);
206 static __init void rk_timer_init_broadcast(struct device_node *np)
208 struct bc_timer *timer = &bc_timer;
209 struct irqaction *irq = &timer->irq;
210 struct clock_event_device *ce = &timer->ce;
213 base = of_iomap(np, 0);
218 snprintf(timer->name, sizeof(timer->name), TIMER_NAME);
219 irq->irq = irq_of_parse_and_map(np, 0);
220 irq->name = timer->name;
221 irq->flags = IRQF_TIMER | IRQF_NOBALANCING;
222 irq->handler = rk_timer_broadcast_interrupt;
224 ce->name = timer->name;
225 ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
226 ce->set_next_event = rk_timer_broadcast_set_next_event;
227 ce->set_mode = rk_timer_broadcast_set_mode;
229 ce->cpumask = cpumask_of(0);
232 writel_relaxed(1, base + TIMER_INT_STATUS);
233 rk_timer_disable(base);
236 setup_irq(irq->irq, irq);
238 clockevents_config_and_register(ce, 24000000, 0xF, 0xFFFFFFFF);
241 #ifdef CONFIG_LOCAL_TIMERS
242 static int __cpuinit rk_local_timer_setup(struct clock_event_device *ce)
245 return rk_timer_init_clockevent(ce, smp_processor_id());
248 static void rk_local_timer_stop(struct clock_event_device *ce)
250 ce->set_mode(CLOCK_EVT_MODE_UNUSED, ce);
251 remove_irq(ce->irq, &per_cpu(ce_timer, smp_processor_id()).irq);
254 static struct local_timer_ops rk_local_timer_ops __cpuinitdata = {
255 .setup = rk_local_timer_setup,
256 .stop = rk_local_timer_stop,
260 static cycle_t rk_timer_read(struct clocksource *cs)
262 return ~rk_timer_read_current_value(cs_timer.base);
265 static cycle_t rk_timer_read_up(struct clocksource *cs)
267 return rk_timer_read_current_value(cs_timer.base);
270 static struct clocksource rk_timer_clocksource = {
273 .read = rk_timer_read,
274 .mask = CLOCKSOURCE_MASK(32),
275 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
278 static void __init rk_timer_init_clocksource(struct device_node *np)
281 struct clocksource *cs = &rk_timer_clocksource;
283 base = of_iomap(np, 0);
284 cs_timer.base = base;
286 rk_timer_disable(base);
287 writel_relaxed(0xFFFFFFFF, base + TIMER_LOAD_COUNT0);
288 writel_relaxed(0xFFFFFFFF, base + TIMER_LOAD_COUNT1);
290 rk_timer_enable(base, TIMER_MODE_FREE_RUNNING | TIMER_INT_MASK);
291 clocksource_register_hz(cs, 24000000);
295 static u32 rockchip_read_sched_clock(void)
297 return ~rk_timer_read_current_value(cs_timer.base);
300 static u32 rockchip_read_sched_clock_up(void)
302 return rk_timer_read_current_value(cs_timer.base);
306 static void __init rk_timer_init_ce_timer(struct device_node *np, unsigned int cpu)
308 struct ce_timer *timer = &per_cpu(ce_timer, cpu);
309 struct irqaction *irq = &timer->irq;
311 timer->base = of_iomap(np, 0);
312 snprintf(timer->name, sizeof(timer->name), TIMER_NAME "%d", cpu);
313 irq->irq = irq_of_parse_and_map(np, 0);
314 irq->name = timer->name;
315 irq->flags = IRQF_TIMER | IRQF_NOBALANCING;
316 irq->handler = rk_timer_clockevent_interrupt;
320 static struct delay_timer rk_delay_timer = {
321 .read_current_timer = (unsigned long (*)(void))rockchip_read_sched_clock,
326 static void __init rk_timer_init(struct device_node *np)
329 if (of_property_read_u32(np, "rockchip,percpu", &val) == 0) {
330 #ifdef CONFIG_LOCAL_TIMERS
331 local_timer_register(&rk_local_timer_ops);
333 rk_timer_init_ce_timer(np, val);
334 } else if (of_property_read_u32(np, "rockchip,clocksource", &val) == 0 && val) {
336 of_property_read_u32(np, "rockchip,count-up", &count_up);
338 rk_timer_clocksource.read = rk_timer_read_up;
340 rk_delay_timer.read_current_timer = (unsigned long (*)(void))rockchip_read_sched_clock_up;
343 rk_timer_init_clocksource(np);
347 setup_sched_clock(rockchip_read_sched_clock_up, 32, 24000000);
349 setup_sched_clock(rockchip_read_sched_clock, 32, 24000000);
350 register_current_timer_delay(&rk_delay_timer);
353 } else if (of_property_read_u32(np, "rockchip,broadcast", &val) == 0 && val) {
354 rk_timer_init_broadcast(np);
/* Probe hook: DT nodes with compatible = "rockchip,timer" are initialised by rk_timer_init(). */
CLOCKSOURCE_OF_DECLARE(rk_timer, "rockchip,timer", rk_timer_init);