return 0;
}
-#ifdef CONFIG_LOCAL_TIMERS
static int rk_timer_set_next_event(unsigned long cycles, struct clock_event_device *ce)
{
return rk_timer_do_set_next_event(cycles, __get_cpu_var(ce_timer).base);
}
-#endif
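/* the broadcast variants below program the shared bc_timer instead of this CPU's ce_timer */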
static int rk_timer_broadcast_set_next_event(unsigned long cycles, struct clock_event_device *ce)
{
return rk_timer_do_set_next_event(cycles, bc_timer.base);
}
-#ifdef CONFIG_LOCAL_TIMERS
static void rk_timer_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
{
rk_timer_do_set_mode(mode, __get_cpu_var(ce_timer).base);
}
-#endif
static void rk_timer_broadcast_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
{
rk_timer_do_set_mode(mode, bc_timer.base);
}

/* handler shape assumed from the surviving body line: an irq handler forwarding to the common rk_timer_interrupt() */
static irqreturn_t rk_timer_broadcast_interrupt(int irq, void *dev_id)
{
return rk_timer_interrupt(bc_timer.base, dev_id);
}
-#ifdef CONFIG_LOCAL_TIMERS
static __cpuinit int rk_timer_init_clockevent(struct clock_event_device *ce, unsigned int cpu)
{
struct ce_timer *timer = &per_cpu(ce_timer, cpu);
struct irqaction *irq = &timer->irq; /* assumed field name: the irqaction embedded in struct ce_timer */
irq_set_affinity(irq->irq, cpumask_of(cpu));
setup_irq(irq->irq, irq);
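/* note: the change below halves max_delta to 0x7FFFFFFF, presumably so the programmed delta always fits in a positive signed 32-bit value */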
- clockevents_config_and_register(ce, 24000000, 0xF, 0xFFFFFFFF);
+ clockevents_config_and_register(ce, 24000000, 0xF, 0x7FFFFFFF);
return 0;
}
-#endif
static __init void rk_timer_init_broadcast(struct device_node *np)
{
/* ... broadcast timer setup elided ... */
}

#ifdef CONFIG_LOCAL_TIMERS
static struct local_timer_ops rk_local_timer_ops = {
.setup = rk_local_timer_setup,
.stop = rk_local_timer_stop,
};
+#else
+static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
+
+static int __init rk_timer_init_percpu_clockevent(unsigned int cpu)
+{
+ struct clock_event_device *ce = &per_cpu(percpu_clockevent, cpu);
+
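+ /* a high rating makes the clockevent core prefer this per-CPU device over lower-rated ones (e.g. the broadcast timer) */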
+ ce->rating = 500;
+ return rk_timer_init_clockevent(ce, cpu);
+}
#endif
static cycle_t rk_timer_read(struct clocksource *cs)
{
/* ... counter read elided ... */
}

/* ... later, in the DT init path: */
if (of_property_read_u32(np, "rockchip,percpu", &val) == 0) {
#ifdef CONFIG_LOCAL_TIMERS
local_timer_register(&rk_local_timer_ops);
#endif
rk_timer_init_ce_timer(np, val);
+#ifndef CONFIG_LOCAL_TIMERS
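+ /* without local timer support, register a per-CPU clockevent as the fallback */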
+ rk_timer_init_percpu_clockevent(val);
+#endif
} else if (of_property_read_u32(np, "rockchip,clocksource", &val) == 0 && val) {
u32 count_up = 0;
of_property_read_u32(np, "rockchip,count-up", &count_up);