/*
* OMAP L3 Interconnect error handling driver
*
- * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* Sricharan <r.sricharan@ti.com>
*
}
static const struct of_device_id l3_noc_match[] = {
- {.compatible = "ti,omap4-l3-noc", .data = &omap_l3_data},
+ {.compatible = "ti,omap4-l3-noc", .data = &omap4_l3_data},
+ {.compatible = "ti,omap5-l3-noc", .data = &omap5_l3_data},
{.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data},
{.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data},
{},
return ret;
}
- -#ifdef CONFIG_PM
+ +#ifdef CONFIG_PM_SLEEP
/**
* l3_resume_noirq() - resume function for l3_noc
}
/*
 * Device PM callbacks for the omap_l3_noc driver.
 *
 * NOTE(review): this span is a diff fragment — the '-'/'+' prefixed lines
 * show old vs. new forms.  The change replaces the open-coded
 * .resume_noirq assignment with SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL,
 * l3_resume_noirq), which populates the noirq resume callback only when
 * CONFIG_PM_SLEEP is set (matching the CONFIG_PM -> CONFIG_PM_SLEEP
 * guard change earlier in this patch) — TODO confirm against the final
 * applied tree.
 */
static const struct dev_pm_ops l3_dev_pm_ops = {
- - .resume_noirq = l3_resume_noirq,
+ + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, l3_resume_noirq)
};
#define L3_DEV_PM_OPS (&l3_dev_pm_ops)
return -ENODEV;
/* Find lowest-power state that supports long-term idle */
-- for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
++ for (i = drv->state_count - 1; i >= 0; i--)
if (drv->states[i].enter_dead)
return drv->states[i].enter_dead(dev, i);
}
static int find_deepest_state(struct cpuidle_driver *drv,
-- struct cpuidle_device *dev, bool freeze)
++ struct cpuidle_device *dev,
++ unsigned int max_latency,
++ unsigned int forbidden_flags,
++ bool freeze)
{
unsigned int latency_req = 0;
-- int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;
++ int i, ret = -ENXIO;
-- for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
++ for (i = 0; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
struct cpuidle_state_usage *su = &dev->states_usage[i];
if (s->disabled || su->disable || s->exit_latency <= latency_req
++ || s->exit_latency > max_latency
++ || (s->flags & forbidden_flags)
|| (freeze && !s->enter_freeze))
continue;
return ret;
}
+ +#ifdef CONFIG_SUSPEND
/**
* cpuidle_find_deepest_state - Find the deepest available idle state.
* @drv: cpuidle driver for the given CPU.
/*
 * NOTE(review): diff fragment — the '++' line reflects the new
 * find_deepest_state() signature, which gained max_latency and
 * forbidden_flags parameters (see the hunk above): UINT_MAX means no
 * latency cap, 0 means no state flags are excluded, false means this is
 * not a suspend-to-idle (freeze) request.
 */
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
-- return find_deepest_state(drv, dev, false);
++ return find_deepest_state(drv, dev, UINT_MAX, 0, false);
}
static void enter_freeze_proper(struct cpuidle_driver *drv,
* that interrupts won't be enabled when it exits and allows the tick to
* be frozen safely.
*/
-- index = find_deepest_state(drv, dev, true);
++ index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
if (index >= 0)
enter_freeze_proper(drv, dev, index);
return index;
}
+ +#endif /* CONFIG_SUSPEND */
/**
* cpuidle_enter_state - enter the state and update stats
* @dev: cpuidle device for this cpu
* @drv: cpuidle driver for this cpu
-- * @next_state: index into drv->states of the state to enter
++ * @index: index into the states table in @drv of the state to enter
*/
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
int index)
* local timer will be shut down. If a local timer is used from another
* CPU as a broadcast timer, this call may fail if it is not available.
*/
-- if (broadcast && tick_broadcast_enter())
-- return -EBUSY;
++ if (broadcast && tick_broadcast_enter()) {
++ index = find_deepest_state(drv, dev, target_state->exit_latency,
++ CPUIDLE_FLAG_TIMER_STOP, false);
++ if (index < 0) {
++ default_idle_call();
++ return -EBUSY;
++ }
++ target_state = &drv->states[index];
++ }
++
++ /* Take note of the planned idle state. */
++ sched_idle_set_state(target_state);
trace_cpu_idle_rcuidle(index, dev->cpu);
time_start = ktime_get();
time_end = ktime_get();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
++ /* The cpu is no longer idle or about to enter idle. */
++ sched_idle_set_state(NULL);
++
if (broadcast) {
if (WARN_ON_ONCE(!irqs_disabled()))
local_irq_disable();
*/
/*
 * NOTE(review): diff fragment — the '++' line adds an "index >= 0"
 * guard so the governor's reflect() hook is skipped when the previous
 * state entry reported an error (negative index), e.g. the -EBUSY /
 * fallback path added to cpuidle_enter_state() in this same patch.
 */
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
-- if (cpuidle_curr_governor->reflect)
++ if (cpuidle_curr_governor->reflect && index >= 0)
cpuidle_curr_governor->reflect(dev, index);
}
extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_play_dead(void);
- -extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
- - struct cpuidle_device *dev);
- -extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
- - struct cpuidle_device *dev);
extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
#else
{return -ENODEV; }
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
static inline int cpuidle_play_dead(void) {return -ENODEV; }
+ +static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
+ + struct cpuidle_device *dev) {return NULL; }
+ +#endif
+ +
+ +#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
+ +extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+ + struct cpuidle_device *dev);
+ +extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+ + struct cpuidle_device *dev);
+ +#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
- -static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
- - struct cpuidle_device *dev) {return NULL; }
#endif
++/* kernel/sched/idle.c */
++extern void sched_idle_set_state(struct cpuidle_state *idle_state);
++extern void default_idle_call(void);
++
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
#else