cpufreq: cpufreq_interactive: avoid NULL pointer access
[firefly-linux-kernel-4.4.55.git] drivers/cpufreq/cpufreq_governor.c
index dc9b72e25c1ab66c429fb65e9cb1d410ef4ee908..d994b0f652d32320c525801d140e23b2d4607329 100644
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <asm/cputime.h>
-#include <linux/cpufreq.h>
-#include <linux/cpumask.h>
 #include <linux/export.h>
 #include <linux/kernel_stat.h>
-#include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/tick.h>
-#include <linux/types.h>
-#include <linux/workqueue.h>
-#include <linux/cpu.h>
 
 #include "cpufreq_governor.h"
 
-static struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
-{
-       if (have_governor_per_policy())
-               return &policy->kobj;
-       else
-               return cpufreq_global_kobject;
-}
-
 static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
 {
        if (have_governor_per_policy())
@@ -46,61 +30,39 @@ static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
                return dbs_data->cdata->attr_group_gov_sys;
 }
 
-static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
-{
-       u64 idle_time;
-       u64 cur_wall_time;
-       u64 busy_time;
-
-       cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-
-       busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
-       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
-       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
-       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
-       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
-       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
-
-       idle_time = cur_wall_time - busy_time;
-       if (wall)
-               *wall = cputime_to_usecs(cur_wall_time);
-
-       return cputime_to_usecs(idle_time);
-}
-
-u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
-{
-       u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
-
-       if (idle_time == -1ULL)
-               return get_cpu_idle_time_jiffy(cpu, wall);
-       else if (!io_busy)
-               idle_time += get_cpu_iowait_time_us(cpu, wall);
-
-       return idle_time;
-}
-EXPORT_SYMBOL_GPL(get_cpu_idle_time);
-
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 {
-       struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+       struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-       struct cpufreq_policy *policy;
+       struct cpufreq_policy *policy = cdbs->shared->policy;
+       unsigned int sampling_rate;
        unsigned int max_load = 0;
        unsigned int ignore_nice;
        unsigned int j;
 
-       if (dbs_data->cdata->governor == GOV_ONDEMAND)
-               ignore_nice = od_tuners->ignore_nice;
-       else
-               ignore_nice = cs_tuners->ignore_nice;
+       if (dbs_data->cdata->governor == GOV_ONDEMAND) {
+               struct od_cpu_dbs_info_s *od_dbs_info =
+                               dbs_data->cdata->get_cpu_dbs_info_s(cpu);
 
-       policy = cdbs->cur_policy;
+               /*
+                * The ondemand governor sometimes applies an additional
+                * multiplier to produce longer delays. Apply that multiplier
+                * to the 'sampling_rate' here as well, to keep the
+                * wake-up-from-idle detection logic a bit conservative.
+                */
+               sampling_rate = od_tuners->sampling_rate;
+               sampling_rate *= od_dbs_info->rate_mult;
+
+               ignore_nice = od_tuners->ignore_nice_load;
+       } else {
+               sampling_rate = cs_tuners->sampling_rate;
+               ignore_nice = cs_tuners->ignore_nice_load;
+       }
 
-       /* Get Absolute Load (in terms of freq for ondemand gov) */
+       /* Get Absolute Load */
        for_each_cpu(j, policy->cpus) {
-               struct cpu_dbs_common_info *j_cdbs;
+               struct cpu_dbs_info *j_cdbs;
                u64 cur_wall_time, cur_idle_time;
                unsigned int idle_time, wall_time;
                unsigned int load;
@@ -147,14 +109,45 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
                if (unlikely(!wall_time || wall_time < idle_time))
                        continue;
 
-               load = 100 * (wall_time - idle_time) / wall_time;
-
-               if (dbs_data->cdata->governor == GOV_ONDEMAND) {
-                       int freq_avg = __cpufreq_driver_getavg(policy, j);
-                       if (freq_avg <= 0)
-                               freq_avg = policy->cur;
+               /*
+                * If the CPU had gone completely idle, and a task just woke up
+                * on this CPU now, it would be unfair to calculate 'load' the
+                * usual way for this elapsed time-window, because it will show
+                * near-zero load, irrespective of how CPU intensive that task
+                * actually is. This is undesirable for latency-sensitive bursty
+                * workloads.
+                *
+                * To avoid this, we reuse the 'load' from the previous
+                * time-window and give this task a chance to start with a
+                * reasonably high CPU frequency. (However, we shouldn't over-do
+                * this copy, lest we get stuck at a high load (high frequency)
+                * for too long, even when the current system load has actually
+                * dropped down. So we perform the copy only once, upon the
+                * first wake-up from idle.)
+                *
+                * Detecting this situation is easy: the governor's deferrable
+                * timer would not have fired during CPU-idle periods. Hence
+                * an unusually large 'wall_time' (as compared to the sampling
+                * rate) indicates this scenario.
+                *
+                * prev_load can be zero in two cases and we must recalculate it
+                * for both cases:
+                * - during long idle intervals
+                * - explicitly set to zero
+                */
+               if (unlikely(wall_time > (2 * sampling_rate) &&
+                            j_cdbs->prev_load)) {
+                       load = j_cdbs->prev_load;
 
-                       load *= freq_avg;
+                       /*
+                        * Perform a destructive copy, to ensure that we copy
+                        * the previous load only once, upon the first wake-up
+                        * from idle.
+                        */
+                       j_cdbs->prev_load = 0;
+               } else {
+                       load = 100 * (wall_time - idle_time) / wall_time;
+                       j_cdbs->prev_load = load;
                }
 
                if (load > max_load)
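
For illustration, the copy-once logic above can be exercised in isolation. The following userspace sketch is not part of the patch (sample_load() and all numbers are made up); it shows that an unusually long wall_time window reuses the previous load exactly once before falling back to the measured load:

    #include <stdio.h>

    /* Model of the copy-once prev_load logic: reuse the previous load
     * exactly once after a long idle window, then resume measuring. */
    struct cpu_sample {
            unsigned int prev_load;
    };

    static unsigned int sample_load(struct cpu_sample *s,
                                    unsigned int wall_time,
                                    unsigned int idle_time,
                                    unsigned int sampling_rate)
    {
            unsigned int load;

            if (wall_time > 2 * sampling_rate && s->prev_load) {
                    load = s->prev_load;
                    s->prev_load = 0;       /* destructive copy: reuse once */
            } else {
                    load = 100 * (wall_time - idle_time) / wall_time;
                    s->prev_load = load;
            }
            return load;
    }

    int main(void)
    {
            struct cpu_sample s = { .prev_load = 80 };

            /* long window after idle: the previous load (80) is reused once */
            printf("%u\n", sample_load(&s, 500000, 499000, 100000));
            /* same window again: the measured (near-zero) load is used */
            printf("%u\n", sample_load(&s, 500000, 499000, 100000));
            return 0;
    }
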
@@ -168,9 +161,9 @@ EXPORT_SYMBOL_GPL(dbs_check_cpu);
 static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
                unsigned int delay)
 {
-       struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+       struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
 
-       mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
+       mod_delayed_work_on(cpu, system_wq, &cdbs->dwork, delay);
 }
 
 void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
@@ -179,12 +172,17 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
        int i;
 
        if (!all_cpus) {
-               __gov_queue_work(smp_processor_id(), dbs_data, delay);
+               /*
+                * Use raw_smp_processor_id() to avoid preemptible warnings.
+                * We know that this is only called with all_cpus == false from
+                * works that have been queued with *_work_on() functions;
+                * those works are canceled during CPU_DOWN_PREPARE, so they
+                * can't possibly run on any other CPU.
+                */
+               __gov_queue_work(raw_smp_processor_id(), dbs_data, delay);
        } else {
-               get_online_cpus();
                for_each_cpu(i, policy->cpus)
                        __gov_queue_work(i, dbs_data, delay);
-               put_online_cpus();
        }
 }
 EXPORT_SYMBOL_GPL(gov_queue_work);
@@ -192,33 +190,75 @@ EXPORT_SYMBOL_GPL(gov_queue_work);
 static inline void gov_cancel_work(struct dbs_data *dbs_data,
                struct cpufreq_policy *policy)
 {
-       struct cpu_dbs_common_info *cdbs;
+       struct cpu_dbs_info *cdbs;
        int i;
 
        for_each_cpu(i, policy->cpus) {
                cdbs = dbs_data->cdata->get_cpu_cdbs(i);
-               cancel_delayed_work_sync(&cdbs->work);
+               cancel_delayed_work_sync(&cdbs->dwork);
        }
 }
 
 /* Return true if we need to evaluate the CPU load again */
-bool need_load_eval(struct cpu_dbs_common_info *cdbs,
-               unsigned int sampling_rate)
+static bool need_load_eval(struct cpu_common_dbs_info *shared,
+                          unsigned int sampling_rate)
 {
-       if (policy_is_shared(cdbs->cur_policy)) {
+       if (policy_is_shared(shared->policy)) {
                ktime_t time_now = ktime_get();
-               s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);
+               s64 delta_us = ktime_us_delta(time_now, shared->time_stamp);
 
                /* Do nothing if we have sampled recently */
                if (delta_us < (s64)(sampling_rate / 2))
                        return false;
                else
-                       cdbs->time_stamp = time_now;
+                       shared->time_stamp = time_now;
        }
 
        return true;
 }
-EXPORT_SYMBOL_GPL(need_load_eval);
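
need_load_eval() is a simple rate limiter for shared policies: a sample is skipped unless at least half a sampling period has elapsed since the shared time stamp. A standalone sketch of just that check (the function name and microsecond values below are illustrative, not from the patch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Skip a sample unless half a sampling period has passed, then
     * advance the shared time stamp, mirroring need_load_eval(). */
    static bool need_eval(int64_t *time_stamp_us, int64_t now_us,
                          unsigned int sampling_rate_us)
    {
            int64_t delta_us = now_us - *time_stamp_us;

            if (delta_us < (int64_t)(sampling_rate_us / 2))
                    return false;

            *time_stamp_us = now_us;
            return true;
    }

    int main(void)
    {
            int64_t stamp = 0;

            /* 100 ms sampling rate: 40 ms in is too soon, 60 ms is not */
            printf("%d\n", need_eval(&stamp, 40000, 100000));  /* 0 */
            printf("%d\n", need_eval(&stamp, 60000, 100000));  /* 1 */
            return 0;
    }
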
+
+static void dbs_timer(struct work_struct *work)
+{
+       struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info,
+                                                dwork.work);
+       struct cpu_common_dbs_info *shared = cdbs->shared;
+       struct cpufreq_policy *policy;
+       struct dbs_data *dbs_data;
+       unsigned int sampling_rate, delay;
+       bool modify_all = true;
+
+       mutex_lock(&shared->timer_mutex);
+
+       policy = shared->policy;
+
+       /*
+        * The governor might already be disabled, in which case there is no
+        * point in continuing with the work-handler.
+        */
+       if (!policy)
+               goto unlock;
+
+       dbs_data = policy->governor_data;
+
+       if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+               struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+
+               sampling_rate = cs_tuners->sampling_rate;
+       } else {
+               struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+
+               sampling_rate = od_tuners->sampling_rate;
+       }
+
+       if (!need_load_eval(cdbs->shared, sampling_rate))
+               modify_all = false;
+
+       delay = dbs_data->cdata->gov_dbs_timer(cdbs, dbs_data, modify_all);
+       gov_queue_work(dbs_data, policy, delay, modify_all);
+
+unlock:
+       mutex_unlock(&shared->timer_mutex);
+}
 
 static void set_sampling_rate(struct dbs_data *dbs_data,
                unsigned int sampling_rate)
@@ -232,195 +272,303 @@ static void set_sampling_rate(struct dbs_data *dbs_data,
        }
 }
 
-int cpufreq_governor_dbs(struct cpufreq_policy *policy,
-               struct common_dbs_data *cdata, unsigned int event)
+static int alloc_common_dbs_info(struct cpufreq_policy *policy,
+                                struct common_dbs_data *cdata)
 {
-       struct dbs_data *dbs_data;
-       struct od_cpu_dbs_info_s *od_dbs_info = NULL;
-       struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
-       struct od_ops *od_ops = NULL;
-       struct od_dbs_tuners *od_tuners = NULL;
-       struct cs_dbs_tuners *cs_tuners = NULL;
-       struct cpu_dbs_common_info *cpu_cdbs;
-       unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
-       int io_busy = 0;
-       int rc;
+       struct cpu_common_dbs_info *shared;
+       int j;
 
-       if (have_governor_per_policy())
-               dbs_data = policy->governor_data;
-       else
-               dbs_data = cdata->gdbs_data;
+       /* Allocate memory for the common information for policy->cpus */
+       shared = kzalloc(sizeof(*shared), GFP_KERNEL);
+       if (!shared)
+               return -ENOMEM;
 
-       WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT));
+       /* Set shared for all CPUs, online+offline */
+       for_each_cpu(j, policy->related_cpus)
+               cdata->get_cpu_cdbs(j)->shared = shared;
 
-       switch (event) {
-       case CPUFREQ_GOV_POLICY_INIT:
-               if (have_governor_per_policy()) {
-                       WARN_ON(dbs_data);
-               } else if (dbs_data) {
-                       dbs_data->usage_count++;
-                       policy->governor_data = dbs_data;
-                       return 0;
-               }
+       return 0;
+}
 
-               dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
-               if (!dbs_data) {
-                       pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
-                       return -ENOMEM;
-               }
+static void free_common_dbs_info(struct cpufreq_policy *policy,
+                                struct common_dbs_data *cdata)
+{
+       struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
+       struct cpu_common_dbs_info *shared = cdbs->shared;
+       int j;
 
-               dbs_data->cdata = cdata;
-               dbs_data->usage_count = 1;
-               rc = cdata->init(dbs_data);
-               if (rc) {
-                       pr_err("%s: POLICY_INIT: init() failed\n", __func__);
-                       kfree(dbs_data);
-                       return rc;
-               }
+       for_each_cpu(j, policy->cpus)
+               cdata->get_cpu_cdbs(j)->shared = NULL;
 
-               rc = sysfs_create_group(get_governor_parent_kobj(policy),
-                               get_sysfs_attr(dbs_data));
-               if (rc) {
-                       cdata->exit(dbs_data);
-                       kfree(dbs_data);
-                       return rc;
-               }
+       kfree(shared);
+}
+
+static int cpufreq_governor_init(struct cpufreq_policy *policy,
+                                struct dbs_data *dbs_data,
+                                struct common_dbs_data *cdata)
+{
+       unsigned int latency;
+       int ret;
+
+       /* State should be equivalent to EXIT */
+       if (policy->governor_data)
+               return -EBUSY;
 
+       if (dbs_data) {
+               if (WARN_ON(have_governor_per_policy()))
+                       return -EINVAL;
+
+               ret = alloc_common_dbs_info(policy, cdata);
+               if (ret)
+                       return ret;
+
+               dbs_data->usage_count++;
                policy->governor_data = dbs_data;
+               return 0;
+       }
 
-               /* policy latency is in nS. Convert it to uS first */
-               latency = policy->cpuinfo.transition_latency / 1000;
-               if (latency == 0)
-                       latency = 1;
+       dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
+       if (!dbs_data)
+               return -ENOMEM;
 
-               /* Bring kernel and HW constraints together */
-               dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
-                               MIN_LATENCY_MULTIPLIER * latency);
-               set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
+       ret = alloc_common_dbs_info(policy, cdata);
+       if (ret)
+               goto free_dbs_data;
+
+       dbs_data->cdata = cdata;
+       dbs_data->usage_count = 1;
+
+       ret = cdata->init(dbs_data, !policy->governor->initialized);
+       if (ret)
+               goto free_common_dbs_info;
+
+       /* policy latency is in ns. Convert it to us first */
+       latency = policy->cpuinfo.transition_latency / 1000;
+       if (latency == 0)
+               latency = 1;
+
+       /* Bring kernel and HW constraints together */
+       dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
+                                         MIN_LATENCY_MULTIPLIER * latency);
+       set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
                                        latency * LATENCY_MULTIPLIER));
 
-               if ((cdata->governor == GOV_CONSERVATIVE) &&
-                               (!policy->governor->initialized)) {
-                       struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
+       if (!have_governor_per_policy())
+               cdata->gdbs_data = dbs_data;
 
-                       cpufreq_register_notifier(cs_ops->notifier_block,
-                                       CPUFREQ_TRANSITION_NOTIFIER);
-               }
+       policy->governor_data = dbs_data;
 
-               if (!have_governor_per_policy())
-                       cdata->gdbs_data = dbs_data;
+       ret = sysfs_create_group(get_governor_parent_kobj(policy),
+                                get_sysfs_attr(dbs_data));
+       if (ret)
+               goto reset_gdbs_data;
 
-               return 0;
-       case CPUFREQ_GOV_POLICY_EXIT:
-               if (!--dbs_data->usage_count) {
-                       sysfs_remove_group(get_governor_parent_kobj(policy),
-                                       get_sysfs_attr(dbs_data));
+       return 0;
 
-                       if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
-                               (policy->governor->initialized == 1)) {
-                               struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
+reset_gdbs_data:
+       policy->governor_data = NULL;
+
+       if (!have_governor_per_policy())
+               cdata->gdbs_data = NULL;
+       cdata->exit(dbs_data, !policy->governor->initialized);
+free_common_dbs_info:
+       free_common_dbs_info(policy, cdata);
+free_dbs_data:
+       kfree(dbs_data);
+       return ret;
+}
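
The error path above is the usual kernel unwind ladder: each label releases exactly what succeeded before the failing step, in reverse order. A userspace model of that shape (init_model(), fail_step and the chosen error codes are made up for illustration):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct ctx {
            void *dbs_data;
            void *shared;
    };

    /* Unwind ladder: a failure at step N jumps to the label that frees
     * everything steps 1..N-1 allocated, in reverse order. */
    static int init_model(struct ctx *c, int fail_step)
    {
            int ret;

            c->dbs_data = malloc(64);               /* step 1 */
            if (!c->dbs_data)
                    return -ENOMEM;

            c->shared = malloc(64);                 /* step 2 */
            if (!c->shared) {
                    ret = -ENOMEM;
                    goto free_dbs_data;
            }

            if (fail_step == 3) {                   /* step 3, e.g. sysfs group */
                    ret = -EINVAL;
                    goto free_shared;
            }
            return 0;

    free_shared:
            free(c->shared);
            c->shared = NULL;
    free_dbs_data:
            free(c->dbs_data);
            c->dbs_data = NULL;
            return ret;
    }

    int main(void)
    {
            struct ctx c;

            printf("ok: %d\n", init_model(&c, 0));          /* 0 */
            free(c.shared);
            free(c.dbs_data);
            printf("fail late: %d\n", init_model(&c, 3));   /* -EINVAL, fully unwound */
            return 0;
    }
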
 
-                               cpufreq_unregister_notifier(cs_ops->notifier_block,
-                                               CPUFREQ_TRANSITION_NOTIFIER);
-                       }
+static int cpufreq_governor_exit(struct cpufreq_policy *policy,
+                                struct dbs_data *dbs_data)
+{
+       struct common_dbs_data *cdata = dbs_data->cdata;
+       struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
 
-                       cdata->exit(dbs_data);
-                       kfree(dbs_data);
+       /* State should be equivalent to INIT */
+       if (!cdbs->shared || cdbs->shared->policy)
+               return -EBUSY;
+
+       if (!--dbs_data->usage_count) {
+               sysfs_remove_group(get_governor_parent_kobj(policy),
+                                  get_sysfs_attr(dbs_data));
+
+               policy->governor_data = NULL;
+
+               if (!have_governor_per_policy())
                        cdata->gdbs_data = NULL;
-               }
 
+               cdata->exit(dbs_data, policy->governor->initialized == 1);
+               kfree(dbs_data);
+       } else {
                policy->governor_data = NULL;
-               return 0;
        }
 
-       cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+       free_common_dbs_info(policy, cdata);
+       return 0;
+}
+
+static int cpufreq_governor_start(struct cpufreq_policy *policy,
+                                 struct dbs_data *dbs_data)
+{
+       struct common_dbs_data *cdata = dbs_data->cdata;
+       unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
+       struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
+       struct cpu_common_dbs_info *shared = cdbs->shared;
+       int io_busy = 0;
+
+       if (!policy->cur)
+               return -EINVAL;
+
+       /* State should be equivalent to INIT */
+       if (!shared || shared->policy)
+               return -EBUSY;
+
+       if (cdata->governor == GOV_CONSERVATIVE) {
+               struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 
-       if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
-               cs_tuners = dbs_data->tuners;
-               cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
                sampling_rate = cs_tuners->sampling_rate;
-               ignore_nice = cs_tuners->ignore_nice;
+               ignore_nice = cs_tuners->ignore_nice_load;
        } else {
-               od_tuners = dbs_data->tuners;
-               od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
+               struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+
                sampling_rate = od_tuners->sampling_rate;
-               ignore_nice = od_tuners->ignore_nice;
-               od_ops = dbs_data->cdata->gov_ops;
+               ignore_nice = od_tuners->ignore_nice_load;
                io_busy = od_tuners->io_is_busy;
        }
 
-       switch (event) {
-       case CPUFREQ_GOV_START:
-               if (!policy->cur)
-                       return -EINVAL;
+       shared->policy = policy;
+       shared->time_stamp = ktime_get();
+       mutex_init(&shared->timer_mutex);
 
-               mutex_lock(&dbs_data->mutex);
+       for_each_cpu(j, policy->cpus) {
+               struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
+               unsigned int prev_load;
 
-               for_each_cpu(j, policy->cpus) {
-                       struct cpu_dbs_common_info *j_cdbs =
-                               dbs_data->cdata->get_cpu_cdbs(j);
+               j_cdbs->prev_cpu_idle =
+                       get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
 
-                       j_cdbs->cpu = j;
-                       j_cdbs->cur_policy = policy;
-                       j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
-                                              &j_cdbs->prev_cpu_wall, io_busy);
-                       if (ignore_nice)
-                               j_cdbs->prev_cpu_nice =
-                                       kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+               prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
+                                           j_cdbs->prev_cpu_idle);
+               j_cdbs->prev_load = 100 * prev_load /
+                                   (unsigned int)j_cdbs->prev_cpu_wall;
 
-                       mutex_init(&j_cdbs->timer_mutex);
-                       INIT_DEFERRABLE_WORK(&j_cdbs->work,
-                                            dbs_data->cdata->gov_dbs_timer);
-               }
+               if (ignore_nice)
+                       j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
-               /*
-                * conservative does not implement micro like ondemand
-                * governor, thus we are bound to jiffes/HZ
-                */
-               if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
-                       cs_dbs_info->down_skip = 0;
-                       cs_dbs_info->enable = 1;
-                       cs_dbs_info->requested_freq = policy->cur;
-               } else {
-                       od_dbs_info->rate_mult = 1;
-                       od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
-                       od_ops->powersave_bias_init_cpu(cpu);
-               }
+               INIT_DEFERRABLE_WORK(&j_cdbs->dwork, dbs_timer);
+       }
 
-               mutex_unlock(&dbs_data->mutex);
+       if (cdata->governor == GOV_CONSERVATIVE) {
+               struct cs_cpu_dbs_info_s *cs_dbs_info =
+                       cdata->get_cpu_dbs_info_s(cpu);
 
-               /* Initiate timer time stamp */
-               cpu_cdbs->time_stamp = ktime_get();
+               cs_dbs_info->down_skip = 0;
+               cs_dbs_info->requested_freq = policy->cur;
+       } else {
+               struct od_ops *od_ops = cdata->gov_ops;
+               struct od_cpu_dbs_info_s *od_dbs_info = cdata->get_cpu_dbs_info_s(cpu);
 
-               gov_queue_work(dbs_data, policy,
-                               delay_for_sampling_rate(sampling_rate), true);
-               break;
+               od_dbs_info->rate_mult = 1;
+               od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+               od_ops->powersave_bias_init_cpu(cpu);
+       }
 
-       case CPUFREQ_GOV_STOP:
-               if (dbs_data->cdata->governor == GOV_CONSERVATIVE)
-                       cs_dbs_info->enable = 0;
+       gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate),
+                      true);
+       return 0;
+}
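
The start path seeds each CPU's prev_load from the cumulative wall and idle counters, so the very first sample already has a plausible previous load for the wake-from-idle path above. The same arithmetic, with made-up counter values:

    #include <stdio.h>

    int main(void)
    {
            /* made-up cumulative counters, in the same time unit */
            unsigned long long prev_cpu_wall = 1000000;
            unsigned long long prev_cpu_idle = 750000;

            unsigned int busy = (unsigned int)(prev_cpu_wall - prev_cpu_idle);
            unsigned int prev_load = 100 * busy / (unsigned int)prev_cpu_wall;

            printf("initial prev_load: %u%%\n", prev_load); /* 25% */
            return 0;
    }
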
 
-               gov_cancel_work(dbs_data, policy);
+static int cpufreq_governor_stop(struct cpufreq_policy *policy,
+                                struct dbs_data *dbs_data)
+{
+       struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(policy->cpu);
+       struct cpu_common_dbs_info *shared = cdbs->shared;
 
-               mutex_lock(&dbs_data->mutex);
-               mutex_destroy(&cpu_cdbs->timer_mutex);
+       /* State should be equivalent to START */
+       if (!shared || !shared->policy)
+               return -EBUSY;
 
-               mutex_unlock(&dbs_data->mutex);
+       /*
+        * The work-handler must see this update, as it should not proceed any
+        * further once the governor is disabled; timer_mutex is therefore
+        * taken while updating this value.
+        */
+       mutex_lock(&shared->timer_mutex);
+       shared->policy = NULL;
+       mutex_unlock(&shared->timer_mutex);
 
-               break;
+       gov_cancel_work(dbs_data, policy);
+
+       mutex_destroy(&shared->timer_mutex);
+       return 0;
+}
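
Together with the !policy check in dbs_timer() above, this ordering is what keeps the work handler from dereferencing a NULL policy during teardown: stop clears shared->policy under timer_mutex before canceling the works. A minimal pthread sketch of the guard pattern (the struct and function names are illustrative, not kernel code):

    #include <pthread.h>
    #include <stdio.h>

    /* The teardown path clears the shared pointer under the same mutex
     * the worker takes, so the worker can never dereference NULL. */
    struct shared_info {
            pthread_mutex_t lock;
            int *policy;
    };

    static void worker(struct shared_info *s)
    {
            pthread_mutex_lock(&s->lock);
            if (!s->policy) {               /* governor already stopped */
                    pthread_mutex_unlock(&s->lock);
                    return;
            }
            printf("policy: %d\n", *s->policy);
            pthread_mutex_unlock(&s->lock);
    }

    static void stop_governor(struct shared_info *s)
    {
            pthread_mutex_lock(&s->lock);
            s->policy = NULL;               /* worker must observe this */
            pthread_mutex_unlock(&s->lock);
    }

    int main(void)
    {
            int cur = 42;
            struct shared_info s = { PTHREAD_MUTEX_INITIALIZER, &cur };

            worker(&s);     /* prints 42 */
            stop_governor(&s);
            worker(&s);     /* bails out safely */
            return 0;
    }
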
+
+static int cpufreq_governor_limits(struct cpufreq_policy *policy,
+                                  struct dbs_data *dbs_data)
+{
+       struct common_dbs_data *cdata = dbs_data->cdata;
+       unsigned int cpu = policy->cpu;
+       struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
+
+       /* State should be equivalent to START */
+       if (!cdbs->shared || !cdbs->shared->policy)
+               return -EBUSY;
+
+       mutex_lock(&cdbs->shared->timer_mutex);
+       if (policy->max < cdbs->shared->policy->cur)
+               __cpufreq_driver_target(cdbs->shared->policy, policy->max,
+                                       CPUFREQ_RELATION_H);
+       else if (policy->min > cdbs->shared->policy->cur)
+               __cpufreq_driver_target(cdbs->shared->policy, policy->min,
+                                       CPUFREQ_RELATION_L);
+       dbs_check_cpu(dbs_data, cpu);
+       mutex_unlock(&cdbs->shared->timer_mutex);
+
+       return 0;
+}
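
cpufreq_governor_limits() simply pushes the current frequency back inside the new [min, max] bounds before re-evaluating the load. The clamp in isolation (clamp_freq() is an illustrative name; the kHz values are made up):

    #include <stdio.h>

    /* Push cur back inside [min, max], as the LIMITS handler does with
     * its CPUFREQ_RELATION_H / CPUFREQ_RELATION_L targets. */
    static unsigned int clamp_freq(unsigned int cur, unsigned int min,
                                   unsigned int max)
    {
            if (max < cur)
                    return max;
            if (min > cur)
                    return min;
            return cur;
    }

    int main(void)
    {
            printf("%u\n", clamp_freq(1800000, 400000, 1200000)); /* 1200000 */
            printf("%u\n", clamp_freq(200000, 400000, 1200000));  /* 400000 */
            printf("%u\n", clamp_freq(800000, 400000, 1200000));  /* 800000 */
            return 0;
    }
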
+
+int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+                        struct common_dbs_data *cdata, unsigned int event)
+{
+       struct dbs_data *dbs_data;
+       int ret;
 
+       /* Lock the governor to block concurrent initialization */
+       mutex_lock(&cdata->mutex);
+
+       if (have_governor_per_policy())
+               dbs_data = policy->governor_data;
+       else
+               dbs_data = cdata->gdbs_data;
+
+       if (!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)) {
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       switch (event) {
+       case CPUFREQ_GOV_POLICY_INIT:
+               ret = cpufreq_governor_init(policy, dbs_data, cdata);
+               break;
+       case CPUFREQ_GOV_POLICY_EXIT:
+               ret = cpufreq_governor_exit(policy, dbs_data);
+               break;
+       case CPUFREQ_GOV_START:
+               ret = cpufreq_governor_start(policy, dbs_data);
+               break;
+       case CPUFREQ_GOV_STOP:
+               ret = cpufreq_governor_stop(policy, dbs_data);
+               break;
        case CPUFREQ_GOV_LIMITS:
-               mutex_lock(&cpu_cdbs->timer_mutex);
-               if (policy->max < cpu_cdbs->cur_policy->cur)
-                       __cpufreq_driver_target(cpu_cdbs->cur_policy,
-                                       policy->max, CPUFREQ_RELATION_H);
-               else if (policy->min > cpu_cdbs->cur_policy->cur)
-                       __cpufreq_driver_target(cpu_cdbs->cur_policy,
-                                       policy->min, CPUFREQ_RELATION_L);
-               dbs_check_cpu(dbs_data, cpu);
-               mutex_unlock(&cpu_cdbs->timer_mutex);
+               ret = cpufreq_governor_limits(policy, dbs_data);
                break;
+       default:
+               ret = -EINVAL;
        }
-       return 0;
+
+unlock:
+       mutex_unlock(&cdata->mutex);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
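
For reference, a governor reaches this dispatcher through its ->governor callback; upstream ondemand, for example, wires in essentially like this (a sketch from the same kernel era, so treat names such as od_dbs_cdata as indicative rather than exact):

    /* How a governor forwards its events to cpufreq_governor_dbs();
     * od_dbs_cdata is the governor's common_dbs_data instance. */
    static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                       unsigned int event)
    {
            return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
    }

    struct cpufreq_governor cpufreq_gov_ondemand = {
            .name                   = "ondemand",
            .governor               = od_cpufreq_governor_dbs,
            .max_transition_latency = TRANSITION_LATENCY_LIMIT,
            .owner                  = THIS_MODULE,
    };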