cpufreq: cpufreq_interactive: avoid NULL pointer access
[firefly-linux-kernel-4.4.55.git] / drivers / cpufreq / cpufreq_interactive.c
index 6c4b6a895e0db24a1f0736c3faacbec758586cbc..055e990a2f1db035cbbff273fccefe8c0dfd13c8 100644 (file)
@@ -19,6 +19,9 @@
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/cpufreq.h>
+#ifdef CONFIG_ARCH_ROCKCHIP
+#include <linux/input.h>
+#endif
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/rwsem.h>
@@ -47,9 +50,10 @@ struct cpufreq_interactive_cpuinfo {
        spinlock_t target_freq_lock; /*protects target freq */
        unsigned int target_freq;
        unsigned int floor_freq;
-       unsigned int max_freq;
-       u64 floor_validate_time;
-       u64 hispeed_validate_time;
+       u64 pol_floor_val_time; /* policy floor_validate_time */
+       u64 loc_floor_val_time; /* per-cpu floor_validate_time */
+       u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
+       u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
        struct rw_semaphore enable_sem;
        int governor_enabled;
 };
@@ -105,6 +109,15 @@ struct cpufreq_interactive_tunables {
        int boostpulse_duration_val;
        /* End time of boost pulse in ktime converted to usecs */
        u64 boostpulse_endtime;
+#ifdef CONFIG_ARCH_ROCKCHIP
+       /* Frequency to which a touch boost takes the cpus to */
+       unsigned long touchboost_freq;
+       /* Duration of a touchboost pulse in usecs */
+       int touchboostpulse_duration_val;
+       /* End time of touchboost pulse in ktime converted to usecs */
+       u64 touchboostpulse_endtime;
+#endif
+       bool boosted;
        /*
         * Max additional time to wait in idle, beyond timer_rate, at speeds
         * above minimum before wakeup to reduce speed, or -1 if unnecessary.
@@ -115,7 +128,7 @@ struct cpufreq_interactive_tunables {
 };
 
 /* For cases where we have single governor instance for system */
-struct cpufreq_interactive_tunables *common_tunables;
+static struct cpufreq_interactive_tunables *common_tunables;
 
 static struct attribute_group *get_sysfs_attr(void);
 
@@ -344,7 +357,7 @@ static void cpufreq_interactive_timer(unsigned long data)
        unsigned int loadadjfreq;
        unsigned int index;
        unsigned long flags;
-       bool boosted;
+       u64 max_fvtime;
 
        if (!down_read_trylock(&pcpu->enable_sem))
                return;
@@ -363,11 +376,11 @@ static void cpufreq_interactive_timer(unsigned long data)
        spin_lock_irqsave(&pcpu->target_freq_lock, flags);
        do_div(cputime_speedadj, delta_time);
        loadadjfreq = (unsigned int)cputime_speedadj * 100;
-       cpu_load = loadadjfreq / pcpu->target_freq;
-       boosted = tunables->boost_val || now < tunables->boostpulse_endtime;
+       cpu_load = loadadjfreq / pcpu->policy->cur;
+       tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;
 
-       if (cpu_load >= tunables->go_hispeed_load || boosted) {
-               if (pcpu->target_freq < tunables->hispeed_freq) {
+       if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
+               if (pcpu->policy->cur < tunables->hispeed_freq) {
                        new_freq = tunables->hispeed_freq;
                } else {
                        new_freq = choose_freq(pcpu, loadadjfreq);
@@ -377,12 +390,20 @@ static void cpufreq_interactive_timer(unsigned long data)
                }
        } else {
                new_freq = choose_freq(pcpu, loadadjfreq);
+               if (new_freq > tunables->hispeed_freq &&
+                               pcpu->policy->cur < tunables->hispeed_freq)
+                       new_freq = tunables->hispeed_freq;
        }
-
-       if (pcpu->target_freq >= tunables->hispeed_freq &&
-           new_freq > pcpu->target_freq &&
-           now - pcpu->hispeed_validate_time <
-           freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
+#ifdef CONFIG_ARCH_ROCKCHIP
+       if ((now < tunables->touchboostpulse_endtime) &&
+           (new_freq < tunables->touchboost_freq)) {
+               new_freq = tunables->touchboost_freq;
+       }
+#endif
+       if (pcpu->policy->cur >= tunables->hispeed_freq &&
+           new_freq > pcpu->policy->cur &&
+           now - pcpu->pol_hispeed_val_time <
+           freq_to_above_hispeed_delay(tunables, pcpu->policy->cur)) {
                trace_cpufreq_interactive_notyet(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
@@ -390,7 +411,7 @@ static void cpufreq_interactive_timer(unsigned long data)
                goto rearm;
        }
 
-       pcpu->hispeed_validate_time = now;
+       pcpu->loc_hispeed_val_time = now;
 
        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
@@ -405,9 +426,10 @@ static void cpufreq_interactive_timer(unsigned long data)
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
-       if (new_freq < pcpu->floor_freq) {
-               if (now - pcpu->floor_validate_time <
-                               tunables->min_sample_time) {
+       max_fvtime = max(pcpu->pol_floor_val_time, pcpu->loc_floor_val_time);
+       if (new_freq < pcpu->floor_freq &&
+           pcpu->target_freq >= pcpu->policy->cur) {
+               if (now - max_fvtime < tunables->min_sample_time) {
                        trace_cpufreq_interactive_notyet(
                                data, cpu_load, pcpu->target_freq,
                                pcpu->policy->cur, new_freq);
@@ -424,17 +446,20 @@ static void cpufreq_interactive_timer(unsigned long data)
         * (or the indefinite boost is turned off).
         */
 
-       if (!boosted || new_freq > tunables->hispeed_freq) {
+       if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
                pcpu->floor_freq = new_freq;
-               pcpu->floor_validate_time = now;
+               if (pcpu->target_freq >= pcpu->policy->cur ||
+                   new_freq >= pcpu->policy->cur)
+                       pcpu->loc_floor_val_time = now;
        }
 
-       if (pcpu->target_freq == new_freq) {
+       if (pcpu->target_freq == new_freq &&
+                       pcpu->target_freq <= pcpu->policy->cur) {
                trace_cpufreq_interactive_already(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
-               goto rearm_if_notmax;
+               goto rearm;
        }
 
        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
@@ -447,14 +472,6 @@ static void cpufreq_interactive_timer(unsigned long data)
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);
 
-rearm_if_notmax:
-       /*
-        * Already set max speed and don't see a need to change that,
-        * wait until next idle to re-evaluate, don't need timer.
-        */
-       if (pcpu->target_freq == pcpu->policy->max)
-               goto exit;
-
 rearm:
        if (!timer_pending(&pcpu->cpu_timer))
                cpufreq_interactive_timer_resched(pcpu);
@@ -464,37 +481,6 @@ exit:
        return;
 }
 
-static void cpufreq_interactive_idle_start(void)
-{
-       struct cpufreq_interactive_cpuinfo *pcpu =
-               &per_cpu(cpuinfo, smp_processor_id());
-       int pending;
-
-       if (!down_read_trylock(&pcpu->enable_sem))
-               return;
-       if (!pcpu->governor_enabled) {
-               up_read(&pcpu->enable_sem);
-               return;
-       }
-
-       pending = timer_pending(&pcpu->cpu_timer);
-
-       if (pcpu->target_freq != pcpu->policy->min) {
-               /*
-                * Entering idle while not at lowest speed.  On some
-                * platforms this can hold the other CPU(s) at that speed
-                * even though the CPU is idle. Set a timer to re-evaluate
-                * speed so this idle CPU doesn't hold the other CPUs above
-                * min indefinitely.  This should probably be a quirk of
-                * the CPUFreq driver.
-                */
-               if (!pending)
-                       cpufreq_interactive_timer_resched(pcpu);
-       }
-
-       up_read(&pcpu->enable_sem);
-}
-
 static void cpufreq_interactive_idle_end(void)
 {
        struct cpufreq_interactive_cpuinfo *pcpu =
@@ -519,6 +505,58 @@ static void cpufreq_interactive_idle_end(void)
        up_read(&pcpu->enable_sem);
 }
 
+static void cpufreq_interactive_get_policy_info(struct cpufreq_policy *policy,
+                                               unsigned int *pmax_freq,
+                                               u64 *phvt, u64 *pfvt)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu;
+       unsigned int max_freq = 0;
+       u64 hvt = ~0ULL, fvt = 0;
+       unsigned int i;
+
+       for_each_cpu(i, policy->cpus) {
+               pcpu = &per_cpu(cpuinfo, i);
+
+               fvt = max(fvt, pcpu->loc_floor_val_time);
+               if (pcpu->target_freq > max_freq) {
+                       max_freq = pcpu->target_freq;
+                       hvt = pcpu->loc_hispeed_val_time;
+               } else if (pcpu->target_freq == max_freq) {
+                       hvt = min(hvt, pcpu->loc_hispeed_val_time);
+               }
+       }
+
+       *pmax_freq = max_freq;
+       *phvt = hvt;
+       *pfvt = fvt;
+}
+
+static void cpufreq_interactive_adjust_cpu(unsigned int cpu,
+                                          struct cpufreq_policy *policy)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu;
+       u64 hvt, fvt;
+       unsigned int max_freq;
+       int i;
+
+       cpufreq_interactive_get_policy_info(policy, &max_freq, &hvt, &fvt);
+
+       for_each_cpu(i, policy->cpus) {
+               pcpu = &per_cpu(cpuinfo, i);
+               pcpu->pol_floor_val_time = fvt;
+       }
+
+       if (max_freq != policy->cur) {
+               __cpufreq_driver_target(policy, max_freq, CPUFREQ_RELATION_H);
+               for_each_cpu(i, policy->cpus) {
+                       pcpu = &per_cpu(cpuinfo, i);
+                       pcpu->pol_hispeed_val_time = hvt;
+               }
+       }
+
+       trace_cpufreq_interactive_setspeed(cpu, max_freq, policy->cur);
+}
+
 static int cpufreq_interactive_speedchange_task(void *data)
 {
        unsigned int cpu;
@@ -547,71 +585,62 @@ static int cpufreq_interactive_speedchange_task(void *data)
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
 
                for_each_cpu(cpu, &tmp_mask) {
-                       unsigned int j;
-                       unsigned int max_freq = 0;
-
                        pcpu = &per_cpu(cpuinfo, cpu);
-                       if (!down_read_trylock(&pcpu->enable_sem))
-                               continue;
-                       if (!pcpu->governor_enabled) {
-                               up_read(&pcpu->enable_sem);
-                               continue;
-                       }
 
-                       for_each_cpu(j, pcpu->policy->cpus) {
-                               struct cpufreq_interactive_cpuinfo *pjcpu =
-                                       &per_cpu(cpuinfo, j);
+                       down_write(&pcpu->policy->rwsem);
 
-                               if (pjcpu->target_freq > max_freq)
-                                       max_freq = pjcpu->target_freq;
+                       if (likely(down_read_trylock(&pcpu->enable_sem))) {
+                               if (likely(pcpu->governor_enabled))
+                                       cpufreq_interactive_adjust_cpu(cpu,
+                                                       pcpu->policy);
+                               up_read(&pcpu->enable_sem);
                        }
 
-                       if (max_freq != pcpu->policy->cur)
-                               __cpufreq_driver_target(pcpu->policy,
-                                                       max_freq,
-                                                       CPUFREQ_RELATION_H);
-                       trace_cpufreq_interactive_setspeed(cpu,
-                                                    pcpu->target_freq,
-                                                    pcpu->policy->cur);
-
-                       up_read(&pcpu->enable_sem);
+                       up_write(&pcpu->policy->rwsem);
                }
        }
 
        return 0;
 }
 
-static void cpufreq_interactive_boost(void)
+static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
 {
        int i;
        int anyboost = 0;
        unsigned long flags[2];
        struct cpufreq_interactive_cpuinfo *pcpu;
-       struct cpufreq_interactive_tunables *tunables;
+
+       tunables->boosted = true;
 
        spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
 
        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
-               tunables = pcpu->policy->governor_data;
+
+               if (!down_read_trylock(&pcpu->enable_sem))
+                       continue;
+
+               if (!pcpu->governor_enabled) {
+                       up_read(&pcpu->enable_sem);
+                       continue;
+               }
+
+               if (tunables != pcpu->policy->governor_data) {
+                       up_read(&pcpu->enable_sem);
+                       continue;
+               }
 
                spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
                if (pcpu->target_freq < tunables->hispeed_freq) {
                        pcpu->target_freq = tunables->hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
-                       pcpu->hispeed_validate_time =
+                       pcpu->pol_hispeed_val_time =
                                ktime_to_us(ktime_get());
                        anyboost = 1;
                }
-
-               /*
-                * Set floor freq and (re)start timer for when last
-                * validated.
-                */
-
-               pcpu->floor_freq = tunables->hispeed_freq;
-               pcpu->floor_validate_time = ktime_to_us(ktime_get());
                spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
+
+               up_read(&pcpu->enable_sem);
        }
 
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
@@ -802,7 +831,7 @@ static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
        int ret;
        long unsigned int val;
 
-       ret = strict_strtoul(buf, 0, &val);
+       ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->hispeed_freq = val;
@@ -821,7 +850,7 @@ static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
        int ret;
        unsigned long val;
 
-       ret = strict_strtoul(buf, 0, &val);
+       ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->go_hispeed_load = val;
@@ -840,7 +869,7 @@ static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
        int ret;
        unsigned long val;
 
-       ret = strict_strtoul(buf, 0, &val);
+       ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->min_sample_time = val;
@@ -857,12 +886,18 @@ static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
 {
        int ret;
-       unsigned long val;
+       unsigned long val, val_round;
 
-       ret = strict_strtoul(buf, 0, &val);
+       ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
-       tunables->timer_rate = val;
+
+       val_round = jiffies_to_usecs(usecs_to_jiffies(val));
+       if (val != val_round)
+               pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
+                       val_round);
+
+       tunables->timer_rate = val_round;
        return count;
 }
 
@@ -906,7 +941,8 @@ static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
 
        if (tunables->boost_val) {
                trace_cpufreq_interactive_boost("on");
-               cpufreq_interactive_boost();
+               if (!tunables->boosted)
+                       cpufreq_interactive_boost(tunables);
        } else {
                tunables->boostpulse_endtime = ktime_to_us(ktime_get());
                trace_cpufreq_interactive_unboost("off");
@@ -928,7 +964,8 @@ static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
        tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
                tunables->boostpulse_duration_val;
        trace_cpufreq_interactive_boost("pulse");
-       cpufreq_interactive_boost();
+       if (!tunables->boosted)
+               cpufreq_interactive_boost(tunables);
        return count;
 }
 
@@ -1102,14 +1139,8 @@ static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
 {
-       switch (val) {
-       case IDLE_START:
-               cpufreq_interactive_idle_start();
-               break;
-       case IDLE_END:
+       if (val == IDLE_END)
                cpufreq_interactive_idle_end();
-               break;
-       }
 
        return 0;
 }
@@ -1118,6 +1149,150 @@ static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
 };
 
+#ifdef CONFIG_ARCH_ROCKCHIP
+static void cpufreq_interactive_input_event(struct input_handle *handle,
+                                           unsigned int type,
+                                           unsigned int code,
+                                           int value)
+{
+       u64 now, endtime;
+       int i;
+       int anyboost = 0;
+       unsigned long flags[2];
+       struct cpufreq_interactive_cpuinfo *pcpu;
+       struct cpufreq_interactive_tunables *tunables;
+
+       if ((type != EV_ABS) && (type != EV_KEY))
+               return;
+
+       trace_cpufreq_interactive_boost("touch");
+       spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
+
+       now = ktime_to_us(ktime_get());
+       for_each_online_cpu(i) {
+               pcpu = &per_cpu(cpuinfo, i);
+               if (!pcpu->policy)
+                       continue;
+
+               if (have_governor_per_policy())
+                       tunables = pcpu->policy->governor_data;
+               else
+                       tunables = common_tunables;
+               if (!tunables)
+                       continue;
+
+               endtime = now + tunables->touchboostpulse_duration_val;
+               if (endtime < (tunables->touchboostpulse_endtime +
+                              10 * USEC_PER_MSEC))
+                       continue;
+               tunables->touchboostpulse_endtime = endtime;
+
+               spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
+               if (pcpu->target_freq < tunables->touchboost_freq) {
+                       pcpu->target_freq = tunables->touchboost_freq;
+                       cpumask_set_cpu(i, &speedchange_cpumask);
+                       pcpu->loc_hispeed_val_time =
+                                       ktime_to_us(ktime_get());
+                       anyboost = 1;
+               }
+
+               pcpu->floor_freq = tunables->touchboost_freq;
+               pcpu->loc_floor_val_time = ktime_to_us(ktime_get());
+
+               spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
+       }
+
+       spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
+
+       if (anyboost)
+               wake_up_process(speedchange_task);
+}
+
+static int cpufreq_interactive_input_connect(struct input_handler *handler,
+                                            struct input_dev *dev,
+                                            const struct input_device_id *id)
+{
+       struct input_handle *handle;
+       int error;
+
+       handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+       if (!handle)
+               return -ENOMEM;
+
+       handle->dev = dev;
+       handle->handler = handler;
+       handle->name = "cpufreq";
+
+       error = input_register_handle(handle);
+       if (error)
+               goto err2;
+
+       error = input_open_device(handle);
+       if (error)
+               goto err1;
+
+       return 0;
+err1:
+       input_unregister_handle(handle);
+err2:
+       kfree(handle);
+       return error;
+}
+
+static void cpufreq_interactive_input_disconnect(struct input_handle *handle)
+{
+       input_close_device(handle);
+       input_unregister_handle(handle);
+       kfree(handle);
+}
+
+static const struct input_device_id cpufreq_interactive_ids[] = {
+       {
+               .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+                       INPUT_DEVICE_ID_MATCH_ABSBIT,
+               .evbit = { BIT_MASK(EV_ABS) },
+               .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+                       BIT_MASK(ABS_MT_POSITION_X) |
+                       BIT_MASK(ABS_MT_POSITION_Y) },
+       },
+       {
+               .flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
+                       INPUT_DEVICE_ID_MATCH_ABSBIT,
+               .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+               .absbit = { [BIT_WORD(ABS_X)] =
+                       BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
+       },
+       {
+               .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+               .evbit = { BIT_MASK(EV_KEY) },
+       },
+       { },
+};
+
+static struct input_handler cpufreq_interactive_input_handler = {
+       .event          = cpufreq_interactive_input_event,
+       .connect        = cpufreq_interactive_input_connect,
+       .disconnect     = cpufreq_interactive_input_disconnect,
+       .name           = "cpufreq_interactive",
+       .id_table       = cpufreq_interactive_ids,
+};
+
+static void rockchip_cpufreq_policy_init(struct cpufreq_policy *policy)
+{
+       struct cpufreq_interactive_tunables *tunables = policy->governor_data;
+
+       tunables->min_sample_time = 40 * USEC_PER_MSEC;
+       tunables->boostpulse_duration_val = 40 * USEC_PER_MSEC;
+       if (policy->cpu == 0) {
+               tunables->hispeed_freq = 1008000;
+               tunables->touchboostpulse_duration_val = 500 * USEC_PER_MSEC;
+               tunables->touchboost_freq = 1200000;
+       } else {
+               tunables->hispeed_freq = 816000;
+       }
+}
+#endif
+
 static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
 {
@@ -1169,16 +1344,20 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                policy->governor_data = tunables;
                if (!have_governor_per_policy()) {
                        common_tunables = tunables;
-                       WARN_ON(cpufreq_get_global_kobject());
                }
 
+#ifdef CONFIG_ARCH_ROCKCHIP
+               rockchip_cpufreq_policy_init(policy);
+#endif
+
                rc = sysfs_create_group(get_governor_parent_kobj(policy),
                                get_sysfs_attr());
                if (rc) {
                        kfree(tunables);
                        policy->governor_data = NULL;
-                       if (!have_governor_per_policy())
+                       if (!have_governor_per_policy()) {
                                common_tunables = NULL;
+                       }
                        return rc;
                }
 
@@ -1186,6 +1365,10 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                        idle_notifier_register(&cpufreq_interactive_idle_nb);
                        cpufreq_register_notifier(&cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
+#ifdef CONFIG_ARCH_ROCKCHIP
+                       rc = input_register_handler(&cpufreq_interactive_input_handler);
+#endif
+
                }
 
                break;
@@ -1193,6 +1376,9 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
        case CPUFREQ_GOV_POLICY_EXIT:
                if (!--tunables->usage_count) {
                        if (policy->governor->initialized == 1) {
+#ifdef CONFIG_ARCH_ROCKCHIP
+                               input_unregister_handler(&cpufreq_interactive_input_handler);
+#endif
                                cpufreq_unregister_notifier(&cpufreq_notifier_block,
                                                CPUFREQ_TRANSITION_NOTIFIER);
                                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
@@ -1201,9 +1387,6 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                        sysfs_remove_group(get_governor_parent_kobj(policy),
                                        get_sysfs_attr());
 
-                       if (!have_governor_per_policy())
-                               cpufreq_put_global_kobject();
-
                        kfree(tunables);
                        common_tunables = NULL;
                }
@@ -1224,11 +1407,11 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->floor_freq = pcpu->target_freq;
-                       pcpu->floor_validate_time =
+                       pcpu->pol_floor_val_time =
                                ktime_to_us(ktime_get());
-                       pcpu->hispeed_validate_time =
-                               pcpu->floor_validate_time;
-                       pcpu->max_freq = policy->max;
+                       pcpu->loc_floor_val_time = pcpu->pol_floor_val_time;
+                       pcpu->pol_hispeed_val_time = pcpu->pol_floor_val_time;
+                       pcpu->loc_hispeed_val_time = pcpu->pol_floor_val_time;
                        down_write(&pcpu->enable_sem);
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
@@ -1278,23 +1461,6 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
 
                        spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                        up_read(&pcpu->enable_sem);
-
-                       /* Reschedule timer only if policy->max is raised.
-                        * Delete the timers, else the timer callback may
-                        * return without re-arm the timer when failed
-                        * acquire the semaphore. This race may cause timer
-                        * stopped unexpectedly.
-                        */
-
-                       if (policy->max > pcpu->max_freq) {
-                               down_write(&pcpu->enable_sem);
-                               del_timer_sync(&pcpu->cpu_timer);
-                               del_timer_sync(&pcpu->cpu_slack_timer);
-                               cpufreq_interactive_timer_start(tunables, j);
-                               up_write(&pcpu->enable_sem);
-                       }
-
-                       pcpu->max_freq = policy->max;
                }
                break;
        }