cpufreq: interactive: pin timers to associated CPU
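
Convert the per-CPU sampling timer to a pinned timer: it is now started
on its own CPU with add_timer_on() and re-armed with mod_timer_pinned(),
so each CPU's load is always sampled on the CPU it describes.  With the
timer pinned, the cross-CPU race bookkeeping (timer_run_time, the idling
flag, and the idle_exit_time reset on cancel) is no longer needed and is
removed.  The diff below (184140a..e53eae2) also carries the related
governor rework visible in the hunks: the separate up-task/down-workqueue
scaling paths are merged into a single SCHED_FIFO speedchange kthread,
the touchscreen input-boost handler and its sysfs knob are dropped,
hispeed_freq becomes an unsigned int, the bump to hispeed_freq now
applies whenever the target frequency is below hispeed_freq rather than
only when at policy->min, and the non-burst target is scaled from
hispeed_freq instead of policy->max.
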
[firefly-linux-kernel-4.4.55.git] drivers/cpufreq/cpufreq_interactive.c
index 184140aabda25ae9838d19e0eb766ce33f3beacc..e53eae2214990186bef0026b8bec9fb09fca0f6d 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/kthread.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/input.h>
 #include <asm/cputime.h>
 
 #define CREATE_TRACE_POINTS
@@ -43,8 +42,6 @@ struct cpufreq_interactive_cpuinfo {
        int timer_idlecancel;
        u64 time_in_idle;
        u64 idle_exit_time;
-       u64 timer_run_time;
-       int idling;
        u64 target_set_time;
        u64 target_set_time_in_idle;
        struct cpufreq_policy *policy;
@@ -58,18 +55,13 @@ struct cpufreq_interactive_cpuinfo {
 
 static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
 
-/* Workqueues handle frequency scaling */
-static struct task_struct *up_task;
-static struct workqueue_struct *down_wq;
-static struct work_struct freq_scale_down_work;
-static cpumask_t up_cpumask;
-static spinlock_t up_cpumask_lock;
-static cpumask_t down_cpumask;
-static spinlock_t down_cpumask_lock;
-static struct mutex set_speed_lock;
+/* realtime thread handles frequency scaling */
+static struct task_struct *speedchange_task;
+static cpumask_t speedchange_cpumask;
+static spinlock_t speedchange_cpumask_lock;
 
 /* Hi speed to bump to from lo speed when load burst (default max) */
-static u64 hispeed_freq;
+static unsigned int hispeed_freq;
 
 /* Go to hi speed when CPU load at or above this value. */
 #define DEFAULT_GO_HISPEED_LOAD 85
@@ -94,19 +86,6 @@ static unsigned long timer_rate;
 #define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
 static unsigned long above_hispeed_delay_val;
 
-/*
- * Boost pulse to hispeed on touchscreen input.
- */
-
-static int input_boost_val;
-
-struct cpufreq_interactive_inputopen {
-       struct input_handle *handle;
-       struct work_struct inputopen_work;
-};
-
-static struct cpufreq_interactive_inputopen inputopen;
-
 /*
  * Non-zero means longer-term speed boost active.
  */
@@ -128,6 +107,7 @@ struct cpufreq_governor cpufreq_gov_interactive = {
 
 static void cpufreq_interactive_timer(unsigned long data)
 {
+       u64 now;
        unsigned int delta_idle;
        unsigned int delta_time;
        int cpu_load;
@@ -146,26 +126,11 @@ static void cpufreq_interactive_timer(unsigned long data)
        if (!pcpu->governor_enabled)
                goto exit;
 
-       /*
-        * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
-        * this lets idle exit know the current idle time sample has
-        * been processed, and idle exit can generate a new sample and
-        * re-arm the timer.  This prevents a concurrent idle
-        * exit on that CPU from writing a new set of info at the same time
-        * the timer function runs (the timer function can't use that info
-        * until more time passes).
-        */
        time_in_idle = pcpu->time_in_idle;
        idle_exit_time = pcpu->idle_exit_time;
-       now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
-       smp_wmb();
-
-       /* If we raced with cancelling a timer, skip. */
-       if (!idle_exit_time)
-               goto exit;
-
+       now_idle = get_cpu_idle_time_us(data, &now);
        delta_idle = (unsigned int)(now_idle - time_in_idle);
-       delta_time = (unsigned int)(pcpu->timer_run_time - idle_exit_time);
+       delta_time = (unsigned int)(now - idle_exit_time);
 
        /*
         * If timer ran less than 1ms after short-term sample started, retry.
@@ -179,8 +144,7 @@ static void cpufreq_interactive_timer(unsigned long data)
                cpu_load = 100 * (delta_time - delta_idle) / delta_time;
 
        delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
-       delta_time = (unsigned int)(pcpu->timer_run_time -
-                                   pcpu->target_set_time);
+       delta_time = (unsigned int)(now - pcpu->target_set_time);
 
        if ((delta_time == 0) || (delta_idle > delta_time))
                load_since_change = 0;
@@ -197,7 +161,8 @@ static void cpufreq_interactive_timer(unsigned long data)
                cpu_load = load_since_change;
 
        if (cpu_load >= go_hispeed_load || boost_val) {
-               if (pcpu->target_freq <= pcpu->policy->min) {
+               if (pcpu->target_freq < hispeed_freq &&
+                   hispeed_freq < pcpu->policy->max) {
                        new_freq = hispeed_freq;
                } else {
                        new_freq = pcpu->policy->max * cpu_load / 100;
@@ -207,7 +172,7 @@ static void cpufreq_interactive_timer(unsigned long data)
 
                        if (pcpu->target_freq == hispeed_freq &&
                            new_freq > hispeed_freq &&
-                           pcpu->timer_run_time - pcpu->hispeed_validate_time
+                           now - pcpu->hispeed_validate_time
                            < above_hispeed_delay_val) {
                                trace_cpufreq_interactive_notyet(data, cpu_load,
                                                                 pcpu->target_freq,
@@ -216,11 +181,11 @@ static void cpufreq_interactive_timer(unsigned long data)
                        }
                }
        } else {
-               new_freq = pcpu->policy->max * cpu_load / 100;
+               new_freq = hispeed_freq * cpu_load / 100;
        }
 
        if (new_freq <= hispeed_freq)
-               pcpu->hispeed_validate_time = pcpu->timer_run_time;
+               pcpu->hispeed_validate_time = now;
 
        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_H,
@@ -237,8 +202,7 @@ static void cpufreq_interactive_timer(unsigned long data)
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
-               if (pcpu->timer_run_time - pcpu->floor_validate_time
-                   < min_sample_time) {
+               if (now - pcpu->floor_validate_time < min_sample_time) {
                        trace_cpufreq_interactive_notyet(data, cpu_load,
                                         pcpu->target_freq, new_freq);
                        goto rearm;
@@ -246,7 +210,7 @@ static void cpufreq_interactive_timer(unsigned long data)
        }
 
        pcpu->floor_freq = new_freq;
-       pcpu->floor_validate_time = pcpu->timer_run_time;
+       pcpu->floor_validate_time = now;
 
        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(data, cpu_load,
@@ -257,21 +221,13 @@ static void cpufreq_interactive_timer(unsigned long data)
        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         new_freq);
        pcpu->target_set_time_in_idle = now_idle;
-       pcpu->target_set_time = pcpu->timer_run_time;
-
-       if (new_freq < pcpu->target_freq) {
-               pcpu->target_freq = new_freq;
-               spin_lock_irqsave(&down_cpumask_lock, flags);
-               cpumask_set_cpu(data, &down_cpumask);
-               spin_unlock_irqrestore(&down_cpumask_lock, flags);
-               queue_work(down_wq, &freq_scale_down_work);
-       } else {
-               pcpu->target_freq = new_freq;
-               spin_lock_irqsave(&up_cpumask_lock, flags);
-               cpumask_set_cpu(data, &up_cpumask);
-               spin_unlock_irqrestore(&up_cpumask_lock, flags);
-               wake_up_process(up_task);
-       }
+       pcpu->target_set_time = now;
+
+       pcpu->target_freq = new_freq;
+       spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+       cpumask_set_cpu(data, &speedchange_cpumask);
+       spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+       wake_up_process(speedchange_task);
 
 rearm_if_notmax:
        /*
@@ -284,23 +240,16 @@ rearm_if_notmax:
 rearm:
        if (!timer_pending(&pcpu->cpu_timer)) {
                /*
-                * If already at min: if that CPU is idle, don't set timer.
-                * Else cancel the timer if that CPU goes idle.  We don't
-                * need to re-evaluate speed until the next idle exit.
+                * If already at min, cancel the timer if that CPU goes idle.
+                * We don't need to re-evaluate speed until the next idle exit.
                 */
-               if (pcpu->target_freq == pcpu->policy->min) {
-                       smp_rmb();
-
-                       if (pcpu->idling)
-                               goto exit;
-
+               if (pcpu->target_freq == pcpu->policy->min)
                        pcpu->timer_idlecancel = 1;
-               }
 
                pcpu->time_in_idle = get_cpu_idle_time_us(
                        data, &pcpu->idle_exit_time);
-               mod_timer(&pcpu->cpu_timer,
-                         jiffies + usecs_to_jiffies(timer_rate));
+               mod_timer_pinned(&pcpu->cpu_timer,
+                                jiffies + usecs_to_jiffies(timer_rate));
        }
 
 exit:
@@ -316,8 +265,6 @@ static void cpufreq_interactive_idle_start(void)
        if (!pcpu->governor_enabled)
                return;
 
-       pcpu->idling = 1;
-       smp_wmb();
        pending = timer_pending(&pcpu->cpu_timer);
 
        if (pcpu->target_freq != pcpu->policy->min) {
@@ -334,8 +281,9 @@ static void cpufreq_interactive_idle_start(void)
                        pcpu->time_in_idle = get_cpu_idle_time_us(
                                smp_processor_id(), &pcpu->idle_exit_time);
                        pcpu->timer_idlecancel = 0;
-                       mod_timer(&pcpu->cpu_timer,
-                                 jiffies + usecs_to_jiffies(timer_rate));
+                       mod_timer_pinned(
+                               &pcpu->cpu_timer,
+                               jiffies + usecs_to_jiffies(timer_rate));
                }
 #endif
        } else {
@@ -347,12 +295,6 @@ static void cpufreq_interactive_idle_start(void)
                 */
                if (pending && pcpu->timer_idlecancel) {
                        del_timer(&pcpu->cpu_timer);
-                       /*
-                        * Ensure last timer run time is after current idle
-                        * sample start time, so next idle exit will always
-                        * start a new idle sampling period.
-                        */
-                       pcpu->idle_exit_time = 0;
                        pcpu->timer_idlecancel = 0;
                }
        }
@@ -367,34 +309,20 @@ static void cpufreq_interactive_idle_end(void)
        if (!pcpu->governor_enabled)
                return;
 
-       pcpu->idling = 0;
-       smp_wmb();
-
-       /*
-        * Arm the timer for 1-2 ticks later if not already, and if the timer
-        * function has already processed the previous load sampling
-        * interval.  (If the timer is not pending but has not processed
-        * the previous interval, it is probably racing with us on another
-        * CPU.  Let it compute load based on the previous sample and then
-        * re-arm the timer for another interval when it's done, rather
-        * than updating the interval start time to be "now", which doesn't
-        * give the timer function enough time to make a decision on this
-        * run.)
-        */
-       if (timer_pending(&pcpu->cpu_timer) == 0 &&
-           pcpu->timer_run_time >= pcpu->idle_exit_time &&
-           pcpu->governor_enabled) {
+       /* Arm the timer for 1-2 ticks later if not already. */
+       if (!timer_pending(&pcpu->cpu_timer)) {
                pcpu->time_in_idle =
                        get_cpu_idle_time_us(smp_processor_id(),
                                             &pcpu->idle_exit_time);
                pcpu->timer_idlecancel = 0;
-               mod_timer(&pcpu->cpu_timer,
-                         jiffies + usecs_to_jiffies(timer_rate));
+               mod_timer_pinned(
+                       &pcpu->cpu_timer,
+                       jiffies + usecs_to_jiffies(timer_rate));
        }
 
 }
 
-static int cpufreq_interactive_up_task(void *data)
+static int cpufreq_interactive_speedchange_task(void *data)
 {
        unsigned int cpu;
        cpumask_t tmp_mask;
@@ -403,22 +331,23 @@ static int cpufreq_interactive_up_task(void *data)
 
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
-               spin_lock_irqsave(&up_cpumask_lock, flags);
+               spin_lock_irqsave(&speedchange_cpumask_lock, flags);
 
-               if (cpumask_empty(&up_cpumask)) {
-                       spin_unlock_irqrestore(&up_cpumask_lock, flags);
+               if (cpumask_empty(&speedchange_cpumask)) {
+                       spin_unlock_irqrestore(&speedchange_cpumask_lock,
+                                              flags);
                        schedule();
 
                        if (kthread_should_stop())
                                break;
 
-                       spin_lock_irqsave(&up_cpumask_lock, flags);
+                       spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }
 
                set_current_state(TASK_RUNNING);
-               tmp_mask = up_cpumask;
-               cpumask_clear(&up_cpumask);
-               spin_unlock_irqrestore(&up_cpumask_lock, flags);
+               tmp_mask = speedchange_cpumask;
+               cpumask_clear(&speedchange_cpumask);
+               spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
 
                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
@@ -430,8 +359,6 @@ static int cpufreq_interactive_up_task(void *data)
                        if (!pcpu->governor_enabled)
                                continue;
 
-                       mutex_lock(&set_speed_lock);
-
                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);
@@ -444,8 +371,8 @@ static int cpufreq_interactive_up_task(void *data)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
-                       mutex_unlock(&set_speed_lock);
-                       trace_cpufreq_interactive_up(cpu, pcpu->target_freq,
+                       trace_cpufreq_interactive_setspeed(cpu,
+                                                    pcpu->target_freq,
                                                     pcpu->policy->cur);
                }
        }
@@ -453,48 +380,6 @@ static int cpufreq_interactive_up_task(void *data)
        return 0;
 }
 
-static void cpufreq_interactive_freq_down(struct work_struct *work)
-{
-       unsigned int cpu;
-       cpumask_t tmp_mask;
-       unsigned long flags;
-       struct cpufreq_interactive_cpuinfo *pcpu;
-
-       spin_lock_irqsave(&down_cpumask_lock, flags);
-       tmp_mask = down_cpumask;
-       cpumask_clear(&down_cpumask);
-       spin_unlock_irqrestore(&down_cpumask_lock, flags);
-
-       for_each_cpu(cpu, &tmp_mask) {
-               unsigned int j;
-               unsigned int max_freq = 0;
-
-               pcpu = &per_cpu(cpuinfo, cpu);
-               smp_rmb();
-
-               if (!pcpu->governor_enabled)
-                       continue;
-
-               mutex_lock(&set_speed_lock);
-
-               for_each_cpu(j, pcpu->policy->cpus) {
-                       struct cpufreq_interactive_cpuinfo *pjcpu =
-                               &per_cpu(cpuinfo, j);
-
-                       if (pjcpu->target_freq > max_freq)
-                               max_freq = pjcpu->target_freq;
-               }
-
-               if (max_freq != pcpu->policy->cur)
-                       __cpufreq_driver_target(pcpu->policy, max_freq,
-                                               CPUFREQ_RELATION_H);
-
-               mutex_unlock(&set_speed_lock);
-               trace_cpufreq_interactive_down(cpu, pcpu->target_freq,
-                                              pcpu->policy->cur);
-       }
-}
-
 static void cpufreq_interactive_boost(void)
 {
        int i;
@@ -502,14 +387,14 @@ static void cpufreq_interactive_boost(void)
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;
 
-       spin_lock_irqsave(&up_cpumask_lock, flags);
+       spin_lock_irqsave(&speedchange_cpumask_lock, flags);
 
        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
 
                if (pcpu->target_freq < hispeed_freq) {
                        pcpu->target_freq = hispeed_freq;
-                       cpumask_set_cpu(i, &up_cpumask);
+                       cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(i, &pcpu->target_set_time);
                        pcpu->hispeed_validate_time = pcpu->target_set_time;
@@ -525,106 +410,16 @@ static void cpufreq_interactive_boost(void)
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }
 
-       spin_unlock_irqrestore(&up_cpumask_lock, flags);
+       spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
 
        if (anyboost)
-               wake_up_process(up_task);
-}
-
-/*
- * Pulsed boost on input event raises CPUs to hispeed_freq and lets
- * usual algorithm of min_sample_time  decide when to allow speed
- * to drop.
- */
-
-static void cpufreq_interactive_input_event(struct input_handle *handle,
-                                           unsigned int type,
-                                           unsigned int code, int value)
-{
-       if (input_boost_val && type == EV_SYN && code == SYN_REPORT) {
-               trace_cpufreq_interactive_boost("input");
-               cpufreq_interactive_boost();
-       }
-}
-
-static void cpufreq_interactive_input_open(struct work_struct *w)
-{
-       struct cpufreq_interactive_inputopen *io =
-               container_of(w, struct cpufreq_interactive_inputopen,
-                            inputopen_work);
-       int error;
-
-       error = input_open_device(io->handle);
-       if (error)
-               input_unregister_handle(io->handle);
-}
-
-static int cpufreq_interactive_input_connect(struct input_handler *handler,
-                                            struct input_dev *dev,
-                                            const struct input_device_id *id)
-{
-       struct input_handle *handle;
-       int error;
-
-       pr_info("%s: connect to %s\n", __func__, dev->name);
-       handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
-       if (!handle)
-               return -ENOMEM;
-
-       handle->dev = dev;
-       handle->handler = handler;
-       handle->name = "cpufreq_interactive";
-
-       error = input_register_handle(handle);
-       if (error)
-               goto err;
-
-       inputopen.handle = handle;
-       queue_work(down_wq, &inputopen.inputopen_work);
-       return 0;
-err:
-       kfree(handle);
-       return error;
-}
-
-static void cpufreq_interactive_input_disconnect(struct input_handle *handle)
-{
-       input_close_device(handle);
-       input_unregister_handle(handle);
-       kfree(handle);
+               wake_up_process(speedchange_task);
 }
 
-static const struct input_device_id cpufreq_interactive_ids[] = {
-       {
-               .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
-                        INPUT_DEVICE_ID_MATCH_ABSBIT,
-               .evbit = { BIT_MASK(EV_ABS) },
-               .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
-                           BIT_MASK(ABS_MT_POSITION_X) |
-                           BIT_MASK(ABS_MT_POSITION_Y) },
-       }, /* multi-touch touchscreen */
-       {
-               .flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
-                        INPUT_DEVICE_ID_MATCH_ABSBIT,
-               .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
-               .absbit = { [BIT_WORD(ABS_X)] =
-                           BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
-       }, /* touchpad */
-       { },
-};
-
-static struct input_handler cpufreq_interactive_input_handler = {
-       .event          = cpufreq_interactive_input_event,
-       .connect        = cpufreq_interactive_input_connect,
-       .disconnect     = cpufreq_interactive_input_disconnect,
-       .name           = "cpufreq_interactive",
-       .id_table       = cpufreq_interactive_ids,
-};
-
 static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
 {
-       return sprintf(buf, "%llu\n", hispeed_freq);
+       return sprintf(buf, "%u\n", hispeed_freq);
 }
 
 static ssize_t store_hispeed_freq(struct kobject *kobj,
@@ -632,9 +427,9 @@ static ssize_t store_hispeed_freq(struct kobject *kobj,
                                  size_t count)
 {
        int ret;
-       u64 val;
+       unsigned long val;
 
-       ret = strict_strtoull(buf, 0, &val);
+       ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        hispeed_freq = val;
@@ -733,27 +528,6 @@ static ssize_t store_timer_rate(struct kobject *kobj,
 static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);
 
-static ssize_t show_input_boost(struct kobject *kobj, struct attribute *attr,
-                               char *buf)
-{
-       return sprintf(buf, "%u\n", input_boost_val);
-}
-
-static ssize_t store_input_boost(struct kobject *kobj, struct attribute *attr,
-                                const char *buf, size_t count)
-{
-       int ret;
-       unsigned long val;
-
-       ret = strict_strtoul(buf, 0, &val);
-       if (ret < 0)
-               return ret;
-       input_boost_val = val;
-       return count;
-}
-
-define_one_global_rw(input_boost);
-
 static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
                          char *buf)
 {
@@ -808,7 +582,6 @@ static struct attribute *interactive_attributes[] = {
        &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
-       &input_boost.attr,
        &boost.attr,
        &boostpulse.attr,
        NULL,
@@ -854,6 +627,8 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
 
                freq_table =
                        cpufreq_frequency_get_table(policy->cpu);
+               if (!hispeed_freq)
+                       hispeed_freq = policy->max;
 
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
@@ -870,11 +645,11 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                                pcpu->target_set_time;
                        pcpu->governor_enabled = 1;
                        smp_wmb();
+                       pcpu->cpu_timer.expires =
+                               jiffies + usecs_to_jiffies(timer_rate);
+                       add_timer_on(&pcpu->cpu_timer, j);
                }
 
-               if (!hispeed_freq)
-                       hispeed_freq = policy->max;
-
                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
@@ -887,11 +662,6 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                if (rc)
                        return rc;
 
-               rc = input_register_handler(&cpufreq_interactive_input_handler);
-               if (rc)
-                       pr_warn("%s: failed to register input handler\n",
-                               __func__);
-
                idle_notifier_register(&cpufreq_interactive_idle_nb);
                break;
 
@@ -901,22 +671,12 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                        pcpu->governor_enabled = 0;
                        smp_wmb();
                        del_timer_sync(&pcpu->cpu_timer);
-
-                       /*
-                        * Reset idle exit time since we may cancel the timer
-                        * before it can run after the last idle exit time,
-                        * to avoid tripping the check in idle exit for a timer
-                        * that is trying to run.
-                        */
-                       pcpu->idle_exit_time = 0;
                }
 
-               flush_work(&freq_scale_down_work);
                if (atomic_dec_return(&active_count) > 0)
                        return 0;
 
                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
-               input_unregister_handler(&cpufreq_interactive_input_handler);
                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);
 
@@ -953,36 +713,20 @@ static int __init cpufreq_interactive_init(void)
                pcpu->cpu_timer.data = i;
        }
 
-       spin_lock_init(&up_cpumask_lock);
-       spin_lock_init(&down_cpumask_lock);
-       mutex_init(&set_speed_lock);
-
-       up_task = kthread_create(cpufreq_interactive_up_task, NULL,
-                                "kinteractiveup");
-       if (IS_ERR(up_task))
-               return PTR_ERR(up_task);
-
-       sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
-       get_task_struct(up_task);
+       spin_lock_init(&speedchange_cpumask_lock);
+       speedchange_task =
+               kthread_create(cpufreq_interactive_speedchange_task, NULL,
+                              "cfinteractive");
+       if (IS_ERR(speedchange_task))
+               return PTR_ERR(speedchange_task);
 
-       /* No rescuer thread, bind to CPU queuing the work for possibly
-          warm cache (probably doesn't matter much). */
-       down_wq = alloc_workqueue("knteractive_down", 0, 1);
-
-       if (!down_wq)
-               goto err_freeuptask;
-
-       INIT_WORK(&freq_scale_down_work, cpufreq_interactive_freq_down);
-       INIT_WORK(&inputopen.inputopen_work, cpufreq_interactive_input_open);
+       sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
+       get_task_struct(speedchange_task);
 
        /* NB: wake up so the thread does not look hung to the freezer */
-       wake_up_process(up_task);
+       wake_up_process(speedchange_task);
 
        return cpufreq_register_governor(&cpufreq_gov_interactive);
-
-err_freeuptask:
-       put_task_struct(up_task);
-       return -ENOMEM;
 }
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
@@ -994,9 +738,8 @@ module_init(cpufreq_interactive_init);
 static void __exit cpufreq_interactive_exit(void)
 {
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
-       kthread_stop(up_task);
-       put_task_struct(up_task);
-       destroy_workqueue(down_wq);
+       kthread_stop(speedchange_task);
+       put_task_struct(speedchange_task);
 }
 
 module_exit(cpufreq_interactive_exit);
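
For quick reference, the timer lifecycle after this change, condensed from
the hunks above (locking, tracing, and unrelated fields elided), looks
roughly like this:

	/* Governor start: arm each CPU's sampling timer on that CPU. */
	pcpu->cpu_timer.expires = jiffies + usecs_to_jiffies(timer_rate);
	add_timer_on(&pcpu->cpu_timer, j);

	/* Timer function / idle exit: re-arm without letting the timer migrate. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		pcpu->time_in_idle = get_cpu_idle_time_us(smp_processor_id(),
							  &pcpu->idle_exit_time);
		mod_timer_pinned(&pcpu->cpu_timer,
				 jiffies + usecs_to_jiffies(timer_rate));
	}

	/* Governor stop: tear the pinned timer down synchronously. */
	del_timer_sync(&pcpu->cpu_timer);
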