cpufreq: Simplify __cpufreq_remove_dev()
authorViresh Kumar <viresh.kumar@linaro.org>
Mon, 14 Jan 2013 13:23:03 +0000 (13:23 +0000)
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>
Fri, 1 Feb 2013 23:01:14 +0000 (00:01 +0100)
__cpufreq_remove_dev() is called on multiple occasions: cpufreq_driver
unregister and cpu removals.

The current implementation of this routine is overly complex without much need. If
the cpu to be removed is policy->cpu, we remove the policy first, add all
other cpus from policy->cpus back again, and then finally call __cpufreq_remove_dev()
once more to remove the cpu to be deleted. Haahhhh..

There exists a simple solution for removing a cpu:
- Simply reuse the old policy structure
- update its fields, such as policy->cpu, etc.
- notify any users of cpufreq which depend on changes to policy->cpu

Hence this patch, which tries to implement the above theory. I have tested it
thoroughly on an ARM big.LITTLE TC2 SoC, which has 5 cores (2 A15s and 3 A7s). Both
A15s share the same policy structure, and all A7s share the same policy structure.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Tested-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_stats.c
drivers/cpufreq/freq_table.c
include/linux/cpufreq.h

index 034d1836884bd4d56a9e4ad640d3e84a8a82b166..9af14a8bbcdbba16cf60e1ef9ad45dfaec67a9fc 100644 (file)
@@ -1036,6 +1036,25 @@ module_out:
        return ret;
 }
 
+static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+{
+       int j;
+
+       policy->last_cpu = policy->cpu;
+       policy->cpu = cpu;
+
+       for_each_cpu(j, policy->cpus) {
+               if (!cpu_online(j))
+                       continue;
+               per_cpu(cpufreq_policy_cpu, j) = cpu;
+       }
+
+#ifdef CONFIG_CPU_FREQ_TABLE
+       cpufreq_frequency_table_update_policy_cpu(policy);
+#endif
+       blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+                       CPUFREQ_UPDATE_POLICY_CPU, policy);
+}
 
 /**
  * __cpufreq_remove_dev - remove a CPU device
@@ -1046,132 +1065,92 @@ module_out:
  */
 static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
-       unsigned int cpu = dev->id;
+       unsigned int cpu = dev->id, ret, cpus;
        unsigned long flags;
        struct cpufreq_policy *data;
        struct kobject *kobj;
        struct completion *cmp;
-#ifdef CONFIG_SMP
        struct device *cpu_dev;
-       unsigned int j;
-#endif
 
-       pr_debug("unregistering CPU %u\n", cpu);
+       pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
 
        spin_lock_irqsave(&cpufreq_driver_lock, flags);
        data = per_cpu(cpufreq_cpu_data, cpu);
 
        if (!data) {
+               pr_debug("%s: No cpu_data found\n", __func__);
                spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
                unlock_policy_rwsem_write(cpu);
                return -EINVAL;
        }
-       per_cpu(cpufreq_cpu_data, cpu) = NULL;
 
-#ifdef CONFIG_SMP
-       /* if this isn't the CPU which is the parent of the kobj, we
-        * only need to unlink, put and exit
-        */
-       if (unlikely(cpu != data->cpu)) {
-               pr_debug("removing link\n");
+       if (cpufreq_driver->target)
                __cpufreq_governor(data, CPUFREQ_GOV_STOP);
-               cpumask_clear_cpu(cpu, data->cpus);
-               spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-               __cpufreq_governor(data, CPUFREQ_GOV_START);
-               __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
-
-               kobj = &dev->kobj;
-               cpufreq_cpu_put(data);
-               unlock_policy_rwsem_write(cpu);
-               sysfs_remove_link(kobj, "cpufreq");
-               return 0;
-       }
-#endif
-
-#ifdef CONFIG_SMP
 
 #ifdef CONFIG_HOTPLUG_CPU
        strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
                        CPUFREQ_NAME_LEN);
 #endif
 
-       /* if we have other CPUs still registered, we need to unlink them,
-        * or else wait_for_completion below will lock up. Clean the
-        * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
-        * the sysfs links afterwards.
-        */
-       if (unlikely(cpumask_weight(data->cpus) > 1)) {
-               for_each_cpu(j, data->cpus) {
-                       if (j == cpu)
-                               continue;
-                       per_cpu(cpufreq_cpu_data, j) = NULL;
-               }
-       }
-
-       spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+       per_cpu(cpufreq_cpu_data, cpu) = NULL;
+       cpus = cpumask_weight(data->cpus);
+       cpumask_clear_cpu(cpu, data->cpus);
 
-       if (unlikely(cpumask_weight(data->cpus) > 1)) {
-               for_each_cpu(j, data->cpus) {
-                       if (j == cpu)
-                               continue;
-                       pr_debug("removing link for cpu %u\n", j);
-#ifdef CONFIG_HOTPLUG_CPU
-                       strncpy(per_cpu(cpufreq_cpu_governor, j),
-                               data->governor->name, CPUFREQ_NAME_LEN);
-#endif
-                       cpu_dev = get_cpu_device(j);
-                       kobj = &cpu_dev->kobj;
+       if (unlikely((cpu == data->cpu) && (cpus > 1))) {
+               /* first sibling now owns the new sysfs dir */
+               cpu_dev = get_cpu_device(cpumask_first(data->cpus));
+               sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+               ret = kobject_move(&data->kobj, &cpu_dev->kobj);
+               if (ret) {
+                       pr_err("%s: Failed to move kobj: %d", __func__, ret);
+                       cpumask_set_cpu(cpu, data->cpus);
+                       ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
+                                       "cpufreq");
+                       spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
                        unlock_policy_rwsem_write(cpu);
-                       sysfs_remove_link(kobj, "cpufreq");
-                       lock_policy_rwsem_write(cpu);
-                       cpufreq_cpu_put(data);
+                       return -EINVAL;
                }
+
+               update_policy_cpu(data, cpu_dev->id);
+               pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+                               __func__, cpu_dev->id, cpu);
        }
-#else
-       spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
 
-       if (cpufreq_driver->target)
-               __cpufreq_governor(data, CPUFREQ_GOV_STOP);
+       spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-       kobj = &data->kobj;
-       cmp = &data->kobj_unregister;
+       pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
+       cpufreq_cpu_put(data);
        unlock_policy_rwsem_write(cpu);
-       kobject_put(kobj);
+       sysfs_remove_link(&dev->kobj, "cpufreq");
 
-       /* we need to make sure that the underlying kobj is actually
-        * not referenced anymore by anybody before we proceed with
-        * unloading.
-        */
-       pr_debug("waiting for dropping of refcount\n");
-       wait_for_completion(cmp);
-       pr_debug("wait complete\n");
-
-       lock_policy_rwsem_write(cpu);
-       if (cpufreq_driver->exit)
-               cpufreq_driver->exit(data);
-       unlock_policy_rwsem_write(cpu);
+       /* If cpu is last user of policy, free policy */
+       if (cpus == 1) {
+               lock_policy_rwsem_write(cpu);
+               kobj = &data->kobj;
+               cmp = &data->kobj_unregister;
+               unlock_policy_rwsem_write(cpu);
+               kobject_put(kobj);
 
-#ifdef CONFIG_HOTPLUG_CPU
-       /* when the CPU which is the parent of the kobj is hotplugged
-        * offline, check for siblings, and create cpufreq sysfs interface
-        * and symlinks
-        */
-       if (unlikely(cpumask_weight(data->cpus) > 1)) {
-               /* first sibling now owns the new sysfs dir */
-               cpumask_clear_cpu(cpu, data->cpus);
-               cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL);
+               /* we need to make sure that the underlying kobj is actually
+                * not referenced anymore by anybody before we proceed with
+                * unloading.
+                */
+               pr_debug("waiting for dropping of refcount\n");
+               wait_for_completion(cmp);
+               pr_debug("wait complete\n");
 
-               /* finally remove our own symlink */
                lock_policy_rwsem_write(cpu);
-               __cpufreq_remove_dev(dev, sif);
-       }
-#endif
+               if (cpufreq_driver->exit)
+                       cpufreq_driver->exit(data);
+               unlock_policy_rwsem_write(cpu);
 
-       free_cpumask_var(data->related_cpus);
-       free_cpumask_var(data->cpus);
-       kfree(data);
+               free_cpumask_var(data->related_cpus);
+               free_cpumask_var(data->cpus);
+               kfree(data);
+       } else if (cpufreq_driver->target) {
+               __cpufreq_governor(data, CPUFREQ_GOV_START);
+               __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
+       }
 
        return 0;
 }
index 9d7732b810445f6b73318e574924b6bed6ac6829..beef6b54382b2982c4d86f6312af5c9e052e3636 100644 (file)
@@ -170,11 +170,13 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
 static void cpufreq_stats_free_table(unsigned int cpu)
 {
        struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
+
        if (stat) {
+               pr_debug("%s: Free stat table\n", __func__);
                kfree(stat->time_in_state);
                kfree(stat);
+               per_cpu(cpufreq_stats_table, cpu) = NULL;
        }
-       per_cpu(cpufreq_stats_table, cpu) = NULL;
 }
 
 /* must be called early in the CPU removal sequence (before
@@ -183,8 +185,10 @@ static void cpufreq_stats_free_table(unsigned int cpu)
 static void cpufreq_stats_free_sysfs(unsigned int cpu)
 {
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-       if (policy && policy->cpu == cpu)
+       if (policy && (cpumask_weight(policy->cpus) == 1)) {
+               pr_debug("%s: Free sysfs stat\n", __func__);
                sysfs_remove_group(&policy->kobj, &stats_attr_group);
+       }
        if (policy)
                cpufreq_cpu_put(policy);
 }
@@ -262,6 +266,19 @@ error_get_fail:
        return ret;
 }
 
+static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
+{
+       struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
+                       policy->last_cpu);
+
+       pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
+                       policy->cpu, policy->last_cpu);
+       per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
+                       policy->last_cpu);
+       per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
+       stat->cpu = policy->cpu;
+}
+
 static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
                unsigned long val, void *data)
 {
@@ -269,6 +286,12 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
        struct cpufreq_policy *policy = data;
        struct cpufreq_frequency_table *table;
        unsigned int cpu = policy->cpu;
+
+       if (val == CPUFREQ_UPDATE_POLICY_CPU) {
+               cpufreq_stats_update_policy_cpu(policy);
+               return 0;
+       }
+
        if (val != CPUFREQ_NOTIFY)
                return 0;
        table = cpufreq_frequency_get_table(cpu);
index 49cda256efb2149c2311e588d9cdb151f87dc103..aa5bd39d129e6ccef7684bf3a15db9d909dd80c6 100644 (file)
@@ -227,6 +227,15 @@ void cpufreq_frequency_table_put_attr(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
 
+void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
+{
+       pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
+                       policy->cpu, policy->last_cpu);
+       per_cpu(cpufreq_show_table, policy->cpu) = per_cpu(cpufreq_show_table,
+                       policy->last_cpu);
+       per_cpu(cpufreq_show_table, policy->last_cpu) = NULL;
+}
+
 struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
 {
        return per_cpu(cpufreq_show_table, cpu);
index a55b88eaf96a1708ea21cdf120ff87285e07ac8d..52be2d0c994a544c41105e22e83d23cc42009823 100644 (file)
@@ -93,7 +93,9 @@ struct cpufreq_policy {
        cpumask_var_t           related_cpus; /* CPUs with any coordination */
        unsigned int            shared_type; /* ANY or ALL affected CPUs
                                                should set cpufreq */
-       unsigned int            cpu;    /* cpu nr of registered CPU */
+       unsigned int            cpu;    /* cpu nr of CPU managing this policy */
+       unsigned int            last_cpu; /* cpu nr of previous CPU that managed
+                                          * this policy */
        struct cpufreq_cpuinfo  cpuinfo;/* see above */
 
        unsigned int            min;    /* in kHz */
@@ -112,10 +114,11 @@ struct cpufreq_policy {
        struct completion       kobj_unregister;
 };
 
-#define CPUFREQ_ADJUST         (0)
-#define CPUFREQ_INCOMPATIBLE   (1)
-#define CPUFREQ_NOTIFY         (2)
-#define CPUFREQ_START          (3)
+#define CPUFREQ_ADJUST                 (0)
+#define CPUFREQ_INCOMPATIBLE           (1)
+#define CPUFREQ_NOTIFY                 (2)
+#define CPUFREQ_START                  (3)
+#define CPUFREQ_UPDATE_POLICY_CPU      (4)
 
 #define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
 #define CPUFREQ_SHARED_TYPE_HW  (1) /* HW does needed coordination */
@@ -405,6 +408,7 @@ extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
 
 void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
                                      unsigned int cpu);
+void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
 
 void cpufreq_frequency_table_put_attr(unsigned int cpu);
 #endif /* _LINUX_CPUFREQ_H */