[firefly-linux-kernel-4.4.55.git] drivers/cpufreq/rockchip_big_little.c

diff --git a/drivers/cpufreq/rockchip_big_little.c b/drivers/cpufreq/rockchip_big_little.c
index 0b26dd523fc0f30a4ae061f8085ecaed2f5c6381..f14f3b1e2fce9cd0f2bd62bf7fa1490d87d20bde 100644
--- a/drivers/cpufreq/rockchip_big_little.c
+++ b/drivers/cpufreq/rockchip_big_little.c
@@ -68,6 +68,9 @@ static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
 #define DISABLE_FURTHER_CPUFREQ         0x10
 #define ENABLE_FURTHER_CPUFREQ          0x20
 #define MASK_FURTHER_CPUFREQ            0x30
+#define CPU_LOW_FREQ   600000    /* kHz */
+#define CCI_LOW_RATE   288000000 /* Hz */
+#define CCI_HIGH_RATE  576000000 /* Hz */
 /* With 0x00(NOCHANGE), it depends on the previous "further" status */
 #define CPUFREQ_PRIVATE                 0x100
 static unsigned int no_cpufreq_access[MAX_CLUSTERS] = { 0 };
@@ -81,7 +84,10 @@ static DEFINE_MUTEX(cpufreq_mutex);
 static struct dvfs_node *clk_cpu_dvfs_node[MAX_CLUSTERS];
 static struct dvfs_node *clk_gpu_dvfs_node;
 static struct dvfs_node *clk_ddr_dvfs_node;
-static struct cpumask *cluster_policy_mask[MAX_CLUSTERS];
+static cpumask_var_t cluster_policy_mask[MAX_CLUSTERS];
+static struct clk *aclk_cci;
+static unsigned long cci_rate;
+static unsigned int cpu_bl_freq[MAX_CLUSTERS];
 
 #ifdef CONFIG_ROCKCHIP_CPUQUIET
 static void rockchip_bl_balanced_cpufreq_transition(unsigned int cluster,
@@ -163,6 +169,51 @@ static struct notifier_block notifier_policy_block = {
        .notifier_call = rockchip_bl_cpufreq_notifier_policy
 };
 
+static int rockchip_bl_cpufreq_notifier_trans(struct notifier_block *nb,
+                                             unsigned long val, void *data)
+{
+       struct cpufreq_freqs *freq = data;
+       unsigned int cluster = cpu_to_cluster(freq->cpu);
+       int ret;
+
+       cpu_bl_freq[cluster] = freq->new;
+
+       switch (val) {
+       case CPUFREQ_PRECHANGE:
+               if (cpu_bl_freq[B_CLUSTER] > CPU_LOW_FREQ ||
+                   cpu_bl_freq[L_CLUSTER] > CPU_LOW_FREQ) {
+                       if (cci_rate != CCI_HIGH_RATE) {
+                               ret = clk_set_rate(aclk_cci, CCI_HIGH_RATE);
+                               if (ret)
+                                       break;
+                               pr_debug("ccirate %ld-->%d Hz\n",
+                                        cci_rate, CCI_HIGH_RATE);
+                               cci_rate = CCI_HIGH_RATE;
+                       }
+               }
+               break;
+       case CPUFREQ_POSTCHANGE:
+               if (cpu_bl_freq[B_CLUSTER] <= CPU_LOW_FREQ &&
+                   cpu_bl_freq[L_CLUSTER] <= CPU_LOW_FREQ) {
+                       if (cci_rate != CCI_LOW_RATE) {
+                               ret = clk_set_rate(aclk_cci, CCI_LOW_RATE);
+                               if (ret)
+                                       break;
+                               pr_debug("ccirate %ld-->%d Hz\n",
+                                        cci_rate, CCI_LOW_RATE);
+                               cci_rate = CCI_LOW_RATE;
+                       }
+               }
+               break;
+       }
+
+       return 0;
+}
+
+static struct notifier_block notifier_trans_block = {
+       .notifier_call = rockchip_bl_cpufreq_notifier_trans,
+};
+
 static int rockchip_bl_cpufreq_verify(struct cpufreq_policy *policy)
 {
        u32 cur_cluster = cpu_to_cluster(policy->cpu);
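The hunk above adds a cpufreq transition notifier that keeps the CCI interconnect clock in step with the two clusters: aclk_cci is raised to CCI_HIGH_RATE in the PRECHANGE phase, before either cluster is allowed above CPU_LOW_FREQ (600 MHz), and is only dropped back to CCI_LOW_RATE in POSTCHANGE, once both clusters are at or below that threshold. For reference, the sketch below shows the bare CPUFREQ_TRANSITION_NOTIFIER registration pattern the hunk relies on; it is a minimal, self-contained module skeleton, and the demo_* names are illustrative only, not part of this patch.

/*
 * Minimal sketch of the CPUFREQ_TRANSITION_NOTIFIER pattern used above.
 * The demo_* names are hypothetical; only the cpufreq API calls are real.
 * struct cpufreq_freqs still carries a ->cpu field in this 4.4-era kernel.
 */
#include <linux/cpufreq.h>
#include <linux/module.h>

static int demo_trans_notifier(struct notifier_block *nb,
			       unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_PRECHANGE)		/* frequency about to change */
		pr_debug("cpu%u: %u kHz -> %u kHz\n",
			 freqs->cpu, freqs->old, freqs->new);
	else if (val == CPUFREQ_POSTCHANGE)	/* change has completed */
		pr_debug("cpu%u: now at %u kHz\n", freqs->cpu, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block demo_trans_nb = {
	.notifier_call = demo_trans_notifier,
};

static int __init demo_init(void)
{
	return cpufreq_register_notifier(&demo_trans_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}

static void __exit demo_exit(void)
{
	cpufreq_unregister_notifier(&demo_trans_nb,
				    CPUFREQ_TRANSITION_NOTIFIER);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");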
@@ -193,10 +244,12 @@ static int rockchip_bl_cpufreq_scale_rate_for_dvfs(struct clk *clk,
 
        cur_cluster = clk_node_get_cluster_id(clk);
        cpu = cpumask_first_and(cluster_policy_mask[cur_cluster],
-                               cpu_online_mask);
+               cpu_online_mask);
+       if (cpu >= nr_cpu_ids)
+               return -EINVAL;
        policy = cpufreq_cpu_get(cpu);
        if (!policy)
-               return 0;
+               return -EINVAL;
 
        freqs.new = rate / 1000;
        freqs.old = clk_get_rate(clk) / 1000;
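The added cpu >= nr_cpu_ids check covers the case where no CPU of the target cluster is currently online: cpumask_first_and() returns a value of at least nr_cpu_ids when the two masks have an empty intersection, so using its result unchecked would hand an invalid CPU number to cpufreq_cpu_get(). A small illustrative helper showing the same guard (the helper name is hypothetical, not taken from this file):

#include <linux/cpumask.h>
#include <linux/errno.h>

/* Return the first online CPU in @mask, or -ENODEV if none is online. */
static int first_online_cpu_in(const struct cpumask *mask)
{
	unsigned int cpu = cpumask_first_and(mask, cpu_online_mask);

	return cpu < nr_cpu_ids ? (int)cpu : -ENODEV;
}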
@@ -273,6 +326,19 @@ static int rockchip_bl_cpufreq_init_cpu0(struct cpufreq_policy *policy)
        cpufreq_register_notifier(&notifier_policy_block,
                                  CPUFREQ_POLICY_NOTIFIER);
 
+       aclk_cci = clk_get(NULL, "aclk_cci");
+       if (!IS_ERR(aclk_cci)) {
+               cci_rate = clk_get_rate(aclk_cci);
+               if (clk_cpu_dvfs_node[L_CLUSTER])
+                       cpu_bl_freq[L_CLUSTER] =
+                       clk_get_rate(clk_cpu_dvfs_node[L_CLUSTER]->clk) / 1000;
+               if (clk_cpu_dvfs_node[B_CLUSTER])
+                       cpu_bl_freq[B_CLUSTER] =
+                       clk_get_rate(clk_cpu_dvfs_node[B_CLUSTER]->clk) / 1000;
+               cpufreq_register_notifier(&notifier_trans_block,
+                                         CPUFREQ_TRANSITION_NOTIFIER);
+       }
+
        pr_info("version " VERSION ", suspend freq %d %d MHz\n",
                suspend_freq[0] / 1000, suspend_freq[1] / 1000);
        return 0;
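rockchip_bl_cpufreq_init_cpu0() now looks up the "aclk_cci" clock, samples the current rate of each cluster clock, and registers the transition notifier, but no matching teardown appears in the hunks shown here. A hedged sketch of what such a teardown could look like, assuming the driver ever grows an exit path for it; the helper name is hypothetical and it reuses the file's aclk_cci and notifier_trans_block globals (linux/clk.h and linux/err.h are already in use for clk_get()/IS_ERR()):

/* Hypothetical teardown matching the clk_get()/notifier registration above. */
static void rockchip_bl_cpufreq_put_cci(void)
{
	if (IS_ERR_OR_NULL(aclk_cci))
		return;

	cpufreq_unregister_notifier(&notifier_trans_block,
				    CPUFREQ_TRANSITION_NOTIFIER);
	clk_put(aclk_cci);
	aclk_cci = NULL;
}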
@@ -288,15 +354,16 @@ static int rockchip_bl_cpufreq_init(struct cpufreq_policy *policy)
        if (cpu0_err)
                return cpu0_err;
 
-       cluster_policy_mask[cur_cluster] = policy->cpus;
-
        /* set freq min max */
        cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
        /* sys nod */
        cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
 
-       if (cur_cluster < MAX_CLUSTERS)
+       if (cur_cluster < MAX_CLUSTERS) {
                cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+               cpumask_copy(cluster_policy_mask[cur_cluster],
+                            topology_core_cpumask(policy->cpu));
+       }
 
        policy->cur = clk_get_rate(clk_cpu_dvfs_node[cur_cluster]->clk) / 1000;
 
@@ -427,10 +494,12 @@ static int rockchip_bl_cpufreq_pm_notifier_event(struct notifier_block *this,
 
        for (i = 0; i < MAX_CLUSTERS; i++) {
                cpu = cpumask_first_and(cluster_policy_mask[i],
-                                       cpu_online_mask);
+                       cpu_online_mask);
+               if (cpu >= nr_cpu_ids)
+                       continue;
                policy = cpufreq_cpu_get(cpu);
                if (!policy)
-                       return ret;
+                       continue;
 
                if (!cpufreq_is_ondemand(policy))
                        goto out;
@@ -464,6 +533,7 @@ static int rockchip_bl_cpufreq_pm_notifier_event(struct notifier_block *this,
 out:
                cpufreq_cpu_put(policy);
        }
+
        return ret;
 }
 
@@ -535,7 +605,12 @@ MODULE_DEVICE_TABLE(of, rockchip_bl_cpufreq_match);
 
 static int __init rockchip_bl_cpufreq_probe(struct platform_device *pdev)
 {
-       int ret;
+       int ret, i;
+
+       for (i = 0; i < MAX_CLUSTERS; i++) {
+               if (!alloc_cpumask_var(&cluster_policy_mask[i], GFP_KERNEL))
+                       return -ENOMEM;
+       }
 
        register_reboot_notifier(&rockchip_bl_cpufreq_reboot_notifier);
        register_pm_notifier(&rockchip_bl_cpufreq_pm_notifier);
@@ -551,6 +626,10 @@ static int __init rockchip_bl_cpufreq_probe(struct platform_device *pdev)
 
 static int rockchip_bl_cpufreq_remove(struct platform_device *pdev)
 {
+       int i;
+
+       for (i = 0; i < MAX_CLUSTERS; i++)
+               free_cpumask_var(cluster_policy_mask[i]);
        cpufreq_unregister_driver(&rockchip_bl_cpufreq_driver);
        return 0;
 }
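One detail worth noting in the probe hunk: if alloc_cpumask_var() fails for a later cluster, the masks already allocated for earlier clusters are not freed before returning -ENOMEM (with CONFIG_CPUMASK_OFFSTACK=n this is moot, since cpumask_var_t is then an on-stack array and the allocation cannot fail). A possible unwinding variant, shown only as a sketch of the same loop:

	for (i = 0; i < MAX_CLUSTERS; i++) {
		if (!alloc_cpumask_var(&cluster_policy_mask[i], GFP_KERNEL)) {
			while (i--)		/* undo earlier allocations */
				free_cpumask_var(cluster_policy_mask[i]);
			return -ENOMEM;
		}
	}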