sched/cpufreq_sched: split throttle period into up/down and add tunables

Replace the single 50 ms throttle window with separate windows for raising
(500 us default) and lowering (50 ms default) the OPP, so the governor can
follow load increases quickly while still damping down-transitions. Clamp
requested frequencies to the current policy limits, act on
CPUFREQ_GOV_LIMITS events, let the frequency-change kthread exit cleanly on
kthread_stop(), and expose both throttle periods as per-policy sysfs
tunables in a "sched" attribute group.

diff --git a/kernel/sched/cpufreq_sched.c b/kernel/sched/cpufreq_sched.c
index e1d208e101ed86ab6c835867a7cac9ed97b07d6c..d751bc2d0d6e50f0549551088913c8f5dcdb77ad 100644
--- a/kernel/sched/cpufreq_sched.c
+++ b/kernel/sched/cpufreq_sched.c
@@ -19,7 +19,8 @@
 
 #include "sched.h"
 
-#define THROTTLE_NSEC          50000000 /* 50ms default */
+#define THROTTLE_DOWN_NSEC     50000000 /* 50ms default */
+#define THROTTLE_UP_NSEC       500000 /* 500us default */
 
 struct static_key __read_mostly __sched_freq = STATIC_KEY_INIT_FALSE;
 static bool __read_mostly cpufreq_driver_slow;
@@ -33,8 +34,10 @@ DEFINE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs);
 
 /**
  * gov_data - per-policy data internal to the governor
- * @throttle: next throttling period expiry. Derived from throttle_nsec
- * @throttle_nsec: throttle period length in nanoseconds
+ * @up_throttle: next throttling period expiry if increasing OPP
+ * @down_throttle: next throttling period expiry if decreasing OPP
+ * @up_throttle_nsec: throttle period length in nanoseconds if increasing OPP
+ * @down_throttle_nsec: throttle period length in nanoseconds if decreasing OPP
  * @task: worker thread for dvfs transition that may block/sleep
  * @irq_work: callback used to wake up worker thread
  * @requested_freq: last frequency requested by the sched governor
@@ -48,8 +51,10 @@ DEFINE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs);
  * call down_write(policy->rwsem).
  */
 struct gov_data {
-       ktime_t throttle;
-       unsigned int throttle_nsec;
+       ktime_t up_throttle;
+       ktime_t down_throttle;
+       unsigned int up_throttle_nsec;
+       unsigned int down_throttle_nsec;
        struct task_struct *task;
        struct irq_work irq_work;
        unsigned int requested_freq;
@@ -66,25 +71,29 @@ static void cpufreq_sched_try_driver_target(struct cpufreq_policy *policy,
 
        __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
 
-       gd->throttle = ktime_add_ns(ktime_get(), gd->throttle_nsec);
+       gd->up_throttle = ktime_add_ns(ktime_get(), gd->up_throttle_nsec);
+       gd->down_throttle = ktime_add_ns(ktime_get(), gd->down_throttle_nsec);
        up_write(&policy->rwsem);
 }
 
-static bool finish_last_request(struct gov_data *gd)
+static bool finish_last_request(struct gov_data *gd, unsigned int cur_freq)
 {
        ktime_t now = ktime_get();
 
-       if (ktime_after(now, gd->throttle))
+       ktime_t throttle = gd->requested_freq < cur_freq ?
+               gd->down_throttle : gd->up_throttle;
+
+       if (ktime_after(now, throttle))
                return false;
 
        while (1) {
-               int usec_left = ktime_to_ns(ktime_sub(gd->throttle, now));
+               int usec_left = ktime_to_ns(ktime_sub(throttle, now));
 
                usec_left /= NSEC_PER_USEC;
                trace_cpufreq_sched_throttled(usec_left);
                usleep_range(usec_left, usec_left + 100);
                now = ktime_get();
-               if (ktime_after(now, gd->throttle))
+               if (ktime_after(now, throttle))
                        return true;
        }
 }
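
Note on the contract above: finish_last_request() returns false when the relevant throttle window has already expired (the caller may program the new OPP immediately) and true only after it had to sleep, in which case the caller restarts its loop to pick up any request that arrived in the meantime. A minimal user-space model of that behaviour, assuming plain nanosecond timestamps instead of ktime_t and collapsing the retry loop into a single sleep (a sketch, not kernel code):

	#include <stdbool.h>
	#include <time.h>

	static bool model_finish_last_request(long long now_ns,
					      long long up_expiry_ns,
					      long long down_expiry_ns,
					      unsigned int requested,
					      unsigned int cur)
	{
		/* a request below the current frequency is gated by the
		 * longer "down" window, everything else by the "up" one */
		long long expiry = requested < cur ? down_expiry_ns
						   : up_expiry_ns;

		if (now_ns > expiry)
			return false;	/* not throttled, act immediately */

		struct timespec ts = {
			.tv_sec  = (expiry - now_ns) / 1000000000LL,
			.tv_nsec = (expiry - now_ns) % 1000000000LL,
		};
		nanosleep(&ts, NULL);
		return true;		/* slept: re-read the latest request */
	}
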
@@ -122,13 +131,15 @@ static int cpufreq_sched_thread(void *data)
                new_request = gd->requested_freq;
                if (new_request == last_request) {
                        set_current_state(TASK_INTERRUPTIBLE);
+                       if (kthread_should_stop())
+                               break;
                        schedule();
                } else {
                        /*
                         * if the frequency thread sleeps while waiting to be
                         * unthrottled, start over to check for a newer request
                         */
-                       if (finish_last_request(gd))
+                       if (finish_last_request(gd, policy->cur))
                                continue;
                        last_request = new_request;
                        cpufreq_sched_try_driver_target(policy, new_request);
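
The kthread_should_stop() test added above follows the usual kthread sleep idiom: the task state is set to TASK_INTERRUPTIBLE before the stop flag is checked, so a kthread_stop()/wake_up_process() issued between the check and schedule() cannot be lost. The generic shape of the idiom, as a sketch rather than the governor's exact loop:

	#include <linux/kthread.h>
	#include <linux/sched.h>

	static int example_worker(void *data)
	{
		while (1) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (kthread_should_stop()) {
				__set_current_state(TASK_RUNNING);
				break;
			}
			schedule();
			/* ... handle whatever woke us ... */
		}
		return 0;
	}
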
@@ -190,9 +201,14 @@ static void update_fdomain_capacity_request(int cpu)
                goto out;
        freq_new = policy->freq_table[index_new].frequency;
 
+       if (freq_new > policy->max)
+               freq_new = policy->max;
+
+       if (freq_new < policy->min)
+               freq_new = policy->min;
+
        trace_cpufreq_sched_request_opp(cpu, capacity, freq_new,
                                        gd->requested_freq);
-
        if (freq_new == gd->requested_freq)
                goto out;
 
@@ -246,10 +262,17 @@ static inline void clear_sched_freq(void)
        static_key_slow_dec(&__sched_freq);
 }
 
+static struct attribute_group sched_attr_group_gov_pol;
+static struct attribute_group *get_sysfs_attr(void)
+{
+       return &sched_attr_group_gov_pol;
+}
+
 static int cpufreq_sched_policy_init(struct cpufreq_policy *policy)
 {
        struct gov_data *gd;
        int cpu;
+       int rc;
 
        for_each_cpu(cpu, policy->cpus)
                memset(&per_cpu(cpu_sched_capacity_reqs, cpu), 0,
@@ -259,12 +282,20 @@ static int cpufreq_sched_policy_init(struct cpufreq_policy *policy)
        if (!gd)
                return -ENOMEM;
 
-       gd->throttle_nsec = policy->cpuinfo.transition_latency ?
+       gd->up_throttle_nsec = policy->cpuinfo.transition_latency ?
                            policy->cpuinfo.transition_latency :
-                           THROTTLE_NSEC;
+                           THROTTLE_UP_NSEC;
+       gd->down_throttle_nsec = THROTTLE_DOWN_NSEC;
        pr_debug("%s: throttle threshold = %u [ns]\n",
-                 __func__, gd->throttle_nsec);
+                 __func__, gd->up_throttle_nsec);
+
+       rc = sysfs_create_group(&policy->kobj, get_sysfs_attr());
+       if (rc) {
+               pr_err("%s: couldn't create sysfs attributes: %d\n", __func__, rc);
+               goto err;
+       }
 
+       policy->governor_data = gd;
        if (cpufreq_driver_is_slow()) {
                cpufreq_driver_slow = true;
                gd->task = kthread_create(cpufreq_sched_thread, policy,
@@ -281,12 +312,12 @@ static int cpufreq_sched_policy_init(struct cpufreq_policy *policy)
                init_irq_work(&gd->irq_work, cpufreq_sched_irq_work);
        }
 
-       policy->governor_data = gd;
        set_sched_freq();
 
        return 0;
 
 err:
+       policy->governor_data = NULL;
        kfree(gd);
        return -ENOMEM;
 }
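
With sysfs_create_group() failures now funnelled into this err label, the hard-coded -ENOMEM no longer reflects the actual error. A possible refinement (sketch, assuming every earlier jump to err happens after rc has been assigned):

	err:
		policy->governor_data = NULL;
		kfree(gd);
		return rc ? rc : -ENOMEM;	/* keep -ENOMEM for the kthread failure path */
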
@@ -301,6 +332,8 @@ static int cpufreq_sched_policy_exit(struct cpufreq_policy *policy)
                put_task_struct(gd->task);
        }
 
+       sysfs_remove_group(&policy->kobj, get_sysfs_attr());
+
        policy->governor_data = NULL;
 
        kfree(gd);
@@ -317,6 +350,21 @@ static int cpufreq_sched_start(struct cpufreq_policy *policy)
        return 0;
 }
 
+static void cpufreq_sched_limits(struct cpufreq_policy *policy)
+{
+       unsigned int clamp_freq;
+       struct gov_data *gd = policy->governor_data;
+
+       pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
+               policy->cpu, policy->min, policy->max,
+               policy->cur);
+
+       clamp_freq = clamp(gd->requested_freq, policy->min, policy->max);
+
+       if (policy->cur != clamp_freq)
+               __cpufreq_driver_target(policy, clamp_freq, CPUFREQ_RELATION_L);
+}
+
 static int cpufreq_sched_stop(struct cpufreq_policy *policy)
 {
        int cpu;
@@ -340,11 +388,95 @@ static int cpufreq_sched_setup(struct cpufreq_policy *policy,
        case CPUFREQ_GOV_STOP:
                return cpufreq_sched_stop(policy);
        case CPUFREQ_GOV_LIMITS:
+               cpufreq_sched_limits(policy);
                break;
        }
        return 0;
 }
 
+/* Tunables */
+static ssize_t show_up_throttle_nsec(struct gov_data *gd, char *buf)
+{
+       return sprintf(buf, "%u\n", gd->up_throttle_nsec);
+}
+
+static ssize_t store_up_throttle_nsec(struct gov_data *gd,
+               const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       gd->up_throttle_nsec = val;
+       return count;
+}
+
+static ssize_t show_down_throttle_nsec(struct gov_data *gd, char *buf)
+{
+       return sprintf(buf, "%u\n", gd->down_throttle_nsec);
+}
+
+static ssize_t store_down_throttle_nsec(struct gov_data *gd,
+               const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       gd->down_throttle_nsec = val;
+       return count;
+}
+
+/*
+ * Create show/store routines
+ * - sys: one governor instance for the complete SYSTEM (not used here)
+ * - pol: one governor instance per struct cpufreq_policy
+ */
+#define show_gov_pol_sys(file_name)                                    \
+static ssize_t show_##file_name##_gov_pol                              \
+(struct cpufreq_policy *policy, char *buf)                             \
+{                                                                      \
+       return show_##file_name(policy->governor_data, buf);            \
+}
+
+#define store_gov_pol_sys(file_name)                                   \
+static ssize_t store_##file_name##_gov_pol                             \
+(struct cpufreq_policy *policy, const char *buf, size_t count)         \
+{                                                                      \
+       return store_##file_name(policy->governor_data, buf, count);    \
+}
+
+#define gov_pol_attr_rw(_name)                                         \
+       static struct freq_attr _name##_gov_pol =                               \
+       __ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+
+#define show_store_gov_pol_sys(file_name)                              \
+       show_gov_pol_sys(file_name);                                            \
+       store_gov_pol_sys(file_name)
+#define tunable_handlers(file_name) \
+       show_gov_pol_sys(file_name); \
+       store_gov_pol_sys(file_name); \
+       gov_pol_attr_rw(file_name)
+
+tunable_handlers(down_throttle_nsec);
+tunable_handlers(up_throttle_nsec);
+
+/* Per policy governor instance */
+static struct attribute *sched_attributes_gov_pol[] = {
+       &up_throttle_nsec_gov_pol.attr,
+       &down_throttle_nsec_gov_pol.attr,
+       NULL,
+};
+
+static struct attribute_group sched_attr_group_gov_pol = {
+       .attrs = sched_attributes_gov_pol,
+       .name = "sched",
+};
+
 #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED
 static
 #endif
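
For reference, the new per-policy tunables land in a "sched" attribute group on the policy kobject. On this 4.4-based tree that is typically reachable through the per-CPU cpufreq directory, though the exact sysfs path below is an assumption and depends on kernel configuration. A hedged userspace sketch that reads and then bumps up_throttle_nsec:

	#include <stdio.h>

	/* assumed path; adjust for the target system */
	#define TUNABLE "/sys/devices/system/cpu/cpu0/cpufreq/sched/up_throttle_nsec"

	int main(void)
	{
		unsigned int ns;
		FILE *f = fopen(TUNABLE, "r+");

		if (!f) {
			perror(TUNABLE);
			return 1;
		}
		if (fscanf(f, "%u", &ns) == 1)
			printf("current up_throttle_nsec: %u\n", ns);

		rewind(f);
		fprintf(f, "%u\n", 1000000U);	/* e.g. raise to 1 ms */
		fclose(f);
		return 0;
	}
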