#include <linux/delay.h>
#include <linux/string.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/cpufreq_sched.h>
+
#include "sched.h"
#define THROTTLE_NSEC 50000000 /* 50ms default */
int usec_left = ktime_to_ns(ktime_sub(gd->throttle, now));
usec_left /= NSEC_PER_USEC;
+ trace_cpufreq_sched_throttled(usec_left);
usleep_range(usec_left, usec_left + 100);
now = ktime_get();
if (ktime_after(now, gd->throttle))
}
do {
- set_current_state(TASK_INTERRUPTIBLE);
new_request = gd->requested_freq;
if (new_request == last_request) {
+ set_current_state(TASK_INTERRUPTIBLE);
schedule();
} else {
/*
goto out;
freq_new = policy->freq_table[index_new].frequency;
+ trace_cpufreq_sched_request_opp(cpu, capacity, freq_new,
+ gd->requested_freq);
+
if (freq_new == gd->requested_freq)
goto out;
if (new_capacity == scr->total)
return;
+ trace_cpufreq_sched_update_capacity(cpu, request, scr, new_capacity);
+
scr->total = new_capacity;
if (request)
update_fdomain_capacity_request(cpu);
pr_debug("%s: throttle threshold = %u [ns]\n",
__func__, gd->throttle_nsec);
+ policy->governor_data = gd;
+
if (cpufreq_driver_is_slow()) {
cpufreq_driver_slow = true;
gd->task = kthread_create(cpufreq_sched_thread, policy,
init_irq_work(&gd->irq_work, cpufreq_sched_irq_work);
}
- policy->governor_data = gd;
set_sched_freq();
return 0;
return 0;
}
+/*
+ * cpufreq_sched_limits - enforce updated policy frequency bounds.
+ *
+ * Invoked from the governor's CPUFREQ_GOV_LIMITS event (see
+ * cpufreq_sched_setup below): if the CPU's current frequency falls
+ * outside the new [policy->min, policy->max] window, retarget it to
+ * the violated bound.  RELATION_H selects the closest supported
+ * frequency at or below the target; RELATION_L the closest at or
+ * above it.  Always returns 0 (the event is never failed).
+ */
+static int cpufreq_sched_limits(struct cpufreq_policy *policy)
+{
+	if (policy->max < policy->cur)
+		__cpufreq_driver_target(policy,
+					policy->max,
+					CPUFREQ_RELATION_H);
+	else if (policy->min > policy->cur)
+		__cpufreq_driver_target(policy,
+					policy->min,
+					CPUFREQ_RELATION_L);
+
+	return 0;
+}
+
static int cpufreq_sched_setup(struct cpufreq_policy *policy,
			       unsigned int event)
{
	case CPUFREQ_GOV_STOP:
		return cpufreq_sched_stop(policy);
	case CPUFREQ_GOV_LIMITS:
-		break;
+		/*
+		 * Previously min/max limit changes were silently ignored;
+		 * now clamp the running frequency into the new bounds.
+		 */
+		return cpufreq_sched_limits(policy);
	}
	return 0;
}