#include <linux/of.h>
#include <linux/opp.h>
#include <linux/rockchip/dvfs.h>
+#include <linux/rockchip/common.h>
+#include <linux/fb.h>
+#include <linux/reboot.h>
+#include "../../../drivers/clk/rockchip/clk-pd.h"
+
+extern int rockchip_tsadc_get_temp(int chn);
#define MHz (1000 * 1000)
static LIST_HEAD(rk_dvfs_tree);
static DEFINE_MUTEX(rk_dvfs_mutex);
+static struct workqueue_struct *dvfs_wq;
+static struct dvfs_node *clk_cpu_dvfs_node;
+static unsigned int target_temp = 80;
+static int temp_limit_enable = 1;
+
+static int pd_gpu_off, early_suspend;
+static DEFINE_MUTEX(switch_vdd_gpu_mutex);
+struct regulator *vdd_gpu_regulator;
+
+/*
+ * Reboot notifier: force the vdd_gpu rail on before restart so the GPU
+ * regulator is in a known-enabled state across reboot.
+ * Fix: the regulator_enable() result was ignored (and 'ret' read
+ * uninitialized when already enabled); report failures instead.
+ */
+static int vdd_gpu_reboot_notifier_event(struct notifier_block *this,
+	unsigned long event, void *ptr)
+{
+	int ret = 0;
+
+	DVFS_DBG("%s: enable vdd_gpu\n", __func__);
+	mutex_lock(&switch_vdd_gpu_mutex);
+	if (!regulator_is_enabled(vdd_gpu_regulator))
+		ret = regulator_enable(vdd_gpu_regulator);
+	if (ret)
+		DVFS_ERR("%s: enable vdd_gpu failed (%d)\n", __func__, ret);
+	mutex_unlock(&switch_vdd_gpu_mutex);
+
+	return NOTIFY_OK;
+}
+
+/* Registered with the reboot chain; keeps vdd_gpu on across restart. */
+static struct notifier_block vdd_gpu_reboot_notifier = {
+ .notifier_call = vdd_gpu_reboot_notifier_event,
+};
+
+/*
+ * GPU power-domain notifier: tracks the pd_gpu state in pd_gpu_off and,
+ * while the display is blanked (early_suspend), switches the vdd_gpu
+ * regulator together with the power domain.
+ * Fix: the regulator_enable()/regulator_disable() results were ignored
+ * (and 'ret' could be read uninitialized); report failures.
+ */
+static int clk_pd_gpu_notifier_call(struct notifier_block *nb,
+	unsigned long event, void *ptr)
+{
+	int ret = 0;
+
+	switch (event) {
+	case RK_CLK_PD_PREPARE:
+		mutex_lock(&switch_vdd_gpu_mutex);
+		pd_gpu_off = 0;
+		if (early_suspend && !regulator_is_enabled(vdd_gpu_regulator))
+			ret = regulator_enable(vdd_gpu_regulator);
+		if (ret)
+			DVFS_ERR("%s: enable vdd_gpu failed (%d)\n",
+				 __func__, ret);
+		mutex_unlock(&switch_vdd_gpu_mutex);
+		break;
+	case RK_CLK_PD_UNPREPARE:
+		mutex_lock(&switch_vdd_gpu_mutex);
+		pd_gpu_off = 1;
+		if (early_suspend && regulator_is_enabled(vdd_gpu_regulator))
+			ret = regulator_disable(vdd_gpu_regulator);
+		if (ret)
+			DVFS_ERR("%s: disable vdd_gpu failed (%d)\n",
+				 __func__, ret);
+		mutex_unlock(&switch_vdd_gpu_mutex);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+/* Registered on the pd_gpu clock power domain (RK_CLK_PD_* events). */
+static struct notifier_block clk_pd_gpu_notifier = {
+ .notifier_call = clk_pd_gpu_notifier_call,
+};
+
+
+/*
+ * Framebuffer blank notifier: mirrors the panel state into
+ * 'early_suspend' and, when the GPU power domain is already off
+ * (pd_gpu_off), gates the vdd_gpu regulator off at FB_BLANK_POWERDOWN
+ * and back on at FB_BLANK_UNBLANK.
+ * Fix: the regulator enable/disable result was ignored; report it.
+ */
+static int early_suspend_notifier_call(struct notifier_block *self,
+	unsigned long action, void *data)
+{
+	struct fb_event *event = data;
+	int blank_mode = *((int *)event->data);
+	int ret = 0;
+
+	mutex_lock(&switch_vdd_gpu_mutex);
+	if (action == FB_EARLY_EVENT_BLANK) {
+		switch (blank_mode) {
+		case FB_BLANK_UNBLANK:
+			early_suspend = 0;
+			if (pd_gpu_off &&
+			    !regulator_is_enabled(vdd_gpu_regulator))
+				ret = regulator_enable(vdd_gpu_regulator);
+			break;
+		default:
+			break;
+		}
+	} else if (action == FB_EVENT_BLANK) {
+		switch (blank_mode) {
+		case FB_BLANK_POWERDOWN:
+			early_suspend = 1;
+			if (pd_gpu_off &&
+			    regulator_is_enabled(vdd_gpu_regulator))
+				ret = regulator_disable(vdd_gpu_regulator);
+			break;
+		default:
+			break;
+		}
+	}
+	if (ret)
+		DVFS_ERR("%s: vdd_gpu switch failed (%d)\n", __func__, ret);
+	mutex_unlock(&switch_vdd_gpu_mutex);
+
+	return NOTIFY_OK;
+}
+
+/* Registered with fb_register_client() for blank/unblank events. */
+static struct notifier_block early_suspend_notifier = {
+ .notifier_call = early_suspend_notifier_call,
+};
+
+/* Regulator operating modes as encoded in the DT "regu-mode-table"
+ * property; mapped to kernel REGULATOR_MODE_* by dvfs_regu_mode_convert(). */
+#define DVFS_REGULATOR_MODE_STANDBY 1
+#define DVFS_REGULATOR_MODE_IDLE 2
+#define DVFS_REGULATOR_MODE_NORMAL 3
+#define DVFS_REGULATOR_MODE_FAST 4
+
+/* Return a printable name for a DVFS_REGULATOR_MODE_* value. */
+static const char* dvfs_regu_mode_to_string(unsigned int mode)
+{
+	if (mode == DVFS_REGULATOR_MODE_FAST)
+		return "FAST";
+	if (mode == DVFS_REGULATOR_MODE_NORMAL)
+		return "NORMAL";
+	if (mode == DVFS_REGULATOR_MODE_IDLE)
+		return "IDLE";
+	if (mode == DVFS_REGULATOR_MODE_STANDBY)
+		return "STANDBY";
+	return "UNKNOWN";
+}
+
+/* Map a DT-encoded DVFS mode to the kernel REGULATOR_MODE_* value,
+ * or -EINVAL for an unknown encoding. */
+static int dvfs_regu_mode_convert(unsigned int mode)
+{
+	if (mode == DVFS_REGULATOR_MODE_STANDBY)
+		return REGULATOR_MODE_STANDBY;
+	if (mode == DVFS_REGULATOR_MODE_IDLE)
+		return REGULATOR_MODE_IDLE;
+	if (mode == DVFS_REGULATOR_MODE_NORMAL)
+		return REGULATOR_MODE_NORMAL;
+	if (mode == DVFS_REGULATOR_MODE_FAST)
+		return REGULATOR_MODE_FAST;
+	return -EINVAL;
+}
+
+/* Map a kernel REGULATOR_MODE_* value back to the DT DVFS encoding,
+ * or -EINVAL for an unknown mode. */
+static int dvfs_regu_mode_deconvert(unsigned int mode)
+{
+	if (mode == REGULATOR_MODE_STANDBY)
+		return DVFS_REGULATOR_MODE_STANDBY;
+	if (mode == REGULATOR_MODE_IDLE)
+		return DVFS_REGULATOR_MODE_IDLE;
+	if (mode == REGULATOR_MODE_NORMAL)
+		return DVFS_REGULATOR_MODE_NORMAL;
+	if (mode == REGULATOR_MODE_FAST)
+		return DVFS_REGULATOR_MODE_FAST;
+	return -EINVAL;
+}
+
+/*
+ * Parse the optional DT "regu-mode-table" property (pairs of
+ * <freq_kHz mode>) into a cpufreq_frequency_table: .frequency is the
+ * rate in kHz*1000, .index the DVFS regulator mode.  The DT list must
+ * end with a 0-frequency entry; the returned array is terminated with
+ * CPUFREQ_TABLE_END.
+ *
+ * Returns NULL if the property is absent or malformed,
+ * ERR_PTR(-ENOMEM) on allocation failure.  Caller kfree()s the result.
+ */
+static struct cpufreq_frequency_table *of_get_regu_mode_table(struct device_node *dev_node)
+{
+	struct cpufreq_frequency_table *regu_mode_table = NULL;
+	const struct property *prop;
+	const __be32 *val;
+	int nr, i;
+
+	prop = of_find_property(dev_node, "regu-mode-table", NULL);
+	if (!prop)
+		return NULL;
+	if (!prop->value)
+		return NULL;
+
+	nr = prop->length / sizeof(u32);
+	/* fix: nr == 0 used to pass this check and then index [-1] below */
+	if (nr == 0 || nr % 2) {
+		pr_err("%s: Invalid freq list\n", __func__);
+		return NULL;
+	}
+
+	regu_mode_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
+			(nr/2+1), GFP_KERNEL);
+	if (!regu_mode_table) {
+		pr_err("%s: could not allocate regu_mode_table!\n", __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	val = prop->value;
+
+	for (i = 0; i < nr/2; i++) {
+		regu_mode_table[i].frequency = be32_to_cpup(val++) * 1000;
+		regu_mode_table[i].index = be32_to_cpup(val++);
+	}
+
+	/* the DT list must be terminated by a 0-frequency entry */
+	if (regu_mode_table[i-1].frequency != 0) {
+		pr_err("%s: Last freq of regu_mode_table is not 0!\n", __func__);
+		kfree(regu_mode_table);
+		return NULL;
+	}
+
+	regu_mode_table[i].index = 0;
+	regu_mode_table[i].frequency = CPUFREQ_TABLE_END;
+
+	return regu_mode_table;
+}
+
+/*
+ * Validate every mode in clk_dvfs_node->regu_mode_table against the
+ * voltage domain's regulator: each DT mode is converted to a
+ * REGULATOR_MODE_* value and checked with regulator_is_supported_mode(),
+ * which may round convert_mode to a supported value (hence the
+ * deconvert/compare below).  On an unsupported mode the whole table is
+ * freed and dropped.  Returns 0 on success or a negative errno.
+ */
+static int dvfs_regu_mode_table_constrain(struct dvfs_node *clk_dvfs_node)
+{
+ int i, ret;
+ int mode, convert_mode, valid_mode;
+
+ if (!clk_dvfs_node)
+ return -EINVAL;
+
+ if (!clk_dvfs_node->regu_mode_table)
+ return -EINVAL;
+
+ if (!clk_dvfs_node->vd)
+ return -EINVAL;
+
+ if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
+ return -EINVAL;
+
+ for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
+ mode = clk_dvfs_node->regu_mode_table[i].index;
+ convert_mode = dvfs_regu_mode_convert(mode);
+
+ ret = regulator_is_supported_mode(clk_dvfs_node->vd->regulator,
+ &convert_mode);
+ if (ret) {
+ DVFS_ERR("%s: find mode=%d unsupported\n", __func__,
+ mode);
+ kfree(clk_dvfs_node->regu_mode_table);
+ clk_dvfs_node->regu_mode_table = NULL;
+ return ret;
+ }
+
+ /* write the rounded mode back into the table */
+ valid_mode = dvfs_regu_mode_deconvert(convert_mode);
+ if (valid_mode != mode) {
+ DVFS_ERR("%s: round mode=%d to valid mode=%d!\n",
+ __func__, mode, valid_mode);
+ clk_dvfs_node->regu_mode_table[i].index = valid_mode;
+ }
+
+ }
+
+ return 0;
+}
+
+/*
+ * Look up in regu_mode_table the regulator mode required to run
+ * @clk_dvfs_node at @rate (Hz); stores it in *mode.
+ * NOTE(review): returns the first entry with frequency <= rate, so the
+ * table is presumably ordered from high to low frequency (its DT form
+ * ends with a 0-frequency catch-all) — confirm against the DT layout.
+ * Returns 0 on success, -EINVAL when no entry matches.
+ */
+static int clk_dvfs_node_get_regu_mode(struct dvfs_node *clk_dvfs_node,
+ unsigned long rate, unsigned int *mode)
+{
+ int i;
+
+
+ if ((!clk_dvfs_node) || (!clk_dvfs_node->regu_mode_table))
+ return -EINVAL;
+
+ for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
+ if (rate >= clk_dvfs_node->regu_mode_table[i].frequency) {
+ *mode = clk_dvfs_node->regu_mode_table[i].index;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * Highest regulator mode required by any mode-enabled clk in @pd.
+ * Fast path: if the given clk has regu_mode_en and its mode is already
+ * at or above the domain's current mode, it wins without a list scan.
+ * Note: the clk_dvfs_node parameter is reused as the list iterator in
+ * the loop below.
+ */
+static int dvfs_pd_get_newmode_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
+{
+ unsigned int mode_max = 0;
+
+
+ if (clk_dvfs_node->regu_mode_en && (clk_dvfs_node->regu_mode >= pd->regu_mode)) {
+ return clk_dvfs_node->regu_mode;
+ }
+
+ list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
+ if (clk_dvfs_node->regu_mode_en)
+ mode_max = max(mode_max, (clk_dvfs_node->regu_mode));
+ }
+
+ return mode_max;
+}
+
+/* Recompute the enclosing power domain's regulator mode after
+ * @clk_dvfs_node's own mode changed.  No-op on NULL node/domain. */
+static void dvfs_update_clk_pds_mode(struct dvfs_node *clk_dvfs_node)
+{
+	struct pd_node *pd;
+
+	if (!clk_dvfs_node || !clk_dvfs_node->pd)
+		return;
+
+	pd = clk_dvfs_node->pd;
+	pd->regu_mode = dvfs_pd_get_newmode_byclk(pd, clk_dvfs_node);
+}
+
+/* Highest regulator mode required by any power domain under @vd,
+ * or -EINVAL when @vd is NULL. */
+static int dvfs_vd_get_newmode_bypd(struct vd_node *vd)
+{
+	struct pd_node *pd;
+	unsigned int mode = 0;
+
+	if (vd == NULL)
+		return -EINVAL;
+
+	list_for_each_entry(pd, &vd->pd_list, node)
+		mode = max(mode, pd->regu_mode);
+
+	return mode;
+}
+
+/* Refresh the pd mode implied by @clk_dvfs_node, then return the new
+ * voltage-domain-wide mode (or -EINVAL on NULL input). */
+static int dvfs_vd_get_newmode_byclk(struct dvfs_node *clk_dvfs_node)
+{
+	if (clk_dvfs_node == NULL)
+		return -EINVAL;
+
+	dvfs_update_clk_pds_mode(clk_dvfs_node);
+	return dvfs_vd_get_newmode_bypd(clk_dvfs_node->vd);
+}
+
+/*
+ * Apply regulator mode @mode (DVFS encoding) to @vd's regulator and
+ * cache it in vd->regu_mode on success.
+ * Returns 0 on success, -EINVAL on bad vd/mode/regulator, or -EAGAIN
+ * when the regulator rejects the mode.
+ */
+static int dvfs_regu_set_mode(struct vd_node *vd, unsigned int mode)
+{
+ int convert_mode;
+ int ret = 0;
+
+
+ if (IS_ERR_OR_NULL(vd)) {
+ DVFS_ERR("%s: vd_node error\n", __func__);
+ return -EINVAL;
+ }
+
+ DVFS_DBG("%s: mode=%d(old=%d)\n", __func__, mode, vd->regu_mode);
+
+ /* translate the DT encoding into a REGULATOR_MODE_* value */
+ convert_mode = dvfs_regu_mode_convert(mode);
+ if (convert_mode < 0) {
+ DVFS_ERR("%s: mode %d convert error\n", __func__, mode);
+ return convert_mode;
+ }
+
+ if (!IS_ERR_OR_NULL(vd->regulator)) {
+ ret = dvfs_regulator_set_mode(vd->regulator, convert_mode);
+ if (ret < 0) {
+ DVFS_ERR("%s: %s set mode %d (was %d) error!\n", __func__,
+ vd->regulator_name, mode, vd->regu_mode);
+ return -EAGAIN;
+ }
+ } else {
+ DVFS_ERR("%s: invalid regulator\n", __func__);
+ return -EINVAL;
+ }
+
+ vd->regu_mode = mode;
+
+ return 0;
+}
+
+/*
+ * Recompute and apply the voltage-domain regulator mode implied by
+ * running @clk_dvfs_node at @rate (Hz).  No-op (returns 0) when
+ * regu_mode handling is disabled for this clk; otherwise returns 0 on
+ * success or a negative errno from the lookup/set steps.
+ */
+static int dvfs_regu_mode_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
+{
+ int ret;
+ int mode;
+
+
+ if (!clk_dvfs_node)
+ return -EINVAL;
+
+ if (!clk_dvfs_node->regu_mode_en)
+ return 0;
+
+ ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, rate, &mode);
+ if (ret) {
+ DVFS_ERR("%s: clk(%s) rate %luhz get mode fail\n",
+ __func__, clk_dvfs_node->name, rate);
+ return ret;
+ }
+ clk_dvfs_node->regu_mode = mode;
+
+ /* domain-wide mode is the max over all clks sharing the vd */
+ mode = dvfs_vd_get_newmode_byclk(clk_dvfs_node);
+ if (mode < 0)
+ return mode;
+
+ ret = dvfs_regu_set_mode(clk_dvfs_node->vd, mode);
+
+ return ret;
+}
static void dvfs_volt_up_delay(struct vd_node *vd, int new_volt, int old_volt)
{
}
}
}
+#endif
-struct regulator *dvfs_get_regulator1(char *regulator_name)
+struct regulator *dvfs_get_regulator(char *regulator_name)
{
struct vd_node *vd;
+
+ mutex_lock(&rk_dvfs_mutex);
list_for_each_entry(vd, &rk_dvfs_tree, node) {
if (strcmp(regulator_name, vd->regulator_name) == 0) {
+ mutex_unlock(&rk_dvfs_mutex);
return vd->regulator;
}
}
+ mutex_unlock(&rk_dvfs_mutex);
return NULL;
}
-#endif
static int dvfs_get_rate_range(struct dvfs_node *clk_dvfs_node)
{
int i = 0;
if (!clk_dvfs_node)
- return -1;
+ return -EINVAL;
clk_dvfs_node->min_rate = 0;
clk_dvfs_node->max_rate = 0;
//ddr rate = real rate+flags
flags = clk_dvfs_node->dvfs_table[i].frequency%1000;
rate = (clk_dvfs_node->dvfs_table[i].frequency/1000)*1000;
- temp_rate = clk_round_rate(clk_dvfs_node->clk, rate*1000);
+ temp_rate = __clk_round_rate(clk_dvfs_node->clk, rate*1000);
if(temp_rate <= 0){
DVFS_WARNING("%s: clk(%s) rate %d round return %d\n",
__func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
clk_fv->frequency = 0;
clk_fv->index = 0;
//DVFS_DBG("%s get corresponding voltage error! out of bound\n", clk_dvfs_node->name);
- return -1;
+ return -EINVAL;
}
static int dvfs_pd_get_newvolt_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
{
int volt_max = 0;
- if (!pd || !clk_dvfs_node)
- return 0;
-
- if (clk_dvfs_node->set_volt >= pd->cur_volt) {
+ if (clk_dvfs_node->enable_count && (clk_dvfs_node->set_volt >= pd->cur_volt)) {
return clk_dvfs_node->set_volt;
}
list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
- // DVFS_DBG("%s ,pd(%s),dvfs(%s),volt(%u)\n",__func__,pd->name,
- // clk_dvfs_node->name,clk_dvfs_node->set_volt);
- volt_max = max(volt_max, clk_dvfs_node->set_volt);
+ if (clk_dvfs_node->enable_count)
+ volt_max = max(volt_max, clk_dvfs_node->set_volt);
}
return volt_max;
}
pd = clk_dvfs_node->pd;
if (!pd)
- return ;
+ return;
pd->cur_volt = dvfs_pd_get_newvolt_byclk(pd, clk_dvfs_node);
- /*for (i = 0; (clk_dvfs_node->pds[i].pd != NULL); i++) {
- pd = clk_dvfs_node->pds[i].pd;
- // DVFS_DBG("%s dvfs(%s),pd(%s)\n",__func__,clk_dvfs_node->name,pd->name);
- pd->cur_volt = dvfs_pd_get_newvolt_byclk(pd, clk_dvfs_node);
- }*/
}
static int dvfs_vd_get_newvolt_bypd(struct vd_node *vd)
{
int volt_max_vd = 0;
struct pd_node *pd;
- //struct depend_list *depend;
if (!vd)
return -EINVAL;
list_for_each_entry(pd, &vd->pd_list, node) {
- // DVFS_DBG("%s pd(%s,%u)\n",__func__,pd->name,pd->cur_volt);
volt_max_vd = max(volt_max_vd, pd->cur_volt);
}
- /* some clks depend on this voltage domain */
-/* if (!list_empty(&vd->req_volt_list)) {
- list_for_each_entry(depend, &vd->req_volt_list, node2vd) {
- volt_max_vd = max(volt_max_vd, depend->req_volt);
- }
- }*/
return volt_max_vd;
}
static int dvfs_vd_get_newvolt_byclk(struct dvfs_node *clk_dvfs_node)
{
if (!clk_dvfs_node)
- return -1;
+ return -EINVAL;
+
dvfs_update_clk_pds_volt(clk_dvfs_node);
return dvfs_vd_get_newvolt_bypd(clk_dvfs_node->vd);
}
-int dvfs_clk_enable_limit(struct dvfs_node *clk_dvfs_node, unsigned int min_rate, unsigned max_rate)
+#if 0
+/*
+ * Disabled generic implementation: walked every vd/pd/clk carrying a
+ * temp_limit_table and retargeted it at its last set rate.  Superseded
+ * by the cpu-specific dvfs_temp_limit_work_func() below.
+ */
+static void dvfs_temp_limit_work_func(struct work_struct *work)
+{
+ unsigned long delay = HZ / 10; // 100ms
+ struct vd_node *vd;
+ struct pd_node *pd;
+ struct dvfs_node *clk_dvfs_node;
+
+ queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
+
+ mutex_lock(&rk_dvfs_mutex);
+ list_for_each_entry(vd, &rk_dvfs_tree, node) {
+ mutex_lock(&vd->mutex);
+ list_for_each_entry(pd, &vd->pd_list, node) {
+ list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
+ if (clk_dvfs_node->temp_limit_table) {
+ clk_dvfs_node->temp = rockchip_tsadc_get_temp(clk_dvfs_node->temp_channel);
+ clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
+ }
+ }
+ }
+ mutex_unlock(&vd->mutex);
+ }
+ mutex_unlock(&rk_dvfs_mutex);
+}
+#endif
+
+/*
+ * Periodic (100ms) thermal limiter for the CPU clk.  Reads the SoC
+ * temperature (channel 1) and adjusts clk_cpu_dvfs_node->temp_limit_rate:
+ *  - performance policy: absolute ceiling from per_temp_limit_table;
+ *  - normal policy: step the limit down while above target_temp and
+ *    still rising, step it back up toward max_rate while below target.
+ * Fixes: guard against clk_cpu_dvfs_node not yet being set, and cast
+ * target_temp to int in comparisons — temp is signed and may be
+ * negative on a sensor error, which previously promoted to a huge
+ * unsigned value and throttled wrongly.
+ */
+static void dvfs_temp_limit_work_func(struct work_struct *work)
+{
+	unsigned long delay = HZ/10;	/* 100ms poll period */
+	unsigned long arm_rate_step = 0;
+	static int old_temp;
+	int temp, delta_temp;
+	int i;
+
+	queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
+
+	if (!clk_cpu_dvfs_node)
+		return;
+
+	temp = rockchip_tsadc_get_temp(1);
+
+	/* debounce: ignore changes of one degree or less */
+	delta_temp = (old_temp > temp) ? (old_temp - temp) : (temp - old_temp);
+	if (delta_temp <= 1)
+		return;
+
+	if (ROCKCHIP_PM_POLICY_PERFORMANCE == rockchip_pm_get_policy()) {
+		if (!clk_cpu_dvfs_node->per_temp_limit_table)
+			return;
+
+		/* table entries are <temp, ceiling>; take the last match */
+		clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
+		for (i = 0; clk_cpu_dvfs_node->per_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+			if (temp > clk_cpu_dvfs_node->per_temp_limit_table[i].index)
+				clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->per_temp_limit_table[i].frequency;
+		}
+		dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
+	} else if (ROCKCHIP_PM_POLICY_NORMAL == rockchip_pm_get_policy()) {
+		if (!clk_cpu_dvfs_node->nor_temp_limit_table)
+			return;
+
+		if (temp > (int)target_temp) {
+			/* above target and still rising: step the limit down */
+			if (temp > old_temp) {
+				delta_temp = temp - (int)target_temp;
+				for (i = 0; clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+					if (delta_temp > clk_cpu_dvfs_node->nor_temp_limit_table[i].index)
+						arm_rate_step = clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency;
+				}
+				if (arm_rate_step && (clk_cpu_dvfs_node->temp_limit_rate > arm_rate_step)) {
+					clk_cpu_dvfs_node->temp_limit_rate -= arm_rate_step;
+					dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
+				}
+			}
+		} else {
+			/* below target: step the limit back toward max_rate */
+			if (clk_cpu_dvfs_node->temp_limit_rate < clk_cpu_dvfs_node->max_rate) {
+				delta_temp = (int)target_temp - temp;
+				for (i = 0; clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+					if (delta_temp > clk_cpu_dvfs_node->nor_temp_limit_table[i].index)
+						arm_rate_step = clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency;
+				}
+				if (arm_rate_step) {
+					clk_cpu_dvfs_node->temp_limit_rate += arm_rate_step;
+					if (clk_cpu_dvfs_node->temp_limit_rate > clk_cpu_dvfs_node->max_rate)
+						clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
+					dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
+				}
+			}
+		}
+	}
+
+	DVFS_DBG("cur temp: %d, temp_limit_core_rate: %lu\n", temp, clk_cpu_dvfs_node->temp_limit_rate);
+
+	old_temp = temp;
+}
+static DECLARE_DELAYED_WORK(dvfs_temp_limit_work, dvfs_temp_limit_work_func);
+
+
+int dvfs_clk_enable_limit(struct dvfs_node *clk_dvfs_node, unsigned int min_rate, unsigned int max_rate)
{
u32 rate = 0, ret = 0;
- if (!clk_dvfs_node)
+ if (!clk_dvfs_node || (min_rate > max_rate))
return -EINVAL;
if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
/* To reset clk_dvfs_node->min_rate/max_rate */
dvfs_get_rate_range(clk_dvfs_node);
clk_dvfs_node->freq_limit_en = 1;
- clk_dvfs_node->min_rate = min_rate > clk_dvfs_node->min_rate ? min_rate : clk_dvfs_node->min_rate;
- clk_dvfs_node->max_rate = max_rate < clk_dvfs_node->max_rate ? max_rate : clk_dvfs_node->max_rate;
+
+ if ((min_rate >= clk_dvfs_node->min_rate) && (min_rate <= clk_dvfs_node->max_rate)) {
+ clk_dvfs_node->min_rate = min_rate;
+ }
+
+ if ((max_rate >= clk_dvfs_node->min_rate) && (max_rate <= clk_dvfs_node->max_rate)) {
+ clk_dvfs_node->max_rate = max_rate;
+ }
+
if (clk_dvfs_node->last_set_rate == 0)
- rate = clk_get_rate(clk_dvfs_node->clk);
+ rate = __clk_get_rate(clk_dvfs_node->clk);
else
rate = clk_dvfs_node->last_set_rate;
ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
}
EXPORT_SYMBOL(dvfs_clk_disable_limit);
+/* Stop thermal throttling: clear the enable flag and cancel the poller. */
+void dvfs_disable_temp_limit(void)
+{
+	temp_limit_enable = 0;
+	cancel_delayed_work_sync(&dvfs_temp_limit_work);
+}
+
+/*
+ * Read back the current min/max rate limits of @clk_dvfs_node under
+ * the vd mutex.  Returns whether frequency limiting is enabled, or
+ * -EINVAL on a NULL node.
+ */
+int dvfs_clk_get_limit(struct dvfs_node *clk_dvfs_node, unsigned int *min_rate, unsigned int *max_rate)
+{
+	int ret;
+
+	if (clk_dvfs_node == NULL)
+		return -EINVAL;
+
+	mutex_lock(&clk_dvfs_node->vd->mutex);
+	*min_rate = clk_dvfs_node->min_rate;
+	*max_rate = clk_dvfs_node->max_rate;
+	ret = clk_dvfs_node->freq_limit_en;
+	mutex_unlock(&clk_dvfs_node->vd->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(dvfs_clk_get_limit);
int dvfs_clk_register_set_rate_callback(struct dvfs_node *clk_dvfs_node, clk_set_rate_callback clk_dvfs_target)
{
int clk_enable_dvfs(struct dvfs_node *clk_dvfs_node)
{
struct cpufreq_frequency_table clk_fv;
+ int volt_new;
+ unsigned int mode;
+ int ret;
+
if (!clk_dvfs_node)
return -EINVAL;
clk_dvfs_node->freq_limit_en = 1;
dvfs_table_round_volt(clk_dvfs_node);
clk_dvfs_node->set_freq = clk_dvfs_node_get_rate_kz(clk_dvfs_node->clk);
+ clk_dvfs_node->last_set_rate = clk_dvfs_node->set_freq*1000;
DVFS_DBG("%s: %s get freq %u!\n",
__func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
return 0;
}
}
-
+ clk_dvfs_node->enable_count++;
clk_dvfs_node->set_volt = clk_fv.index;
- dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
+ volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
DVFS_DBG("%s: %s, freq %u(ref vol %u)\n",
__func__, clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt);
#if 0
clk_notifier_register(clk, clk_dvfs_node->dvfs_nb);
}
#endif
-
-#if 0
- if(clk_dvfs_node->vd->cur_volt < clk_dvfs_node->set_volt) {
- int ret;
- mutex_lock(&rk_dvfs_mutex);
- ret = dvfs_regulator_set_voltage_readback(clk_dvfs_node->vd->regulator, clk_dvfs_node->set_volt, clk_dvfs_node->set_volt);
+ if(clk_dvfs_node->vd->cur_volt != volt_new) {
+ ret = dvfs_regulator_set_voltage_readback(clk_dvfs_node->vd->regulator, volt_new, volt_new);
+ dvfs_volt_up_delay(clk_dvfs_node->vd,volt_new, clk_dvfs_node->vd->cur_volt);
if (ret < 0) {
clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
- clk_dvfs_node->enable_dvfs = 0;
+ clk_dvfs_node->enable_count = 0;
DVFS_ERR("dvfs enable clk %s,set volt error \n", clk_dvfs_node->name);
- mutex_unlock(&rk_dvfs_mutex);
- return -1;
+ mutex_unlock(&clk_dvfs_node->vd->mutex);
+ return -EAGAIN;
}
+ clk_dvfs_node->vd->cur_volt = volt_new;
clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
- mutex_unlock(&rk_dvfs_mutex);
}
-#endif
- clk_dvfs_node->enable_count++;
+
} else {
DVFS_DBG("%s: dvfs already enable clk enable = %d!\n",
__func__, clk_dvfs_node->enable_count);
clk_dvfs_node->enable_count++;
}
+ if (clk_dvfs_node->regu_mode_en) {
+ ret = dvfs_regu_mode_table_constrain(clk_dvfs_node);
+ if (ret) {
+ DVFS_ERR("%s: clk(%s) regu_mode_table is unvalid, set regu_mode_en=0!\n",
+ __func__, clk_dvfs_node->name);
+ clk_dvfs_node->regu_mode_en = 0;
+ mutex_unlock(&clk_dvfs_node->vd->mutex);
+ return ret;
+ }
+
+ ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, clk_dvfs_node->set_freq*1000, &mode);
+ if (ret < 0) {
+ DVFS_ERR("%s: clk(%s) rate %dKhz get regu_mode fail\n",
+ __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
+ mutex_unlock(&clk_dvfs_node->vd->mutex);
+ return ret;
+ } else
+ clk_dvfs_node->regu_mode = mode;
+
+ dvfs_update_clk_pds_mode(clk_dvfs_node);
+ }
+
mutex_unlock(&clk_dvfs_node->vd->mutex);
return 0;
int clk_disable_dvfs(struct dvfs_node *clk_dvfs_node)
{
+ int volt_new;
+
if (!clk_dvfs_node)
return -EINVAL;
if (!clk_dvfs_node->enable_count) {
DVFS_WARNING("%s:clk(%s) is already closed!\n",
__func__, __clk_get_name(clk_dvfs_node->clk));
+ mutex_unlock(&clk_dvfs_node->vd->mutex);
return 0;
} else {
clk_dvfs_node->enable_count--;
if (0 == clk_dvfs_node->enable_count) {
DVFS_DBG("%s:dvfs clk(%s) disable dvfs ok!\n",
__func__, __clk_get_name(clk_dvfs_node->clk));
+ volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
+ dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
+
#if 0
clk_notifier_unregister(clk, clk_dvfs_node->dvfs_nb);
DVFS_DBG("clk unregister nb!\n");
}
EXPORT_SYMBOL(clk_disable_dvfs);
+/*
+ * Clamp @rate to the node's [min_rate, max_rate] window and, when
+ * thermal limiting is active, additionally to temp_limit_rate.
+ * Returns @rate unchanged when frequency limiting is disabled.
+ */
+static unsigned long dvfs_get_limit_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
+{
+	unsigned long limit_rate = rate;
+
+	if (clk_dvfs_node->freq_limit_en) {
+		/* dvfs table limit */
+		if (limit_rate < clk_dvfs_node->min_rate)
+			limit_rate = clk_dvfs_node->min_rate;
+		else if (limit_rate > clk_dvfs_node->max_rate)
+			limit_rate = clk_dvfs_node->max_rate;
+
+		if (temp_limit_enable &&
+		    limit_rate > clk_dvfs_node->temp_limit_rate)
+			limit_rate = clk_dvfs_node->temp_limit_rate;
+	}
+
+	DVFS_DBG("%s: rate:%ld, limit_rate:%ld,\n", __func__, rate, limit_rate);
+
+	return limit_rate;
+}
+
static int dvfs_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
{
struct cpufreq_frequency_table clk_fv;
}
}
- /* Check limit rate */
- //if (clk_dvfs_node->freq_limit_en) {
- if (rate < clk_dvfs_node->min_rate) {
- rate = clk_dvfs_node->min_rate;
- } else if (rate > clk_dvfs_node->max_rate) {
- rate = clk_dvfs_node->max_rate;
- }
- //}
-
- new_rate = clk_round_rate(clk, rate);
- old_rate = clk_get_rate(clk);
+ rate = dvfs_get_limit_rate(clk_dvfs_node, rate);
+ new_rate = __clk_round_rate(clk, rate);
+ old_rate = __clk_get_rate(clk);
if (new_rate == old_rate)
return 0;
/* find the clk corresponding voltage */
ret = clk_dvfs_node_get_ref_volt(clk_dvfs_node, new_rate / 1000, &clk_fv);
if (ret) {
- DVFS_ERR("%s:dvfs clk(%s) rate %luhz is larger,not support\n",
+ DVFS_ERR("%s:dvfs clk(%s) rate %luhz is not support\n",
__func__, clk_dvfs_node->name, new_rate);
return ret;
}
DVFS_DBG("%s:%s new rate=%lu(was=%lu),new volt=%lu,(was=%d)\n",
__func__, clk_dvfs_node->name, new_rate, old_rate, volt_new,clk_dvfs_node->vd->cur_volt);
+
/* if up the rate */
if (new_rate > old_rate) {
+ ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
+ if (ret)
+ DVFS_ERR("%s: dvfs clk(%s) rate %luhz set mode err\n",
+ __func__, clk_dvfs_node->name, new_rate);
+
ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
if (ret)
goto fail_roll_back;
/* scale rate */
if (clk_dvfs_node->clk_dvfs_target) {
- ret = clk_dvfs_node->clk_dvfs_target(clk, new_rate);
+ ret = clk_dvfs_node->clk_dvfs_target(clk, rate);
} else {
- ret = clk_set_rate(clk, new_rate);
+ ret = clk_set_rate(clk, rate);
}
if (ret) {
clk_dvfs_node->set_freq = new_rate / 1000;
DVFS_DBG("%s:dvfs clk(%s) set rate %lu ok\n",
- __func__, clk_dvfs_node->name, clk_get_rate(clk));
+ __func__, clk_dvfs_node->name, __clk_get_rate(clk));
/* if down the rate */
if (new_rate < old_rate) {
ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
if (ret)
goto out;
+
+ ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
+ if (ret)
+ DVFS_ERR("%s:dvfs clk(%s) rate %luhz set mode err\n",
+ __func__, clk_dvfs_node->name, new_rate);
}
-
- clk_dvfs_node->last_set_rate = new_rate;
-
+
return 0;
fail_roll_back:
clk_dvfs_node->set_volt = clk_volt_store;
return ret;
}
+/* Round @rate (Hz) to the nearest rate the underlying clk supports. */
+unsigned long dvfs_clk_round_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
+{
+ return __clk_round_rate(clk_dvfs_node->clk, rate);
+}
+EXPORT_SYMBOL_GPL(dvfs_clk_round_rate);
+
unsigned long dvfs_clk_get_rate(struct dvfs_node *clk_dvfs_node)
{
- return clk_get_rate(clk_dvfs_node->clk);
+ return __clk_get_rate(clk_dvfs_node->clk);
}
EXPORT_SYMBOL_GPL(dvfs_clk_get_rate);
+/* Return the last rate requested for this clk, read under the vd mutex. */
+unsigned long dvfs_clk_get_last_set_rate(struct dvfs_node *clk_dvfs_node)
+{
+ unsigned long last_set_rate;
+
+ mutex_lock(&clk_dvfs_node->vd->mutex);
+ last_set_rate = clk_dvfs_node->last_set_rate;
+ mutex_unlock(&clk_dvfs_node->vd->mutex);
+
+ return last_set_rate;
+}
+EXPORT_SYMBOL_GPL(dvfs_clk_get_last_set_rate);
+
+
int dvfs_clk_enable(struct dvfs_node *clk_dvfs_node)
{
return clk_enable(clk_dvfs_node->clk);
if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target) {
mutex_lock(&clk_dvfs_node->vd->mutex);
ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
+ clk_dvfs_node->last_set_rate = rate;
mutex_unlock(&clk_dvfs_node->vd->mutex);
} else {
DVFS_ERR("%s:dvfs node(%s) has no vd node or target callback!\n",
return 0;
}
+/*
+ * Parse a DT temperature-limit property (pairs of <temp_C freq_kHz>)
+ * named @propname into a cpufreq_frequency_table: .index holds the
+ * temperature, .frequency the rate in kHz*1000.  The array is
+ * terminated with CPUFREQ_TABLE_END.
+ * Returns NULL when the property is absent or malformed, or when
+ * allocation fails.  Caller kfree()s the result.
+ */
+static struct cpufreq_frequency_table *of_get_temp_limit_table(struct device_node *dev_node, const char *propname)
+{
+	struct cpufreq_frequency_table *temp_limt_table = NULL;
+	const struct property *prop;
+	const __be32 *val;
+	int nr, i;
+
+	prop = of_find_property(dev_node, propname, NULL);
+	if (!prop)
+		return NULL;
+	if (!prop->value)
+		return NULL;
+
+	nr = prop->length / sizeof(u32);
+	if (nr % 2) {
+		pr_err("%s: Invalid freq list\n", __func__);
+		return NULL;
+	}
+
+	temp_limt_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
+			(nr/2 + 1), GFP_KERNEL);
+	/* fix: the result was used below without a NULL check */
+	if (!temp_limt_table) {
+		pr_err("%s: could not allocate temp_limt_table!\n", __func__);
+		return NULL;
+	}
+
+	val = prop->value;
+
+	for (i = 0; i < nr/2; i++) {
+		temp_limt_table[i].index = be32_to_cpup(val++);
+		temp_limt_table[i].frequency = be32_to_cpup(val++) * 1000;
+	}
+
+	temp_limt_table[i].index = 0;
+	temp_limt_table[i].frequency = CPUFREQ_TABLE_END;
+
+	return temp_limt_table;
+}
+
int of_dvfs_init(void)
{
struct vd_node *vd;
struct device_node *dvfs_dev_node, *clk_dev_node, *vd_dev_node, *pd_dev_node;
struct dvfs_node *dvfs_node;
struct clk *clk;
+ const __be32 *val;
int ret;
DVFS_DBG("%s\n", __func__);
return PTR_ERR(dvfs_dev_node);
}
+ /* Optional chip-level thermal-throttle settings; the static defaults
+  * (target_temp = 80, temp_limit_enable = 1) apply when the DT
+  * properties are absent. */
+ val = of_get_property(dvfs_dev_node, "target-temp", NULL);
+ if (val) {
+ target_temp = be32_to_cpup(val);
+ }
+
+ val = of_get_property(dvfs_dev_node, "temp-limit-enable", NULL);
+ if (val) {
+ temp_limit_enable = be32_to_cpup(val);
+ }
+
for_each_available_child_of_node(dvfs_dev_node, vd_dev_node) {
vd = kzalloc(sizeof(struct vd_node), GFP_KERNEL);
if (!vd)
continue;
}
- /*vd->regulator = regulator_get(NULL, vd->regulator_name);
- if (IS_ERR(vd->regulator)){
- DVFS_ERR("%s vd(%s) get regulator(%s) failed!\n", __func__, vd->name, vd->regulator_name);
- kfree(vd);
- continue;
- }*/
-
vd->suspend_volt = 0;
vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
DVFS_DBG("%s:pd(%s) register ok, parent vd:%s\n",
__func__, pd->name, vd->name);
for_each_available_child_of_node(pd_dev_node, clk_dev_node) {
+ /* NOTE(review): for_each_available_child_of_node() already skips
+  * nodes that are not "okay", so this extra availability check
+  * looks redundant -- confirm before removing. */
+ if (!of_device_is_available(clk_dev_node))
+ continue;
+
dvfs_node = kzalloc(sizeof(struct dvfs_node), GFP_KERNEL);
if (!dvfs_node)
return -ENOMEM;
dvfs_node->name = clk_dev_node->name;
dvfs_node->pd = pd;
dvfs_node->vd = vd;
+
+ /* Optional per-clock regulator-mode scaling table, gated on the
+  * "regu-mode-en" DT property. */
+ val = of_get_property(clk_dev_node, "regu-mode-en", NULL);
+ if (val)
+ dvfs_node->regu_mode_en = be32_to_cpup(val);
+ if (dvfs_node->regu_mode_en)
+ dvfs_node->regu_mode_table = of_get_regu_mode_table(clk_dev_node);
+ else
+ dvfs_node->regu_mode_table = NULL;
+
+ /* Per-clock thermal-limit tables, only parsed when the global
+  * temp-limit feature is enabled. */
+ if (temp_limit_enable) {
+ val = of_get_property(clk_dev_node, "temp-channel", NULL);
+ if (val) {
+ dvfs_node->temp_channel = be32_to_cpup(val);
+ }
+ dvfs_node->nor_temp_limit_table = of_get_temp_limit_table(clk_dev_node, "normal-temp-limit");
+ dvfs_node->per_temp_limit_table = of_get_temp_limit_table(clk_dev_node, "performance-temp-limit");
+ }
+ /* -1 marks "no thermal cap applied yet". */
+ dvfs_node->temp_limit_rate = -1;
dvfs_node->dev.of_node = clk_dev_node;
ret = of_init_opp_table(&dvfs_node->dev);
if (ret) {
}
/*********************************************************************************/
-
/**
 * dump_dbg_map() : Draw all informations of dvfs while debug
 */
mutex_lock(&vd->mutex);
printk( "|\n|- voltage domain:%s\n", vd->name);
printk( "|- current voltage:%d\n", vd->cur_volt);
+ printk( "|- current regu_mode:%s\n", dvfs_regu_mode_to_string(vd->regu_mode));
list_for_each_entry(pd, &vd->pd_list, node) {
- printk( "| |\n| |- power domain:%s, status = %s, current volt = %d\n",
- pd->name, (pd->pd_status == 1) ? "ON" : "OFF", pd->cur_volt);
+ printk( "| |\n| |- power domain:%s, status = %s, current volt = %d, current regu_mode = %s\n",
+ pd->name, (pd->pd_status == 1) ? "ON" : "OFF", pd->cur_volt,
+ dvfs_regu_mode_to_string(pd->regu_mode));
list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
printk( "| | |\n| | |- clock: %s current: rate %d, volt = %d,"
" enable_dvfs = %s\n",
clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt,
clk_dvfs_node->enable_count == 0 ? "DISABLE" : "ENABLE");
+ /* last_set_rate is divided by 1000 here (presumably Hz printed as
+  * kHz -- confirm units); "%lu" matches the unsigned long field. */
- printk( "| | |- clk limit:[%u, %u]; last set rate = %u\n",
+ printk( "| | |- clk limit(%s):[%u, %u]; last set rate = %lu\n",
+ clk_dvfs_node->freq_limit_en ? "enable" : "disable",
clk_dvfs_node->min_rate, clk_dvfs_node->max_rate,
- clk_dvfs_node->last_set_rate);
-
+ clk_dvfs_node->last_set_rate/1000);
for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
printk( "| | | |- freq = %d, volt = %d\n",
clk_dvfs_node->dvfs_table[i].frequency,
clk_dvfs_node->dvfs_table[i].index);
}
+ /* Also dump the per-clock regulator-mode table when present. */
+ printk( "| | |- clock: %s current: rate %d, regu_mode = %s,"
+ " regu_mode_en = %d\n",
+ clk_dvfs_node->name, clk_dvfs_node->set_freq,
+ dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode),
+ clk_dvfs_node->regu_mode_en);
+ if (clk_dvfs_node->regu_mode_table) {
+ for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
+ printk( "| | | |- freq = %d, regu_mode = %s\n",
+ clk_dvfs_node->regu_mode_table[i].frequency/1000,
+ dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode_table[i].index));
+ }
+ }
}
}
mutex_unlock(&vd->mutex);
return s - buf;
}
-/*******************************AVS AREA****************************************/
-/*
- * To use AVS function, you must call avs_init in machine_rk30_board_init(void)(board-rk30-sdk.c)
- * And then call(vdd_log):
- * regulator_set_voltage(dcdc, 1100000, 1100000);
- * avs_init_val_get(1,1100000,"wm8326 init");
- * udelay(600);
- * avs_set_scal_val(AVS_BASE);
- * in wm831x_post_init(board-rk30-sdk-wm8326.c)
- * AVS_BASE can use 172
- */
-
-static struct avs_ctr_st *avs_ctr_data=NULL;
-#define init_avs_times 10
-#define init_avs_st_num 5
-
-struct init_avs_st {
- int is_set;
- u8 paramet[init_avs_times];
- int vol;
- char *s;
-};
-
-static struct init_avs_st init_avs_paramet[init_avs_st_num];
-
-static u8 rk_get_avs_val(void)
-{
-
- if(avs_ctr_data&&avs_ctr_data->avs_get_val)
- {
- return avs_ctr_data->avs_get_val();
- }
- return 0;
-
-}
-
-void avs_init_val_get(int index, int vol, char *s)
-{
- int i;
- if(index >= init_avs_times)
- return;
- init_avs_paramet[index].vol = vol;
- init_avs_paramet[index].s = s;
- init_avs_paramet[index].is_set++;
- printk("DVFS MSG:\tAVS Value(index=%d): ", index);
- for(i = 0; i < init_avs_times; i++) {
- init_avs_paramet[index].paramet[i] = rk_get_avs_val();
- mdelay(1);
- printk("%d ", init_avs_paramet[index].paramet[i]);
- }
- printk("\n");
-}
-
-void avs_board_init(struct avs_ctr_st *data)
-{
-
- avs_ctr_data=data;
-}
-void avs_init(void)
-{
- memset(&init_avs_paramet[0].is_set, 0, sizeof(init_avs_paramet));
- if(avs_ctr_data&&avs_ctr_data->avs_init)
- avs_ctr_data->avs_init();
- avs_init_val_get(0, 1200000,"board_init");
-}
-
-int avs_set_scal_val(u8 avs_base)
-{
- return 0;
-}
-
-/*************************interface to get avs value and dvfs tree*************************/
-#define USE_NORMAL_TIME
-#ifdef USE_NORMAL_TIME
-static struct timer_list avs_timer;
-#else
-static struct hrtimer dvfs_hrtimer;
-#endif
-
-static u32 avs_dyn_start = 0;
-static u32 avs_dyn_data_cnt;
-static u8 *avs_dyn_data = NULL;
-static u32 show_line_cnt = 0;
-static u8 dly_min;
-static u8 dly_max;
-
-#define val_per_line (30)
-#define line_pre_show (30)
-#define avs_dyn_data_num (3*1000*1000)
-
-static u32 print_avs_init(char *buf)
-{
- char *s = buf;
- int i, j;
-
- for(j = 0; j < init_avs_st_num; j++) {
- if(init_avs_paramet[j].vol <= 0)
- continue;
- s += sprintf(s, "%s ,vol=%d,paramet following\n",
- init_avs_paramet[j].s, init_avs_paramet[j].vol);
- for(i = 0; i < init_avs_times; i++) {
- s += sprintf(s, "%d ", init_avs_paramet[j].paramet[i]);
- }
-
- s += sprintf(s, "\n");
- }
- return (s - buf);
-}
-
-static ssize_t avs_init_show(struct kobject *kobj, struct kobj_attribute *attr,
- char *buf)
-{
- return print_avs_init(buf);
-}
-
-static ssize_t avs_init_store(struct kobject *kobj, struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
-
- return n;
-}
-static ssize_t avs_now_show(struct kobject *kobj, struct kobj_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%d\n", rk_get_avs_val());
-}
-
-static ssize_t avs_now_store(struct kobject *kobj, struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- return n;
-}
-static ssize_t avs_dyn_show(struct kobject *kobj, struct kobj_attribute *attr,
- char *buf)
-{
- char *s = buf;
- u32 i;
-
- if(avs_dyn_data == NULL)
- return (s - buf);
-
- if(avs_dyn_start) {
- int start_cnt;
- int end_cnt;
- end_cnt = (avs_dyn_data_cnt ? (avs_dyn_data_cnt - 1) : 0);
- if(end_cnt > (line_pre_show * val_per_line))
- start_cnt = end_cnt - (line_pre_show * val_per_line);
- else
- start_cnt = 0;
-
- dly_min = avs_dyn_data[start_cnt];
- dly_max = avs_dyn_data[start_cnt];
-
- //s += sprintf(s,"data start=%d\n",i);
- for(i = start_cnt; i <= end_cnt;) {
- s += sprintf(s, "%d", avs_dyn_data[i]);
- dly_min = min(dly_min, avs_dyn_data[i]);
- dly_max = max(dly_max, avs_dyn_data[i]);
- i++;
- if(!(i % val_per_line)) {
- s += sprintf(s, "\n");
- } else
- s += sprintf(s, " ");
- }
-
- s += sprintf(s, "\n");
-
- s += sprintf(s, "new data is from=%d to %d\n", start_cnt, end_cnt);
- //s += sprintf(s,"\n max=%d,min=%d,totolcnt=%d,line=%d\n",dly_max,dly_min,avs_dyn_data_cnt,show_line_cnt);
-
-
- } else {
- if(show_line_cnt == 0) {
- dly_min = avs_dyn_data[0];
- dly_max = avs_dyn_data[0];
- }
-
-
- for(i = show_line_cnt * (line_pre_show * val_per_line); i < avs_dyn_data_cnt;) {
- s += sprintf(s, "%d", avs_dyn_data[i]);
- dly_min = min(dly_min, avs_dyn_data[i]);
- dly_max = max(dly_max, avs_dyn_data[i]);
- i++;
- if(!(i % val_per_line)) {
- s += sprintf(s, "\n");
- } else
- s += sprintf(s, " ");
- if(i >= ((show_line_cnt + 1)*line_pre_show * val_per_line))
- break;
- }
-
- s += sprintf(s, "\n");
-
- s += sprintf(s, "max=%d,min=%d,totolcnt=%d,line=%d\n",
- dly_max, dly_min, avs_dyn_data_cnt, show_line_cnt);
- show_line_cnt++;
- if(((show_line_cnt * line_pre_show)*val_per_line) >= avs_dyn_data_cnt) {
-
- show_line_cnt = 0;
-
- s += sprintf(s, "data is over\n");
- }
- }
- return (s - buf);
-}
-
-static ssize_t avs_dyn_store(struct kobject *kobj, struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- const char *pbuf;
-
- if((strncmp(buf, "start", strlen("start")) == 0)) {
- if(avs_dyn_data == NULL)
- avs_dyn_data = kmalloc(avs_dyn_data_num, GFP_KERNEL);
- if(avs_dyn_data == NULL)
- return n;
-
- pbuf = &buf[strlen("start")];
- avs_dyn_data_cnt = 0;
- show_line_cnt = 0;
- if(avs_dyn_data) {
-#ifdef USE_NORMAL_TIME
- mod_timer(&avs_timer, jiffies + msecs_to_jiffies(5));
-#else
- hrtimer_start(&dvfs_hrtimer, ktime_set(0, 5 * 1000 * 1000), HRTIMER_MODE_REL);
-#endif
- avs_dyn_start = 1;
- }
- //sscanf(pbuf, "%d %d", &number, &voltage);
- //DVFS_DBG("---------ldo %d %d\n", number, voltage);
-
- } else if((strncmp(buf, "stop", strlen("stop")) == 0)) {
- pbuf = &buf[strlen("stop")];
- avs_dyn_start = 0;
- show_line_cnt = 0;
- //sscanf(pbuf, "%d %d", &number, &voltage);
- //DVFS_DBG("---------dcdc %d %d\n", number, voltage);
- }
-
-
-
- return n;
-}
-
-static ssize_t dvfs_tree_store(struct kobject *kobj, struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- return n;
-}
-static ssize_t dvfs_tree_show(struct kobject *kobj, struct kobj_attribute *attr,
- char *buf)
-{
- return dump_dbg_map(buf);
-
-}
-
-static void avs_timer_fn(unsigned long data)
-{
- int i;
- for(i = 0; i < 1; i++) {
- if(avs_dyn_data_cnt >= avs_dyn_data_num)
- return;
- avs_dyn_data[avs_dyn_data_cnt] = rk_get_avs_val();
- avs_dyn_data_cnt++;
- }
- if(avs_dyn_start)
- mod_timer(&avs_timer, jiffies + msecs_to_jiffies(10));
-}
-#if 0
-struct hrtimer dvfs_hrtimer;
-static enum hrtimer_restart dvfs_hrtimer_timer_func(struct hrtimer *timer)
-{
- int i;
- for(i = 0; i < 1; i++) {
- if(avs_dyn_data_cnt >= avs_dyn_data_num)
- return HRTIMER_NORESTART;
- avs_dyn_data[avs_dyn_data_cnt] = rk_get_avs_val();
- avs_dyn_data_cnt++;
- }
- if(avs_dyn_start)
- hrtimer_start(timer, ktime_set(0, 1 * 1000 * 1000), HRTIMER_MODE_REL);
-
-}
-#endif
/*********************************************************************************/
static struct kobject *dvfs_kobj;
const char *buf, size_t n);
};
+/* sysfs store handler for /sys/dvfs/dvfs_tree: input is accepted and
+ * ignored (the attribute is effectively read-only). */
+static ssize_t dvfs_tree_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ return n;
+}
+/* sysfs show handler for /sys/dvfs/dvfs_tree: dumps the whole dvfs
+ * vd/pd/clk tree via dump_dbg_map(). */
+static ssize_t dvfs_tree_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return dump_dbg_map(buf);
+}
+
+
static struct dvfs_attribute dvfs_attrs[] = {
/* node_name permision show_func store_func */
//#ifdef CONFIG_RK_CLOCK_PROC
__ATTR(dvfs_tree, S_IRUSR | S_IRGRP | S_IWUSR, dvfs_tree_show, dvfs_tree_store),
- __ATTR(avs_init, S_IRUSR | S_IRGRP | S_IWUSR, avs_init_show, avs_init_store),
- __ATTR(avs_dyn, S_IRUSR | S_IRGRP | S_IWUSR, avs_dyn_show, avs_dyn_store),
- __ATTR(avs_now, S_IRUSR | S_IRGRP | S_IWUSR, avs_now_show, avs_now_store),
//#endif
};
-
static int __init dvfs_init(void)
{
int i, ret = 0;
-#ifdef USE_NORMAL_TIME
- init_timer(&avs_timer);
- //avs_timer.expires = jiffies+msecs_to_jiffies(1);
- avs_timer.function = avs_timer_fn;
- //mod_timer(&avs_timer,jiffies+msecs_to_jiffies(1));
-#else
- hrtimer_init(&dvfs_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- dvfs_hrtimer.function = dvfs_hrtimer_timer_func;
- //hrtimer_start(&dvfs_hrtimer,ktime_set(0, 5*1000*1000),HRTIMER_MODE_REL);
-#endif
dvfs_kobj = kobject_create_and_add("dvfs", NULL);
if (!dvfs_kobj)
}
}
+ /* Start the periodic thermal-throttle worker on CPU0; it caps
+  * clk_core's rate based on the measured temperature. */
+ if (temp_limit_enable) {
+ clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
+ if (!clk_cpu_dvfs_node){
+ return -EINVAL;
+ }
+
+ clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
+ dvfs_wq = alloc_workqueue("dvfs", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
+ /* alloc_workqueue() returns NULL on failure; queuing on a NULL
+  * workqueue would oops. */
+ if (dvfs_wq)
+ queue_delayed_work_on(0, dvfs_wq, &dvfs_temp_limit_work, 0*HZ);
+ }
+
+ /* Hook GPU power-domain / suspend / reboot notifiers only when the
+  * vdd_gpu regulator is actually present on this board. */
+ vdd_gpu_regulator = dvfs_get_regulator("vdd_gpu");
+ if (!IS_ERR_OR_NULL(vdd_gpu_regulator)) {
+ struct clk *clk = clk_get(NULL, "pd_gpu");
+
+ /* clk_get() reports failure via ERR_PTR(), not NULL, so a bare
+  * NULL test would pass an error pointer to the notifier
+  * registration. */
+ if (!IS_ERR_OR_NULL(clk))
+ rk_clk_pd_notifier_register(clk, &clk_pd_gpu_notifier);
+
+ fb_register_client(&early_suspend_notifier);
+ register_reboot_notifier(&vdd_gpu_reboot_notifier);
+ }
+
return ret;
}
-subsys_initcall(dvfs_init);
+/* late_initcall: the regulator and clock providers this depends on must
+ * have probed first. */
+late_initcall(dvfs_init);