#include <linux/of.h>
#include <linux/opp.h>
#include <linux/rockchip/dvfs.h>
-
-static struct workqueue_struct *dvfs_wq;
+#include <linux/rockchip/common.h>
+#include <linux/fb.h>
+#include <linux/reboot.h>
+#include "../../../drivers/clk/rockchip/clk-pd.h"
extern int rockchip_tsadc_get_temp(int chn);
#define MHz (1000 * 1000)
static LIST_HEAD(rk_dvfs_tree);
static DEFINE_MUTEX(rk_dvfs_mutex);
+static struct workqueue_struct *dvfs_wq;
+static struct dvfs_node *clk_cpu_dvfs_node;
+static unsigned int target_temp = 80;
+static int temp_limit_enable = 1;
+
+static int pd_gpu_off, early_suspend;
+static DEFINE_MUTEX(switch_vdd_gpu_mutex);
+static struct regulator *vdd_gpu_regulator;
+
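+/*
+ * Reboot notifier: force vdd_gpu back on before restart, in case the
+ * early-suspend path has switched it off.
+ */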
+static int vdd_gpu_reboot_notifier_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+	int ret;
+
+	DVFS_DBG("%s: enable vdd_gpu\n", __func__);
+	mutex_lock(&switch_vdd_gpu_mutex);
+	if (!regulator_is_enabled(vdd_gpu_regulator)) {
+		ret = regulator_enable(vdd_gpu_regulator);
+		if (ret)
+			DVFS_ERR("%s: enable vdd_gpu failed\n", __func__);
+	}
+	mutex_unlock(&switch_vdd_gpu_mutex);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block vdd_gpu_reboot_notifier = {
+ .notifier_call = vdd_gpu_reboot_notifier_event,
+};
+
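+/*
+ * pd_gpu clock power-domain notifier: track whether the GPU domain is
+ * prepared (pd_gpu_off), and while in early suspend switch vdd_gpu
+ * off/on together with the domain.
+ */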
+static int clk_pd_gpu_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ int ret;
+
+	switch (event) {
+	case RK_CLK_PD_PREPARE:
+		mutex_lock(&switch_vdd_gpu_mutex);
+		pd_gpu_off = 0;
+		if (early_suspend && !regulator_is_enabled(vdd_gpu_regulator)) {
+			ret = regulator_enable(vdd_gpu_regulator);
+			if (ret)
+				DVFS_ERR("%s: enable vdd_gpu failed\n", __func__);
+		}
+		mutex_unlock(&switch_vdd_gpu_mutex);
+		break;
+	case RK_CLK_PD_UNPREPARE:
+		mutex_lock(&switch_vdd_gpu_mutex);
+		pd_gpu_off = 1;
+		if (early_suspend && regulator_is_enabled(vdd_gpu_regulator)) {
+			ret = regulator_disable(vdd_gpu_regulator);
+			if (ret)
+				DVFS_ERR("%s: disable vdd_gpu failed\n", __func__);
+		}
+		mutex_unlock(&switch_vdd_gpu_mutex);
+		break;
+	default:
+		break;
+	}
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block clk_pd_gpu_notifier = {
+ .notifier_call = clk_pd_gpu_notifier_call,
+};
+
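+/*
+ * Framebuffer blank notifier: record early-suspend state and, when the
+ * GPU power domain is already unprepared (pd_gpu_off), gate vdd_gpu off
+ * at FB_BLANK_POWERDOWN and back on at FB_BLANK_UNBLANK.
+ */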
+static int early_suspend_notifier_call(struct notifier_block *self,
+ unsigned long action, void *data)
+{
+ struct fb_event *event = data;
+ int blank_mode = *((int *)event->data);
+ int ret;
+
+	mutex_lock(&switch_vdd_gpu_mutex);
+	if (action == FB_EARLY_EVENT_BLANK) {
+		switch (blank_mode) {
+		case FB_BLANK_UNBLANK:
+			early_suspend = 0;
+			if (pd_gpu_off && !regulator_is_enabled(vdd_gpu_regulator)) {
+				ret = regulator_enable(vdd_gpu_regulator);
+				if (ret)
+					DVFS_ERR("%s: enable vdd_gpu failed\n", __func__);
+			}
+			break;
+		default:
+			break;
+		}
+	} else if (action == FB_EVENT_BLANK) {
+		switch (blank_mode) {
+		case FB_BLANK_POWERDOWN:
+			early_suspend = 1;
+			if (pd_gpu_off && regulator_is_enabled(vdd_gpu_regulator)) {
+				ret = regulator_disable(vdd_gpu_regulator);
+				if (ret)
+					DVFS_ERR("%s: disable vdd_gpu failed\n", __func__);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+	mutex_unlock(&switch_vdd_gpu_mutex);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block early_suspend_notifier = {
+ .notifier_call = early_suspend_notifier_call,
+};
+
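+/*
+ * Regulator-mode values as encoded in the "regu-mode-table" DT property;
+ * dvfs_regu_mode_convert()/..._deconvert() translate between these and
+ * the kernel's REGULATOR_MODE_* constants.
+ */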
+#define DVFS_REGULATOR_MODE_STANDBY 1
+#define DVFS_REGULATOR_MODE_IDLE 2
+#define DVFS_REGULATOR_MODE_NORMAL 3
+#define DVFS_REGULATOR_MODE_FAST 4
+
+static const char *dvfs_regu_mode_to_string(unsigned int mode)
+{
+ switch (mode) {
+ case DVFS_REGULATOR_MODE_FAST:
+ return "FAST";
+ case DVFS_REGULATOR_MODE_NORMAL:
+ return "NORMAL";
+ case DVFS_REGULATOR_MODE_IDLE:
+ return "IDLE";
+ case DVFS_REGULATOR_MODE_STANDBY:
+ return "STANDBY";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static int dvfs_regu_mode_convert(unsigned int mode)
+{
+ switch (mode) {
+ case DVFS_REGULATOR_MODE_FAST:
+ return REGULATOR_MODE_FAST;
+ case DVFS_REGULATOR_MODE_NORMAL:
+ return REGULATOR_MODE_NORMAL;
+ case DVFS_REGULATOR_MODE_IDLE:
+ return REGULATOR_MODE_IDLE;
+ case DVFS_REGULATOR_MODE_STANDBY:
+ return REGULATOR_MODE_STANDBY;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int dvfs_regu_mode_deconvert(unsigned int mode)
+{
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ return DVFS_REGULATOR_MODE_FAST;
+ case REGULATOR_MODE_NORMAL:
+ return DVFS_REGULATOR_MODE_NORMAL;
+ case REGULATOR_MODE_IDLE:
+ return DVFS_REGULATOR_MODE_IDLE;
+ case REGULATOR_MODE_STANDBY:
+ return DVFS_REGULATOR_MODE_STANDBY;
+ default:
+ return -EINVAL;
+ }
+}
+
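+/*
+ * Parse the "regu-mode-table" DT property into a cpufreq-style table of
+ * <frequency_kHz mode> pairs, ordered from highest frequency down to a
+ * terminating 0 entry.  An illustrative (not board-specific) layout:
+ *
+ *	regu-mode-table = <
+ *		600000 4	(at or above 600 MHz: FAST)
+ *		0      3	(otherwise: NORMAL)
+ *	>;
+ */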
+static struct cpufreq_frequency_table *of_get_regu_mode_table(struct device_node *dev_node)
+{
+ struct cpufreq_frequency_table *regu_mode_table = NULL;
+ const struct property *prop;
+ const __be32 *val;
+ int nr, i;
+
+ prop = of_find_property(dev_node, "regu-mode-table", NULL);
+ if (!prop)
+ return NULL;
+ if (!prop->value)
+ return NULL;
+
+	nr = prop->length / sizeof(u32);
+	if (nr == 0 || nr % 2) {
+		pr_err("%s: Invalid regu-mode-table\n", __func__);
+		return NULL;
+	}
+
+	regu_mode_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
+				  (nr / 2 + 1), GFP_KERNEL);
+	if (!regu_mode_table) {
+		pr_err("%s: could not allocate regu_mode_table!\n", __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	val = prop->value;
+
+	for (i = 0; i < nr / 2; i++) {
+		regu_mode_table[i].frequency = be32_to_cpup(val++) * 1000;
+		regu_mode_table[i].index = be32_to_cpup(val++);
+	}
+
+ if (regu_mode_table[i-1].frequency != 0) {
+ pr_err("%s: Last freq of regu_mode_table is not 0!\n", __func__);
+ kfree(regu_mode_table);
+ return NULL;
+ }
+
+ regu_mode_table[i].index = 0;
+ regu_mode_table[i].frequency = CPUFREQ_TABLE_END;
+
+ return regu_mode_table;
+}
+
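+/*
+ * Check every mode in regu_mode_table against the regulator.  The
+ * regulator_is_supported_mode() helper takes the mode by pointer and may
+ * round it; an unsupported table is freed and the error returned.
+ */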
+static int dvfs_regu_mode_table_constrain(struct dvfs_node *clk_dvfs_node)
+{
+ int i, ret;
+ int mode, convert_mode, valid_mode;
+
+ if (!clk_dvfs_node)
+ return -EINVAL;
+
+ if (!clk_dvfs_node->regu_mode_table)
+ return -EINVAL;
+
+ if (!clk_dvfs_node->vd)
+ return -EINVAL;
+
+ if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
+ return -EINVAL;
+
+ for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
+ mode = clk_dvfs_node->regu_mode_table[i].index;
+ convert_mode = dvfs_regu_mode_convert(mode);
+
+ ret = regulator_is_supported_mode(clk_dvfs_node->vd->regulator,
+ &convert_mode);
+ if (ret) {
+ DVFS_ERR("%s: find mode=%d unsupported\n", __func__,
+ mode);
+ kfree(clk_dvfs_node->regu_mode_table);
+ clk_dvfs_node->regu_mode_table = NULL;
+ return ret;
+ }
+
+ valid_mode = dvfs_regu_mode_deconvert(convert_mode);
+ if (valid_mode != mode) {
+ DVFS_ERR("%s: round mode=%d to valid mode=%d!\n",
+ __func__, mode, valid_mode);
+ clk_dvfs_node->regu_mode_table[i].index = valid_mode;
+		}
+	}
+
+ return 0;
+}
+
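+/*
+ * Look up the regulator mode for @rate: the table runs from high to low
+ * frequency, so the first entry whose frequency @rate reaches wins.
+ */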
+static int clk_dvfs_node_get_regu_mode(struct dvfs_node *clk_dvfs_node,
+ unsigned long rate, unsigned int *mode)
+{
+ int i;
+
+ if ((!clk_dvfs_node) || (!clk_dvfs_node->regu_mode_table))
+ return -EINVAL;
+
+ for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
+ if (rate >= clk_dvfs_node->regu_mode_table[i].frequency) {
+ *mode = clk_dvfs_node->regu_mode_table[i].index;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
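+/*
+ * A power domain takes the highest mode requested by any of its
+ * mode-enabled clocks; bail out early if the changed clock already
+ * dominates the domain's current mode.
+ */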
+static int dvfs_pd_get_newmode_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
+{
+ unsigned int mode_max = 0;
+
+	if (clk_dvfs_node->regu_mode_en &&
+	    (clk_dvfs_node->regu_mode >= pd->regu_mode))
+		return clk_dvfs_node->regu_mode;
+
+ list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
+ if (clk_dvfs_node->regu_mode_en)
+ mode_max = max(mode_max, (clk_dvfs_node->regu_mode));
+ }
+
+ return mode_max;
+}
+
+static void dvfs_update_clk_pds_mode(struct dvfs_node *clk_dvfs_node)
+{
+ struct pd_node *pd;
+
+ if (!clk_dvfs_node)
+ return;
+
+ pd = clk_dvfs_node->pd;
+ if (!pd)
+ return;
+
+ pd->regu_mode = dvfs_pd_get_newmode_byclk(pd, clk_dvfs_node);
+}
+
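+/* A voltage domain's mode is the maximum over its power domains. */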
+static int dvfs_vd_get_newmode_bypd(struct vd_node *vd)
+{
+ unsigned int mode_max_vd = 0;
+ struct pd_node *pd;
+
+ if (!vd)
+ return -EINVAL;
+
+ list_for_each_entry(pd, &vd->pd_list, node) {
+ mode_max_vd = max(mode_max_vd, pd->regu_mode);
+ }
+
+ return mode_max_vd;
+}
+
+static int dvfs_vd_get_newmode_byclk(struct dvfs_node *clk_dvfs_node)
+{
+ if (!clk_dvfs_node)
+ return -EINVAL;
+
+ dvfs_update_clk_pds_mode(clk_dvfs_node);
+
+ return dvfs_vd_get_newmode_bypd(clk_dvfs_node->vd);
+}
+
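+/*
+ * Convert a DVFS mode to REGULATOR_MODE_* and program it into the
+ * voltage domain's regulator.
+ */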
+static int dvfs_regu_set_mode(struct vd_node *vd, unsigned int mode)
+{
+ int convert_mode;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(vd)) {
+ DVFS_ERR("%s: vd_node error\n", __func__);
+ return -EINVAL;
+ }
+
+ DVFS_DBG("%s: mode=%d(old=%d)\n", __func__, mode, vd->regu_mode);
+
+ convert_mode = dvfs_regu_mode_convert(mode);
+ if (convert_mode < 0) {
+ DVFS_ERR("%s: mode %d convert error\n", __func__, mode);
+ return convert_mode;
+ }
+
+ if (!IS_ERR_OR_NULL(vd->regulator)) {
+ ret = dvfs_regulator_set_mode(vd->regulator, convert_mode);
+ if (ret < 0) {
+ DVFS_ERR("%s: %s set mode %d (was %d) error!\n", __func__,
+ vd->regulator_name, mode, vd->regu_mode);
+ return -EAGAIN;
+ }
+ } else {
+ DVFS_ERR("%s: invalid regulator\n", __func__);
+ return -EINVAL;
+ }
+
+ vd->regu_mode = mode;
+
+ return 0;
+}
+
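+/*
+ * Re-evaluate the regulator mode for a new clock rate: map the rate to
+ * the clock's mode, then aggregate up through power domain and voltage
+ * domain before programming the regulator.
+ */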
+static int dvfs_regu_mode_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
+{
+ int ret;
+ int mode;
+
+ if (!clk_dvfs_node)
+ return -EINVAL;
+
+ if (!clk_dvfs_node->regu_mode_en)
+ return 0;
+
+ ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, rate, &mode);
+ if (ret) {
+ DVFS_ERR("%s: clk(%s) rate %luhz get mode fail\n",
+ __func__, clk_dvfs_node->name, rate);
+ return ret;
+ }
+ clk_dvfs_node->regu_mode = mode;
+
+ mode = dvfs_vd_get_newmode_byclk(clk_dvfs_node);
+ if (mode < 0)
+ return mode;
+
+ ret = dvfs_regu_set_mode(clk_dvfs_node->vd, mode);
+
+ return ret;
+}
static void dvfs_volt_up_delay(struct vd_node *vd, int new_volt, int old_volt)
{
int i = 0;
if (!clk_dvfs_node)
- return -1;
+ return -EINVAL;
clk_dvfs_node->min_rate = 0;
clk_dvfs_node->max_rate = 0;
//ddr rate = real rate+flags
flags = clk_dvfs_node->dvfs_table[i].frequency%1000;
rate = (clk_dvfs_node->dvfs_table[i].frequency/1000)*1000;
- temp_rate = clk_round_rate(clk_dvfs_node->clk, rate*1000);
+ temp_rate = __clk_round_rate(clk_dvfs_node->clk, rate*1000);
if(temp_rate <= 0){
DVFS_WARNING("%s: clk(%s) rate %d round return %d\n",
__func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
clk_fv->frequency = 0;
clk_fv->index = 0;
//DVFS_DBG("%s get corresponding voltage error! out of bound\n", clk_dvfs_node->name);
- return -1;
+ return -EINVAL;
}
static int dvfs_pd_get_newvolt_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
{
int volt_max = 0;
- if (!pd || !clk_dvfs_node)
- return 0;
-
- if (clk_dvfs_node->set_volt >= pd->cur_volt) {
+ if (clk_dvfs_node->enable_count && (clk_dvfs_node->set_volt >= pd->cur_volt)) {
return clk_dvfs_node->set_volt;
}
list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
- // DVFS_DBG("%s ,pd(%s),dvfs(%s),volt(%u)\n",__func__,pd->name,
- // clk_dvfs_node->name,clk_dvfs_node->set_volt);
- volt_max = max(volt_max, clk_dvfs_node->set_volt);
+ if (clk_dvfs_node->enable_count)
+ volt_max = max(volt_max, clk_dvfs_node->set_volt);
}
return volt_max;
}
pd = clk_dvfs_node->pd;
if (!pd)
- return ;
+ return;
pd->cur_volt = dvfs_pd_get_newvolt_byclk(pd, clk_dvfs_node);
}
{
int volt_max_vd = 0;
struct pd_node *pd;
- //struct depend_list *depend;
if (!vd)
return -EINVAL;
list_for_each_entry(pd, &vd->pd_list, node) {
- // DVFS_DBG("%s pd(%s,%u)\n",__func__,pd->name,pd->cur_volt);
volt_max_vd = max(volt_max_vd, pd->cur_volt);
}
- /* some clks depend on this voltage domain */
-/* if (!list_empty(&vd->req_volt_list)) {
- list_for_each_entry(depend, &vd->req_volt_list, node2vd) {
- volt_max_vd = max(volt_max_vd, depend->req_volt);
- }
- }*/
return volt_max_vd;
}
static int dvfs_vd_get_newvolt_byclk(struct dvfs_node *clk_dvfs_node)
{
if (!clk_dvfs_node)
- return -1;
+ return -EINVAL;
+
dvfs_update_clk_pds_volt(clk_dvfs_node);
return dvfs_vd_get_newvolt_bypd(clk_dvfs_node->vd);
}
+#if 0
static void dvfs_temp_limit_work_func(struct work_struct *work)
{
unsigned long delay = HZ / 10; // 100ms
}
mutex_unlock(&rk_dvfs_mutex);
}
+#endif
+
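+/*
+ * Thermal limit worker, re-armed every 100ms; temperature deltas of one
+ * degree or less are debounced.  Under the PERFORMANCE policy the limit
+ * is read straight from per_temp_limit_table; under NORMAL, the limit is
+ * stepped down while heating past target_temp and stepped back up towards
+ * max_rate while below it, with the step taken from nor_temp_limit_table.
+ */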
+static void dvfs_temp_limit_work_func(struct work_struct *work)
+{
+	int temp = 0, delta_temp = 0;
+	unsigned long delay = HZ / 10;
+	unsigned long arm_rate_step = 0;
+	static int old_temp;
+	int i;
+
+	queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
+
+	temp = rockchip_tsadc_get_temp(1);
+
+	/* debounce: ignore changes of one degree or less */
+	delta_temp = (old_temp > temp) ? (old_temp - temp) : (temp - old_temp);
+ if (delta_temp <= 1)
+ return;
+
+	if (rockchip_pm_get_policy() == ROCKCHIP_PM_POLICY_PERFORMANCE) {
+		if (!clk_cpu_dvfs_node->per_temp_limit_table)
+			return;
+
+		clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
+		for (i = 0; clk_cpu_dvfs_node->per_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+			if (temp > clk_cpu_dvfs_node->per_temp_limit_table[i].index)
+				clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->per_temp_limit_table[i].frequency;
+		}
+		dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
+	} else if (rockchip_pm_get_policy() == ROCKCHIP_PM_POLICY_NORMAL) {
+		if (!clk_cpu_dvfs_node->nor_temp_limit_table)
+			return;
+
+		if (temp > target_temp) {
+			if (temp > old_temp) {
+				delta_temp = temp - target_temp;
+				for (i = 0; clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+					if (delta_temp > clk_cpu_dvfs_node->nor_temp_limit_table[i].index)
+						arm_rate_step = clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency;
+				}
+				if (arm_rate_step && (clk_cpu_dvfs_node->temp_limit_rate > arm_rate_step)) {
+					clk_cpu_dvfs_node->temp_limit_rate -= arm_rate_step;
+					dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
+				}
+			}
+		} else {
+			if (clk_cpu_dvfs_node->temp_limit_rate < clk_cpu_dvfs_node->max_rate) {
+				delta_temp = target_temp - temp;
+				for (i = 0; clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+					if (delta_temp > clk_cpu_dvfs_node->nor_temp_limit_table[i].index)
+						arm_rate_step = clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency;
+				}
+
+				if (arm_rate_step) {
+					clk_cpu_dvfs_node->temp_limit_rate += arm_rate_step;
+					if (clk_cpu_dvfs_node->temp_limit_rate > clk_cpu_dvfs_node->max_rate)
+						clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
+					dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
+				}
+			}
+		}
+	}
+
+ DVFS_DBG("cur temp: %d, temp_limit_core_rate: %lu\n", temp, clk_cpu_dvfs_node->temp_limit_rate);
+
+ old_temp = temp;
+}
static DECLARE_DELAYED_WORK(dvfs_temp_limit_work, dvfs_temp_limit_work_func);
}
if (clk_dvfs_node->last_set_rate == 0)
- rate = clk_get_rate(clk_dvfs_node->clk);
+ rate = __clk_get_rate(clk_dvfs_node->clk);
else
rate = clk_dvfs_node->last_set_rate;
ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
}
EXPORT_SYMBOL(dvfs_clk_disable_limit);
+void dvfs_disable_temp_limit(void)
+{
+	temp_limit_enable = 0;
+	cancel_delayed_work_sync(&dvfs_temp_limit_work);
+}
+
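+/*
+ * Read back a clock's frequency limits under the voltage-domain mutex;
+ * returns the freq_limit_en flag, or -EINVAL on a NULL node.
+ */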
+int dvfs_clk_get_limit(struct dvfs_node *clk_dvfs_node, unsigned int *min_rate, unsigned int *max_rate)
+{
+ int freq_limit_en;
+
+ if (!clk_dvfs_node)
+ return -EINVAL;
+
+ mutex_lock(&clk_dvfs_node->vd->mutex);
+
+ *min_rate = clk_dvfs_node->min_rate;
+ *max_rate = clk_dvfs_node->max_rate;
+ freq_limit_en = clk_dvfs_node->freq_limit_en;
+
+ mutex_unlock(&clk_dvfs_node->vd->mutex);
+
+ return freq_limit_en;
+}
+EXPORT_SYMBOL(dvfs_clk_get_limit);
int dvfs_clk_register_set_rate_callback(struct dvfs_node *clk_dvfs_node, clk_set_rate_callback clk_dvfs_target)
{
int clk_enable_dvfs(struct dvfs_node *clk_dvfs_node)
{
struct cpufreq_frequency_table clk_fv;
+ int volt_new;
+ unsigned int mode;
+ int ret;
+
if (!clk_dvfs_node)
return -EINVAL;
return 0;
}
}
-
+ clk_dvfs_node->enable_count++;
clk_dvfs_node->set_volt = clk_fv.index;
- dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
+ volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
DVFS_DBG("%s: %s, freq %u(ref vol %u)\n",
__func__, clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt);
#if 0
clk_notifier_register(clk, clk_dvfs_node->dvfs_nb);
}
#endif
- if(clk_dvfs_node->vd->cur_volt < clk_dvfs_node->set_volt) {
- int ret;
- ret = dvfs_regulator_set_voltage_readback(clk_dvfs_node->vd->regulator, clk_dvfs_node->set_volt, clk_dvfs_node->set_volt);
+ if(clk_dvfs_node->vd->cur_volt != volt_new) {
+		ret = dvfs_regulator_set_voltage_readback(clk_dvfs_node->vd->regulator, volt_new, volt_new);
+		dvfs_volt_up_delay(clk_dvfs_node->vd, volt_new, clk_dvfs_node->vd->cur_volt);
if (ret < 0) {
clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
clk_dvfs_node->enable_count = 0;
mutex_unlock(&clk_dvfs_node->vd->mutex);
return -EAGAIN;
}
- clk_dvfs_node->vd->cur_volt = clk_dvfs_node->set_volt;
+ clk_dvfs_node->vd->cur_volt = volt_new;
clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
}
- clk_dvfs_node->enable_count++;
} else {
DVFS_DBG("%s: dvfs already enable clk enable = %d!\n",
__func__, clk_dvfs_node->enable_count);
clk_dvfs_node->enable_count++;
}
+ if (clk_dvfs_node->regu_mode_en) {
+ ret = dvfs_regu_mode_table_constrain(clk_dvfs_node);
+ if (ret) {
+ DVFS_ERR("%s: clk(%s) regu_mode_table is unvalid, set regu_mode_en=0!\n",
+ __func__, clk_dvfs_node->name);
+ clk_dvfs_node->regu_mode_en = 0;
+ mutex_unlock(&clk_dvfs_node->vd->mutex);
+ return ret;
+ }
+
+		ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node,
+						  clk_dvfs_node->set_freq * 1000, &mode);
+		if (ret < 0) {
+			DVFS_ERR("%s: clk(%s) rate %dkHz get regu_mode fail\n",
+				 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
+			mutex_unlock(&clk_dvfs_node->vd->mutex);
+			return ret;
+		}
+		clk_dvfs_node->regu_mode = mode;
+
+ dvfs_update_clk_pds_mode(clk_dvfs_node);
+ }
+
mutex_unlock(&clk_dvfs_node->vd->mutex);
return 0;
int clk_disable_dvfs(struct dvfs_node *clk_dvfs_node)
{
+ int volt_new;
+
if (!clk_dvfs_node)
return -EINVAL;
if (!clk_dvfs_node->enable_count) {
DVFS_WARNING("%s:clk(%s) is already closed!\n",
__func__, __clk_get_name(clk_dvfs_node->clk));
+ mutex_unlock(&clk_dvfs_node->vd->mutex);
return 0;
} else {
clk_dvfs_node->enable_count--;
if (0 == clk_dvfs_node->enable_count) {
DVFS_DBG("%s:dvfs clk(%s) disable dvfs ok!\n",
__func__, __clk_get_name(clk_dvfs_node->clk));
+ volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
+ dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
+
#if 0
clk_notifier_unregister(clk, clk_dvfs_node->dvfs_nb);
DVFS_DBG("clk unregister nb!\n");
static unsigned long dvfs_get_limit_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
{
- unsigned long limit_rate, temp_limit_rate;
- int temp, i;
+ unsigned long limit_rate;
limit_rate = rate;
- temp_limit_rate = -1;
if (clk_dvfs_node->freq_limit_en) {
//dvfs table limit
if (rate < clk_dvfs_node->min_rate) {
} else if (rate > clk_dvfs_node->max_rate) {
limit_rate = clk_dvfs_node->max_rate;
}
-
- //temp limt
- if (clk_dvfs_node->temp_limit_table) {
- temp = clk_dvfs_node->temp;
- for (i=0; clk_dvfs_node->temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
- if (temp > clk_dvfs_node->temp_limit_table[i].index) {
- temp_limit_rate = clk_dvfs_node->temp_limit_table[i].frequency;
- }
- }
-
- if (limit_rate > temp_limit_rate) {
- DVFS_DBG("%s: temp(%d) limit clk(%s) rate %ld to %ld\n",
- __func__, temp, clk_dvfs_node->name, limit_rate, temp_limit_rate);
- limit_rate = temp_limit_rate;
+ if (temp_limit_enable) {
+ if (limit_rate > clk_dvfs_node->temp_limit_rate) {
+ limit_rate = clk_dvfs_node->temp_limit_rate;
}
}
}
}
rate = dvfs_get_limit_rate(clk_dvfs_node, rate);
- new_rate = clk_round_rate(clk, rate);
- old_rate = clk_get_rate(clk);
+ new_rate = __clk_round_rate(clk, rate);
+ old_rate = __clk_get_rate(clk);
if (new_rate == old_rate)
return 0;
DVFS_DBG("%s:%s new rate=%lu(was=%lu),new volt=%lu,(was=%d)\n",
__func__, clk_dvfs_node->name, new_rate, old_rate, volt_new,clk_dvfs_node->vd->cur_volt);
+
/* if up the rate */
if (new_rate > old_rate) {
+ ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
+ if (ret)
+ DVFS_ERR("%s: dvfs clk(%s) rate %luhz set mode err\n",
+ __func__, clk_dvfs_node->name, new_rate);
+
ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
if (ret)
goto fail_roll_back;
/* scale rate */
if (clk_dvfs_node->clk_dvfs_target) {
- ret = clk_dvfs_node->clk_dvfs_target(clk, new_rate);
+ ret = clk_dvfs_node->clk_dvfs_target(clk, rate);
} else {
- ret = clk_set_rate(clk, new_rate);
+ ret = clk_set_rate(clk, rate);
}
if (ret) {
clk_dvfs_node->set_freq = new_rate / 1000;
DVFS_DBG("%s:dvfs clk(%s) set rate %lu ok\n",
- __func__, clk_dvfs_node->name, clk_get_rate(clk));
+ __func__, clk_dvfs_node->name, __clk_get_rate(clk));
/* if down the rate */
if (new_rate < old_rate) {
ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
if (ret)
goto out;
+
+ ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
+ if (ret)
+ DVFS_ERR("%s:dvfs clk(%s) rate %luhz set mode err\n",
+ __func__, clk_dvfs_node->name, new_rate);
}
return 0;
return ret;
}
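+
+/* Thin wrapper around __clk_round_rate() for DVFS users. */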
+unsigned long dvfs_clk_round_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
+{
+ return __clk_round_rate(clk_dvfs_node->clk, rate);
+}
+EXPORT_SYMBOL_GPL(dvfs_clk_round_rate);
+
unsigned long dvfs_clk_get_rate(struct dvfs_node *clk_dvfs_node)
{
- return clk_get_rate(clk_dvfs_node->clk);
+ return __clk_get_rate(clk_dvfs_node->clk);
}
EXPORT_SYMBOL_GPL(dvfs_clk_get_rate);
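+
+/* Snapshot last_set_rate under the voltage-domain mutex. */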
+unsigned long dvfs_clk_get_last_set_rate(struct dvfs_node *clk_dvfs_node)
+{
+ unsigned long last_set_rate;
+
+ mutex_lock(&clk_dvfs_node->vd->mutex);
+ last_set_rate = clk_dvfs_node->last_set_rate;
+ mutex_unlock(&clk_dvfs_node->vd->mutex);
+
+ return last_set_rate;
+}
+EXPORT_SYMBOL_GPL(dvfs_clk_get_last_set_rate);
+
+
int dvfs_clk_enable(struct dvfs_node *clk_dvfs_node)
{
return clk_enable(clk_dvfs_node->clk);
return 0;
}
-static struct cpufreq_frequency_table *of_get_temp_limit_table(struct device_node *dev_node)
+static struct cpufreq_frequency_table *of_get_temp_limit_table(struct device_node *dev_node, const char *propname)
{
struct cpufreq_frequency_table *temp_limt_table = NULL;
const struct property *prop;
const __be32 *val;
int nr, i;
- prop = of_find_property(dev_node, "temp-limit", NULL);
+ prop = of_find_property(dev_node, propname, NULL);
if (!prop)
return NULL;
if (!prop->value)
return PTR_ERR(dvfs_dev_node);
}
+ val = of_get_property(dvfs_dev_node, "target-temp", NULL);
+ if (val) {
+ target_temp = be32_to_cpup(val);
+ }
+
+ val = of_get_property(dvfs_dev_node, "temp-limit-enable", NULL);
+ if (val) {
+ temp_limit_enable = be32_to_cpup(val);
+ }
+
for_each_available_child_of_node(dvfs_dev_node, vd_dev_node) {
vd = kzalloc(sizeof(struct vd_node), GFP_KERNEL);
if (!vd)
dvfs_node->name = clk_dev_node->name;
dvfs_node->pd = pd;
dvfs_node->vd = vd;
- val = of_get_property(clk_dev_node, "temp-channel", NULL);
- if (val) {
- dvfs_node->temp_channel = be32_to_cpup(val);
- dvfs_node->temp_limit_table = of_get_temp_limit_table(clk_dev_node);
- }
+ val = of_get_property(clk_dev_node, "regu-mode-en", NULL);
+ if (val)
+ dvfs_node->regu_mode_en = be32_to_cpup(val);
+ if (dvfs_node->regu_mode_en)
+ dvfs_node->regu_mode_table = of_get_regu_mode_table(clk_dev_node);
+ else
+ dvfs_node->regu_mode_table = NULL;
+
+ if (temp_limit_enable) {
+ val = of_get_property(clk_dev_node, "temp-channel", NULL);
+			if (val)
+				dvfs_node->temp_channel = be32_to_cpup(val);
+ dvfs_node->nor_temp_limit_table = of_get_temp_limit_table(clk_dev_node, "normal-temp-limit");
+ dvfs_node->per_temp_limit_table = of_get_temp_limit_table(clk_dev_node, "performance-temp-limit");
+ }
+ dvfs_node->temp_limit_rate = -1;
dvfs_node->dev.of_node = clk_dev_node;
ret = of_init_opp_table(&dvfs_node->dev);
if (ret) {
mutex_lock(&vd->mutex);
printk( "|\n|- voltage domain:%s\n", vd->name);
printk( "|- current voltage:%d\n", vd->cur_volt);
+ printk( "|- current regu_mode:%s\n", dvfs_regu_mode_to_string(vd->regu_mode));
list_for_each_entry(pd, &vd->pd_list, node) {
- printk( "| |\n| |- power domain:%s, status = %s, current volt = %d\n",
- pd->name, (pd->pd_status == 1) ? "ON" : "OFF", pd->cur_volt);
+ printk( "| |\n| |- power domain:%s, status = %s, current volt = %d, current regu_mode = %s\n",
+ pd->name, (pd->pd_status == 1) ? "ON" : "OFF", pd->cur_volt,
+ dvfs_regu_mode_to_string(pd->regu_mode));
list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
printk( "| | |\n| | |- clock: %s current: rate %d, volt = %d,"
" enable_dvfs = %s\n",
clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt,
clk_dvfs_node->enable_count == 0 ? "DISABLE" : "ENABLE");
- printk( "| | |- clk limit(%s):[%u, %u]; last set rate = %u\n",
+ printk( "| | |- clk limit(%s):[%u, %u]; last set rate = %lu\n",
clk_dvfs_node->freq_limit_en ? "enable" : "disable",
clk_dvfs_node->min_rate, clk_dvfs_node->max_rate,
clk_dvfs_node->last_set_rate/1000);
-
for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
printk( "| | | |- freq = %d, volt = %d\n",
clk_dvfs_node->dvfs_table[i].frequency,
clk_dvfs_node->dvfs_table[i].index);
}
+ printk( "| | |- clock: %s current: rate %d, regu_mode = %s,"
+ " regu_mode_en = %d\n",
+ clk_dvfs_node->name, clk_dvfs_node->set_freq,
+ dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode),
+ clk_dvfs_node->regu_mode_en);
+ if (clk_dvfs_node->regu_mode_table) {
+ for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
+ printk( "| | | |- freq = %d, regu_mode = %s\n",
+ clk_dvfs_node->regu_mode_table[i].frequency/1000,
+ dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode_table[i].index));
+ }
+ }
}
}
mutex_unlock(&vd->mutex);
}
}
- dvfs_wq = alloc_workqueue("dvfs", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
- queue_delayed_work_on(0, dvfs_wq, &dvfs_temp_limit_work, 0*HZ);
+ if (temp_limit_enable) {
+ clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
+		if (!clk_cpu_dvfs_node)
+			return -EINVAL;
+
+ clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
+ dvfs_wq = alloc_workqueue("dvfs", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
+ queue_delayed_work_on(0, dvfs_wq, &dvfs_temp_limit_work, 0*HZ);
+ }
+
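+	/*
+	 * Hook up GPU regulator management (power-domain, fb and reboot
+	 * notifiers) only when the board actually provides "vdd_gpu".
+	 */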
+ vdd_gpu_regulator = dvfs_get_regulator("vdd_gpu");
+ if (!IS_ERR_OR_NULL(vdd_gpu_regulator)) {
+		struct clk *clk = clk_get(NULL, "pd_gpu");
+
+		if (!IS_ERR_OR_NULL(clk))
+			rk_clk_pd_notifier_register(clk, &clk_pd_gpu_notifier);
+
+ fb_register_client(&early_suspend_notifier);
+ register_reboot_notifier(&vdd_gpu_reboot_notifier);
+ }
return ret;
}