1 #define pr_fmt(fmt) "ddrfreq: " fmt
5 #include <linux/cpufreq.h>
6 #include <linux/delay.h>
7 #include <linux/freezer.h>
9 #include <linux/kthread.h>
10 #include <linux/miscdevice.h>
11 #include <linux/module.h>
12 #include <linux/reboot.h>
13 #include <linux/slab.h>
14 #include <linux/uaccess.h>
15 #include <linux/sched/rt.h>
18 #include <linux/input.h>
19 #include <asm/cacheflush.h>
20 #include <asm/tlbflush.h>
21 #include <linux/vmalloc.h>
22 #include <linux/rockchip/common.h>
23 #include <linux/rockchip/dvfs.h>
24 #include <dt-bindings/clock/ddr.h>
26 #include <linux/rockchip/grf.h>
27 #include <linux/rockchip/iomap.h>
28 static struct dvfs_node *clk_cpu_dvfs_node = NULL;
29 static int ddr_boost = 0;
34 DEBUG_VIDEO_STATE = 1U << 1,
35 DEBUG_SUSPEND = 1U << 2,
36 DEBUG_VERBOSE = 1U << 3,
38 static int debug_mask = DEBUG_DDR;
40 module_param(debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
41 #define dprintk(mask, fmt, ...) do { if (mask & debug_mask) pr_info(fmt, ##__VA_ARGS__); } while (0)
43 #define MHZ (1000*1000)
47 struct dvfs_node *clk_dvfs_node;
48 unsigned long normal_rate;
49 unsigned long video_rate;
50 unsigned long dualview_rate;
51 unsigned long idle_rate;
52 unsigned long suspend_rate;
53 unsigned long reboot_rate;
55 bool auto_self_refresh;
57 unsigned long sys_status;
58 struct task_struct *task;
59 wait_queue_head_t wait;
61 static struct ddr ddr;
63 module_param_named(sys_status, ddr.sys_status, ulong, S_IRUGO);
64 module_param_named(auto_self_refresh, ddr.auto_self_refresh, bool, S_IRUGO);
65 module_param_named(mode, ddr.mode, charp, S_IRUGO);
/*
 * Set one or more SYS_STATUS_* bits in the global ddr.sys_status word.
 * NOTE(review): braces and a presumed wake_up(&ddr.wait) are not visible in
 * this extract — ddrfreq_task waits on sys_status changes, so a wake-up is
 * expected here; confirm against the full source.
 */
67 static noinline void ddrfreq_set_sys_status(int status)
69 ddr.sys_status |= status;
/*
 * Clear one or more SYS_STATUS_* bits in ddr.sys_status.
 * NOTE(review): as with the setter, the wake-up of ddr.wait is presumably in
 * the lines elided from this extract — verify.
 */
73 static noinline void ddrfreq_clear_sys_status(int status)
75 ddr.sys_status &= ~status;
/*
 * Apply a DDR operating mode: an auto-self-refresh on/off setting plus a
 * target DDR clock rate. @name is used only for debug logging.
 * On a successful rate change, *target_rate is written back with the rate
 * actually achieved (dvfs may round).
 */
79 static void ddrfreq_mode(bool auto_self_refresh, unsigned long *target_rate, char *name)
82 if (auto_self_refresh != ddr.auto_self_refresh) {
83 ddr_set_auto_self_refresh(auto_self_refresh);
84 ddr.auto_self_refresh = auto_self_refresh;
85 dprintk(DEBUG_DDR, "change auto self refresh to %d when %s\n", auto_self_refresh, name);
/* Only touch the clock if the requested rate differs from the current one. */
87 if (*target_rate != dvfs_clk_get_rate(ddr.clk_dvfs_node)) {
/* Pin the CPU to at least 600 MHz while the DDR retrains, then drop the
 * floor again (limit 0) once the change is done. */
88 dvfs_clk_enable_limit(clk_cpu_dvfs_node, 600000000, -1);
89 if (dvfs_clk_set_rate(ddr.clk_dvfs_node, *target_rate) == 0) {
90 *target_rate = dvfs_clk_get_rate(ddr.clk_dvfs_node);
91 dprintk(DEBUG_DDR, "change freq to %lu MHz when %s\n", *target_rate / MHZ, name);
93 dvfs_clk_enable_limit(clk_cpu_dvfs_node, 0, -1);
97 static void ddr_freq_input_event(struct input_handle *handle, unsigned int type,
98 unsigned int code, int value)
/*
 * input_handler .connect callback: allocate and register an input_handle so
 * touch/key events can trigger a DDR frequency boost.
 * NOTE(review): the NULL check after kzalloc, the handle->dev assignment,
 * the error returns, and the kfree on the failure path are not visible in
 * this extract — confirm the full error handling against the original file.
 */
104 static int ddr_freq_input_connect(struct input_handler *handler,
105 struct input_dev *dev, const struct input_device_id *id)
107 struct input_handle *handle;
110 handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
115 handle->handler = handler;
116 handle->name = "ddr_freq";
118 error = input_register_handle(handle);
122 error = input_open_device(handle);
/* Unwind the registration when opening the device fails. */
128 input_unregister_handle(handle);
/*
 * input_handler .disconnect callback: close and unregister the handle.
 * NOTE(review): the matching kfree(handle) for the kzalloc in connect() is
 * not visible in this extract — if it is genuinely absent, this leaks one
 * input_handle per disconnect; verify.
 */
134 static void ddr_freq_input_disconnect(struct input_handle *handle)
136 input_close_device(handle);
137 input_unregister_handle(handle);
/*
 * Devices whose events feed the DDR boost logic:
 *  1) multitouch screens (ABS_MT_POSITION_X/Y),
 *  2) single-touch screens (BTN_TOUCH + ABS_X/Y),
 *  3) anything with keys (EV_KEY).
 */
141 static const struct input_device_id ddr_freq_ids[] = {
144 .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
145 INPUT_DEVICE_ID_MATCH_ABSBIT,
146 .evbit = { BIT_MASK(EV_ABS) },
147 .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
148 BIT_MASK(ABS_MT_POSITION_X) |
149 BIT_MASK(ABS_MT_POSITION_Y) },
153 .flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
154 INPUT_DEVICE_ID_MATCH_ABSBIT,
155 .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
156 .absbit = { [BIT_WORD(ABS_X)] =
157 BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
161 .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
162 .evbit = { BIT_MASK(EV_KEY) },
/* Handler wiring for the input-driven DDR boost (see ddr_freq_ids above). */
167 static struct input_handler ddr_freq_input_handler = {
168 .event = ddr_freq_input_event,
169 .connect = ddr_freq_input_connect,
170 .disconnect = ddr_freq_input_disconnect,
172 .id_table = ddr_freq_ids,
175 enum ddr_bandwidth_id{
/*
 * Tuning knobs for the DDR bandwidth monitor (ddrbw_work_fn). The *_MS
 * values are milliseconds; the unsuffixed macros convert them to work-queue
 * ticks by dividing by ddrbw_work_delay_ms (20 ms per tick by default).
 */
186 #define DDR_BOOST_HOLD_MS 300
187 #define HIGH_LOAD_HOLD_MS 300
188 #define HIGH_LOAD_DELAY_MS 0
189 #define LOW_LOAD_DELAY_MS 200
190 #define DDR_BOOST_HOLD (DDR_BOOST_HOLD_MS/ddrbw_work_delay_ms)
/* FIX: previously divided DDR_BOOST_HOLD_MS; HIGH_LOAD_HOLD must scale its
 * own HIGH_LOAD_HOLD_MS. Behavior is unchanged today only because both
 * constants happen to be 300 — the old form was a latent bug. */
191 #define HIGH_LOAD_HOLD (HIGH_LOAD_HOLD_MS/ddrbw_work_delay_ms)
192 #define HIGH_LOAD_DELAY (HIGH_LOAD_DELAY_MS/ddrbw_work_delay_ms)
193 #define LOW_LOAD_DELAY (LOW_LOAD_DELAY_MS/ddrbw_work_delay_ms)
/* Target DDR clock rates (Hz) for each monitor state. */
194 #define DDR_RATE_NORMAL 240000000
195 #define DDR_RATE_BOOST 324000000
196 #define DDR_RATE_HIGH_LOAD 533000000
197 #define DDR_RATE_1080P 240000000
198 #define DDR_RATE_4K 300000000
/* Bus-load thresholds in percent. "HGIH" is a historic typo; the name is
 * referenced elsewhere in this file, so it is deliberately kept. */
199 #define HIGH_LOAD_NORMAL 70
200 #define HGIH_LOAD_VIDEO 50
202 static struct workqueue_struct *ddr_freq_wq;
203 static u32 high_load = HIGH_LOAD_NORMAL;
204 static u32 ddrbw_work_delay_ms = 20;
205 static u32 ddr_rate_normal = DDR_RATE_NORMAL;
206 static u32 ddr_rate_boost = DDR_RATE_BOOST;
207 static u32 ddr_rate_high_load = DDR_RATE_HIGH_LOAD;
/*
 * DDR hardware-profiling control via GRF registers (RK3288).
 * NOTE(review): ddr_monitor_start() reads PMU offset 0x9c bits [15:13] and
 * writes a different enable mask when the field equals 3 — presumably a
 * channel-configuration check; confirm against the RK3288 TRM.
 */
210 //#define ddr_monitor_start() grf_writel(0xc000c000,RK3288_GRF_SOC_CON4)
211 #define ddr_monitor_start() grf_writel((((readl_relaxed(RK_PMU_VIRT + 0x9c)>>13)&7)==3)?0xc000c000:0xe000e000,RK3288_GRF_SOC_CON4)
212 #define ddr_monitor_stop() grf_writel(0xc0000000,RK3288_GRF_SOC_CON4)
/* Raw GRF accessors; dsb() orders the write before subsequent accesses. */
214 #define grf_readl(offset) readl_relaxed(RK_GRF_VIRT + offset)
215 #define grf_writel(v, offset) do { writel_relaxed(v, RK_GRF_VIRT + offset); dsb(); } while (0)
/*
 * Read the GRF bandwidth counters for both DDR channels and compute a
 * utilization percentage per channel: (wr + rd) * 4 * 100 / time.
 * Results are returned through *ch0_eff / *ch1_eff.
 * NOTE(review): the *4 scale factor presumably converts transaction counts
 * to bus cycles; the disabled test copy of this function below uses *8 —
 * confirm which factor matches the monitor configuration.
 */
218 void ddr_bandwidth_get(u32 *ch0_eff, u32 *ch1_eff)
220 u32 ddr_bw_val[2][ddrbw_id_end];
/* Counters live at consecutive GRF status registers: +4 per counter,
 * +16 per channel. */
224 for(j = 0; j < 2; j++) {
225 for(i = 0; i < ddrbw_eff; i++ ){
226 ddr_bw_val[j][i] = grf_readl(RK3288_GRF_SOC_STATUS11+i*4+j*16);
230 temp64 = ((u64)ddr_bw_val[0][0]+ddr_bw_val[0][1])*4*100;
231 do_div(temp64, ddr_bw_val[0][ddrbw_time_num]);
232 ddr_bw_val[0][ddrbw_eff] = temp64;
235 temp64 = ((u64)ddr_bw_val[1][0]+ddr_bw_val[1][1])*4*100;
236 do_div(temp64, ddr_bw_val[1][ddrbw_time_num]);
237 ddr_bw_val[1][ddrbw_eff] = temp64;
/*
 * Periodic bandwidth-monitor work. State machine over three rates:
 *   boost (input event) > high-load (bus busy) > normal (idle).
 * The static hold/delay counters are in units of work ticks
 * (ddrbw_work_delay_ms each) and debounce transitions between states.
 */
242 static void ddrbw_work_fn(struct work_struct *work)
245 u32 ch0_eff, ch1_eff;
246 static u32 ddr_boost_hold=0, high_load_hold=0;
247 static u32 high_load_delay = 0, low_load_delay = 0;
250 ddr_bandwidth_get(&ch0_eff, &ch1_eff);
/* Input-event boost: only when not already held at a high-load rate and
 * not within the low-load cool-down window. */
254 //dvfs_clk_set_rate(ddr.clk_dvfs_node, DDR_BOOST_RATE);
255 if (!high_load_hold && !low_load_delay) {
256 rate = ddr_rate_boost;
257 ddrfreq_mode(false, &rate, "boost");
258 ddr_boost_hold = DDR_BOOST_HOLD;
/* High bus utilization on either channel promotes to the high-load rate
 * (after HIGH_LOAD_DELAY ticks of sustained load). */
260 } else if(!ddr_boost_hold && ((ch0_eff>high_load)||(ch1_eff>high_load))){
261 low_load_delay = LOW_LOAD_DELAY;
262 if (!high_load_delay) {
263 //dvfs_clk_set_rate(ddr.clk_dvfs_node, HIGH_LOAD_RATE);
264 rate = ddr_rate_high_load;
265 ddrfreq_mode(false, &rate, "high load");
266 high_load_hold = HIGH_LOAD_HOLD;
/* Otherwise decay the hold counters and fall back to the normal rate
 * once the low-load delay has expired. */
271 if (ddr_boost_hold) {
273 } else if (high_load_hold) {
276 high_load_delay = HIGH_LOAD_DELAY;
277 //dvfs_clk_set_rate(ddr.clk_dvfs_node, DDR_NORMAL_RATE);
278 if (!low_load_delay) {
279 rate = ddr_rate_normal;
280 ddrfreq_mode(false, &rate, "normal");
/* Self-rearm on CPU0 after ddrbw_work_delay_ms. */
289 queue_delayed_work_on(0, ddr_freq_wq, to_delayed_work(work), HZ*ddrbw_work_delay_ms/1000);
292 static DECLARE_DELAYED_WORK(ddrbw_work, ddrbw_work_fn);
/*
 * Resolve the current system status word into a DDR mode, in strict
 * priority order: reboot > suspend > dual-view > video > idle > auto
 * monitor / fixed normal rate. Called from ddrfreq_task whenever
 * ddr.sys_status changes.
 */
294 static noinline void ddrfreq_work(unsigned long sys_status)
296 static struct clk *cpu = NULL;
297 static struct clk *gpu = NULL;
298 unsigned long s = sys_status;
/* Lazily resolved once; clk_get result is cached in the statics. */
301 cpu = clk_get(NULL, "cpu");
303 gpu = clk_get(NULL, "gpu");
305 dprintk(DEBUG_VERBOSE, "sys_status %02lx\n", sys_status);
/* Stop the auto bandwidth monitor before applying a fixed-rate mode;
 * it is re-queued below only when no fixed mode matched. */
308 cancel_delayed_work_sync(&ddrbw_work);
310 if (ddr.reboot_rate && (s & SYS_STATUS_REBOOT)) {
311 ddrfreq_mode(false, &ddr.reboot_rate, "shutdown/reboot");
312 } else if (ddr.suspend_rate && (s & SYS_STATUS_SUSPEND)) {
/* Suspend is the only mode that enables auto self refresh. */
313 ddrfreq_mode(true, &ddr.suspend_rate, "suspend");
314 } else if (ddr.dualview_rate &&
315 (s & SYS_STATUS_LCDC0) && (s & SYS_STATUS_LCDC1)) {
316 ddrfreq_mode(false, &ddr.dualview_rate, "dual-view");
317 } else if (ddr.video_rate &&
318 ((s & SYS_STATUS_VIDEO_720P)||(s & SYS_STATUS_VIDEO_1080P))) {
319 ddrfreq_mode(false, &ddr.video_rate, "video");
/* Idle rate only when no bus masters are active and CPU/GPU clocks are
 * low enough that dropping DDR will not bottleneck them. */
320 } else if (ddr.idle_rate
321 && !(s & SYS_STATUS_GPU)
322 && !(s & SYS_STATUS_RGA)
323 && !(s & SYS_STATUS_CIF0)
324 && !(s & SYS_STATUS_CIF1)
325 && (clk_get_rate(cpu) < 816 * MHZ)
326 && (clk_get_rate(gpu) <= 200 * MHZ)
328 ddrfreq_mode(false, &ddr.idle_rate, "idle");
/* No special state: either restart the auto monitor or pin normal rate. */
331 queue_delayed_work_on(0, ddr_freq_wq, &ddrbw_work, 0);
333 ddrfreq_mode(false, &ddr.normal_rate, "normal");
/*
 * Dedicated RT kthread: apply the current status via ddrfreq_work, then
 * sleep (freezable) until sys_status changes or the thread is stopped.
 * The snapshot in 'status' makes the wait condition race-free: a change
 * between ddrfreq_work and wait_event wakes immediately.
 */
337 static int ddrfreq_task(void *data)
342 unsigned long status = ddr.sys_status;
343 ddrfreq_work(status);
344 wait_event_freezable(ddr.wait, (status != ddr.sys_status) || kthread_should_stop());
345 } while (!kthread_should_stop());
/*
 * /dev/video_state release: closing the node always drops any video
 * status bits, so a crashed player cannot leave DDR pinned at video rate.
 */
350 static int video_state_release(struct inode *inode, struct file *file)
352 dprintk(DEBUG_VIDEO_STATE, "video_state release\n");
353 ddrfreq_clear_sys_status(SYS_STATUS_VIDEO);
357 #define VIDEO_LOW_RESOLUTION (1080*720)
358 static ssize_t video_state_write(struct file *file, const char __user *buffer,
359 size_t count, loff_t *ppos)
364 char *buf = vzalloc(count);
365 uint32_t v_width=0,v_height=0,v_sync=0;
376 if (copy_from_user(cookie_pot, buffer, count)) {
381 dprintk(DEBUG_VIDEO_STATE, "video_state write %s,len %d\n", cookie_pot,count);
384 if( (count>=3) && (cookie_pot[2]=='w') )
386 strsep(&cookie_pot,",");
387 strsep(&cookie_pot,"=");
388 p=strsep(&cookie_pot,",");
389 v_width = simple_strtol(p,NULL,10);
390 strsep(&cookie_pot,"=");
391 p=strsep(&cookie_pot,",");
392 v_height= simple_strtol(p,NULL,10);
393 strsep(&cookie_pot,"=");
394 p=strsep(&cookie_pot,",");
395 v_sync= simple_strtol(p,NULL,10);
396 dprintk(DEBUG_VIDEO_STATE, "video_state %c,width=%d,height=%d,sync=%d\n", state,v_width,v_height,v_sync);
401 high_load = HIGH_LOAD_NORMAL;
402 ddr_rate_normal = DDR_RATE_NORMAL;
403 ddr_rate_high_load = DDR_RATE_HIGH_LOAD;
405 ddrfreq_clear_sys_status(SYS_STATUS_VIDEO);
408 high_load = HGIH_LOAD_VIDEO;
409 ddr_rate_normal = DDR_RATE_1080P;
410 ddr_rate_high_load = DDR_RATE_4K;
411 if( (v_width == 0) && (v_height == 0)){
413 ddrfreq_set_sys_status(SYS_STATUS_VIDEO_1080P);
416 //if(ddr.video_low_rate && ((v_width*v_height) <= VIDEO_LOW_RESOLUTION) )
417 // ddrfreq_set_sys_status(SYS_STATUS_VIDEO_720P);
420 ddrfreq_set_sys_status(SYS_STATUS_VIDEO_1080P);
424 ddrfreq_clear_sys_status(SYS_STATUS_VIDEO);
436 static const struct file_operations video_state_fops = {
437 .owner = THIS_MODULE,
438 .release= video_state_release,
439 .write = video_state_write,
442 static struct miscdevice video_state_dev = {
443 .fops = &video_state_fops,
444 .name = "video_state",
445 .minor = MISC_DYNAMIC_MINOR,
/*
 * Common clk-notifier body (used via the CLK_NOTIFIER macros below): mark
 * the unit busy before its clock changes rate, clear it again after the
 * change completes or aborts.
 */
448 static int ddrfreq_clk_event(int status, unsigned long event)
451 case PRE_RATE_CHANGE:
452 ddrfreq_set_sys_status(status);
454 case POST_RATE_CHANGE:
455 case ABORT_RATE_CHANGE:
456 ddrfreq_clear_sys_status(status);
462 #define CLK_NOTIFIER(name, status) \
463 static int ddrfreq_clk_##name##_event(struct notifier_block *this, unsigned long event, void *ptr) \
465 return ddrfreq_clk_event(SYS_STATUS_##status, event); \
467 static struct notifier_block ddrfreq_clk_##name##_notifier = { .notifier_call = ddrfreq_clk_##name##_event };
469 #define REGISTER_CLK_NOTIFIER(name) \
471 struct clk *clk = clk_get(NULL, #name); \
472 clk_notifier_register(clk, &ddrfreq_clk_##name##_notifier); \
476 #define UNREGISTER_CLK_NOTIFIER(name) \
478 struct clk *clk = clk_get(NULL, #name); \
479 clk_notifier_unregister(clk, &ddrfreq_clk_##name##_notifier); \
483 CLK_NOTIFIER(pd_gpu, GPU);
484 CLK_NOTIFIER(pd_rga, RGA);
485 CLK_NOTIFIER(pd_cif0, CIF0);
486 CLK_NOTIFIER(pd_cif1, CIF1);
487 CLK_NOTIFIER(pd_lcdc0, LCDC0);
488 CLK_NOTIFIER(pd_lcdc1, LCDC1);
/*
 * Reboot notifier: request the reboot DDR rate and busy-wait (up to 1000
 * iterations, per the comment ~10 s, so presumably ~10 ms per loop — the
 * sleep call is elided from this extract; confirm) for ddrfreq_task to
 * apply it before the system goes down.
 */
490 static int ddrfreq_reboot_notifier_event(struct notifier_block *this, unsigned long event, void *ptr)
492 u32 timeout = 1000; // 10s
493 ddrfreq_set_sys_status(SYS_STATUS_REBOOT);
494 while (dvfs_clk_get_rate(ddr.clk_dvfs_node) != ddr.reboot_rate && --timeout) {
498 pr_err("failed to set ddr clk from %luMHz to %luMHz when shutdown/reboot\n", dvfs_clk_get_rate(ddr.clk_dvfs_node) / MHZ, ddr.reboot_rate / MHZ);
503 static struct notifier_block ddrfreq_reboot_notifier = {
504 .notifier_call = ddrfreq_reboot_notifier_event,
/*
 * Parse the "clk_ddr" device-tree node: the optional "auto_freq" flag and
 * the "freq_table" property, a list of (status-mask, rate-kHz) u32 pairs.
 * Each entry's mask selects which of the per-mode rates it configures.
 */
507 int of_init_ddr_freq_table(void)
509 struct device_node *clk_ddr_dev_node;
510 const struct property *prop;
514 clk_ddr_dev_node = of_find_node_by_name(NULL, "clk_ddr");
515 if (IS_ERR_OR_NULL(clk_ddr_dev_node)) {
516 pr_err("%s: get clk ddr dev node err\n", __func__);
517 return PTR_ERR(clk_ddr_dev_node);
520 prop = of_find_property(clk_ddr_dev_node, "auto_freq", NULL);
521 if (prop && prop->value)
522 ddr.auto_freq = be32_to_cpup(prop->value);
524 prop = of_find_property(clk_ddr_dev_node, "freq_table", NULL);
/* Table must hold an even number of cells (status, rate pairs). */
530 nr = prop->length / sizeof(u32);
532 pr_err("%s: Invalid freq list\n", __func__);
/* DT rates are in kHz; internal rates are Hz. */
538 unsigned long status = be32_to_cpup(val++);
539 unsigned long rate = be32_to_cpup(val++) * 1000;
541 if (status & SYS_STATUS_NORMAL)
542 ddr.normal_rate = rate;
543 if (status & SYS_STATUS_SUSPEND)
544 ddr.suspend_rate = rate;
/* FIX: second operand previously re-tested SYS_STATUS_VIDEO_720P, so a
 * table entry tagged only VIDEO_1080P never set video_rate. The runtime
 * path (ddrfreq_work) checks both 720P and 1080P bits. */
545 if ((status & SYS_STATUS_VIDEO_720P)||(status & SYS_STATUS_VIDEO_1080P))
546 ddr.video_rate = rate;
547 if ((status & SYS_STATUS_LCDC0)&&(status & SYS_STATUS_LCDC1))
548 ddr.dualview_rate = rate;
549 if (status & SYS_STATUS_IDLE)
551 if (status & SYS_STATUS_REBOOT)
552 ddr.reboot_rate= rate;
559 #if 0//defined(CONFIG_RK_PM_TESTS)
560 static void ddrfreq_tst_init(void);
/*
 * Framebuffer blank notifier: treat screen-off as "suspend" for DDR
 * purposes. The SUSPEND bit is cleared early (FB_EARLY_EVENT_BLANK +
 * UNBLANK) so DDR speeds up before the display turns on, and set late
 * (FB_EVENT_BLANK + POWERDOWN) so it slows down only after blanking
 * has completed.
 */
563 static int ddr_freq_suspend_notifier_call(struct notifier_block *self,
564 unsigned long action, void *data)
566 struct fb_event *event = data;
567 int blank_mode = *((int *)event->data);
569 if (action == FB_EARLY_EVENT_BLANK) {
570 switch (blank_mode) {
571 case FB_BLANK_UNBLANK:
572 ddrfreq_clear_sys_status(SYS_STATUS_SUSPEND);
578 else if (action == FB_EVENT_BLANK) {
579 switch (blank_mode) {
580 case FB_BLANK_POWERDOWN:
581 ddrfreq_set_sys_status(SYS_STATUS_SUSPEND);
591 static struct notifier_block ddr_freq_suspend_notifier = {
592 .notifier_call = ddr_freq_suspend_notifier_call,
/*
 * Driver init (late_initcall): resolve the cpu/ddr dvfs nodes, read the DT
 * freq table, register the input handler, misc device, fb and reboot
 * notifiers, and start the RT "ddrfreqd" thread pinned to CPU0.
 * NOTE(review): the return value of alloc_workqueue is not visibly checked
 * in this extract — confirm against the full source.
 */
598 static int ddrfreq_init(void)
600 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
602 #if 0//defined(CONFIG_RK_PM_TESTS)
605 clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
606 if (!clk_cpu_dvfs_node){
610 memset(&ddr, 0x00, sizeof(ddr));
611 ddr.clk_dvfs_node = clk_get_dvfs_node("clk_ddr");
612 if (!ddr.clk_dvfs_node){
616 clk_enable_dvfs(ddr.clk_dvfs_node);
618 ddr_freq_wq = alloc_workqueue("ddr_freq", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
620 init_waitqueue_head(&ddr.wait);
/* Defaults: all special rates start at the current DDR rate until the DT
 * freq table overrides them. */
623 ddr.normal_rate = dvfs_clk_get_rate(ddr.clk_dvfs_node);
624 ddr.suspend_rate = ddr.normal_rate;
625 ddr.reboot_rate = ddr.normal_rate;
627 of_init_ddr_freq_table();
/* Auto mode depends on input events; disable it if registration fails. */
629 ret = input_register_handler(&ddr_freq_input_handler);
631 ddr.auto_freq = false;
634 //REGISTER_CLK_NOTIFIER(pd_gpu);
635 //REGISTER_CLK_NOTIFIER(pd_rga);
636 //REGISTER_CLK_NOTIFIER(pd_cif0);
637 //REGISTER_CLK_NOTIFIER(pd_cif1);
640 if (ddr.dualview_rate) {
641 //REGISTER_CLK_NOTIFIER(pd_lcdc0);
642 //REGISTER_CLK_NOTIFIER(pd_lcdc1);
645 ret = misc_register(&video_state_dev);
647 pr_err("failed to register video_state misc device! error %d\n", ret);
652 ddr.task = kthread_create(ddrfreq_task, NULL, "ddrfreqd");
653 if (IS_ERR(ddr.task)) {
654 ret = PTR_ERR(ddr.task);
655 pr_err("failed to create kthread! error %d\n", ret);
/* FIX: restored "&param" — the original text had been corrupted to the
 * mojibake "¶m" (an HTML-entity mangling of "&para" + "m"), which does
 * not compile. */
659 sched_setscheduler_nocheck(ddr.task, SCHED_FIFO, &param);
660 get_task_struct(ddr.task);
/* Bind to CPU0: the bandwidth work also runs on CPU0 (queue_..._on(0)). */
661 kthread_bind(ddr.task, 0);
662 wake_up_process(ddr.task);
664 fb_register_client(&ddr_freq_suspend_notifier);
665 register_reboot_notifier(&ddrfreq_reboot_notifier);
/* FIX: log message typo "verion" -> "version". */
667 pr_info("version 1.0 20140228\n");
668 dprintk(DEBUG_DDR, "normal %luMHz video %luMHz dualview %luMHz idle %luMHz suspend %luMHz reboot %luMHz\n",
669 ddr.normal_rate / MHZ, ddr.video_rate / MHZ, ddr.dualview_rate / MHZ, ddr.idle_rate / MHZ, ddr.suspend_rate / MHZ, ddr.reboot_rate / MHZ);
/* Error-path unwind labels (bodies partially elided in this extract). */
674 misc_deregister(&video_state_dev);
677 //UNREGISTER_CLK_NOTIFIER(pd_gpu);
678 //UNREGISTER_CLK_NOTIFIER(pd_rga);
679 //UNREGISTER_CLK_NOTIFIER(pd_cif0);
680 //UNREGISTER_CLK_NOTIFIER(pd_cif1);
682 if (ddr.dualview_rate) {
683 //UNREGISTER_CLK_NOTIFIER(pd_lcdc0);
684 //UNREGISTER_CLK_NOTIFIER(pd_lcdc1);
689 late_initcall(ddrfreq_init);
691 /****************************ddr bandwith tst************************************/
692 #if 0//defined(CONFIG_RK_PM_TESTS)
694 #define USE_NORMAL_TIME
696 #ifdef USE_NORMAL_TIME
697 static struct timer_list ddrbw_timer;
699 static struct hrtimer ddrbw_hrtimer;
701 enum ddr_bandwidth_id{
709 #define grf_readl(offset) readl_relaxed(RK_GRF_VIRT + offset)
710 #define grf_writel(v, offset) do { writel_relaxed(v, RK_GRF_VIRT + offset); dsb(); } while (0)
712 static u32 ddr_bw_show_st=0;
714 #define ddr_monitor_start() grf_writel(0xc000c000,RK3288_GRF_SOC_CON4)
715 #define ddr_monitor_end() grf_writel(0xc0000000,RK3288_GRF_SOC_CON4)
717 static ssize_t ddrbw_dyn_show(struct kobject *kobj, struct kobj_attribute *attr,
724 static ssize_t ddrbw_dyn_store(struct kobject *kobj, struct kobj_attribute *attr,
725 const char *buf, size_t n)
729 if((strncmp(buf, "start", strlen("start")) == 0)) {
733 #ifdef USE_NORMAL_TIME
734 mod_timer(&ddrbw_timer, jiffies + msecs_to_jiffies(500));
736 hrtimer_start(&ddrbw_hrtimer, ktime_set(0, 5 * 1000 * 1000*1000), HRTIMER_MODE_REL);
739 } else if((strncmp(buf, "stop", strlen("stop")) == 0)) {
/*
 * DEAD CODE: test-only duplicate of ddr_bandwidth_get, inside the
 * "#if 0//defined(CONFIG_RK_PM_TESTS)" block above — never compiled.
 * NOTE(review): this copy scales by *8*100 where the live version uses
 * *4*100; one of the two factors is presumably stale.
 */
747 static void ddr_bandwidth_get(void)
749 u32 ddr_bw_val[2][ddrbw_id_end];
755 for(i=0;i<ddrbw_eff;i++)
757 ddr_bw_val[j][i]=grf_readl(RK3288_GRF_SOC_STATUS11+i*4+j*16);
759 ddr_monitor_end();//stop
762 temp64=((u64)ddr_bw_val[0][0]+ddr_bw_val[0][1])*8*100;
764 // printk("ch0 %llu\n",temp64);
766 do_div(temp64,ddr_bw_val[0][ddrbw_time_num]);
767 ddr_bw_val[0][ddrbw_eff]= temp64;
768 temp64=((u64)ddr_bw_val[1][0]+ddr_bw_val[1][1])*8*100;
770 //printk("ch1 %llu\n",temp64);
772 do_div(temp64,ddr_bw_val[1][ddrbw_time_num]);
773 ddr_bw_val[1][ddrbw_eff]= temp64;
775 printk("ddrch0,wr,rd,act,time,percent(%x,%x,%x,%x,%d)\n",
776 ddr_bw_val[0][0],ddr_bw_val[0][1],ddr_bw_val[0][2],ddr_bw_val[0][3],ddr_bw_val[0][4]);
777 printk("ddrch1,wr,rd,act,time,percent(%x,%x,%x,%x,%d)\n",
778 ddr_bw_val[1][0],ddr_bw_val[1][1],ddr_bw_val[1][2],ddr_bw_val[1][3],ddr_bw_val[1][4]);
782 #ifdef USE_NORMAL_TIME
783 static void ddrbw_timer_fn(unsigned long data)
789 mod_timer(&ddrbw_timer, jiffies + msecs_to_jiffies(500));
793 struct hrtimer ddrbw_hrtimer;
794 static enum hrtimer_restart ddrbw_hrtimer_timer_func(struct hrtimer *timer)
799 hrtimer_start(timer, ktime_set(0, 1 * 1000 * 1000), HRTIMER_MODE_REL);
804 struct ddrfreq_attribute {
805 struct attribute attr;
806 ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
808 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
809 const char *buf, size_t n);
812 static struct ddrfreq_attribute ddrfreq_attrs[] = {
813 /* node_name permision show_func store_func */
814 __ATTR(ddrbw, S_IRUSR | S_IRGRP | S_IWUSR,ddrbw_dyn_show, ddrbw_dyn_store),
816 int rk_pm_tests_kobj_atrradd(const struct attribute *attr);
/*
 * DEAD CODE: sets up the bandwidth-dump test timer (normal timer or
 * hrtimer variant) and the "ddrbw" sysfs node — part of the disabled
 * CONFIG_RK_PM_TESTS block, never compiled.
 */
818 static void ddrfreq_tst_init(void)
821 #ifdef USE_NORMAL_TIME
822 init_timer(&ddrbw_timer);
823 //ddrbw_timer.expires = jiffies+msecs_to_jiffies(1);
824 ddrbw_timer.function = ddrbw_timer_fn;
825 //mod_timer(&ddrbw_timer,jiffies+msecs_to_jiffies(1));
827 hrtimer_init(&ddrbw_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
828 ddrbw_hrtimer.function = ddrbw_hrtimer_timer_func;
829 //hrtimer_start(&ddrbw_hrtimer,ktime_set(0, 5*1000*1000),HRTIMER_MODE_REL);
831 printk("*****%s*****\n",__FUNCTION__);
833 ret = rk_pm_tests_kobj_atrradd(&ddrfreq_attrs[0].attr);
835 printk("create ddrfreq sysfs node error\n");