/*
 *  drivers/cpufreq/cpufreq_stats.c
 *
 *  Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *  (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/sysfs.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/kobject.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <asm/cputime.h>
#ifdef CONFIG_BL_SWITCHER
#include <asm/bL_switcher.h>
#endif

static spinlock_t cpufreq_stats_lock;

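/*
 * Per-CPU bookkeeping behind the sysfs "stats" group.  time_in_state,
 * freq_table and (with CONFIG_CPU_FREQ_STAT_DETAILS) trans_table all live
 * in one allocation; see cpufreq_stats_create_table() for the layout.
 */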
struct cpufreq_stats {
        unsigned int cpu;
        unsigned int total_trans;
        unsigned long long last_time;
        unsigned int max_state;
        unsigned int state_num;
        unsigned int last_index;
        u64 *time_in_state;
        unsigned int *freq_table;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        unsigned int *trans_table;
#endif
};

struct all_cpufreq_stats {
        unsigned int state_num;
        cputime64_t *time_in_state;
        unsigned int *freq_table;
};

struct all_freq_table {
        unsigned int *freq_table;
        unsigned int table_size;
};

static struct all_freq_table *all_freq_table;

static DEFINE_PER_CPU(struct all_cpufreq_stats *, all_cpufreq_stats);
static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);

struct cpufreq_stats_attribute {
        struct attribute attr;
        ssize_t (*show)(struct cpufreq_stats *, char *);
};

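/*
 * Charge the jiffies elapsed since last_time to the frequency the CPU is
 * currently running at, in both the per-CPU table and (when present) the
 * aggregated all_cpufreq_stats table, then restart the accounting window.
 */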
static int cpufreq_stats_update(unsigned int cpu)
{
        struct cpufreq_stats *stat;
        struct all_cpufreq_stats *all_stat;
        unsigned long long cur_time;

        cur_time = get_jiffies_64();
        spin_lock(&cpufreq_stats_lock);
        stat = per_cpu(cpufreq_stats_table, cpu);
        all_stat = per_cpu(all_cpufreq_stats, cpu);
        if (!stat) {
                spin_unlock(&cpufreq_stats_lock);
                return 0;
        }
        if (stat->time_in_state) {
                stat->time_in_state[stat->last_index] +=
                        cur_time - stat->last_time;
                if (all_stat)
                        all_stat->time_in_state[stat->last_index] +=
                                        cur_time - stat->last_time;
        }
        stat->last_time = cur_time;
        spin_unlock(&cpufreq_stats_lock);
        return 0;
}

static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
        struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
        if (!stat)
                return 0;
        return sprintf(buf, "%d\n",
                        per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
}

static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
        ssize_t len = 0;
        int i;
        struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
        if (!stat)
                return 0;
        cpufreq_stats_update(stat->cpu);
        for (i = 0; i < stat->state_num; i++) {
                len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
                        (unsigned long long)
                        jiffies_64_to_clock_t(stat->time_in_state[i]));
        }
        return len;
}

static int get_index_all_cpufreq_stat(struct all_cpufreq_stats *all_stat,
                unsigned int freq)
{
        int i;
        if (!all_stat)
                return -1;
        for (i = 0; i < all_stat->state_num; i++) {
                if (all_stat->freq_table[i] == freq)
                        return i;
        }
        return -1;
}

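/*
 * Global all_time_in_state file: one row per frequency known to any CPU,
 * one column per possible CPU, and "N/A" where a CPU does not support the
 * frequency in that row.
 */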
static ssize_t show_all_time_in_state(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        ssize_t len = 0;
        unsigned int i, cpu, freq, index;
        struct all_cpufreq_stats *all_stat;
        struct cpufreq_policy *policy;

        len += scnprintf(buf + len, PAGE_SIZE - len, "freq\t\t");
        for_each_possible_cpu(cpu) {
                len += scnprintf(buf + len, PAGE_SIZE - len, "cpu%d\t\t", cpu);
                if (cpu_online(cpu))
                        cpufreq_stats_update(cpu);
        }

        if (!all_freq_table)
                goto out;
        for (i = 0; i < all_freq_table->table_size; i++) {
                freq = all_freq_table->freq_table[i];
                len += scnprintf(buf + len, PAGE_SIZE - len, "\n%u\t\t", freq);
                for_each_possible_cpu(cpu) {
                        policy = cpufreq_cpu_get(cpu);
                        if (policy == NULL)
                                continue;
                        all_stat = per_cpu(all_cpufreq_stats, policy->cpu);
                        index = get_index_all_cpufreq_stat(all_stat, freq);
                        if (index != -1) {
                                len += scnprintf(buf + len, PAGE_SIZE - len,
                                        "%llu\t\t", (unsigned long long)
                                        cputime64_to_clock_t(all_stat->time_in_state[index]));
                        } else {
                                len += scnprintf(buf + len, PAGE_SIZE - len,
                                                "N/A\t\t");
                        }
                        cpufreq_cpu_put(policy);
                }
        }

out:
        len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
        return len;
}

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
        ssize_t len = 0;
        int i, j;

        struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
        if (!stat)
                return 0;
        cpufreq_stats_update(stat->cpu);
        len += snprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
        len += snprintf(buf + len, PAGE_SIZE - len, "         : ");
        for (i = 0; i < stat->state_num; i++) {
                if (len >= PAGE_SIZE)
                        break;
                len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
                                stat->freq_table[i]);
        }
        if (len >= PAGE_SIZE)
                return PAGE_SIZE;

        len += snprintf(buf + len, PAGE_SIZE - len, "\n");

        for (i = 0; i < stat->state_num; i++) {
                if (len >= PAGE_SIZE)
                        break;

                len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
                                stat->freq_table[i]);

                for (j = 0; j < stat->state_num; j++) {
                        if (len >= PAGE_SIZE)
                                break;
                        len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
                                        stat->trans_table[i * stat->max_state + j]);
                }
                if (len >= PAGE_SIZE)
                        break;
                len += snprintf(buf + len, PAGE_SIZE - len, "\n");
        }
        if (len >= PAGE_SIZE)
                return PAGE_SIZE;
        return len;
}
cpufreq_freq_attr_ro(trans_table);
#endif

cpufreq_freq_attr_ro(total_trans);
cpufreq_freq_attr_ro(time_in_state);

static struct attribute *default_attrs[] = {
        &total_trans.attr,
        &time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        &trans_table.attr,
#endif
        NULL
};
static struct attribute_group stats_attr_group = {
        .attrs = default_attrs,
        .name = "stats"
};

static struct kobj_attribute _attr_all_time_in_state = __ATTR(all_time_in_state,
                0444, show_all_time_in_state, NULL);

static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
{
        int index;
        for (index = 0; index < stat->max_state; index++)
                if (stat->freq_table[index] == freq)
                        return index;
        return -1;
}

/* should be called late in the CPU removal sequence so that the stats
 * memory is still available in case someone tries to use it.
 */
static void cpufreq_stats_free_table(unsigned int cpu)
{
        struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);

        if (stat) {
                pr_debug("%s: Free stat table\n", __func__);
                kfree(stat->time_in_state);
                kfree(stat);
                per_cpu(cpufreq_stats_table, cpu) = NULL;
        }
}

/* must be called early in the CPU removal sequence (before
 * cpufreq_remove_dev) so that policy is still valid.
 */
static void cpufreq_stats_free_sysfs(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

        if (!policy)
                return;

        if (!cpufreq_frequency_get_table(cpu))
                goto put_ref;

        if (!policy_is_shared(policy)) {
                pr_debug("%s: Free sysfs stat\n", __func__);
                sysfs_remove_group(&policy->kobj, &stats_attr_group);
        }

put_ref:
        cpufreq_cpu_put(policy);
}

static void cpufreq_allstats_free(void)
{
        int cpu;
        struct all_cpufreq_stats *all_stat;

        sysfs_remove_file(cpufreq_global_kobject,
                                                &_attr_all_time_in_state.attr);

        for_each_possible_cpu(cpu) {
                all_stat = per_cpu(all_cpufreq_stats, cpu);
                if (!all_stat)
                        continue;
                kfree(all_stat->time_in_state);
                kfree(all_stat);
                per_cpu(all_cpufreq_stats, cpu) = NULL;
        }
        if (all_freq_table) {
                kfree(all_freq_table->freq_table);
                kfree(all_freq_table);
                all_freq_table = NULL;
        }
}

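/*
 * A single allocation backs the per-CPU statistics: 'count' u64 counters for
 * time_in_state, then 'count' unsigned ints for freq_table and, when
 * CONFIG_CPU_FREQ_STAT_DETAILS is set, a count x count transition matrix.
 */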
static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table)
{
        unsigned int i, j, count = 0;
        int ret = 0;
        struct cpufreq_stats *stat;
        struct cpufreq_policy *data;
        unsigned int alloc_size;
        unsigned int cpu = policy->cpu;
        if (per_cpu(cpufreq_stats_table, cpu))
                return -EBUSY;
        stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
        if (!stat)
                return -ENOMEM;

        data = cpufreq_cpu_get(cpu);
        if (data == NULL) {
                ret = -EINVAL;
                goto error_get_fail;
        }

        ret = sysfs_create_group(&data->kobj, &stats_attr_group);
        if (ret)
                goto error_out;

        stat->cpu = cpu;
        per_cpu(cpufreq_stats_table, cpu) = stat;

        for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
                unsigned int freq = table[i].frequency;
                if (freq == CPUFREQ_ENTRY_INVALID)
                        continue;
                count++;
        }

        alloc_size = count * sizeof(int) + count * sizeof(u64);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        alloc_size += count * count * sizeof(int);
#endif
        stat->max_state = count;
        stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
        if (!stat->time_in_state) {
                ret = -ENOMEM;
                goto error_out;
        }
        stat->freq_table = (unsigned int *)(stat->time_in_state + count);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        stat->trans_table = stat->freq_table + count;
#endif
        j = 0;
        for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
                unsigned int freq = table[i].frequency;
                if (freq == CPUFREQ_ENTRY_INVALID)
                        continue;
                if (freq_table_get_index(stat, freq) == -1)
                        stat->freq_table[j++] = freq;
        }
        stat->state_num = j;
        spin_lock(&cpufreq_stats_lock);
        stat->last_time = get_jiffies_64();
        stat->last_index = freq_table_get_index(stat, policy->cur);
        spin_unlock(&cpufreq_stats_lock);
        cpufreq_cpu_put(data);
        return 0;
error_out:
        cpufreq_cpu_put(data);
error_get_fail:
        kfree(stat);
        per_cpu(cpufreq_stats_table, cpu) = NULL;
        return ret;
}

static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
{
        struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
                        policy->last_cpu);

        pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
                        policy->cpu, policy->last_cpu);
        per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
                        policy->last_cpu);
        per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
        stat->cpu = policy->cpu;
}

static int compare_for_sort(const void *lhs_ptr, const void *rhs_ptr)
{
        unsigned int lhs = *(const unsigned int *)(lhs_ptr);
        unsigned int rhs = *(const unsigned int *)(rhs_ptr);
        if (lhs < rhs)
                return -1;
        if (lhs > rhs)
                return 1;
        return 0;
}

static bool check_all_freq_table(unsigned int freq)
{
        int i;
        for (i = 0; i < all_freq_table->table_size; i++) {
                if (freq == all_freq_table->freq_table[i])
                        return true;
        }
        return false;
}

static void create_all_freq_table(void)
{
        all_freq_table = kzalloc(sizeof(struct all_freq_table),
                        GFP_KERNEL);
        if (!all_freq_table)
                pr_warn("could not allocate memory for all_freq_table\n");
}

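/*
 * Grow the global frequency list by one entry.  krealloc() returns NULL on
 * failure rather than an ERR_PTR, so check for NULL and keep the old table
 * intact when the reallocation fails.
 */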
static void add_all_freq_table(unsigned int freq)
{
        unsigned int size;
        unsigned int *table;

        size = sizeof(unsigned int) * (all_freq_table->table_size + 1);
        table = krealloc(all_freq_table->freq_table, size, GFP_ATOMIC);
        if (!table) {
                pr_warn("Could not reallocate memory for freq_table\n");
                return;
        }
        all_freq_table->freq_table = table;
        all_freq_table->freq_table[all_freq_table->table_size++] = freq;
}

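/*
 * Allocate this CPU's all_cpufreq_stats entry and merge its valid
 * frequencies into the global all_freq_table, keeping that table sorted.
 */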
static void cpufreq_allstats_create(unsigned int cpu)
{
        int i, j = 0;
        unsigned int alloc_size, count = 0;
        struct cpufreq_frequency_table *table = cpufreq_frequency_get_table(cpu);
        struct all_cpufreq_stats *all_stat;
        bool sort_needed = false;

        if (!table)
                return;

        for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
                unsigned int freq = table[i].frequency;
                if (freq == CPUFREQ_ENTRY_INVALID)
                        continue;
                count++;
        }

        all_stat = kzalloc(sizeof(struct all_cpufreq_stats),
                        GFP_KERNEL);
        if (!all_stat) {
                pr_warn("Cannot allocate memory for cpufreq stats\n");
                return;
        }

        /* Allocate memory for the per-cpu freq table and the clock ticks per freq */
        alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
        all_stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
        if (!all_stat->time_in_state) {
                pr_warn("Cannot allocate memory for cpufreq time_in_state\n");
                kfree(all_stat);
                all_stat = NULL;
                return;
        }
        all_stat->freq_table = (unsigned int *)
                (all_stat->time_in_state + count);

        spin_lock(&cpufreq_stats_lock);
        for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
                unsigned int freq = table[i].frequency;
                if (freq == CPUFREQ_ENTRY_INVALID)
                        continue;
                all_stat->freq_table[j++] = freq;
                if (all_freq_table && !check_all_freq_table(freq)) {
                        add_all_freq_table(freq);
                        sort_needed = true;
                }
        }
        if (sort_needed)
                sort(all_freq_table->freq_table, all_freq_table->table_size,
                                sizeof(unsigned int), &compare_for_sort, NULL);
        all_stat->state_num = j;
        per_cpu(all_cpufreq_stats, cpu) = all_stat;
        spin_unlock(&cpufreq_stats_lock);
}

static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
                unsigned long val, void *data)
{
        int ret;
        struct cpufreq_policy *policy = data;
        struct cpufreq_frequency_table *table;
        unsigned int cpu = policy->cpu;

        if (val == CPUFREQ_UPDATE_POLICY_CPU) {
                cpufreq_stats_update_policy_cpu(policy);
                return 0;
        }

        if (val != CPUFREQ_NOTIFY)
                return 0;
        table = cpufreq_frequency_get_table(cpu);
        if (!table)
                return 0;

        if (!per_cpu(all_cpufreq_stats, cpu))
                cpufreq_allstats_create(cpu);

        ret = cpufreq_stats_create_table(policy, table);
        if (ret)
                return ret;
        return 0;
}

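/*
 * Transition notifier: on POSTCHANGE, account the time spent at the old
 * frequency, record the new frequency as the current state and bump the
 * transition counters.
 */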
static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
                unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpufreq_stats *stat;
        int old_index, new_index;

        if (val != CPUFREQ_POSTCHANGE)
                return 0;

        stat = per_cpu(cpufreq_stats_table, freq->cpu);
        if (!stat)
                return 0;

        old_index = stat->last_index;
        new_index = freq_table_get_index(stat, freq->new);

        /* We can't do stat->time_in_state[-1] = .. */
        if (old_index == -1 || new_index == -1)
                return 0;

        cpufreq_stats_update(freq->cpu);

        if (old_index == new_index)
                return 0;

        spin_lock(&cpufreq_stats_lock);
        stat->last_index = new_index;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        stat->trans_table[old_index * stat->max_state + new_index]++;
#endif
        stat->total_trans++;
        spin_unlock(&cpufreq_stats_lock);
        return 0;
}

static int cpufreq_stats_create_table_cpu(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *table;
        int ret = -ENODEV;

        policy = cpufreq_cpu_get(cpu);
        if (!policy)
                return -ENODEV;

        table = cpufreq_frequency_get_table(cpu);
        if (!table)
                goto out;

        if (!per_cpu(all_cpufreq_stats, cpu))
                cpufreq_allstats_create(cpu);

        ret = cpufreq_stats_create_table(policy, table);

out:
        cpufreq_cpu_put(policy);
        return ret;
}

static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
                                               unsigned long action,
                                               void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                cpufreq_update_policy(cpu);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                cpufreq_stats_free_sysfs(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                cpufreq_stats_free_table(cpu);
                break;
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
                cpufreq_stats_create_table_cpu(cpu);
                break;
        }
        return NOTIFY_OK;
}

/* priority=1 so this will get called before cpufreq_remove_dev */
static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
        .notifier_call = cpufreq_stat_cpu_callback,
        .priority = 1,
};

static struct notifier_block notifier_policy_block = {
        .notifier_call = cpufreq_stat_notifier_policy
};

static struct notifier_block notifier_trans_block = {
        .notifier_call = cpufreq_stat_notifier_trans
};

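/*
 * Register the policy, transition and CPU hotplug notifiers and build the
 * stats tables for the CPUs that are already online.  On the big.LITTLE
 * switcher path this is also re-run after a switch (see the BL_SWITCHER
 * notifier below), so failures must unwind everything they registered.
 */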
static int cpufreq_stats_setup(void)
{
        int ret;
        unsigned int cpu;

        spin_lock_init(&cpufreq_stats_lock);
        ret = cpufreq_register_notifier(&notifier_policy_block,
                                CPUFREQ_POLICY_NOTIFIER);
        if (ret)
                return ret;

        register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
        for_each_online_cpu(cpu)
                cpufreq_update_policy(cpu);

        ret = cpufreq_register_notifier(&notifier_trans_block,
                                CPUFREQ_TRANSITION_NOTIFIER);
        if (ret) {
                cpufreq_unregister_notifier(&notifier_policy_block,
                                CPUFREQ_POLICY_NOTIFIER);
                unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
                for_each_online_cpu(cpu)
                        cpufreq_stats_free_table(cpu);
                return ret;
        }

        create_all_freq_table();
        ret = sysfs_create_file(cpufreq_global_kobject,
                        &_attr_all_time_in_state.attr);
        if (ret)
                pr_warn("Error creating sysfs file for cpufreq stats\n");

        return 0;
}

static void cpufreq_stats_cleanup(void)
{
        unsigned int cpu;

        cpufreq_unregister_notifier(&notifier_policy_block,
                        CPUFREQ_POLICY_NOTIFIER);
        cpufreq_unregister_notifier(&notifier_trans_block,
                        CPUFREQ_TRANSITION_NOTIFIER);
        unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
        for_each_online_cpu(cpu) {
                cpufreq_stats_free_table(cpu);
                cpufreq_stats_free_sysfs(cpu);
        }
        cpufreq_allstats_free();
}

#ifdef CONFIG_BL_SWITCHER
static int cpufreq_stats_switcher_notifier(struct notifier_block *nfb,
                                        unsigned long action, void *_arg)
{
        switch (action) {
        case BL_NOTIFY_PRE_ENABLE:
        case BL_NOTIFY_PRE_DISABLE:
                cpufreq_stats_cleanup();
                break;

        case BL_NOTIFY_POST_ENABLE:
        case BL_NOTIFY_POST_DISABLE:
                cpufreq_stats_setup();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static struct notifier_block switcher_notifier = {
        .notifier_call = cpufreq_stats_switcher_notifier,
};
#endif

static int __init cpufreq_stats_init(void)
{
        int ret;

        spin_lock_init(&cpufreq_stats_lock);

        ret = cpufreq_stats_setup();
#ifdef CONFIG_BL_SWITCHER
        if (!ret)
                bL_switcher_register_notifier(&switcher_notifier);
#endif
        return ret;
}

static void __exit cpufreq_stats_exit(void)
{
#ifdef CONFIG_BL_SWITCHER
        bL_switcher_unregister_notifier(&switcher_notifier);
#endif
        cpufreq_stats_cleanup();
}

MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
                                "through sysfs filesystem");
MODULE_LICENSE("GPL");

module_init(cpufreq_stats_init);
module_exit(cpufreq_stats_exit);