cpu_power: Avoid race condition when the task exits.
[firefly-linux-kernel-4.4.55.git] / drivers / cpufreq / cpufreq_stats.c
1 /*
2  *  drivers/cpufreq/cpufreq_stats.c
3  *
4  *  Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
5  *  (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/cpu.h>
15 #include <linux/sysfs.h>
16 #include <linux/cpufreq.h>
17 #include <linux/module.h>
18 #include <linux/jiffies.h>
19 #include <linux/percpu.h>
20 #include <linux/kobject.h>
21 #include <linux/spinlock.h>
22 #include <linux/notifier.h>
23 #include <linux/sort.h>
24 #include <linux/err.h>
25 #include <linux/of.h>
26 #include <linux/sched.h>
27 #include <asm/cputime.h>
28 #ifdef CONFIG_BL_SWITCHER
29 #include <asm/bL_switcher.h>
30 #endif
31
32 static spinlock_t cpufreq_stats_lock;
33
/* Per-CPU frequency statistics exported through the policy's sysfs dir. */
struct cpufreq_stats {
        unsigned int cpu;               /* CPU this table belongs to */
        unsigned int total_trans;       /* total number of frequency transitions */
        unsigned long long  last_time;  /* jiffies64 stamp of the last update */
        unsigned int max_state;         /* number of allocated state slots */
        unsigned int state_num;         /* number of valid (deduped) frequencies */
        unsigned int last_index;        /* index of the currently-set frequency */
        u64 *time_in_state;             /* jiffies accumulated per state */
        unsigned int *freq_table;       /* frequency (kHz) for each state index */
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        unsigned int *trans_table;      /* max_state x max_state transition counts */
#endif
};
47
/*
 * Cumulative per-CPU time-in-state; unlike struct cpufreq_stats this is
 * not freed on CPU_DEAD (only in cpufreq_allstats_free()), so it survives
 * stat-table re-creation across hotplug.
 */
struct all_cpufreq_stats {
        unsigned int state_num;         /* number of valid frequencies */
        cputime64_t *time_in_state;     /* cumulative time per frequency */
        unsigned int *freq_table;       /* frequency for each state index */
};
53
/*
 * Per-CPU current-draw table, filled from the device tree's "current"
 * property; one entry per frequency.
 */
struct cpufreq_power_stats {
        unsigned int state_num;         /* number of valid frequencies */
        unsigned int *curr;             /* current draw per frequency */
        unsigned int *freq_table;       /* frequency for each state index */
};
59
/* Union of every CPU's frequencies, kept sorted ascending (see sort()). */
struct all_freq_table {
        unsigned int *freq_table;       /* grown with krealloc() as freqs appear */
        unsigned int table_size;        /* number of entries in freq_table */
};
64
65 static struct all_freq_table *all_freq_table;
66
67 static DEFINE_PER_CPU(struct all_cpufreq_stats *, all_cpufreq_stats);
68 static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);
69 static DEFINE_PER_CPU(struct cpufreq_power_stats *, cpufreq_power_stats);
70
/* sysfs attribute wrapper whose show() takes a cpufreq_stats table. */
struct cpufreq_stats_attribute {
        struct attribute attr;
        ssize_t(*show) (struct cpufreq_stats *, char *);
};
75
76 static int cpufreq_stats_update(unsigned int cpu)
77 {
78         struct cpufreq_stats *stat;
79         struct all_cpufreq_stats *all_stat;
80         unsigned long long cur_time;
81
82         cur_time = get_jiffies_64();
83         spin_lock(&cpufreq_stats_lock);
84         stat = per_cpu(cpufreq_stats_table, cpu);
85         all_stat = per_cpu(all_cpufreq_stats, cpu);
86         if (!stat) {
87                 spin_unlock(&cpufreq_stats_lock);
88                 return 0;
89         }
90         if (stat->time_in_state) {
91                 stat->time_in_state[stat->last_index] +=
92                         cur_time - stat->last_time;
93                 if (all_stat)
94                         all_stat->time_in_state[stat->last_index] +=
95                                         cur_time - stat->last_time;
96         }
97         stat->last_time = cur_time;
98         spin_unlock(&cpufreq_stats_lock);
99         return 0;
100 }
101
102 static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
103 {
104         struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
105         if (!stat)
106                 return 0;
107         return sprintf(buf, "%d\n",
108                         per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
109 }
110
111 static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
112 {
113         ssize_t len = 0;
114         int i;
115         struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
116         if (!stat)
117                 return 0;
118         cpufreq_stats_update(stat->cpu);
119         for (i = 0; i < stat->state_num; i++) {
120                 len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
121                         (unsigned long long)
122                         jiffies_64_to_clock_t(stat->time_in_state[i]));
123         }
124         return len;
125 }
126
127 static int get_index_all_cpufreq_stat(struct all_cpufreq_stats *all_stat,
128                 unsigned int freq)
129 {
130         int i;
131         if (!all_stat)
132                 return -1;
133         for (i = 0; i < all_stat->state_num; i++) {
134                 if (all_stat->freq_table[i] == freq)
135                         return i;
136         }
137         return -1;
138 }
139
/*
 * Charge @cputime worth of power use to @task, weighted by the current
 * draw at the frequency the task's CPU last switched to.  No-op if the
 * power or stats tables for that CPU do not exist.
 */
void acct_update_power(struct task_struct *task, cputime_t cputime) {
        struct cpufreq_power_stats *powerstats;
        struct cpufreq_stats *stats;
        unsigned int cpu_num, curr;

        if (!task)
                return;
        cpu_num = task_cpu(task);
        powerstats = per_cpu(cpufreq_power_stats, cpu_num);
        stats = per_cpu(cpufreq_stats_table, cpu_num);
        if (!powerstats || !stats)
                return;

        curr = powerstats->curr[stats->last_index];
        /*
         * ULLONG_MAX is used as a sentinel — presumably set when the task
         * is exiting, per the commit message about avoiding a race on task
         * exit; skip the accumulation in that case.  TODO confirm against
         * the exit path that sets cpu_power.
         */
        if (task->cpu_power != ULLONG_MAX)
                task->cpu_power += curr * cputime_to_usecs(cputime);
}
EXPORT_SYMBOL_GPL(acct_update_power);
158
159 static ssize_t show_current_in_state(struct kobject *kobj,
160                 struct kobj_attribute *attr, char *buf)
161 {
162         ssize_t len = 0;
163         unsigned int i, cpu;
164         struct cpufreq_power_stats *powerstats;
165
166         spin_lock(&cpufreq_stats_lock);
167         for_each_possible_cpu(cpu) {
168                 powerstats = per_cpu(cpufreq_power_stats, cpu);
169                 if (!powerstats)
170                         continue;
171                 len += scnprintf(buf + len, PAGE_SIZE - len, "CPU%d:", cpu);
172                 for (i = 0; i < powerstats->state_num; i++)
173                         len += scnprintf(buf + len, PAGE_SIZE - len,
174                                         "%d=%d ", powerstats->freq_table[i],
175                                         powerstats->curr[i]);
176                 len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
177         }
178         spin_unlock(&cpufreq_stats_lock);
179         return len;
180 }
181
/*
 * Global sysfs "all_time_in_state": a matrix with one row per frequency
 * in the union table and one column per possible CPU.  CPUs whose table
 * lacks a given frequency print "N/A".
 */
static ssize_t show_all_time_in_state(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        ssize_t len = 0;
        unsigned int i, cpu, freq, index;
        struct all_cpufreq_stats *all_stat;
        struct cpufreq_policy *policy;

        /* Header row; also flush pending time for every online CPU. */
        len += scnprintf(buf + len, PAGE_SIZE - len, "freq\t\t");
        for_each_possible_cpu(cpu) {
                len += scnprintf(buf + len, PAGE_SIZE - len, "cpu%d\t\t", cpu);
                if (cpu_online(cpu))
                        cpufreq_stats_update(cpu);
        }

        if (!all_freq_table)
                goto out;
        for (i = 0; i < all_freq_table->table_size; i++) {
                freq = all_freq_table->freq_table[i];
                len += scnprintf(buf + len, PAGE_SIZE - len, "\n%u\t\t", freq);
                for_each_possible_cpu(cpu) {
                        /* Hold a policy reference while reading its stats. */
                        policy = cpufreq_cpu_get(cpu);
                        if (policy == NULL)
                                continue;
                        all_stat = per_cpu(all_cpufreq_stats, policy->cpu);
                        index = get_index_all_cpufreq_stat(all_stat, freq);
                        /* index is unsigned; -1 from the helper wraps, so
                         * this compares against the converted sentinel. */
                        if (index != -1) {
                                len += scnprintf(buf + len, PAGE_SIZE - len,
                                        "%llu\t\t", (unsigned long long)
                                        cputime64_to_clock_t(all_stat->time_in_state[index]));
                        } else {
                                len += scnprintf(buf + len, PAGE_SIZE - len,
                                                "N/A\t\t");
                        }
                        cpufreq_cpu_put(policy);
                }
        }

out:
        len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
        return len;
}
224
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
/*
 * sysfs "trans_table": square matrix of transition counts, rows indexed
 * by the "from" frequency and columns by the "to" frequency.  Output is
 * clamped to one page; on truncation exactly PAGE_SIZE is returned.
 */
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
        ssize_t len = 0;
        int i, j;

        struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
        if (!stat)
                return 0;
        cpufreq_stats_update(stat->cpu);
        len += snprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
        len += snprintf(buf + len, PAGE_SIZE - len, "         : ");
        /* Column header: one entry per "to" frequency. */
        for (i = 0; i < stat->state_num; i++) {
                if (len >= PAGE_SIZE)
                        break;
                len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
                                stat->freq_table[i]);
        }
        if (len >= PAGE_SIZE)
                return PAGE_SIZE;

        len += snprintf(buf + len, PAGE_SIZE - len, "\n");

        /* One row per "from" frequency. */
        for (i = 0; i < stat->state_num; i++) {
                if (len >= PAGE_SIZE)
                        break;

                len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
                                stat->freq_table[i]);

                for (j = 0; j < stat->state_num; j++)   {
                        if (len >= PAGE_SIZE)
                                break;
                        /* trans_table is a flat max_state x max_state array. */
                        len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
                                        stat->trans_table[i*stat->max_state+j]);
                }
                if (len >= PAGE_SIZE)
                        break;
                len += snprintf(buf + len, PAGE_SIZE - len, "\n");
        }
        if (len >= PAGE_SIZE)
                return PAGE_SIZE;
        return len;
}
cpufreq_freq_attr_ro(trans_table);
#endif
271
/* Per-policy read-only attributes, grouped under <policy>/stats/. */
cpufreq_freq_attr_ro(total_trans);
cpufreq_freq_attr_ro(time_in_state);

static struct attribute *default_attrs[] = {
        &total_trans.attr,
        &time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        &trans_table.attr,
#endif
        NULL
};
static struct attribute_group stats_attr_group = {
        .attrs = default_attrs,
        .name = "stats"
};

/* Global read-only files created under the cpufreq kobject at setup. */
static struct kobj_attribute _attr_all_time_in_state = __ATTR(all_time_in_state,
                0444, show_all_time_in_state, NULL);

static struct kobj_attribute _attr_current_in_state = __ATTR(current_in_state,
                0444, show_current_in_state, NULL);
293
294 static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
295 {
296         int index;
297         for (index = 0; index < stat->max_state; index++)
298                 if (stat->freq_table[index] == freq)
299                         return index;
300         return -1;
301 }
302
303 /* should be called late in the CPU removal sequence so that the stats
304  * memory is still available in case someone tries to use it.
305  */
306 static void cpufreq_stats_free_table(unsigned int cpu)
307 {
308         struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
309
310         if (stat) {
311                 pr_debug("%s: Free stat table\n", __func__);
312                 kfree(stat->time_in_state);
313                 kfree(stat);
314                 per_cpu(cpufreq_stats_table, cpu) = NULL;
315         }
316 }
317
318 /* must be called early in the CPU removal sequence (before
319  * cpufreq_remove_dev) so that policy is still valid.
320  */
321 static void cpufreq_stats_free_sysfs(unsigned int cpu)
322 {
323         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
324
325         if (!policy)
326                 return;
327
328         if (!cpufreq_frequency_get_table(cpu))
329                 goto put_ref;
330
331         if (!policy_is_shared(policy)) {
332                 pr_debug("%s: Free sysfs stat\n", __func__);
333                 sysfs_remove_group(&policy->kobj, &stats_attr_group);
334         }
335
336 put_ref:
337         cpufreq_cpu_put(policy);
338 }
339
340 static void cpufreq_allstats_free(void)
341 {
342         int cpu;
343         struct all_cpufreq_stats *all_stat;
344
345         sysfs_remove_file(cpufreq_global_kobject,
346                                                 &_attr_all_time_in_state.attr);
347
348         for_each_possible_cpu(cpu) {
349                 all_stat = per_cpu(all_cpufreq_stats, cpu);
350                 if (!all_stat)
351                         continue;
352                 kfree(all_stat->time_in_state);
353                 kfree(all_stat);
354                 per_cpu(all_cpufreq_stats, cpu) = NULL;
355         }
356         if (all_freq_table) {
357                 kfree(all_freq_table->freq_table);
358                 kfree(all_freq_table);
359                 all_freq_table = NULL;
360         }
361 }
362
363 static void cpufreq_powerstats_free(void)
364 {
365         int cpu;
366         struct cpufreq_power_stats *powerstats;
367
368         sysfs_remove_file(cpufreq_global_kobject, &_attr_current_in_state.attr);
369
370         for_each_possible_cpu(cpu) {
371                 powerstats = per_cpu(cpufreq_power_stats, cpu);
372                 if (!powerstats)
373                         continue;
374                 kfree(powerstats->curr);
375                 kfree(powerstats);
376                 per_cpu(cpufreq_power_stats, cpu) = NULL;
377         }
378 }
379
380 static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
381                 struct cpufreq_frequency_table *table, int count)
382 {
383         unsigned int i, j, ret = 0;
384         struct cpufreq_stats *stat;
385         struct cpufreq_policy *data;
386         unsigned int alloc_size;
387         unsigned int cpu = policy->cpu;
388         if (per_cpu(cpufreq_stats_table, cpu))
389                 return -EBUSY;
390         stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
391         if ((stat) == NULL)
392                 return -ENOMEM;
393
394         data = cpufreq_cpu_get(cpu);
395         if (data == NULL) {
396                 ret = -EINVAL;
397                 goto error_get_fail;
398         }
399
400         ret = sysfs_create_group(&data->kobj, &stats_attr_group);
401         if (ret)
402                 goto error_out;
403
404         stat->cpu = cpu;
405         per_cpu(cpufreq_stats_table, cpu) = stat;
406
407
408         alloc_size = count * sizeof(int) + count * sizeof(u64);
409
410 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
411         alloc_size += count * count * sizeof(int);
412 #endif
413         stat->max_state = count;
414         stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
415         if (!stat->time_in_state) {
416                 ret = -ENOMEM;
417                 goto error_out;
418         }
419         stat->freq_table = (unsigned int *)(stat->time_in_state + count);
420
421 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
422         stat->trans_table = stat->freq_table + count;
423 #endif
424         j = 0;
425         for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
426                 unsigned int freq = table[i].frequency;
427                 if (freq == CPUFREQ_ENTRY_INVALID)
428                         continue;
429                 if (freq_table_get_index(stat, freq) == -1)
430                         stat->freq_table[j++] = freq;
431         }
432         stat->state_num = j;
433         spin_lock(&cpufreq_stats_lock);
434         stat->last_time = get_jiffies_64();
435         stat->last_index = freq_table_get_index(stat, policy->cur);
436 #ifdef CONFIG_ARCH_ROCKCHIP
437         if (stat->last_index == -1)
438                 stat->last_index = 0;
439 #endif
440         spin_unlock(&cpufreq_stats_lock);
441         cpufreq_cpu_put(data);
442         return 0;
443 error_out:
444         cpufreq_cpu_put(data);
445 error_get_fail:
446         kfree(stat);
447         per_cpu(cpufreq_stats_table, cpu) = NULL;
448         return ret;
449 }
450
451 static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
452 {
453         struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
454                         policy->last_cpu);
455
456         pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
457                         policy->cpu, policy->last_cpu);
458         per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
459                         policy->last_cpu);
460         per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
461         stat->cpu = policy->cpu;
462 }
463
464 static void cpufreq_powerstats_create(unsigned int cpu,
465                 struct cpufreq_frequency_table *table, int count) {
466         unsigned int alloc_size, i = 0, j = 0, ret = 0;
467         struct cpufreq_power_stats *powerstats;
468         struct device_node *cpu_node;
469         char device_path[16];
470
471         powerstats = kzalloc(sizeof(struct cpufreq_power_stats),
472                         GFP_KERNEL);
473         if (!powerstats)
474                 return;
475
476         /* Allocate memory for freq table per cpu as well as clockticks per
477          * freq*/
478         alloc_size = count * sizeof(unsigned int) +
479                 count * sizeof(unsigned int);
480         powerstats->curr = kzalloc(alloc_size, GFP_KERNEL);
481         if (!powerstats->curr) {
482                 kfree(powerstats);
483                 return;
484         }
485         powerstats->freq_table = powerstats->curr + count;
486
487         spin_lock(&cpufreq_stats_lock);
488         for (i = 0; table[i].frequency != CPUFREQ_TABLE_END && j < count; i++) {
489                 unsigned int freq = table[i].frequency;
490
491                 if (freq == CPUFREQ_ENTRY_INVALID)
492                         continue;
493                 powerstats->freq_table[j++] = freq;
494         }
495         powerstats->state_num = j;
496
497         snprintf(device_path, sizeof(device_path), "/cpus/cpu@%d", cpu);
498         cpu_node = of_find_node_by_path(device_path);
499         if (cpu_node) {
500                 ret = of_property_read_u32_array(cpu_node, "current",
501                                 powerstats->curr, count);
502                 if (ret) {
503                         kfree(powerstats->curr);
504                         kfree(powerstats);
505                         powerstats = NULL;
506                 }
507         }
508         per_cpu(cpufreq_power_stats, cpu) = powerstats;
509         spin_unlock(&cpufreq_stats_lock);
510 }
511
/* sort() comparator for unsigned ints: ascending order (-1/0/1). */
static int compare_for_sort(const void *lhs_ptr, const void *rhs_ptr)
{
	unsigned int a = *(const unsigned int *)lhs_ptr;
	unsigned int b = *(const unsigned int *)rhs_ptr;

	return (a > b) - (a < b);
}
522
523 static bool check_all_freq_table(unsigned int freq)
524 {
525         int i;
526         for (i = 0; i < all_freq_table->table_size; i++) {
527                 if (freq == all_freq_table->freq_table[i])
528                         return true;
529         }
530         return false;
531 }
532
533 static void create_all_freq_table(void)
534 {
535         all_freq_table = kzalloc(sizeof(struct all_freq_table),
536                         GFP_KERNEL);
537         if (!all_freq_table)
538                 pr_warn("could not allocate memory for all_freq_table\n");
539         return;
540 }
541
542 static void add_all_freq_table(unsigned int freq)
543 {
544         unsigned int size;
545         size = sizeof(unsigned int) * (all_freq_table->table_size + 1);
546         all_freq_table->freq_table = krealloc(all_freq_table->freq_table,
547                         size, GFP_ATOMIC);
548         if (IS_ERR(all_freq_table->freq_table)) {
549                 pr_warn("Could not reallocate memory for freq_table\n");
550                 all_freq_table->freq_table = NULL;
551                 return;
552         }
553         all_freq_table->freq_table[all_freq_table->table_size++] = freq;
554 }
555
556 static void cpufreq_allstats_create(unsigned int cpu,
557                 struct cpufreq_frequency_table *table, int count)
558 {
559         int i , j = 0;
560         unsigned int alloc_size;
561         struct all_cpufreq_stats *all_stat;
562         bool sort_needed = false;
563
564         all_stat = kzalloc(sizeof(struct all_cpufreq_stats),
565                         GFP_KERNEL);
566         if (!all_stat) {
567                 pr_warn("Cannot allocate memory for cpufreq stats\n");
568                 return;
569         }
570
571         /*Allocate memory for freq table per cpu as well as clockticks per freq*/
572         alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
573         all_stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
574         if (!all_stat->time_in_state) {
575                 pr_warn("Cannot allocate memory for cpufreq time_in_state\n");
576                 kfree(all_stat);
577                 all_stat = NULL;
578                 return;
579         }
580         all_stat->freq_table = (unsigned int *)
581                 (all_stat->time_in_state + count);
582
583         spin_lock(&cpufreq_stats_lock);
584         for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
585                 unsigned int freq = table[i].frequency;
586                 if (freq == CPUFREQ_ENTRY_INVALID)
587                         continue;
588                 all_stat->freq_table[j++] = freq;
589                 if (all_freq_table && !check_all_freq_table(freq)) {
590                         add_all_freq_table(freq);
591                         sort_needed = true;
592                 }
593         }
594         if (sort_needed)
595                 sort(all_freq_table->freq_table, all_freq_table->table_size,
596                                 sizeof(unsigned int), &compare_for_sort, NULL);
597         all_stat->state_num = j;
598         per_cpu(all_cpufreq_stats, cpu) = all_stat;
599         spin_unlock(&cpufreq_stats_lock);
600 }
601
602 static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
603                 unsigned long val, void *data)
604 {
605         int ret, count = 0, i;
606         struct cpufreq_policy *policy = data;
607         struct cpufreq_frequency_table *table;
608         unsigned int cpu_num, cpu = policy->cpu;
609
610         if (val == CPUFREQ_UPDATE_POLICY_CPU) {
611                 cpufreq_stats_update_policy_cpu(policy);
612                 return 0;
613         }
614
615         if (val != CPUFREQ_NOTIFY)
616                 return 0;
617         table = cpufreq_frequency_get_table(cpu);
618         if (!table)
619                 return 0;
620
621         for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
622                 unsigned int freq = table[i].frequency;
623
624                 if (freq == CPUFREQ_ENTRY_INVALID)
625                         continue;
626                 count++;
627         }
628
629         if (!per_cpu(all_cpufreq_stats, cpu))
630                 cpufreq_allstats_create(cpu, table, count);
631
632         for_each_possible_cpu(cpu_num) {
633                 if (!per_cpu(cpufreq_power_stats, cpu_num))
634                         cpufreq_powerstats_create(cpu_num, table, count);
635         }
636
637         ret = cpufreq_stats_create_table(policy, table, count);
638         if (ret)
639                 return ret;
640         return 0;
641 }
642
/*
 * Frequency-transition notifier: on POSTCHANGE, close out the time spent
 * at the outgoing frequency and bump the transition counters.
 */
static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
                unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpufreq_stats *stat;
        int old_index, new_index;

        if (val != CPUFREQ_POSTCHANGE)
                return 0;

        stat = per_cpu(cpufreq_stats_table, freq->cpu);
        if (!stat)
                return 0;

        old_index = stat->last_index;
        new_index = freq_table_get_index(stat, freq->new);

        /* We can't do stat->time_in_state[-1]= .. */
        if (old_index == -1 || new_index == -1)
                return 0;

        /* Account the interval spent at the old frequency before switching. */
        cpufreq_stats_update(freq->cpu);

        if (old_index == new_index)
                return 0;

        spin_lock(&cpufreq_stats_lock);
        stat->last_index = new_index;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        /* Row = old frequency, column = new frequency. */
        stat->trans_table[old_index * stat->max_state + new_index]++;
#endif
        stat->total_trans++;
        spin_unlock(&cpufreq_stats_lock);
        return 0;
}
678
679 static int cpufreq_stats_create_table_cpu(unsigned int cpu)
680 {
681         struct cpufreq_policy *policy;
682         struct cpufreq_frequency_table *table;
683         int i, count, cpu_num, ret = -ENODEV;
684
685         policy = cpufreq_cpu_get(cpu);
686         if (!policy)
687                 return -ENODEV;
688
689         table = cpufreq_frequency_get_table(cpu);
690         if (!table)
691                 goto out;
692
693         count = 0;
694         for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
695                 unsigned int freq = table[i].frequency;
696
697                 if (freq != CPUFREQ_ENTRY_INVALID)
698                         count++;
699         }
700
701         if (!per_cpu(all_cpufreq_stats, cpu))
702                 cpufreq_allstats_create(cpu, table, count);
703
704         for_each_possible_cpu(cpu_num) {
705                 if (!per_cpu(cpufreq_power_stats, cpu_num))
706                         cpufreq_powerstats_create(cpu_num, table, count);
707         }
708
709         ret = cpufreq_stats_create_table(policy, table, count);
710
711 out:
712         cpufreq_cpu_put(policy);
713         return ret;
714 }
715
/*
 * CPU hotplug callback.  Teardown is two-stage: sysfs is removed at
 * DOWN_PREPARE (while the policy is still valid), the stat tables are
 * freed at CPU_DEAD, and DOWN_FAILED re-creates what was removed.
 */
static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
                                               unsigned long action,
                                               void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                /* Presumably re-triggers the policy notifier, which builds
                 * the tables — confirm against cpufreq core. */
                cpufreq_update_policy(cpu);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                cpufreq_stats_free_sysfs(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                cpufreq_stats_free_table(cpu);
                break;
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
                cpufreq_stats_create_table_cpu(cpu);
                break;
        }
        return NOTIFY_OK;
}
742
/* priority=1 so this will get called before cpufreq_remove_dev */
static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
        .notifier_call = cpufreq_stat_cpu_callback,
        .priority = 1,
};

/* Creates/migrates stats tables on policy events. */
static struct notifier_block notifier_policy_block = {
        .notifier_call = cpufreq_stat_notifier_policy
};

/* Accounts time and transition counts on every frequency change. */
static struct notifier_block notifier_trans_block = {
        .notifier_call = cpufreq_stat_notifier_trans
};
756
/*
 * Register the policy, hotplug, and transition notifiers and create the
 * global "all_time_in_state"/"current_in_state" sysfs files.  Failure to
 * register the transition notifier rolls back everything registered so
 * far; failures creating the global files are only warned about.
 */
static int cpufreq_stats_setup(void)
{
        int ret;
        unsigned int cpu;

        spin_lock_init(&cpufreq_stats_lock);
        ret = cpufreq_register_notifier(&notifier_policy_block,
                                CPUFREQ_POLICY_NOTIFIER);
        if (ret)
                return ret;

        register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
        /* Kick each online CPU so policy notifications build its tables. */
        for_each_online_cpu(cpu)
                cpufreq_update_policy(cpu);

        ret = cpufreq_register_notifier(&notifier_trans_block,
                                CPUFREQ_TRANSITION_NOTIFIER);
        if (ret) {
                /* Roll back the policy/hotplug registration and tables. */
                cpufreq_unregister_notifier(&notifier_policy_block,
                                CPUFREQ_POLICY_NOTIFIER);
                unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
                for_each_online_cpu(cpu)
                        cpufreq_stats_free_table(cpu);
                return ret;
        }

        create_all_freq_table();
        ret = sysfs_create_file(cpufreq_global_kobject,
                        &_attr_all_time_in_state.attr);
        if (ret)
                pr_warn("Cannot create sysfs file for cpufreq stats\n");

        ret = sysfs_create_file(cpufreq_global_kobject,
                        &_attr_current_in_state.attr);
        if (ret)
                pr_warn("Cannot create sysfs file for cpufreq current stats\n");

        return 0;
}
796
/*
 * Undo everything cpufreq_stats_setup() did: unregister all notifiers
 * first so no new updates arrive, then free per-CPU tables, sysfs
 * entries, and the global tables.
 */
static void cpufreq_stats_cleanup(void)
{
        unsigned int cpu;

        cpufreq_unregister_notifier(&notifier_policy_block,
                        CPUFREQ_POLICY_NOTIFIER);
        cpufreq_unregister_notifier(&notifier_trans_block,
                        CPUFREQ_TRANSITION_NOTIFIER);
        unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
        for_each_online_cpu(cpu) {
                cpufreq_stats_free_table(cpu);
                cpufreq_stats_free_sysfs(cpu);
        }
        cpufreq_allstats_free();
        cpufreq_powerstats_free();
}
813
#ifdef CONFIG_BL_SWITCHER
/*
 * big.LITTLE switcher notifier: tear the stats down before a switcher
 * enable/disable and rebuild them afterwards via the setup path.
 */
static int cpufreq_stats_switcher_notifier(struct notifier_block *nfb,
                                        unsigned long action, void *_arg)
{
        switch (action) {
        case BL_NOTIFY_PRE_ENABLE:
        case BL_NOTIFY_PRE_DISABLE:
                cpufreq_stats_cleanup();
                break;

        case BL_NOTIFY_POST_ENABLE:
        case BL_NOTIFY_POST_DISABLE:
                cpufreq_stats_setup();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static struct notifier_block switcher_notifier = {
        .notifier_call = cpufreq_stats_switcher_notifier,
};
#endif
840
/* Module init: run the common setup, then hook the bL switcher if built. */
static int __init cpufreq_stats_init(void)
{
        int ret;
        /* NOTE(review): the lock is also initialized inside
         * cpufreq_stats_setup() — this one looks redundant; confirm. */
        spin_lock_init(&cpufreq_stats_lock);

        ret = cpufreq_stats_setup();
#ifdef CONFIG_BL_SWITCHER
        if (!ret)
                bL_switcher_register_notifier(&switcher_notifier);
#endif
        return ret;
}
853
/* Module exit: unhook the bL switcher (if built) and tear everything down. */
static void __exit cpufreq_stats_exit(void)
{
#ifdef CONFIG_BL_SWITCHER
        bL_switcher_unregister_notifier(&switcher_notifier);
#endif
        cpufreq_stats_cleanup();
}
861
862 MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
863 MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
864                                 "through sysfs filesystem");
865 MODULE_LICENSE("GPL");
866
867 module_init(cpufreq_stats_init);
868 module_exit(cpufreq_stats_exit);