Implement memory_state_time, used by qcom,cpubw
[firefly-linux-kernel-4.4.55.git] / drivers / misc / memory_state_time.c
1 /* drivers/misc/memory_state_time.c
2  *
3  * Copyright (C) 2016 Google, Inc.
4  *
5  * This software is licensed under the terms of the GNU General Public
6  * License version 2, as published by the Free Software Foundation, and
7  * may be copied, distributed, and modified under those terms.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  */
15
16 #include <linux/device.h>
17 #include <linux/err.h>
18 #include <linux/errno.h>
19 #include <linux/hashtable.h>
20 #include <linux/kconfig.h>
21 #include <linux/kernel.h>
22 #include <linux/kobject.h>
23 #include <linux/memory-state-time.h>
24 #include <linux/module.h>
25 #include <linux/mutex.h>
26 #include <linux/of_platform.h>
27 #include <linux/slab.h>
28 #include <linux/sysfs.h>
29 #include <linux/time.h>
30 #include <linux/timekeeping.h>
31 #include <linux/workqueue.h>
32
/* Helpers to declare read-only / read-write sysfs kobject attributes. */
#define KERNEL_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define KERNEL_ATTR_RW(_name) \
static struct kobj_attribute _name##_attr = \
	__ATTR(_name, 0644, _name##_show, _name##_store)

/* 2^4 = 16 hash buckets for the per-frequency residency table. */
#define FREQ_HASH_BITS 4
DECLARE_HASHTABLE(freq_hash_table, FREQ_HASH_BITS);

/* Serializes curr_bw/curr_freq updates and histogram accounting. */
static DEFINE_MUTEX(mem_lock);

#define TAG "memory_state_time"		/* sysfs directory name */
#define BW_NODE "/soc/memory-state-time"
#define FREQ_TBL "freq-tbl"		/* DT property: supported frequencies */
#define BW_TBL "bw-buckets"		/* DT property: bucket maximums */
#define NUM_SOURCES "num-sources"	/* DT property: number of bw sources */

/* Index into freq_buckets used as the initial current frequency. */
#define LOWEST_FREQ 2
52
static int curr_bw;		/* Sum of last-reported bw over all sources. */
static int curr_freq;		/* Most recently reported memory frequency. */
static u32 *bw_buckets;		/* Bucket maximums parsed from BW_TBL. */
static u32 *freq_buckets;	/* Supported frequencies parsed from FREQ_TBL. */
static int num_freqs;		/* Entries in freq_buckets. */
static int num_buckets;		/* Entries in bw_buckets. */
static int registered_bw_sources;	/* Bandwidth sources handed out so far. */
static u64 last_update;		/* Timestamp of the last accounting update. */
static bool init_success;	/* Set once probe parsed the DT successfully. */
static struct workqueue_struct *memory_wq;	/* Single-threaded updater. */
static u32 num_sources = 10;	/* Max bw sources; NUM_SOURCES may override. */
static int *bandwidths;		/* Per-source last reported bandwidth. */
65
/* Per-frequency node stored in freq_hash_table, keyed by frequency. */
struct freq_entry {
	int freq;
	u64 *buckets; /* Bandwidth buckets. */
	struct hlist_node hash;
};

/* One deferred frequency or bandwidth update queued on memory_wq. */
struct queue_container {
	struct work_struct update_state;
	int value;	/* New frequency or bandwidth value. */
	u64 time_now;	/* Timestamp captured when the event was reported. */
	int id;		/* Bandwidth source id (bandwidth updates only). */
	struct mutex *lock;	/* NOTE(review): never used in this file. */
};
79
80 static int find_bucket(int bw)
81 {
82         int i;
83
84         if (bw_buckets != NULL) {
85                 for (i = 0; i < num_buckets; i++) {
86                         if (bw_buckets[i] > bw) {
87                                 pr_debug("Found bucket %d for bandwidth %d\n",
88                                         i, bw);
89                                 return i;
90                         }
91                 }
92                 return num_buckets - 1;
93         }
94         return 0;
95 }
96
97 static u64 get_time_diff(u64 time_now)
98 {
99         u64 ms;
100
101         ms = time_now - last_update;
102         last_update = time_now;
103         return ms;
104 }
105
/*
 * sysfs show handler for /sys/kernel/memory_state_time/show_stat.
 *
 * Emits one line per known frequency: the frequency value followed by
 * the accumulated residency counter for each bandwidth bucket.
 *
 * NOTE(review): walks the hash table without taking mem_lock, so a
 * reader may observe a counter mid-update; confirm this best-effort
 * snapshot is acceptable.
 */
static ssize_t show_stat_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	int i, j;
	int len = 0;
	struct freq_entry *freq_entry;

	for (i = 0; i < num_freqs; i++) {
		/* Find the hash entry matching this table frequency. */
		hash_for_each_possible(freq_hash_table, freq_entry, hash,
				freq_buckets[i]) {
			if (freq_entry->freq == freq_buckets[i]) {
				len += scnprintf(buf + len, PAGE_SIZE - len,
						"%d ", freq_buckets[i]);
				/* scnprintf() caps output below PAGE_SIZE, so
				 * this guard is belt-and-braces only. */
				if (len >= PAGE_SIZE)
					break;
				for (j = 0; j < num_buckets; j++) {
					len += scnprintf(buf + len,
							PAGE_SIZE - len,
							"%llu ",
							freq_entry->buckets[j]);
				}
				len += scnprintf(buf + len, PAGE_SIZE - len,
						"\n");
			}
		}
	}
	pr_debug("Current Time: %llu\n", ktime_get_boot_ns());
	return len;
}
KERNEL_ATTR_RO(show_stat);
136
137 static void update_table(u64 time_now)
138 {
139         struct freq_entry *freq_entry;
140
141         pr_debug("Last known bw %d freq %d\n", curr_bw, curr_freq);
142         hash_for_each_possible(freq_hash_table, freq_entry, hash, curr_freq) {
143                 if (curr_freq == freq_entry->freq) {
144                         freq_entry->buckets[find_bucket(curr_bw)]
145                                         += get_time_diff(time_now);
146                         break;
147                 }
148         }
149 }
150
151 static bool freq_exists(int freq)
152 {
153         int i;
154
155         for (i = 0; i < num_freqs; i++) {
156                 if (freq == freq_buckets[i])
157                         return true;
158         }
159         return false;
160 }
161
162 static int calculate_total_bw(int bw, int index)
163 {
164         int i;
165         int total_bw = 0;
166
167         pr_debug("memory_state_time New bw %d for id %d\n", bw, index);
168         bandwidths[index] = bw;
169         for (i = 0; i < registered_bw_sources; i++)
170                 total_bw += bandwidths[i];
171         return total_bw;
172 }
173
174 static void freq_update_do_work(struct work_struct *work)
175 {
176         struct queue_container *freq_state_update
177                         = container_of(work, struct queue_container,
178                         update_state);
179         if (freq_state_update) {
180                 mutex_lock(&mem_lock);
181                 update_table(freq_state_update->time_now);
182                 curr_freq = freq_state_update->value;
183                 mutex_unlock(&mem_lock);
184                 kfree(freq_state_update);
185         }
186 }
187
188 static void bw_update_do_work(struct work_struct *work)
189 {
190         struct queue_container *bw_state_update
191                         = container_of(work, struct queue_container,
192                         update_state);
193         if (bw_state_update) {
194                 mutex_lock(&mem_lock);
195                 update_table(bw_state_update->time_now);
196                 curr_bw = calculate_total_bw(bw_state_update->value,
197                                 bw_state_update->id);
198                 mutex_unlock(&mem_lock);
199                 kfree(bw_state_update);
200         }
201 }
202
203 static void memory_state_freq_update(struct memory_state_update_block *ub,
204                 int value)
205 {
206         if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
207                 if (freq_exists(value) && init_success) {
208                         struct queue_container *freq_container
209                                 = kmalloc(sizeof(struct queue_container),
210                                 GFP_KERNEL);
211                         if (!freq_container)
212                                 return;
213                         INIT_WORK(&freq_container->update_state,
214                                         freq_update_do_work);
215                         freq_container->time_now = ktime_get_boot_ns();
216                         freq_container->value = value;
217                         pr_debug("Scheduling freq update in work queue\n");
218                         queue_work(memory_wq, &freq_container->update_state);
219                 } else {
220                         pr_debug("Freq does not exist.\n");
221                 }
222         }
223 }
224
225 static void memory_state_bw_update(struct memory_state_update_block *ub,
226                 int value)
227 {
228         if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
229                 if (init_success) {
230                         struct queue_container *bw_container
231                                 = kmalloc(sizeof(struct queue_container),
232                                 GFP_KERNEL);
233                         if (!bw_container)
234                                 return;
235                         INIT_WORK(&bw_container->update_state,
236                                         bw_update_do_work);
237                         bw_container->time_now = ktime_get_boot_ns();
238                         bw_container->value = value;
239                         bw_container->id = ub->id;
240                         pr_debug("Scheduling bandwidth update in work queue\n");
241                         queue_work(memory_wq, &bw_container->update_state);
242                 }
243         }
244 }
245
246 struct memory_state_update_block *memory_state_register_frequency_source(void)
247 {
248         struct memory_state_update_block *block;
249
250         if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
251                 pr_debug("Allocating frequency source\n");
252                 block = kmalloc(sizeof(struct memory_state_update_block),
253                                         GFP_KERNEL);
254                 if (!block)
255                         return NULL;
256                 block->update_call = memory_state_freq_update;
257                 return block;
258         }
259         pr_err("Config option disabled.\n");
260         return NULL;
261 }
262 EXPORT_SYMBOL_GPL(memory_state_register_frequency_source);
263
264 struct memory_state_update_block *memory_state_register_bandwidth_source(void)
265 {
266         struct memory_state_update_block *block;
267
268         if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
269                 pr_debug("Allocating bandwidth source %d\n",
270                                 registered_bw_sources);
271                 block = kmalloc(sizeof(struct memory_state_update_block),
272                                         GFP_KERNEL);
273                 if (!block)
274                         return NULL;
275                 block->update_call = memory_state_bw_update;
276                 if (registered_bw_sources < num_sources) {
277                         block->id = registered_bw_sources++;
278                 } else {
279                         pr_err("Unable to allocate source; max number reached\n");
280                         kfree(block);
281                         return NULL;
282                 }
283                 return block;
284         }
285         pr_err("Config option disabled.\n");
286         return NULL;
287 }
288 EXPORT_SYMBOL_GPL(memory_state_register_bandwidth_source);
289
290 /* Buckets are designated by their maximum.
291  * Returns the buckets decided by the capability of the device.
292  */
293 static int get_bw_buckets(struct device *dev)
294 {
295         int ret, lenb;
296         struct device_node *node = dev->of_node;
297
298         of_property_read_u32(node, NUM_SOURCES, &num_sources);
299         if (of_find_property(node, BW_TBL, &lenb)) {
300                 bandwidths = devm_kzalloc(dev,
301                                 sizeof(*bandwidths) * num_sources, GFP_KERNEL);
302                 if (!bandwidths)
303                         return -ENOMEM;
304                 lenb /= sizeof(*bw_buckets);
305                 bw_buckets = devm_kzalloc(dev, lenb * sizeof(*bw_buckets),
306                                 GFP_KERNEL);
307                 if (!bw_buckets) {
308                         devm_kfree(dev, bandwidths);
309                         return -ENOMEM;
310                 }
311                 ret = of_property_read_u32_array(node, BW_TBL, bw_buckets,
312                                 lenb);
313                 if (ret < 0) {
314                         devm_kfree(dev, bandwidths);
315                         devm_kfree(dev, bw_buckets);
316                         pr_err("Unable to read bandwidth table from device tree.\n");
317                         return ret;
318                 }
319         }
320         curr_bw = 0;
321         num_buckets = lenb;
322         return 0;
323 }
324
325 /* Adds struct freq_entry nodes to the hashtable for each compatible frequency.
326  * Returns the supported number of frequencies.
327  */
328 static int freq_buckets_init(struct device *dev)
329 {
330         struct freq_entry *freq_entry;
331         int i;
332         int ret, lenf;
333         struct device_node *node = dev->of_node;
334
335         if (of_find_property(node, FREQ_TBL, &lenf)) {
336                 lenf /= sizeof(*freq_buckets);
337                 freq_buckets = devm_kzalloc(dev, lenf * sizeof(*freq_buckets),
338                                 GFP_KERNEL);
339                 if (!freq_buckets)
340                         return -ENOMEM;
341                 pr_debug("freqs found len %d\n", lenf);
342                 ret = of_property_read_u32_array(node, FREQ_TBL, freq_buckets,
343                                 lenf);
344                 if (ret < 0) {
345                         devm_kfree(dev, freq_buckets);
346                         pr_err("Unable to read frequency table from device tree.\n");
347                         return ret;
348                 }
349                 pr_debug("ret freq %d\n", ret);
350         }
351         num_freqs = lenf;
352         curr_freq = freq_buckets[LOWEST_FREQ];
353
354         for (i = 0; i < num_freqs; i++) {
355                 freq_entry = devm_kzalloc(dev, sizeof(struct freq_entry),
356                                 GFP_KERNEL);
357                 if (!freq_entry)
358                         return -ENOMEM;
359                 freq_entry->buckets = devm_kzalloc(dev, sizeof(u64)*num_buckets,
360                                 GFP_KERNEL);
361                 if (!freq_entry->buckets) {
362                         devm_kfree(dev, freq_entry);
363                         return -ENOMEM;
364                 }
365                 pr_debug("memory_state_time Adding freq to ht %d\n",
366                                 freq_buckets[i]);
367                 freq_entry->freq = freq_buckets[i];
368                 hash_add(freq_hash_table, &freq_entry->hash, freq_buckets[i]);
369         }
370         return 0;
371 }
372
/* /sys/kernel/memory_state_time directory kobject (created in init). */
struct kobject *memory_kobj;
EXPORT_SYMBOL_GPL(memory_kobj);

/* Attributes exposed under the directory: show_stat only. */
static struct attribute *memory_attrs[] = {
	&show_stat_attr.attr,
	NULL
};

static struct attribute_group memory_attr_group = {
	.attrs = memory_attrs,
};
384
385 static int memory_state_time_probe(struct platform_device *pdev)
386 {
387         int error;
388
389         error = get_bw_buckets(&pdev->dev);
390         if (error)
391                 return error;
392         error = freq_buckets_init(&pdev->dev);
393         if (error)
394                 return error;
395         last_update = ktime_get_boot_ns();
396         init_success = true;
397
398         pr_debug("memory_state_time initialized with num_freqs %d\n",
399                         num_freqs);
400         return 0;
401 }
402
/* Matches the DT node declaring compatible = "memory-state-time". */
static const struct of_device_id match_table[] = {
	{ .compatible = "memory-state-time" },
	{}
};

static struct platform_driver memory_state_time_driver = {
	.probe = memory_state_time_probe,
	.driver = {
		.name = "memory-state-time",
		.of_match_table = match_table,
		.owner = THIS_MODULE,
	},
};
416
/*
 * Module init: create the single-threaded update workqueue, the
 * /sys/kernel/memory_state_time directory and its attribute group,
 * then register the platform driver.  On failure the goto chain
 * unwinds each resource in reverse order of acquisition.
 */
static int __init memory_state_time_init(void)
{
	int error;

	hash_init(freq_hash_table);
	memory_wq = create_singlethread_workqueue("memory_wq");
	if (!memory_wq) {
		pr_err("Unable to create workqueue.\n");
		return -EINVAL;
	}
	/*
	 * Create sys/kernel directory for memory_state_time.
	 */
	memory_kobj = kobject_create_and_add(TAG, kernel_kobj);
	if (!memory_kobj) {
		pr_err("Unable to allocate memory_kobj for sysfs directory.\n");
		error = -ENOMEM;
		goto wq;
	}
	error = sysfs_create_group(memory_kobj, &memory_attr_group);
	if (error) {
		pr_err("Unable to create sysfs folder.\n");
		goto kobj;
	}

	error = platform_driver_register(&memory_state_time_driver);
	if (error) {
		pr_err("Unable to register memory_state_time platform driver.\n");
		goto group;
	}
	return 0;

group:	sysfs_remove_group(memory_kobj, &memory_attr_group);
kobj:	kobject_put(memory_kobj);
wq:	destroy_workqueue(memory_wq);
	return error;
}
module_init(memory_state_time_init);