1 /* drivers/misc/memory_state_time.c
3 * Copyright (C) 2016 Google, Inc.
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
16 #include <linux/device.h>
17 #include <linux/err.h>
18 #include <linux/errno.h>
19 #include <linux/hashtable.h>
20 #include <linux/kconfig.h>
21 #include <linux/kernel.h>
22 #include <linux/kobject.h>
23 #include <linux/memory-state-time.h>
24 #include <linux/module.h>
25 #include <linux/mutex.h>
26 #include <linux/of_platform.h>
27 #include <linux/slab.h>
28 #include <linux/sysfs.h>
29 #include <linux/time.h>
30 #include <linux/timekeeping.h>
31 #include <linux/workqueue.h>
/* Declare a read-only sysfs kobj_attribute backed by <name>_show. */
33 #define KERNEL_ATTR_RO(_name) \
34 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
/* Declare a 0644 read-write sysfs kobj_attribute backed by
 * <name>_show / <name>_store.
 */
36 #define KERNEL_ATTR_RW(_name) \
37 static struct kobj_attribute _name##_attr = \
38 __ATTR(_name, 0644, _name##_show, _name##_store)
/* Hashtable of struct freq_entry keyed by frequency; 2^4 = 16 chains. */
40 #define FREQ_HASH_BITS 4
41 DECLARE_HASHTABLE(freq_hash_table, FREQ_HASH_BITS);
/* Serializes updates to the per-frequency time buckets and to
 * curr_freq / curr_bw (taken by the workqueue handlers below).
 */
43 static DEFINE_MUTEX(mem_lock);
/* Sysfs directory name under /sys/kernel/. */
45 #define TAG "memory_state_time"
/* Device-tree node and property names read in get_bw_buckets() /
 * freq_buckets_init().
 */
46 #define BW_NODE "/soc/memory-state-time"
47 #define FREQ_TBL "freq-tbl"
48 #define BW_TBL "bw-buckets"
49 #define NUM_SOURCES "num-sources"
/* Bandwidth bucket upper bounds, read from the BW_TBL DT property. */
55 static u32 *bw_buckets;
/* Supported frequencies, read from the FREQ_TBL DT property. */
56 static u32 *freq_buckets;
/* Number of entries in bw_buckets. */
58 static int num_buckets;
/* How many bandwidth sources have registered so far (bounded by
 * num_sources).
 */
59 static int registered_bw_sources;
/* Boot-clock timestamp (ns) of the last accounting update; see
 * get_time_diff().
 */
60 static u64 last_update;
/* Set once probe/DT parsing succeeded; gates the update callbacks. */
61 static bool init_success;
/* Single-threaded workqueue that serializes freq/bw update work items. */
62 static struct workqueue_struct *memory_wq;
/* Max number of bandwidth sources; DT NUM_SOURCES may override the
 * default of 10.
 */
63 static u32 num_sources = 10;
/* Last reported bandwidth per registered source, indexed by source id. */
64 static int *bandwidths;
/* NOTE(review): the opening of struct freq_entry is not visible in this
 * view; fields below are its tail.  Per-frequency accounting node stored
 * in freq_hash_table.
 */
68 u64 *buckets; /* Bandwidth buckets. */
69 struct hlist_node hash;
/* Work item container queued on memory_wq; carries the new value,
 * source id and capture timestamp to the update handlers.
 * NOTE(review): trailing fields (time_now, value, id) are referenced by
 * the handlers below but their declarations are missing from this view.
 */
72 struct queue_container {
73 struct work_struct update_state;
/*
 * find_bucket - map a bandwidth value to its accounting bucket index.
 *
 * Buckets are designated by their maximum: the first entry of bw_buckets
 * strictly greater than @bw wins.  Falls back to the last bucket
 * (num_buckets - 1) when no bound matches or bw_buckets is NULL.
 * NOTE(review): the early-return inside the loop is missing from this
 * view -- confirm against the full source.
 */
80 static int find_bucket(int bw)
84 if (bw_buckets != NULL) {
85 for (i = 0; i < num_buckets; i++) {
86 if (bw_buckets[i] > bw) {
87 pr_debug("Found bucket %d for bandwidth %d\n",
92 return num_buckets - 1;
/*
 * get_time_diff - return the nanoseconds elapsed since last_update and
 * advance last_update to @time_now.
 *
 * Called from update_table(), i.e. with mem_lock held by the work
 * handlers, so the read-modify-write of last_update is serialized.
 */
97 static u64 get_time_diff(u64 time_now)
101 ms = time_now - last_update;
102 last_update = time_now;
/*
 * show_stat_show - sysfs read handler for /sys/kernel/memory_state_time/
 * show_stat.
 *
 * For each supported frequency, emits the frequency followed by the
 * accumulated time spent in each bandwidth bucket.  All output is
 * bounded by PAGE_SIZE via scnprintf.
 * NOTE(review): several lines (locals, locking, return) are missing
 * from this view -- presumably mem_lock is taken around the table walk;
 * confirm against the full source.
 */
106 static ssize_t show_stat_show(struct kobject *kobj,
107 struct kobj_attribute *attr, char *buf)
111 struct freq_entry *freq_entry;
/* Walk frequencies in table order; the hashtable lookup below finds the
 * matching freq_entry for each one.
 */
113 for (i = 0; i < num_freqs; i++) {
114 hash_for_each_possible(freq_hash_table, freq_entry, hash,
116 if (freq_entry->freq == freq_buckets[i]) {
117 len += scnprintf(buf + len, PAGE_SIZE - len,
118 "%d ", freq_buckets[i]);
119 if (len >= PAGE_SIZE)
121 for (j = 0; j < num_buckets; j++) {
122 len += scnprintf(buf + len,
125 freq_entry->buckets[j]);
127 len += scnprintf(buf + len, PAGE_SIZE - len,
132 pr_debug("Current Time: %llu\n", ktime_get_boot_ns());
/* Expose the handler above as a read-only "show_stat" attribute. */
135 KERNEL_ATTR_RO(show_stat);
/*
 * update_table - charge the time since the last update to the bucket for
 * the current (curr_freq, curr_bw) state.
 *
 * Must be called with mem_lock held (see the work handlers below); it
 * reads curr_freq/curr_bw and mutates the matching freq_entry.
 */
137 static void update_table(u64 time_now)
139 struct freq_entry *freq_entry;
141 pr_debug("Last known bw %d freq %d\n", curr_bw, curr_freq);
142 hash_for_each_possible(freq_hash_table, freq_entry, hash, curr_freq) {
143 if (curr_freq == freq_entry->freq) {
144 freq_entry->buckets[find_bucket(curr_bw)]
145 += get_time_diff(time_now);
/*
 * freq_exists - return true if @freq appears in the DT-provided
 * freq_buckets table, false otherwise.
 */
151 static bool freq_exists(int freq)
155 for (i = 0; i < num_freqs; i++) {
156 if (freq == freq_buckets[i])
/*
 * calculate_total_bw - record @bw for source @index and return the sum
 * of the latest bandwidths reported by all registered sources.
 *
 * Called with mem_lock held from bw_update_do_work().
 */
162 static int calculate_total_bw(int bw, int index)
167 pr_debug("memory_state_time New bw %d for id %d\n", bw, index);
168 bandwidths[index] = bw;
169 for (i = 0; i < registered_bw_sources; i++)
170 total_bw += bandwidths[i];
/*
 * freq_update_do_work - workqueue handler for a frequency change.
 *
 * Under mem_lock: charges the elapsed time to the *old* state via
 * update_table(), then switches curr_freq to the new value.  Frees the
 * queue_container allocated by memory_state_freq_update().
 */
174 static void freq_update_do_work(struct work_struct *work)
176 struct queue_container *freq_state_update
177 = container_of(work, struct queue_container,
179 if (freq_state_update) {
180 mutex_lock(&mem_lock);
181 update_table(freq_state_update->time_now);
182 curr_freq = freq_state_update->value;
183 mutex_unlock(&mem_lock);
184 kfree(freq_state_update);
/*
 * bw_update_do_work - workqueue handler for a bandwidth change.
 *
 * Under mem_lock: charges elapsed time to the old state, then recomputes
 * curr_bw as the sum over all sources with the reporting source's new
 * value.  Frees the queue_container allocated by memory_state_bw_update().
 */
188 static void bw_update_do_work(struct work_struct *work)
190 struct queue_container *bw_state_update
191 = container_of(work, struct queue_container,
193 if (bw_state_update) {
194 mutex_lock(&mem_lock);
195 update_table(bw_state_update->time_now);
196 curr_bw = calculate_total_bw(bw_state_update->value,
197 bw_state_update->id);
198 mutex_unlock(&mem_lock);
199 kfree(bw_state_update);
/*
 * memory_state_freq_update - update_call hook handed to frequency
 * sources; queues a freq change for deferred processing on memory_wq.
 *
 * Ignores the update unless the config option is enabled, init
 * succeeded, and the value is a known frequency.  The timestamp is
 * captured here so queueing latency does not skew accounting.
 * NOTE(review): the kmalloc NULL check is missing from this view --
 * confirm against the full source.
 */
203 static void memory_state_freq_update(struct memory_state_update_block *ub,
206 if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
207 if (freq_exists(value) && init_success) {
208 struct queue_container *freq_container
209 = kmalloc(sizeof(struct queue_container),
213 INIT_WORK(&freq_container->update_state,
214 freq_update_do_work);
215 freq_container->time_now = ktime_get_boot_ns();
216 freq_container->value = value;
217 pr_debug("Scheduling freq update in work queue\n");
218 queue_work(memory_wq, &freq_container->update_state);
220 pr_debug("Freq does not exist.\n");
/*
 * memory_state_bw_update - update_call hook handed to bandwidth sources;
 * queues a bandwidth change for deferred processing on memory_wq.
 *
 * Records the source id from @ub so the handler can attribute the value.
 * NOTE(review): the init_success guard and kmalloc NULL check are missing
 * from this view -- confirm against the full source.
 */
225 static void memory_state_bw_update(struct memory_state_update_block *ub,
228 if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
230 struct queue_container *bw_container
231 = kmalloc(sizeof(struct queue_container),
235 INIT_WORK(&bw_container->update_state,
237 bw_container->time_now = ktime_get_boot_ns();
238 bw_container->value = value;
239 bw_container->id = ub->id;
240 pr_debug("Scheduling bandwidth update in work queue\n");
241 queue_work(memory_wq, &bw_container->update_state);
/*
 * memory_state_register_frequency_source - allocate an update block for
 * a memory-frequency source.
 *
 * Returns a block whose update_call is memory_state_freq_update, or an
 * error when the config option is disabled.
 * NOTE(review): the kmalloc NULL check and return statements are missing
 * from this view -- confirm ownership (caller vs. driver frees the block)
 * against the full source.
 */
246 struct memory_state_update_block *memory_state_register_frequency_source(void)
248 struct memory_state_update_block *block;
250 if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
251 pr_debug("Allocating frequency source\n");
252 block = kmalloc(sizeof(struct memory_state_update_block),
256 block->update_call = memory_state_freq_update;
259 pr_err("Config option disabled.\n");
262 EXPORT_SYMBOL_GPL(memory_state_register_frequency_source);
/*
 * memory_state_register_bandwidth_source - allocate an update block for
 * a bandwidth source and assign it the next free source id.
 *
 * Fails (logs an error) once registered_bw_sources reaches the
 * DT-configured num_sources limit.
 * NOTE(review): the kmalloc NULL check and the error-path return are
 * missing from this view -- confirm against the full source.
 */
264 struct memory_state_update_block *memory_state_register_bandwidth_source(void)
266 struct memory_state_update_block *block;
268 if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
269 pr_debug("Allocating bandwidth source %d\n",
270 registered_bw_sources);
271 block = kmalloc(sizeof(struct memory_state_update_block),
275 block->update_call = memory_state_bw_update;
276 if (registered_bw_sources < num_sources) {
277 block->id = registered_bw_sources++;
279 pr_err("Unable to allocate source; max number reached\n");
285 pr_err("Config option disabled.\n");
288 EXPORT_SYMBOL_GPL(memory_state_register_bandwidth_source);
290 /* Buckets are designated by their maximum.
291 * Returns the buckets decided by the capability of the device.
/*
 * get_bw_buckets - parse NUM_SOURCES and the BW_TBL bandwidth-bucket
 * table from the device tree; allocates bandwidths[] (one slot per
 * source) and bw_buckets[] with devm_* so cleanup is automatic.
 *
 * On a failed of_property_read_u32_array both allocations are released
 * explicitly and an error is logged.
 * NOTE(review): the allocation NULL checks and return statements are
 * missing from this view; num_buckets is presumably set to lenb here --
 * confirm against the full source.
 */
293 static int get_bw_buckets(struct device *dev)
296 struct device_node *node = dev->of_node;
298 of_property_read_u32(node, NUM_SOURCES, &num_sources);
299 if (of_find_property(node, BW_TBL, &lenb)) {
300 bandwidths = devm_kzalloc(dev,
301 sizeof(*bandwidths) * num_sources, GFP_KERNEL);
304 lenb /= sizeof(*bw_buckets);
305 bw_buckets = devm_kzalloc(dev, lenb * sizeof(*bw_buckets),
308 devm_kfree(dev, bandwidths);
311 ret = of_property_read_u32_array(node, BW_TBL, bw_buckets,
314 devm_kfree(dev, bandwidths);
315 devm_kfree(dev, bw_buckets);
316 pr_err("Unable to read bandwidth table from device tree.\n");
325 /* Adds struct freq_entry nodes to the hashtable for each compatible frequency.
326 * Returns the supported number of frequencies.
/*
 * freq_buckets_init - parse the FREQ_TBL table from the device tree,
 * seed curr_freq with the lowest frequency, and add one freq_entry
 * (with a zeroed per-bucket time array) to freq_hash_table per
 * frequency.
 *
 * On a failed table read, freq_buckets is released and an error logged;
 * a failed buckets allocation frees the partially built freq_entry.
 * NOTE(review): allocation NULL checks, num_freqs assignment and return
 * statements are missing from this view -- confirm against the full
 * source.
 */
328 static int freq_buckets_init(struct device *dev)
330 struct freq_entry *freq_entry;
333 struct device_node *node = dev->of_node;
335 if (of_find_property(node, FREQ_TBL, &lenf)) {
336 lenf /= sizeof(*freq_buckets);
337 freq_buckets = devm_kzalloc(dev, lenf * sizeof(*freq_buckets),
341 pr_debug("freqs found len %d\n", lenf);
342 ret = of_property_read_u32_array(node, FREQ_TBL, freq_buckets,
345 devm_kfree(dev, freq_buckets);
346 pr_err("Unable to read frequency table from device tree.\n");
349 pr_debug("ret freq %d\n", ret);
/* Start accounting from the lowest supported frequency. */
352 curr_freq = freq_buckets[LOWEST_FREQ];
354 for (i = 0; i < num_freqs; i++) {
355 freq_entry = devm_kzalloc(dev, sizeof(struct freq_entry),
359 freq_entry->buckets = devm_kzalloc(dev, sizeof(u64)*num_buckets,
361 if (!freq_entry->buckets) {
362 devm_kfree(dev, freq_entry);
365 pr_debug("memory_state_time Adding freq to ht %d\n",
367 freq_entry->freq = freq_buckets[i];
368 hash_add(freq_hash_table, &freq_entry->hash, freq_buckets[i]);
/* /sys/kernel/memory_state_time directory, created in module init. */
373 struct kobject *memory_kobj;
374 EXPORT_SYMBOL_GPL(memory_kobj);
/* Attributes exposed under memory_kobj (currently only show_stat). */
376 static struct attribute *memory_attrs[] = {
377 &show_stat_attr.attr,
381 static struct attribute_group memory_attr_group = {
382 .attrs = memory_attrs,
/*
 * memory_state_time_probe - platform probe: parse DT tables, seed the
 * accounting timestamp, and (presumably) set init_success on success.
 * NOTE(review): error-return lines after each call are missing from this
 * view -- confirm against the full source.
 */
385 static int memory_state_time_probe(struct platform_device *pdev)
389 error = get_bw_buckets(&pdev->dev);
392 error = freq_buckets_init(&pdev->dev);
/* All state intervals are measured from this point on the boot clock. */
395 last_update = ktime_get_boot_ns();
398 pr_debug("memory_state_time initialized with num_freqs %d\n",
/* Bind against the "memory-state-time" device-tree node. */
403 static const struct of_device_id match_table[] = {
404 { .compatible = "memory-state-time" },
408 static struct platform_driver memory_state_time_driver = {
409 .probe = memory_state_time_probe,
411 .name = "memory-state-time",
412 .of_match_table = match_table,
413 .owner = THIS_MODULE,
/*
 * memory_state_time_init - module init: set up the frequency hashtable,
 * the single-threaded update workqueue, the sysfs directory and
 * attribute group, then register the platform driver.
 *
 * Uses goto-based unwinding (group/kobj/wq labels) so each failure
 * releases exactly the resources acquired before it.
 * NOTE(review): the NULL/error checks and goto statements between steps
 * are missing from this view -- confirm against the full source.
 */
417 static int __init memory_state_time_init(void)
421 hash_init(freq_hash_table);
422 memory_wq = create_singlethread_workqueue("memory_wq");
424 pr_err("Unable to create workqueue.\n");
428 * Create sys/kernel directory for memory_state_time.
430 memory_kobj = kobject_create_and_add(TAG, kernel_kobj);
432 pr_err("Unable to allocate memory_kobj for sysfs directory.\n");
436 error = sysfs_create_group(memory_kobj, &memory_attr_group);
438 pr_err("Unable to create sysfs folder.\n");
442 error = platform_driver_register(&memory_state_time_driver);
444 pr_err("Unable to register memory_state_time platform driver.\n");
/* Unwind in reverse order of acquisition. */
449 group: sysfs_remove_group(memory_kobj, &memory_attr_group);
450 kobj: kobject_put(memory_kobj);
451 wq: destroy_workqueue(memory_wq);
454 module_init(memory_state_time_init);