3 * (C) COPYRIGHT 2013-2016 ARM Limited. All rights reserved.
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
10 * A copy of the licence is included with the program, and can also be obtained
11 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12 * Boston, MA 02110-1301, USA.
19 * Debugfs interface to dump the memory visible to the GPU
22 #include "mali_kbase_debug_mem_view.h"
23 #include "mali_kbase.h"
25 #include <linux/list.h>
26 #include <linux/file.h>
28 #ifdef CONFIG_DEBUG_FS
/*
 * One GPU memory region captured for dumping. Linked on
 * debug_mem_data.mapping_list; holds a reference on the physical
 * allocation (taken via kbase_mem_phy_alloc_get() in debug_mem_open,
 * dropped via kbase_mem_phy_alloc_put() on release) so the pages stay
 * valid while the debugfs file is open.
 * NOTE(review): fields start_pfn, nr_pages and flags are used elsewhere
 * in this file but their declarations are not visible in this view.
 */
30 struct debug_mem_mapping {
31 struct list_head node;
33 struct kbase_mem_phy_alloc *alloc;
/*
 * Per-open-file state for the mem_view debugfs file: the context being
 * dumped plus the snapshot list of debug_mem_mapping entries built in
 * debug_mem_open(). Stored in the seq_file's ->private pointer.
 */
40 struct debug_mem_data {
41 struct list_head mapping_list;
42 struct kbase_context *kctx;
/*
 * Iterator cursor handed between the seq_file ops: identifies the current
 * mapping (via its list node, field "lh") and a page offset within it
 * (field "offset") — both used by debug_mem_next()/debug_mem_show().
 * (Field declarations are outside this view.)
 */
45 struct debug_mem_seq_off {
/*
 * seq_file .start op: translate the flat position *_pos into a
 * (mapping, page-offset) cursor. Walks the snapshot mapping_list,
 * skipping whole mappings while pos is past their page count, then
 * allocates a debug_mem_seq_off pointing at the matching entry.
 * Returns the cursor, or presumably NULL at end-of-file / on allocation
 * failure (those paths are outside this view).
 */
50 static void *debug_mem_start(struct seq_file *m, loff_t *_pos)
52 struct debug_mem_data *mem_data = m->private;
53 struct debug_mem_seq_off *data;
54 struct debug_mem_mapping *map;
/* Find the mapping that contains page index 'pos'. */
57 list_for_each_entry(map, &mem_data->mapping_list, node) {
58 if (pos >= map->nr_pages) {
/* GFP_KERNEL is fine here: debugfs read context may sleep. */
61 data = kmalloc(sizeof(*data), GFP_KERNEL);
64 data->lh = &map->node;
/*
 * seq_file .stop op: tear down the iterator cursor allocated by
 * debug_mem_start() — presumably kfree(v); the body is outside this view.
 */
74 static void debug_mem_stop(struct seq_file *m, void *v)
/*
 * seq_file .next op: advance the cursor by one page. Steps to the next
 * page within the current mapping if any remain; otherwise moves to the
 * next mapping on the list, stopping (presumably returning NULL) when
 * the current mapping is the last one.
 */
79 static void *debug_mem_next(struct seq_file *m, void *v, loff_t *pos)
81 struct debug_mem_data *mem_data = m->private;
82 struct debug_mem_seq_off *data = v;
83 struct debug_mem_mapping *map;
85 map = list_entry(data->lh, struct debug_mem_mapping, node);
/* More pages left in this mapping? Just bump the offset. */
87 if (data->offset < map->nr_pages - 1) {
/* End of this mapping: stop at the tail of the list... */
93 if (list_is_last(data->lh, &mem_data->mapping_list))
/* ...otherwise move the cursor to the next mapping. */
96 data->lh = data->lh->next;
/*
 * seq_file .show op: dump one GPU page as hex words.
 *
 * Under the context's vm lock: pages beyond the allocation's committed
 * extent (nents) are reported as "Unbacked page"; otherwise the physical
 * page is vmap()'d into the kernel and printed 4 32-bit words per line,
 * each line prefixed with the page's GPU virtual address
 * ((start_pfn + offset) << PAGE_SHIFT).
 */
103 static int debug_mem_show(struct seq_file *m, void *v)
105 struct debug_mem_data *mem_data = m->private;
106 struct debug_mem_seq_off *data = v;
107 struct debug_mem_mapping *map;
111 pgprot_t prot = PAGE_KERNEL;
113 map = list_entry(data->lh, struct debug_mem_mapping, node);
/* vm lock guards alloc->nents/pages against concurrent commit changes. */
115 kbase_gpu_vm_lock(mem_data->kctx);
117 if (data->offset >= map->alloc->nents) {
118 seq_printf(m, "%016llx: Unbacked page\n\n", (map->start_pfn +
119 data->offset) << PAGE_SHIFT);
/* Match the CPU mapping attributes the region was created with:
 * uncached regions are mapped write-combined to avoid attribute
 * aliasing with the GPU's view. */
123 if (!(map->flags & KBASE_REG_CPU_CACHED))
124 prot = pgprot_writecombine(prot);
126 page = pfn_to_page(PFN_DOWN(map->alloc->pages[data->offset]));
127 mapping = vmap(&page, 1, VM_MAP, prot);
/* 4 words (16 bytes) per output line, one full page per show. */
131 for (i = 0; i < PAGE_SIZE; i += 4*sizeof(*mapping)) {
132 seq_printf(m, "%016llx:", i + ((map->start_pfn +
133 data->offset) << PAGE_SHIFT));
135 for (j = 0; j < 4*sizeof(*mapping); j += sizeof(*mapping))
136 seq_printf(m, " %08x", mapping[(i+j)/sizeof(*mapping)]);
/* NOTE(review): the vunmap() for the vmap() above is not visible in
 * this view — confirm it exists before the unlock. */
145 kbase_gpu_vm_unlock(mem_data->kctx);
/* seq_file iterator: one page of GPU-visible memory per show() call. */
149 static const struct seq_operations ops = {
150 .start = debug_mem_start,
151 .next = debug_mem_next,
152 .stop = debug_mem_stop,
153 .show = debug_mem_show,
/*
 * Open handler for the mem_view debugfs file.
 *
 * Sets up a seq_file, then — under the context's vm lock — snapshots
 * every non-empty region in the context's region rbtree into a private
 * mapping_list, taking a reference on each region's physical allocation
 * so the pages survive while the file is open. On a mid-loop allocation
 * failure the already-built list is unwound (refs dropped, nodes freed)
 * and the seq_file released.
 *
 * The inode's i_private is the /dev/mali0 struct file for the context,
 * whose private_data is the kbase_context.
 */
156 static int debug_mem_open(struct inode *i, struct file *file)
158 struct file *kctx_file = i->i_private;
159 struct kbase_context *kctx = kctx_file->private_data;
161 struct debug_mem_data *mem_data;
164 ret = seq_open(file, &ops);
168 mem_data = kmalloc(sizeof(*mem_data), GFP_KERNEL);
174 mem_data->kctx = kctx;
176 INIT_LIST_HEAD(&mem_data->mapping_list);
/* Hold the vm lock across the whole rbtree walk so the region set
 * cannot change while the snapshot is taken. */
180 kbase_gpu_vm_lock(kctx);
182 for (p = rb_first(&kctx->reg_rbtree); p; p = rb_next(p)) {
183 struct kbase_va_region *reg;
184 struct debug_mem_mapping *mapping;
186 reg = rb_entry(p, struct kbase_va_region, rblink);
188 if (reg->gpu_alloc == NULL)
189 /* Empty region - ignore */
192 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
/* Allocation failed mid-walk: drop the lock and fall through to
 * the unwind below (control flow between here and the unwind is
 * outside this view). */
195 kbase_gpu_vm_unlock(kctx);
/* Pin the physical allocation; released in debug_mem_release(). */
199 mapping->alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
200 mapping->start_pfn = reg->start_pfn;
201 mapping->nr_pages = reg->nr_pages;
202 mapping->flags = reg->flags;
203 list_add_tail(&mapping->node, &mem_data->mapping_list);
206 kbase_gpu_vm_unlock(kctx);
/* Success: hand the snapshot to the seq_file. */
208 ((struct seq_file *)file->private_data)->private = mem_data;
/* Error unwind: release every reference taken so far. */
214 while (!list_empty(&mem_data->mapping_list)) {
215 struct debug_mem_mapping *mapping;
217 mapping = list_first_entry(&mem_data->mapping_list,
218 struct debug_mem_mapping, node);
219 kbase_mem_phy_alloc_put(mapping->alloc);
220 list_del(&mapping->node);
226 seq_release(i, file);
/*
 * Release handler: mirror of debug_mem_open(). Tears down the seq_file,
 * then drops the physical-allocation reference held by each snapshot
 * entry and unlinks it (the kfree of each entry and of mem_data is
 * presumably below — outside this view).
 */
230 static int debug_mem_release(struct inode *inode, struct file *file)
232 struct file *kctx_file = inode->i_private;
233 struct seq_file *sfile = file->private_data;
/* Grab mem_data before seq_release() frees the seq_file. */
234 struct debug_mem_data *mem_data = sfile->private;
235 struct debug_mem_mapping *mapping;
237 seq_release(inode, file);
239 while (!list_empty(&mem_data->mapping_list)) {
240 mapping = list_first_entry(&mem_data->mapping_list,
241 struct debug_mem_mapping, node);
242 kbase_mem_phy_alloc_put(mapping->alloc);
243 list_del(&mapping->node);
/* debugfs "mem_view" file: seq_file-backed, read-only (S_IRUGO). */
254 static const struct file_operations kbase_debug_mem_view_fops = {
255 .open = debug_mem_open,
256 .release = debug_mem_release,
/**
 * kbase_debug_mem_view_init - Initialise the mem_view debugfs file
 * @kctx_file: The /dev/mali0 file instance for the context
 *
 * This function creates a "mem_view" file which can be used to get a view of
 * the context's memory as the GPU sees it (i.e. using the GPU's page tables).
 *
 * The file is cleaned up by a call to debugfs_remove_recursive() deleting the
 * context's debugfs directory (kctx->kctx_dentry); no explicit teardown is
 * done here.
 */
271 void kbase_debug_mem_view_init(struct file *kctx_file)
273 struct kbase_context *kctx = kctx_file->private_data;
/* kctx_file is stashed as i_private so debug_mem_open() can recover
 * the context. */
275 debugfs_create_file("mem_view", S_IRUGO, kctx->kctx_dentry, kctx_file,
276 &kbase_debug_mem_view_fops);