MALI: rockchip: upgrade midgard DDK to r11p0-00rel0
[firefly-linux-kernel-4.4.55.git] drivers/gpu/arm/midgard/mali_kbase_device.c
/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */



/*
 * Base kernel device APIs
 */

#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>

#include <mali_kbase.h>
#include <mali_kbase_defs.h>
#include <mali_kbase_hw.h>
#include <mali_kbase_config_defaults.h>

#include <mali_kbase_profiling_gator_api.h>

/* NOTE: Magic - 0x45435254 (TRCE in ASCII).
 * Supports the tracing feature provided in the base module.
 * Please keep it in sync with the value in the base module.
 */
#define TRACE_BUFFER_HEADER_SPECIAL 0x45435254
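/*
 * Illustrative only: read byte-by-byte on a little-endian CPU, the magic
 * word spells out the buffer's name:
 *
 *   0x45435254  ->  bytes 0x54 0x52 0x43 0x45  ->  'T' 'R' 'C' 'E'
 */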

#if KBASE_TRACE_ENABLE
static const char *kbasep_trace_code_string[] = {
        /* IMPORTANT: this uses a special #include of a non-standard header
         * file; it must appear at the start of the array. */
#define KBASE_TRACE_CODE_MAKE_CODE(X) # X
#include "mali_kbase_trace_defs.h"
#undef  KBASE_TRACE_CODE_MAKE_CODE
};
#endif
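
/*
 * Illustrative only: mali_kbase_trace_defs.h is an X-macro list, so with
 * KBASE_TRACE_CODE_MAKE_CODE(X) defined as #X each entry expands to its
 * own name as a string; a hypothetical entry
 *
 *   KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ)
 *
 * would become the array element "CORE_GPU_IRQ".
 */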

#define DEBUG_MESSAGE_SIZE 256

static int kbasep_trace_init(struct kbase_device *kbdev);
static void kbasep_trace_term(struct kbase_device *kbdev);
static void kbasep_trace_hook_wrapper(void *param);

struct kbase_device *kbase_device_alloc(void)
{
        return kzalloc(sizeof(struct kbase_device), GFP_KERNEL);
}

static int kbase_device_as_init(struct kbase_device *kbdev, int i)
{
        const char format[] = "mali_mmu%d";
        char name[sizeof(format)];
        const char poke_format[] = "mali_mmu%d_poker";
        char poke_name[sizeof(poke_format)];

        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
                snprintf(poke_name, sizeof(poke_name), poke_format, i);

        snprintf(name, sizeof(name), format, i);

        kbdev->as[i].number = i;
        kbdev->as[i].fault_addr = 0ULL;

        kbdev->as[i].pf_wq = alloc_workqueue(name, 0, 1);
        if (!kbdev->as[i].pf_wq)
                return -ENOMEM;

        mutex_init(&kbdev->as[i].transaction_mutex);
        INIT_WORK(&kbdev->as[i].work_pagefault, page_fault_worker);
        INIT_WORK(&kbdev->as[i].work_busfault, bus_fault_worker);

        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
                struct hrtimer *poke_timer = &kbdev->as[i].poke_timer;
                struct work_struct *poke_work = &kbdev->as[i].poke_work;

                kbdev->as[i].poke_wq = alloc_workqueue(poke_name, 0, 1);
                if (!kbdev->as[i].poke_wq) {
                        destroy_workqueue(kbdev->as[i].pf_wq);
                        return -ENOMEM;
                }
                KBASE_DEBUG_ASSERT(!object_is_on_stack(poke_work));
                INIT_WORK(poke_work, kbasep_as_do_poke);

                hrtimer_init(poke_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

                poke_timer->function = kbasep_as_poke_timer_callback;

                kbdev->as[i].poke_refcount = 0;
                kbdev->as[i].poke_state = 0u;
        }

        return 0;
}

static void kbase_device_as_term(struct kbase_device *kbdev, int i)
{
        destroy_workqueue(kbdev->as[i].pf_wq);
        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
                destroy_workqueue(kbdev->as[i].poke_wq);
}

static int kbase_device_all_as_init(struct kbase_device *kbdev)
{
        int i, err;

        for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
                err = kbase_device_as_init(kbdev, i);
                if (err)
                        goto free_workqs;
        }

        return 0;

free_workqs:
        /* Unwind only the address spaces that were successfully set up */
        while (i-- > 0)
                kbase_device_as_term(kbdev, i);

        return err;
}

static void kbase_device_all_as_term(struct kbase_device *kbdev)
{
        int i;

        for (i = 0; i < kbdev->nr_hw_address_spaces; i++)
                kbase_device_as_term(kbdev, i);
}

int kbase_device_init(struct kbase_device * const kbdev)
{
        int i, err;
#ifdef CONFIG_ARM64
        struct device_node *np = NULL;
#endif /* CONFIG_ARM64 */

        spin_lock_init(&kbdev->mmu_mask_change);
#ifdef CONFIG_ARM64
        kbdev->cci_snoop_enabled = false;
        np = kbdev->dev->of_node;
        if (np != NULL) {
                if (of_property_read_u32(np, "snoop_enable_smc",
                                        &kbdev->snoop_enable_smc))
                        kbdev->snoop_enable_smc = 0;
                if (of_property_read_u32(np, "snoop_disable_smc",
                                        &kbdev->snoop_disable_smc))
                        kbdev->snoop_disable_smc = 0;
                /* Either both or none of the calls should be provided. */
                if (!((kbdev->snoop_disable_smc == 0
                        && kbdev->snoop_enable_smc == 0)
                        || (kbdev->snoop_disable_smc != 0
                        && kbdev->snoop_enable_smc != 0))) {
                        WARN_ON(1);
                        err = -EINVAL;
                        goto fail;
                }
        }
#endif /* CONFIG_ARM64 */
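
        /*
         * Illustrative only (hypothetical SMC function IDs): the two
         * properties read above must be given together in the device
         * tree, e.g.
         *
         *   gpu@ffa30000 {
         *           snoop_enable_smc = <0x82000006>;
         *           snoop_disable_smc = <0x82000007>;
         *   };
         */
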
        /* Get the list of workarounds for issues on the current HW
         * (identified by the GPU_ID register)
         */
        err = kbase_hw_set_issues_mask(kbdev);
        if (err)
                goto fail;

        /* Set the list of features available on the current HW
         * (identified by the GPU_ID register)
         */
        kbase_hw_set_features_mask(kbdev);

        kbase_gpuprops_set_features(kbdev);

        /* On Linux 4.0+, DMA coherency is determined from the device tree */
#if defined(CONFIG_ARM64) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
        set_dma_ops(kbdev->dev, &noncoherent_swiotlb_dma_ops);
#endif

        /* Work around a pre-3.13 Linux issue where dma_mask is NULL when
         * the device structure was created by device-tree
         */
        if (!kbdev->dev->dma_mask)
                kbdev->dev->dma_mask = &kbdev->dev->coherent_dma_mask;

        err = dma_set_mask(kbdev->dev,
                        DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
        if (err)
                goto dma_set_mask_failed;

        err = dma_set_coherent_mask(kbdev->dev,
                        DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
        if (err)
                goto dma_set_mask_failed;

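        /*
         * Illustrative only: DMA_BIT_MASK(n) is ((1ULL << n) - 1) for
         * n < 64, so a GPU reporting pa_bits == 40 gets both masks set to
         * 0x000000ffffffffff, i.e. it may address up to 1 TiB of physical
         * memory.
         */
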
        kbdev->nr_hw_address_spaces = kbdev->gpu_props.num_address_spaces;

        err = kbase_device_all_as_init(kbdev);
        if (err)
                goto as_init_failed;

        spin_lock_init(&kbdev->hwcnt.lock);

        err = kbasep_trace_init(kbdev);
        if (err)
                goto term_as;

        mutex_init(&kbdev->cacheclean_lock);

#ifdef CONFIG_MALI_TRACE_TIMELINE
        for (i = 0; i < BASE_JM_MAX_NR_SLOTS; ++i)
                kbdev->timeline.slot_atoms_submitted[i] = 0;

        for (i = 0; i <= KBASEP_TIMELINE_PM_EVENT_LAST; ++i)
                atomic_set(&kbdev->timeline.pm_event_uid[i], 0);
#endif /* CONFIG_MALI_TRACE_TIMELINE */

        /* fbdump profiling controls set to 0; fbdump is not enabled until
         * changed by gator */
        for (i = 0; i < FBDUMP_CONTROL_MAX; i++)
                kbdev->kbase_profiling_controls[i] = 0;

        kbase_debug_assert_register_hook(&kbasep_trace_hook_wrapper, kbdev);

        atomic_set(&kbdev->ctx_num, 0);

        err = kbase_instr_backend_init(kbdev);
        if (err)
                goto term_trace;

        kbdev->pm.dvfs_period = DEFAULT_PM_DVFS_PERIOD;

        kbdev->reset_timeout_ms = DEFAULT_RESET_TIMEOUT_MS;

#ifdef CONFIG_MALI_GPU_MMU_AARCH64
        kbdev->mmu_mode = kbase_mmu_mode_get_aarch64();
#else
        kbdev->mmu_mode = kbase_mmu_mode_get_lpae();
#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */

#ifdef CONFIG_MALI_DEBUG
        init_waitqueue_head(&kbdev->driver_inactive_wait);
#endif /* CONFIG_MALI_DEBUG */

        return 0;
term_trace:
        kbasep_trace_term(kbdev);
term_as:
        kbase_device_all_as_term(kbdev);
as_init_failed:
dma_set_mask_failed:
fail:
        return err;
}

void kbase_device_term(struct kbase_device *kbdev)
{
        KBASE_DEBUG_ASSERT(kbdev);

#if KBASE_TRACE_ENABLE
        kbase_debug_assert_register_hook(NULL, NULL);
#endif

        kbase_instr_backend_term(kbdev);

        kbasep_trace_term(kbdev);

        kbase_device_all_as_term(kbdev);
}

void kbase_device_free(struct kbase_device *kbdev)
{
        kfree(kbdev);
}

int kbase_device_trace_buffer_install(
                struct kbase_context *kctx, u32 *tb, size_t size)
{
        unsigned long flags;

        KBASE_DEBUG_ASSERT(kctx);
        KBASE_DEBUG_ASSERT(tb);

        /* The interface uses a 16-bit value to track the last accessed
         * entry, and each entry is composed of two 32-bit words.
         * This limits the size that can be handled without an overflow. */
        if (0xFFFF * (2 * sizeof(u32)) < size)
                return -EINVAL;

        /* set up the header */
        /* magic number in the first 4 bytes */
        tb[0] = TRACE_BUFFER_HEADER_SPECIAL;
        /* Store (write offset = 0, wrap counter = 0, transaction active = no).
         * A write offset of 0 means the buffer has never been written;
         * offsets 1 to (wrap_offset - 1) store entries once tracing starts.
         */
        tb[1] = 0;

        /* install trace buffer */
        spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
        kctx->jctx.tb_wrap_offset = size / 8;
        kctx->jctx.tb = tb;
        spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);

        return 0;
}
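
/*
 * Illustrative only (hypothetical buffer): the size check above caps the
 * buffer at 0xFFFF * 8 = 524280 bytes. Installing a one-page buffer on a
 * 4 KiB-page system gives tb_wrap_offset = 4096 / 8 = 512, so offsets
 * 1..511 hold trace entries:
 *
 *   u32 *tb = kzalloc(PAGE_SIZE, GFP_KERNEL);
 *
 *   if (tb && !kbase_device_trace_buffer_install(kctx, tb, PAGE_SIZE))
 *           ... buffer is live; uninstall it before freeing ...
 */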

void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx)
{
        unsigned long flags;

        KBASE_DEBUG_ASSERT(kctx);
        spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
        kctx->jctx.tb = NULL;
        kctx->jctx.tb_wrap_offset = 0;
        spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
}

void kbase_device_trace_register_access(struct kbase_context *kctx,
                enum kbase_reg_access_type type, u16 reg_offset, u32 reg_value)
{
        unsigned long flags;

        spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
        if (kctx->jctx.tb) {
                u16 wrap_count;
                u16 write_offset;
                u32 *tb = kctx->jctx.tb;
                u32 header_word;

                header_word = tb[1];
                KBASE_DEBUG_ASSERT(0 == (header_word & 0x1));

                wrap_count = (header_word >> 1) & 0x7FFF;
                write_offset = (header_word >> 16) & 0xFFFF;

                /* mark as transaction in progress */
                tb[1] |= 0x1;
                mb();

                /* calculate new offset */
                write_offset++;
                if (write_offset == kctx->jctx.tb_wrap_offset) {
                        /* wrap */
                        write_offset = 1;
                        wrap_count++;
                        wrap_count &= 0x7FFF;   /* 15-bit wrap counter */
                }

                /* store the trace entry at the selected offset */
                tb[write_offset * 2 + 0] = (reg_offset & ~0x3) |
                                ((type == REG_WRITE) ? 0x1 : 0x0);
                tb[write_offset * 2 + 1] = reg_value;
                mb();

                /* new header word */
                header_word = (write_offset << 16) |
                                (wrap_count << 1) | 0x0; /* transaction complete */
                tb[1] = header_word;
        }
        spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
}
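
/*
 * Illustrative only: layout of the words maintained above. The header
 * word tb[1] packs three fields:
 *
 *   bit  0       transaction-in-progress flag
 *   bits 1..15   15-bit wrap counter
 *   bits 16..31  write offset of the most recent entry
 *
 * and each entry at offset n is a pair of words:
 *
 *   tb[n * 2]     = (reg_offset & ~0x3) | (is_write ? 0x1 : 0x0)
 *   tb[n * 2 + 1] = reg_value
 *
 * so a reader can decode the header as:
 *
 *   bool in_progress = tb[1] & 0x1;
 *   u16 wrap_count   = (tb[1] >> 1) & 0x7FFF;
 *   u16 write_offset = (tb[1] >> 16) & 0xFFFF;
 */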

/*
 * Device trace functions
 */
#if KBASE_TRACE_ENABLE

static int kbasep_trace_init(struct kbase_device *kbdev)
{
        struct kbase_trace *rbuf;

        rbuf = kmalloc_array(KBASE_TRACE_SIZE, sizeof(*rbuf), GFP_KERNEL);

        if (!rbuf)
                return -ENOMEM;

        kbdev->trace_rbuf = rbuf;
        spin_lock_init(&kbdev->trace_lock);
        return 0;
}

static void kbasep_trace_term(struct kbase_device *kbdev)
{
        kfree(kbdev->trace_rbuf);
}

static void kbasep_trace_format_msg(struct kbase_trace *trace_msg,
                char *buffer, int len)
{
        s32 written = 0;

        /* Initial part of message */
        written += MAX(snprintf(buffer + written, MAX(len - written, 0),
                        "%d.%.6d,%d,%d,%s,%p,",
                        (int)trace_msg->timestamp.tv_sec,
                        (int)(trace_msg->timestamp.tv_nsec / 1000),
                        trace_msg->thread_id, trace_msg->cpu,
                        kbasep_trace_code_string[trace_msg->code],
                        trace_msg->ctx), 0);

        if (trace_msg->katom)
                written += MAX(snprintf(buffer + written,
                                MAX(len - written, 0),
                                "atom %d (ud: 0x%llx 0x%llx)",
                                trace_msg->atom_number,
                                trace_msg->atom_udata[0],
                                trace_msg->atom_udata[1]), 0);

        written += MAX(snprintf(buffer + written, MAX(len - written, 0),
                        ",%.8llx,", trace_msg->gpu_addr), 0);

        /* NOTE: Could add function callbacks to handle different message types */
        /* Jobslot present */
        if (trace_msg->flags & KBASE_TRACE_FLAG_JOBSLOT)
                written += MAX(snprintf(buffer + written,
                                MAX(len - written, 0),
                                "%d", trace_msg->jobslot), 0);

        written += MAX(snprintf(buffer + written, MAX(len - written, 0),
                        ","), 0);

        /* Refcount present */
        if (trace_msg->flags & KBASE_TRACE_FLAG_REFCOUNT)
                written += MAX(snprintf(buffer + written,
                                MAX(len - written, 0),
                                "%d", trace_msg->refcount), 0);

        written += MAX(snprintf(buffer + written, MAX(len - written, 0),
                        ","), 0);

        /* Rest of message */
        written += MAX(snprintf(buffer + written, MAX(len - written, 0),
                        "0x%.8lx", trace_msg->info_val), 0);
}
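
/*
 * Illustrative only (hypothetical values): the resulting record is a
 * comma-separated line matching the header printed by
 * kbasep_trace_dump(), e.g.
 *
 *   42.123456,1042,2,JM_SUBMIT,ffffffc012345678,atom 3 (ud: 0x1 0x2),00001000,0,,0x00000001
 *
 * where the jobslot and refcount fields are left empty unless the
 * corresponding KBASE_TRACE_FLAG_* bit is set.
 */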

static void kbasep_trace_dump_msg(struct kbase_device *kbdev,
                struct kbase_trace *trace_msg)
{
        char buffer[DEBUG_MESSAGE_SIZE];

        kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
        dev_dbg(kbdev->dev, "%s", buffer);
}

void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code,
                void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr,
                u8 flags, int refcount, int jobslot, unsigned long info_val)
{
        unsigned long irqflags;
        struct kbase_trace *trace_msg;

        spin_lock_irqsave(&kbdev->trace_lock, irqflags);

        trace_msg = &kbdev->trace_rbuf[kbdev->trace_next_in];

        /* Fill the message */
        trace_msg->thread_id = task_pid_nr(current);
        trace_msg->cpu = task_cpu(current);

        getnstimeofday(&trace_msg->timestamp);

        trace_msg->code = code;
        trace_msg->ctx = ctx;

        if (NULL == katom) {
                trace_msg->katom = false;
        } else {
                trace_msg->katom = true;
                trace_msg->atom_number = kbase_jd_atom_id(katom->kctx, katom);
                trace_msg->atom_udata[0] = katom->udata.blob[0];
                trace_msg->atom_udata[1] = katom->udata.blob[1];
        }

        trace_msg->gpu_addr = gpu_addr;
        trace_msg->jobslot = jobslot;
        trace_msg->refcount = MIN((unsigned int)refcount, 0xFF);
        trace_msg->info_val = info_val;
        trace_msg->flags = flags;

        /* Update the ringbuffer indices */
        kbdev->trace_next_in = (kbdev->trace_next_in + 1) & KBASE_TRACE_MASK;
        if (kbdev->trace_next_in == kbdev->trace_first_out)
                kbdev->trace_first_out =
                                (kbdev->trace_first_out + 1) & KBASE_TRACE_MASK;

        spin_unlock_irqrestore(&kbdev->trace_lock, irqflags);
}
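
/*
 * Illustrative only, assuming KBASE_TRACE_SIZE is a power of two and
 * KBASE_TRACE_MASK == KBASE_TRACE_SIZE - 1 (which the masking above
 * requires): with a size of 256, trace_next_in walks 0, 1, ..., 255,
 * 0, ... and whenever it catches up with trace_first_out the oldest
 * entry is silently overwritten.
 */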

void kbasep_trace_clear(struct kbase_device *kbdev)
{
        unsigned long flags;

        spin_lock_irqsave(&kbdev->trace_lock, flags);
        kbdev->trace_first_out = kbdev->trace_next_in;
        spin_unlock_irqrestore(&kbdev->trace_lock, flags);
}

void kbasep_trace_dump(struct kbase_device *kbdev)
{
        unsigned long flags;
        u32 start;
        u32 end;

        dev_dbg(kbdev->dev, "Dumping trace:\nsecs,nthread,cpu,code,ctx,katom,gpu_addr,jobslot,refcount,info_val");
        spin_lock_irqsave(&kbdev->trace_lock, flags);
        start = kbdev->trace_first_out;
        end = kbdev->trace_next_in;

        while (start != end) {
                struct kbase_trace *trace_msg = &kbdev->trace_rbuf[start];

                kbasep_trace_dump_msg(kbdev, trace_msg);

                start = (start + 1) & KBASE_TRACE_MASK;
        }
        dev_dbg(kbdev->dev, "TRACE_END");

        spin_unlock_irqrestore(&kbdev->trace_lock, flags);

        KBASE_TRACE_CLEAR(kbdev);
}

static void kbasep_trace_hook_wrapper(void *param)
{
        struct kbase_device *kbdev = (struct kbase_device *)param;

        kbasep_trace_dump(kbdev);
}

#ifdef CONFIG_DEBUG_FS
struct trace_seq_state {
        struct kbase_trace trace_buf[KBASE_TRACE_SIZE];
        u32 start;
        u32 end;
};

static void *kbasep_trace_seq_start(struct seq_file *s, loff_t *pos)
{
        struct trace_seq_state *state = s->private;
        int i;

        if (*pos > KBASE_TRACE_SIZE)
                return NULL;
        i = state->start + *pos;
        if ((state->end >= state->start && i >= state->end) ||
                        i >= state->end + KBASE_TRACE_SIZE)
                return NULL;

        i &= KBASE_TRACE_MASK;

        return &state->trace_buf[i];
}

static void kbasep_trace_seq_stop(struct seq_file *s, void *data)
{
}

static void *kbasep_trace_seq_next(struct seq_file *s, void *data, loff_t *pos)
{
        struct trace_seq_state *state = s->private;
        int i;

        (*pos)++;

        i = (state->start + *pos) & KBASE_TRACE_MASK;
        if (i == state->end)
                return NULL;

        return &state->trace_buf[i];
}

static int kbasep_trace_seq_show(struct seq_file *s, void *data)
{
        struct kbase_trace *trace_msg = data;
        char buffer[DEBUG_MESSAGE_SIZE];

        kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
        seq_printf(s, "%s\n", buffer);
        return 0;
}

static const struct seq_operations kbasep_trace_seq_ops = {
        .start = kbasep_trace_seq_start,
        .next = kbasep_trace_seq_next,
        .stop = kbasep_trace_seq_stop,
        .show = kbasep_trace_seq_show,
};

static int kbasep_trace_debugfs_open(struct inode *inode, struct file *file)
{
        struct kbase_device *kbdev = inode->i_private;
        unsigned long flags;

        struct trace_seq_state *state;

        state = __seq_open_private(file, &kbasep_trace_seq_ops, sizeof(*state));
        if (!state)
                return -ENOMEM;

        spin_lock_irqsave(&kbdev->trace_lock, flags);
        state->start = kbdev->trace_first_out;
        state->end = kbdev->trace_next_in;
        memcpy(state->trace_buf, kbdev->trace_rbuf, sizeof(state->trace_buf));
        spin_unlock_irqrestore(&kbdev->trace_lock, flags);

        return 0;
}

static const struct file_operations kbasep_trace_debugfs_fops = {
        .open = kbasep_trace_debugfs_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_private,
};

void kbasep_trace_debugfs_init(struct kbase_device *kbdev)
{
        debugfs_create_file("mali_trace", S_IRUGO,
                        kbdev->mali_debugfs_directory, kbdev,
                        &kbasep_trace_debugfs_fops);
}
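
/*
 * Illustrative only: with debugfs mounted, the snapshot taken in
 * kbasep_trace_debugfs_open() can then be read from user space, e.g.
 * (path assuming the default "mali0" device name):
 *
 *   cat /sys/kernel/debug/mali0/mali_trace
 */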

#else
void kbasep_trace_debugfs_init(struct kbase_device *kbdev)
{
}
#endif                          /* CONFIG_DEBUG_FS */

#else                           /* KBASE_TRACE_ENABLE  */
static int kbasep_trace_init(struct kbase_device *kbdev)
{
        CSTD_UNUSED(kbdev);
        return 0;
}

static void kbasep_trace_term(struct kbase_device *kbdev)
{
        CSTD_UNUSED(kbdev);
}

static void kbasep_trace_hook_wrapper(void *param)
{
        CSTD_UNUSED(param);
}

void kbasep_trace_dump(struct kbase_device *kbdev)
{
        CSTD_UNUSED(kbdev);
}
#endif                          /* KBASE_TRACE_ENABLE  */

void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control,
                u32 value)
{
        switch (control) {
        case FBDUMP_CONTROL_ENABLE:
                /* fall through */
        case FBDUMP_CONTROL_RATE:
                /* fall through */
        case SW_COUNTER_ENABLE:
                /* fall through */
        case FBDUMP_CONTROL_RESIZE_FACTOR:
                kbdev->kbase_profiling_controls[control] = value;
                break;
        default:
                dev_err(kbdev->dev, "Profiling control %d not found\n",
                                control);
                break;
        }
}

u32 kbase_get_profiling_control(struct kbase_device *kbdev, u32 control)
{
        u32 ret_value = 0;

        switch (control) {
        case FBDUMP_CONTROL_ENABLE:
                /* fall through */
        case FBDUMP_CONTROL_RATE:
                /* fall through */
        case SW_COUNTER_ENABLE:
                /* fall through */
        case FBDUMP_CONTROL_RESIZE_FACTOR:
                ret_value = kbdev->kbase_profiling_controls[control];
                break;
        default:
                dev_err(kbdev->dev, "Profiling control %d not found\n",
                                control);
                break;
        }

        return ret_value;
}

/*
 * Called by gator to control the production of profiling information
 * at runtime.
 */
void _mali_profiling_control(u32 action, u32 value)
{
        struct kbase_device *kbdev = NULL;

        /* find the first device, i.e. call with -1 */
        kbdev = kbase_find_device(-1);

        if (NULL != kbdev)
                kbase_set_profiling_control(kbdev, action, value);
}
KBASE_EXPORT_SYMBOL(_mali_profiling_control);
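
/*
 * Illustrative only (hypothetical values): gator enabling framebuffer
 * dumping at a reduced rate might issue calls such as
 *
 *   _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 1);
 *   _mali_profiling_control(FBDUMP_CONTROL_RATE, 4);
 *
 * which land in kbase_set_profiling_control() on the first device.
 */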