MALI: rockchip: upgrade midgard DDK to r14p0-01rel0
[firefly-linux-kernel-4.4.55.git] / drivers / gpu / arm / midgard / mali_kbase_device.c
1 /*
2  *
3  * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
4  *
5  * This program is free software and is provided to you under the terms of the
6  * GNU General Public License version 2 as published by the Free Software
7  * Foundation, and any use by you of this program is subject to the terms
8  * of such GNU licence.
9  *
10  * A copy of the licence is included with the program, and can also be obtained
11  * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12  * Boston, MA  02110-1301, USA.
13  *
14  */
15
16
17
18
19
20 /*
21  * Base kernel device APIs
22  */
23
24 #include <linux/debugfs.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/seq_file.h>
27 #include <linux/kernel.h>
28 #include <linux/module.h>
29 #include <linux/of_platform.h>
30
31 #include <mali_kbase.h>
32 #include <mali_kbase_defs.h>
33 #include <mali_kbase_hwaccess_instr.h>
34 #include <mali_kbase_hw.h>
35 #include <mali_kbase_config_defaults.h>
36
37 #include <mali_kbase_profiling_gator_api.h>
38
39 /* NOTE: Magic - 0x45435254 (TRCE in ASCII).
40  * Supports tracing feature provided in the base module.
41  * Please keep it in sync with the value of base module.
42  */
43 #define TRACE_BUFFER_HEADER_SPECIAL 0x45435254
44
#if KBASE_TRACE_ENABLE
/* Human-readable name for each trace code. Generated by re-including the
 * trace-definition header with KBASE_TRACE_CODE_MAKE_CODE redefined to
 * stringify each code, so the table stays in sync with the enum that the
 * same header generates elsewhere. */
static const char *kbasep_trace_code_string[] = {
	/* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
	 * THIS MUST BE USED AT THE START OF THE ARRAY */
#define KBASE_TRACE_CODE_MAKE_CODE(X) # X
#include "mali_kbase_trace_defs.h"
#undef  KBASE_TRACE_CODE_MAKE_CODE
};
#endif
54
55 #define DEBUG_MESSAGE_SIZE 256
56
57 static int kbasep_trace_init(struct kbase_device *kbdev);
58 static void kbasep_trace_term(struct kbase_device *kbdev);
59 static void kbasep_trace_hook_wrapper(void *param);
60
61 struct kbase_device *kbase_device_alloc(void)
62 {
63         return kzalloc(sizeof(struct kbase_device), GFP_KERNEL);
64 }
65
66 static int kbase_device_as_init(struct kbase_device *kbdev, int i)
67 {
68         const char format[] = "mali_mmu%d";
69         char name[sizeof(format)];
70         const char poke_format[] = "mali_mmu%d_poker";
71         char poke_name[sizeof(poke_format)];
72
73         if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
74                 snprintf(poke_name, sizeof(poke_name), poke_format, i);
75
76         snprintf(name, sizeof(name), format, i);
77
78         kbdev->as[i].number = i;
79         kbdev->as[i].fault_addr = 0ULL;
80
81         kbdev->as[i].pf_wq = alloc_workqueue(name, 0, 1);
82         if (!kbdev->as[i].pf_wq)
83                 return -EINVAL;
84
85         INIT_WORK(&kbdev->as[i].work_pagefault, page_fault_worker);
86         INIT_WORK(&kbdev->as[i].work_busfault, bus_fault_worker);
87
88         if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
89                 struct hrtimer *poke_timer = &kbdev->as[i].poke_timer;
90                 struct work_struct *poke_work = &kbdev->as[i].poke_work;
91
92                 kbdev->as[i].poke_wq = alloc_workqueue(poke_name, 0, 1);
93                 if (!kbdev->as[i].poke_wq) {
94                         destroy_workqueue(kbdev->as[i].pf_wq);
95                         return -EINVAL;
96                 }
97                 KBASE_DEBUG_ASSERT(!object_is_on_stack(poke_work));
98                 INIT_WORK(poke_work, kbasep_as_do_poke);
99
100                 hrtimer_init(poke_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
101
102                 poke_timer->function = kbasep_as_poke_timer_callback;
103
104                 kbdev->as[i].poke_refcount = 0;
105                 kbdev->as[i].poke_state = 0u;
106         }
107
108         return 0;
109 }
110
/* Tear down the workqueues created by kbase_device_as_init() for AS @i.
 * The poke workqueue only exists on hardware with BASE_HW_ISSUE_8316
 * (mirrors the condition used at init time). */
static void kbase_device_as_term(struct kbase_device *kbdev, int i)
{
	destroy_workqueue(kbdev->as[i].pf_wq);
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
		destroy_workqueue(kbdev->as[i].poke_wq);
}
117
118 static int kbase_device_all_as_init(struct kbase_device *kbdev)
119 {
120         int i, err;
121
122         for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
123                 err = kbase_device_as_init(kbdev, i);
124                 if (err)
125                         goto free_workqs;
126         }
127
128         return 0;
129
130 free_workqs:
131         for (; i > 0; i--)
132                 kbase_device_as_term(kbdev, i);
133
134         return err;
135 }
136
137 static void kbase_device_all_as_term(struct kbase_device *kbdev)
138 {
139         int i;
140
141         for (i = 0; i < kbdev->nr_hw_address_spaces; i++)
142                 kbase_device_as_term(kbdev, i);
143 }
144
/**
 * kbase_device_init - One-time initialisation of a kbase_device.
 * @kbdev: Device to initialise; kbdev->dev must already be set.
 *
 * Sets up locks, hardware issue/feature masks, DMA masks, per-address-space
 * state, the trace ring buffer, profiling controls and the instrumentation
 * backend, in that order.
 *
 * Return: 0 on success, negative errno on failure. Partially initialised
 * state is unwound through the error labels at the end of the function.
 */
int kbase_device_init(struct kbase_device * const kbdev)
{
	int i, err;
#ifdef CONFIG_ARM64
	struct device_node *np = NULL;
#endif /* CONFIG_ARM64 */

	spin_lock_init(&kbdev->mmu_mask_change);
	mutex_init(&kbdev->mmu_hw_mutex);
#ifdef CONFIG_ARM64
	kbdev->cci_snoop_enabled = false;
	np = kbdev->dev->of_node;
	if (np != NULL) {
		/* Optional SMC function IDs for CCI snoop control read from
		 * the device tree; a missing property defaults to 0, which
		 * here means "not provided". */
		if (of_property_read_u32(np, "snoop_enable_smc",
					&kbdev->snoop_enable_smc))
			kbdev->snoop_enable_smc = 0;
		if (of_property_read_u32(np, "snoop_disable_smc",
					&kbdev->snoop_disable_smc))
			kbdev->snoop_disable_smc = 0;
		/* Either both or none of the calls should be provided. */
		if (!((kbdev->snoop_disable_smc == 0
			&& kbdev->snoop_enable_smc == 0)
			|| (kbdev->snoop_disable_smc != 0
			&& kbdev->snoop_enable_smc != 0))) {
			WARN_ON(1);
			err = -EINVAL;
			goto fail;
		}
	}
#endif /* CONFIG_ARM64 */
	/* Get the list of workarounds for issues on the current HW
	 * (identified by the GPU_ID register)
	 */
	err = kbase_hw_set_issues_mask(kbdev);
	if (err)
		goto fail;

	/* Set the list of features available on the current HW
	 * (identified by the GPU_ID register)
	 */
	kbase_hw_set_features_mask(kbdev);

	kbase_gpuprops_set_features(kbdev);

	/* On Linux 4.0+, dma coherency is determined from device tree */
#if defined(CONFIG_ARM64) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
	set_dma_ops(kbdev->dev, &noncoherent_swiotlb_dma_ops);
#endif

	/* Workaround a pre-3.13 Linux issue, where dma_mask is NULL when our
	 * device structure was created by device-tree
	 */
	if (!kbdev->dev->dma_mask)
		kbdev->dev->dma_mask = &kbdev->dev->coherent_dma_mask;

	/* Restrict DMA to the physical address width reported by the GPU MMU
	 * properties. */
	err = dma_set_mask(kbdev->dev,
			DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
	if (err)
		goto dma_set_mask_failed;

	err = dma_set_coherent_mask(kbdev->dev,
			DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
	if (err)
		goto dma_set_mask_failed;

	kbdev->nr_hw_address_spaces = kbdev->gpu_props.num_address_spaces;

	err = kbase_device_all_as_init(kbdev);
	if (err)
		goto as_init_failed;

	spin_lock_init(&kbdev->hwcnt.lock);

	err = kbasep_trace_init(kbdev);
	if (err)
		goto term_as;

	mutex_init(&kbdev->cacheclean_lock);

#ifdef CONFIG_MALI_TRACE_TIMELINE
	for (i = 0; i < BASE_JM_MAX_NR_SLOTS; ++i)
		kbdev->timeline.slot_atoms_submitted[i] = 0;

	for (i = 0; i <= KBASEP_TIMELINE_PM_EVENT_LAST; ++i)
		atomic_set(&kbdev->timeline.pm_event_uid[i], 0);
#endif /* CONFIG_MALI_TRACE_TIMELINE */

	/* fbdump profiling controls set to 0 - fbdump not enabled until changed by gator */
	for (i = 0; i < FBDUMP_CONTROL_MAX; i++)
		kbdev->kbase_profiling_controls[i] = 0;

	/* Dump the device trace ring buffer whenever a debug assert fires. */
	kbase_debug_assert_register_hook(&kbasep_trace_hook_wrapper, kbdev);

	atomic_set(&kbdev->ctx_num, 0);

	err = kbase_instr_backend_init(kbdev);
	if (err)
		goto term_trace;

	kbdev->pm.dvfs_period = DEFAULT_PM_DVFS_PERIOD;

	kbdev->reset_timeout_ms = DEFAULT_RESET_TIMEOUT_MS;

#ifdef CONFIG_MALI_GPU_MMU_AARCH64
	kbdev->mmu_mode = kbase_mmu_mode_get_aarch64();
#else
	kbdev->mmu_mode = kbase_mmu_mode_get_lpae();
#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */

#ifdef CONFIG_MALI_DEBUG
	init_waitqueue_head(&kbdev->driver_inactive_wait);
#endif /* CONFIG_MALI_DEBUG */

	return 0;
	/* Error unwind: reverse order of the successful steps above. */
term_trace:
	kbasep_trace_term(kbdev);
term_as:
	kbase_device_all_as_term(kbdev);
as_init_failed:
dma_set_mask_failed:
fail:
	return err;
}
268
/**
 * kbase_device_term - Tear down state created by kbase_device_init().
 * @kbdev: Device to terminate; must not be NULL.
 *
 * Sub-systems are shut down in reverse order of their initialisation.
 */
void kbase_device_term(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev);

#if KBASE_TRACE_ENABLE
	/* Detach the assert hook registered in kbase_device_init() before
	 * the trace buffer it would dump is freed below. */
	kbase_debug_assert_register_hook(NULL, NULL);
#endif

	kbase_instr_backend_term(kbdev);

	kbasep_trace_term(kbdev);

	kbase_device_all_as_term(kbdev);
}
283
/* Release a device structure obtained from kbase_device_alloc().
 * Callers must have already run kbase_device_term(). */
void kbase_device_free(struct kbase_device *kbdev)
{
	kfree(kbdev);
}
288
289 int kbase_device_trace_buffer_install(
290                 struct kbase_context *kctx, u32 *tb, size_t size)
291 {
292         unsigned long flags;
293
294         KBASE_DEBUG_ASSERT(kctx);
295         KBASE_DEBUG_ASSERT(tb);
296
297         /* Interface uses 16-bit value to track last accessed entry. Each entry
298          * is composed of two 32-bit words.
299          * This limits the size that can be handled without an overflow. */
300         if (0xFFFF * (2 * sizeof(u32)) < size)
301                 return -EINVAL;
302
303         /* set up the header */
304         /* magic number in the first 4 bytes */
305         tb[0] = TRACE_BUFFER_HEADER_SPECIAL;
306         /* Store (write offset = 0, wrap counter = 0, transaction active = no)
307          * write offset 0 means never written.
308          * Offsets 1 to (wrap_offset - 1) used to store values when trace started
309          */
310         tb[1] = 0;
311
312         /* install trace buffer */
313         spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
314         kctx->jctx.tb_wrap_offset = size / 8;
315         kctx->jctx.tb = tb;
316         spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
317
318         return 0;
319 }
320
321 void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx)
322 {
323         unsigned long flags;
324
325         KBASE_DEBUG_ASSERT(kctx);
326         spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
327         kctx->jctx.tb = NULL;
328         kctx->jctx.tb_wrap_offset = 0;
329         spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
330 }
331
/* Append one register-access record to the context's trace buffer, if one
 * is installed; otherwise do nothing.
 *
 * tb[1] is the header word: bit 0 = transaction-in-progress flag,
 * bits 1..15 = wrap counter, bits 16..31 = last write offset. Each record
 * occupies two words: the register offset (with bit 0 reused as the
 * read(0)/write(1) flag) followed by the register value. The mb() barriers
 * order the in-progress flag, the record, and the final header update for
 * an external reader of the buffer.
 */
void kbase_device_trace_register_access(struct kbase_context *kctx, enum kbase_reg_access_type type, u16 reg_offset, u32 reg_value)
{
	unsigned long flags;

	spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
	if (kctx->jctx.tb) {
		u16 wrap_count;
		u16 write_offset;
		u32 *tb = kctx->jctx.tb;
		u32 header_word;

		header_word = tb[1];
		/* The lock serialises writers, so no transaction may already
		 * be marked in progress here. */
		KBASE_DEBUG_ASSERT(0 == (header_word & 0x1));

		wrap_count = (header_word >> 1) & 0x7FFF;
		write_offset = (header_word >> 16) & 0xFFFF;

		/* mark as transaction in progress */
		tb[1] |= 0x1;
		mb();

		/* calculate new offset */
		write_offset++;
		if (write_offset == kctx->jctx.tb_wrap_offset) {
			/* wrap: offset 0 is reserved for the header words */
			write_offset = 1;
			wrap_count++;
			wrap_count &= 0x7FFF;	/* 15bit wrap counter */
		}

		/* store the trace entry at the selected offset */
		tb[write_offset * 2 + 0] = (reg_offset & ~0x3) | ((type == REG_WRITE) ? 0x1 : 0x0);
		tb[write_offset * 2 + 1] = reg_value;
		mb();

		/* new header word */
		header_word = (write_offset << 16) | (wrap_count << 1) | 0x0;	/* transaction complete */
		tb[1] = header_word;
	}
	spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
}
373
374 /*
375  * Device trace functions
376  */
377 #if KBASE_TRACE_ENABLE
378
379 static int kbasep_trace_init(struct kbase_device *kbdev)
380 {
381         struct kbase_trace *rbuf;
382
383         rbuf = kmalloc_array(KBASE_TRACE_SIZE, sizeof(*rbuf), GFP_KERNEL);
384
385         if (!rbuf)
386                 return -EINVAL;
387
388         kbdev->trace_rbuf = rbuf;
389         spin_lock_init(&kbdev->trace_lock);
390         return 0;
391 }
392
/* Free the trace ring buffer allocated by kbasep_trace_init().
 * kfree(NULL) is a no-op, so this is safe even if init failed. */
static void kbasep_trace_term(struct kbase_device *kbdev)
{
	kfree(kbdev->trace_rbuf);
}
397
/* Format one trace message into @buffer (at most @len bytes; snprintf
 * NUL-terminates). The output is a CSV-style record:
 *   secs.usecs,thread,cpu,code,ctx[,atom info],gpu_addr,[jobslot],[refcount],info_val
 * Each MAX(snprintf(...), 0) clamps a possible negative snprintf error
 * return so @written never goes backwards, and MAX(len - written, 0)
 * turns further writes into no-ops once the buffer is full. */
static void kbasep_trace_format_msg(struct kbase_trace *trace_msg, char *buffer, int len)
{
	s32 written = 0;

	/* Initial part of message */
	written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d.%.6d,%d,%d,%s,%p,", (int)trace_msg->timestamp.tv_sec, (int)(trace_msg->timestamp.tv_nsec / 1000), trace_msg->thread_id, trace_msg->cpu, kbasep_trace_code_string[trace_msg->code], trace_msg->ctx), 0);

	/* Atom details only when the message was recorded with an atom */
	if (trace_msg->katom)
		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "atom %d (ud: 0x%llx 0x%llx)", trace_msg->atom_number, trace_msg->atom_udata[0], trace_msg->atom_udata[1]), 0);

	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ",%.8llx,", trace_msg->gpu_addr), 0);

	/* NOTE: Could add function callbacks to handle different message types */
	/* Jobslot present */
	if (trace_msg->flags & KBASE_TRACE_FLAG_JOBSLOT)
		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->jobslot), 0);

	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);

	/* Refcount present */
	if (trace_msg->flags & KBASE_TRACE_FLAG_REFCOUNT)
		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->refcount), 0);

	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);

	/* Rest of message */
	written += MAX(snprintf(buffer + written, MAX(len - written, 0), "0x%.8lx", trace_msg->info_val), 0);
}
426
427 static void kbasep_trace_dump_msg(struct kbase_device *kbdev, struct kbase_trace *trace_msg)
428 {
429         char buffer[DEBUG_MESSAGE_SIZE];
430
431         kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
432         dev_dbg(kbdev->dev, "%s", buffer);
433 }
434
/* Record one trace message in the device ring buffer.
 *
 * Callable from any context: the buffer and its indices are protected by
 * trace_lock with interrupts disabled. When the buffer is full the oldest
 * entry is overwritten (trace_first_out is advanced past it). */
void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val)
{
	unsigned long irqflags;
	struct kbase_trace *trace_msg;

	spin_lock_irqsave(&kbdev->trace_lock, irqflags);

	/* Claim the slot at the write index. */
	trace_msg = &kbdev->trace_rbuf[kbdev->trace_next_in];

	/* Fill the message */
	trace_msg->thread_id = task_pid_nr(current);
	trace_msg->cpu = task_cpu(current);

	getnstimeofday(&trace_msg->timestamp);

	trace_msg->code = code;
	trace_msg->ctx = ctx;

	if (NULL == katom) {
		trace_msg->katom = false;
	} else {
		trace_msg->katom = true;
		trace_msg->atom_number = kbase_jd_atom_id(katom->kctx, katom);
		trace_msg->atom_udata[0] = katom->udata.blob[0];
		trace_msg->atom_udata[1] = katom->udata.blob[1];
	}

	trace_msg->gpu_addr = gpu_addr;
	trace_msg->jobslot = jobslot;
	/* Clamp to 0xFF so large refcounts cannot wrap in the message. */
	trace_msg->refcount = MIN((unsigned int)refcount, 0xFF);
	trace_msg->info_val = info_val;
	trace_msg->flags = flags;

	/* Update the ringbuffer indices; on overflow drop the oldest entry. */
	kbdev->trace_next_in = (kbdev->trace_next_in + 1) & KBASE_TRACE_MASK;
	if (kbdev->trace_next_in == kbdev->trace_first_out)
		kbdev->trace_first_out = (kbdev->trace_first_out + 1) & KBASE_TRACE_MASK;

	/* Done */

	spin_unlock_irqrestore(&kbdev->trace_lock, irqflags);
}
477
478 void kbasep_trace_clear(struct kbase_device *kbdev)
479 {
480         unsigned long flags;
481
482         spin_lock_irqsave(&kbdev->trace_lock, flags);
483         kbdev->trace_first_out = kbdev->trace_next_in;
484         spin_unlock_irqrestore(&kbdev->trace_lock, flags);
485 }
486
487 void kbasep_trace_dump(struct kbase_device *kbdev)
488 {
489         unsigned long flags;
490         u32 start;
491         u32 end;
492
493         dev_dbg(kbdev->dev, "Dumping trace:\nsecs,nthread,cpu,code,ctx,katom,gpu_addr,jobslot,refcount,info_val");
494         spin_lock_irqsave(&kbdev->trace_lock, flags);
495         start = kbdev->trace_first_out;
496         end = kbdev->trace_next_in;
497
498         while (start != end) {
499                 struct kbase_trace *trace_msg = &kbdev->trace_rbuf[start];
500
501                 kbasep_trace_dump_msg(kbdev, trace_msg);
502
503                 start = (start + 1) & KBASE_TRACE_MASK;
504         }
505         dev_dbg(kbdev->dev, "TRACE_END");
506
507         spin_unlock_irqrestore(&kbdev->trace_lock, flags);
508
509         KBASE_TRACE_CLEAR(kbdev);
510 }
511
/* Debug-assert hook: @param is the struct kbase_device registered in
 * kbase_device_init(); dump its trace ring buffer. */
static void kbasep_trace_hook_wrapper(void *param)
{
	kbasep_trace_dump(param);
}
518
519 #ifdef CONFIG_DEBUG_FS
/* Per-open snapshot of the trace ring buffer for the seq_file reader:
 * a private copy of the entries plus the read/write indices captured
 * under the trace lock at open time. */
struct trace_seq_state {
	struct kbase_trace trace_buf[KBASE_TRACE_SIZE];	/* snapshot of the ring */
	u32 start;	/* index of the oldest entry */
	u32 end;	/* index one past the newest entry */
};
525
/* seq_file start callback: translate the file position *pos into a
 * pointer inside the snapshotted ring buffer, or NULL at end-of-data. */
static void *kbasep_trace_seq_start(struct seq_file *s, loff_t *pos)
{
	struct trace_seq_state *state = s->private;
	int i;

	if (*pos > KBASE_TRACE_SIZE)
		return NULL;
	i = state->start + *pos;
	/* Stop at the end marker; the second condition handles the case
	 * where the valid region wraps past the end of the array. */
	if ((state->end >= state->start && i >= state->end) ||
			i >= state->end + KBASE_TRACE_SIZE)
		return NULL;

	i &= KBASE_TRACE_MASK;

	return &state->trace_buf[i];
}
542
/* seq_file stop callback: nothing to release here — the snapshot lives
 * until the file is closed (freed by seq_release_private). */
static void kbasep_trace_seq_stop(struct seq_file *s, void *data)
{
}
546
547 static void *kbasep_trace_seq_next(struct seq_file *s, void *data, loff_t *pos)
548 {
549         struct trace_seq_state *state = s->private;
550         int i;
551
552         (*pos)++;
553
554         i = (state->start + *pos) & KBASE_TRACE_MASK;
555         if (i == state->end)
556                 return NULL;
557
558         return &state->trace_buf[i];
559 }
560
561 static int kbasep_trace_seq_show(struct seq_file *s, void *data)
562 {
563         struct kbase_trace *trace_msg = data;
564         char buffer[DEBUG_MESSAGE_SIZE];
565
566         kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
567         seq_printf(s, "%s\n", buffer);
568         return 0;
569 }
570
/* seq_file iterator over the per-open trace snapshot. */
static const struct seq_operations kbasep_trace_seq_ops = {
	.start = kbasep_trace_seq_start,
	.next = kbasep_trace_seq_next,
	.stop = kbasep_trace_seq_stop,
	.show = kbasep_trace_seq_show,
};
577
578 static int kbasep_trace_debugfs_open(struct inode *inode, struct file *file)
579 {
580         struct kbase_device *kbdev = inode->i_private;
581         unsigned long flags;
582
583         struct trace_seq_state *state;
584
585         state = __seq_open_private(file, &kbasep_trace_seq_ops, sizeof(*state));
586         if (!state)
587                 return -ENOMEM;
588
589         spin_lock_irqsave(&kbdev->trace_lock, flags);
590         state->start = kbdev->trace_first_out;
591         state->end = kbdev->trace_next_in;
592         memcpy(state->trace_buf, kbdev->trace_rbuf, sizeof(state->trace_buf));
593         spin_unlock_irqrestore(&kbdev->trace_lock, flags);
594
595         return 0;
596 }
597
/* File operations for the "mali_trace" debugfs entry; pairs
 * __seq_open_private in open with seq_release_private in release. */
static const struct file_operations kbasep_trace_debugfs_fops = {
	.open = kbasep_trace_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
604
/* Expose the trace ring buffer as a read-only "mali_trace" file in the
 * device's debugfs directory. */
void kbasep_trace_debugfs_init(struct kbase_device *kbdev)
{
	debugfs_create_file("mali_trace", S_IRUGO,
			kbdev->mali_debugfs_directory, kbdev,
			&kbasep_trace_debugfs_fops);
}
611
612 #else
void kbasep_trace_debugfs_init(struct kbase_device *kbdev)
{
	/* Debugfs support not compiled in: nothing to expose. */
}
616 #endif                          /* CONFIG_DEBUG_FS */
617
618 #else                           /* KBASE_TRACE_ENABLE  */
/* Stub implementations used when KBASE_TRACE_ENABLE is 0: tracing
 * compiles away to no-ops while keeping the call sites unchanged. */
static int kbasep_trace_init(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
	return 0;
}

static void kbasep_trace_term(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}

static void kbasep_trace_hook_wrapper(void *param)
{
	CSTD_UNUSED(param);
}

void kbasep_trace_dump(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}
639 #endif                          /* KBASE_TRACE_ENABLE  */
640
641 void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value)
642 {
643         switch (control) {
644         case FBDUMP_CONTROL_ENABLE:
645                 /* fall through */
646         case FBDUMP_CONTROL_RATE:
647                 /* fall through */
648         case SW_COUNTER_ENABLE:
649                 /* fall through */
650         case FBDUMP_CONTROL_RESIZE_FACTOR:
651                 kbdev->kbase_profiling_controls[control] = value;
652                 break;
653         default:
654                 dev_err(kbdev->dev, "Profiling control %d not found\n", control);
655                 break;
656         }
657 }
658
659 u32 kbase_get_profiling_control(struct kbase_device *kbdev, u32 control)
660 {
661         u32 ret_value = 0;
662
663         switch (control) {
664         case FBDUMP_CONTROL_ENABLE:
665                 /* fall through */
666         case FBDUMP_CONTROL_RATE:
667                 /* fall through */
668         case SW_COUNTER_ENABLE:
669                 /* fall through */
670         case FBDUMP_CONTROL_RESIZE_FACTOR:
671                 ret_value = kbdev->kbase_profiling_controls[control];
672                 break;
673         default:
674                 dev_err(kbdev->dev, "Profiling control %d not found\n", control);
675                 break;
676         }
677
678         return ret_value;
679 }
680
/*
 * Called by gator to control the production of profiling information
 * at runtime.
 */

686 void _mali_profiling_control(u32 action, u32 value)
687 {
688         struct kbase_device *kbdev = NULL;
689
690         /* find the first i.e. call with -1 */
691         kbdev = kbase_find_device(-1);
692
693         if (NULL != kbdev)
694                 kbase_set_profiling_control(kbdev, action, value);
695 }
696 KBASE_EXPORT_SYMBOL(_mali_profiling_control);
697