drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_device.c
/*
 *
 * (C) COPYRIGHT ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */

/**
 * @file mali_kbase_device.c
 * Base kernel device APIs
 */

#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include <kbase/src/common/mali_kbase.h>
#include <kbase/src/common/mali_kbase_defs.h>
#include <kbase/src/common/mali_kbase_hw.h>

#include <kbase/src/mali_kbase_profiling_gator_api.h>

/* NOTE: Magic - 0x45435254 ('TRCE' in ASCII when read as little-endian bytes).
 * Supports the tracing feature provided by the base module.
 * Keep this value in sync with the one used by the base module.
 */
#define TRACE_BUFFER_HEADER_SPECIAL 0x45435254

#if defined(CONFIG_MALI_PLATFORM_VEXPRESS) || defined(CONFIG_MALI_PLATFORM_VEXPRESS_VIRTEX7_40MHZ)
#ifdef CONFIG_MALI_PLATFORM_FAKE
extern kbase_attribute config_attributes_hw_issue_8408[];
#endif				/* CONFIG_MALI_PLATFORM_FAKE */
#endif				/* CONFIG_MALI_PLATFORM_VEXPRESS || CONFIG_MALI_PLATFORM_VEXPRESS_VIRTEX7_40MHZ */

#if KBASE_TRACE_ENABLE != 0
STATIC CONST char *kbasep_trace_code_string[] = {
	/* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
	 * THIS MUST BE USED AT THE START OF THE ARRAY */
#define KBASE_TRACE_CODE_MAKE_CODE(X) # X
#include "mali_kbase_trace_defs.h"
#undef  KBASE_TRACE_CODE_MAKE_CODE
};
#endif

#define DEBUG_MESSAGE_SIZE 256

STATIC mali_error kbasep_trace_init(kbase_device *kbdev);
STATIC void kbasep_trace_term(kbase_device *kbdev);
STATIC void kbasep_trace_hook_wrapper(void *param);
#if KBASE_TRACE_ENABLE != 0
STATIC void kbasep_trace_debugfs_init(kbase_device *kbdev);
#endif

void kbasep_as_do_poke(struct work_struct *work);
enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *data);
void kbasep_reset_timeout_worker(struct work_struct *data);

kbase_device *kbase_device_alloc(void)
{
	return kzalloc(sizeof(kbase_device), GFP_KERNEL);
}

mali_error kbase_device_init(kbase_device * const kbdev)
{
	int i;			/* i is used after the for loop; don't reuse it! */

	spin_lock_init(&kbdev->mmu_mask_change);

	/* Initialize platform specific context */
	if (MALI_FALSE == kbasep_platform_device_init(kbdev))
		goto fail;

	/* Ensure we can access the GPU registers */
	kbase_pm_register_access_enable(kbdev);

	/* Find out GPU properties based on the GPU feature registers */
	kbase_gpuprops_set(kbdev);

	/* Get the list of workarounds for issues on the current HW (identified by the GPU_ID register) */
	if (MALI_ERROR_NONE != kbase_hw_set_issues_mask(kbdev)) {
		kbase_pm_register_access_disable(kbdev);
		goto free_platform;
	}

	/* Set the list of features available on the current HW (identified by the GPU_ID register) */
	kbase_hw_set_features_mask(kbdev);

	kbdev->nr_hw_address_spaces = kbdev->gpu_props.num_address_spaces;

	/* We're done accessing the GPU registers for now. */
	kbase_pm_register_access_disable(kbdev);

	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
		const char format[] = "mali_mmu%d";
		char name[sizeof(format)];
		const char poke_format[] = "mali_mmu%d_poker";	/* BASE_HW_ISSUE_8316 */
		char poke_name[sizeof(poke_format)];	/* BASE_HW_ISSUE_8316 */

		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
			if (0 > snprintf(poke_name, sizeof(poke_name), poke_format, i))
				goto free_workqs;
		}

		if (0 > snprintf(name, sizeof(name), format, i))
			goto free_workqs;

		kbdev->as[i].number = i;
		kbdev->as[i].fault_addr = 0ULL;

		kbdev->as[i].pf_wq = alloc_workqueue(name, 0, 1);
		if (NULL == kbdev->as[i].pf_wq)
			goto free_workqs;

		mutex_init(&kbdev->as[i].transaction_mutex);

		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
			struct hrtimer *poking_timer = &kbdev->as[i].poke_timer;

			kbdev->as[i].poke_wq = alloc_workqueue(poke_name, 0, 1);
			if (NULL == kbdev->as[i].poke_wq) {
				destroy_workqueue(kbdev->as[i].pf_wq);
				goto free_workqs;
			}
			KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&kbdev->as[i].poke_work));
			INIT_WORK(&kbdev->as[i].poke_work, kbasep_as_do_poke);

			hrtimer_init(poking_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

			poking_timer->function = kbasep_as_poke_timer_callback;

			kbdev->as[i].poke_refcount = 0;
			kbdev->as[i].poke_state = 0u;
		}
	}
	/* don't change i after this point */

	spin_lock_init(&kbdev->hwcnt.lock);

	kbdev->hwcnt.state = KBASE_INSTR_STATE_DISABLED;
	init_waitqueue_head(&kbdev->reset_wait);
	init_waitqueue_head(&kbdev->hwcnt.wait);
	init_waitqueue_head(&kbdev->hwcnt.cache_clean_wait);
	INIT_WORK(&kbdev->hwcnt.cache_clean_work, kbasep_cache_clean_worker);
	kbdev->hwcnt.triggered = 0;

	kbdev->hwcnt.cache_clean_wq = alloc_workqueue("Mali cache cleaning workqueue",
						      0, 1);
	if (NULL == kbdev->hwcnt.cache_clean_wq)
		goto free_workqs;

	kbdev->reset_workq = alloc_workqueue("Mali reset workqueue", 0, 1);
	if (NULL == kbdev->reset_workq)
		goto free_cache_clean_workq;

	KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&kbdev->reset_work));
	INIT_WORK(&kbdev->reset_work, kbasep_reset_timeout_worker);

	hrtimer_init(&kbdev->reset_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	kbdev->reset_timer.function = kbasep_reset_timer_callback;

	if (kbasep_trace_init(kbdev) != MALI_ERROR_NONE)
		goto free_reset_workq;

	mutex_init(&kbdev->cacheclean_lock);
	atomic_set(&kbdev->keep_gpu_powered_count, 0);

#ifdef CONFIG_MALI_TRACE_TIMELINE
	for (i = 0; i < BASE_JM_SUBMIT_SLOTS; ++i)
		kbdev->timeline.slot_atoms_submitted[i] = 0;

	for (i = 0; i <= KBASEP_TIMELINE_PM_EVENT_LAST; ++i)
		atomic_set(&kbdev->timeline.pm_event_uid[i], 0);
#endif /* CONFIG_MALI_TRACE_TIMELINE */

	/* fbdump profiling controls set to 0 - fbdump not enabled until changed by gator */
	for (i = 0; i < FBDUMP_CONTROL_MAX; i++)
		kbdev->kbase_profiling_controls[i] = 0;

	kbase_debug_assert_register_hook(&kbasep_trace_hook_wrapper, kbdev);

#if defined(CONFIG_MALI_PLATFORM_VEXPRESS) || defined(CONFIG_MALI_PLATFORM_VEXPRESS_VIRTEX7_40MHZ)
#ifdef CONFIG_MALI_PLATFORM_FAKE
	/* BASE_HW_ISSUE_8408 requires a configuration with different timeouts for
	 * the vexpress platform */
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
		kbdev->config_attributes = config_attributes_hw_issue_8408;
#endif				/* CONFIG_MALI_PLATFORM_FAKE */
#endif				/* CONFIG_MALI_PLATFORM_VEXPRESS || CONFIG_MALI_PLATFORM_VEXPRESS_VIRTEX7_40MHZ */

	return MALI_ERROR_NONE;

 free_reset_workq:
	destroy_workqueue(kbdev->reset_workq);
 free_cache_clean_workq:
	destroy_workqueue(kbdev->hwcnt.cache_clean_wq);
 free_workqs:
	while (i > 0) {
		i--;
		destroy_workqueue(kbdev->as[i].pf_wq);
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
			destroy_workqueue(kbdev->as[i].poke_wq);
	}
 free_platform:
	kbasep_platform_device_term(kbdev);
 fail:
	return MALI_ERROR_FUNCTION_FAILED;
}

void kbase_device_term(kbase_device *kbdev)
{
	int i;

	KBASE_DEBUG_ASSERT(kbdev);

#if KBASE_TRACE_ENABLE != 0
	kbase_debug_assert_register_hook(NULL, NULL);
#endif

	kbasep_trace_term(kbdev);

	destroy_workqueue(kbdev->reset_workq);
	destroy_workqueue(kbdev->hwcnt.cache_clean_wq);

	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
		destroy_workqueue(kbdev->as[i].pf_wq);
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
			destroy_workqueue(kbdev->as[i].poke_wq);
	}

	kbasep_platform_device_term(kbdev);
}

void kbase_device_free(kbase_device *kbdev)
{
	kfree(kbdev);
}
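
/*
 * Illustrative lifecycle sketch (hypothetical caller, not part of this
 * file): a probe path would be expected to pair the alloc/init and
 * term/free APIs above roughly as follows. On init failure the function
 * unwinds its own partial state, so only the free is needed.
 *
 *	kbase_device *kbdev = kbase_device_alloc();
 *
 *	if (!kbdev)
 *		return -ENOMEM;
 *	if (MALI_ERROR_NONE != kbase_device_init(kbdev)) {
 *		kbase_device_free(kbdev);
 *		return -EINVAL;
 *	}
 *	...
 *	kbase_device_term(kbdev);
 *	kbase_device_free(kbdev);
 */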

void kbase_device_trace_buffer_install(kbase_context *kctx, u32 *tb, size_t size)
{
	unsigned long flags;

	KBASE_DEBUG_ASSERT(kctx);
	KBASE_DEBUG_ASSERT(tb);

	/* set up the header */
	/* magic number in the first 4 bytes */
	tb[0] = TRACE_BUFFER_HEADER_SPECIAL;
	/* Store (write offset = 0, wrap counter = 0, transaction active = no).
	 * A write offset of 0 means the buffer has never been written.
	 * Offsets 1 to (tb_wrap_offset - 1) hold the trace entries once
	 * tracing has started.
	 */
	tb[1] = 0;

	/* install trace buffer */
	spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
	kctx->jctx.tb_wrap_offset = size / 8;
	kctx->jctx.tb = tb;
	spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
}
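
/*
 * Layout sketch for the installed buffer, following the header setup
 * above: entries are 8 bytes (a pair of u32 words), so a buffer of
 * 'size' bytes holds size / 8 slots. Slot 0 is the header (magic word
 * plus the packed state word tb[1]); slots 1 .. tb_wrap_offset - 1 hold
 * the trace entries. E.g. size == 4096 gives tb_wrap_offset == 512,
 * i.e. 511 usable entries before the write offset wraps back to 1.
 */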

void kbase_device_trace_buffer_uninstall(kbase_context *kctx)
{
	unsigned long flags;

	KBASE_DEBUG_ASSERT(kctx);

	spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
	kctx->jctx.tb = NULL;
	kctx->jctx.tb_wrap_offset = 0;
	spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
}

void kbase_device_trace_register_access(kbase_context *kctx, kbase_reg_access_type type, u16 reg_offset, u32 reg_value)
{
	unsigned long flags;

	spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
	if (kctx->jctx.tb) {
		u16 wrap_count;
		u16 write_offset;
		u32 *tb = kctx->jctx.tb;
		u32 header_word;

		header_word = tb[1];
		KBASE_DEBUG_ASSERT(0 == (header_word & 0x1));

		wrap_count = (header_word >> 1) & 0x7FFF;
		write_offset = (header_word >> 16) & 0xFFFF;

		/* mark as transaction in progress */
		tb[1] |= 0x1;
		mb();

		/* calculate new offset */
		write_offset++;
		if (write_offset == kctx->jctx.tb_wrap_offset) {
			/* wrap */
			write_offset = 1;
			wrap_count++;
			wrap_count &= 0x7FFF;	/* 15bit wrap counter */
		}

		/* store the trace entry at the selected offset */
		tb[write_offset * 2 + 0] = (reg_offset & ~0x3) | ((type == REG_WRITE) ? 0x1 : 0x0);
		tb[write_offset * 2 + 1] = reg_value;
		mb();

		/* new header word */
		header_word = (write_offset << 16) | (wrap_count << 1) | 0x0;	/* transaction complete */
		tb[1] = header_word;
	}
	spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
}
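
/*
 * Illustrative decode of the packed header word tb[1], matching the
 * bitfields manipulated above (bit 0: transaction active, bits 1-15:
 * wrap counter, bits 16-31: write offset). These helpers are a sketch
 * for reference only and are not called by the driver; e.g. a header
 * word of 0x00240006 decodes to write_offset == 36, wrap_count == 3,
 * with no transaction in progress.
 */
static inline u16 kbasep_tb_write_offset(u32 header_word)
{
	return (header_word >> 16) & 0xFFFF;
}

static inline u16 kbasep_tb_wrap_count(u32 header_word)
{
	return (header_word >> 1) & 0x7FFF;
}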

void kbase_reg_write(kbase_device *kbdev, u16 offset, u32 value, kbase_context *kctx)
{
	KBASE_DEBUG_ASSERT(kbdev->pm.gpu_powered);
	KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
	KBASE_DEBUG_PRINT_INFO(KBASE_CORE, "w: reg %04x val %08x", offset, value);
	kbase_os_reg_write(kbdev, offset, value);
	if (kctx && kctx->jctx.tb)
		kbase_device_trace_register_access(kctx, REG_WRITE, offset, value);
}

KBASE_EXPORT_TEST_API(kbase_reg_write)

u32 kbase_reg_read(kbase_device *kbdev, u16 offset, kbase_context *kctx)
{
	u32 val;

	KBASE_DEBUG_ASSERT(kbdev->pm.gpu_powered);
	KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
	val = kbase_os_reg_read(kbdev, offset);
	KBASE_DEBUG_PRINT_INFO(KBASE_CORE, "r: reg %04x val %08x", offset, val);
	if (kctx && kctx->jctx.tb)
		kbase_device_trace_register_access(kctx, REG_READ, offset, val);
	return val;
}

KBASE_EXPORT_TEST_API(kbase_reg_read)
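
/*
 * Usage sketch (illustrative, assuming the standard midgard register
 * map): reading the GPU_ID register during early probe, with no kbase
 * context attached, would look like
 *
 *	u32 gpu_id = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID), NULL);
 *
 * Both accessors assert that the GPU is powered, so the caller must
 * have register access enabled first (see
 * kbase_pm_register_access_enable() in kbase_device_init() above).
 */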

void kbase_report_gpu_fault(kbase_device *kbdev, int multiple)
{
	u32 status;
	u64 address;

	status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS), NULL);
	address = (u64) kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTADDRESS_HI), NULL) << 32;
	address |= kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTADDRESS_LO), NULL);

	KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "GPU Fault 0x%08x (%s) at 0x%016llx", status, kbase_exception_name(status), address);
	if (multiple)
		KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "There were multiple GPU faults - some have not been reported\n");
}

void kbase_gpu_interrupt(kbase_device *kbdev, u32 val)
{
	KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ, NULL, NULL, 0u, val);
	if (val & GPU_FAULT)
		kbase_report_gpu_fault(kbdev, val & MULTIPLE_GPU_FAULTS);

	if (val & RESET_COMPLETED)
		kbase_pm_reset_done(kbdev);

	if (val & PRFCNT_SAMPLE_COMPLETED)
		kbase_instr_hwcnt_sample_done(kbdev);

	if (val & CLEAN_CACHES_COMPLETED)
		kbase_clean_caches_done(kbdev);

	KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u, val);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val, NULL);

	/* kbase_pm_check_transitions must be called after the IRQ has been cleared. This is because it might trigger
	 * further power transitions and we don't want to miss the interrupt raised to notify us that these further
	 * transitions have finished.
	 */
	if (val & POWER_CHANGED_ALL) {
		mali_bool cores_are_available;
		unsigned long flags;

		KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START);
		spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
		cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
		spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
		KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END);

		if (cores_are_available) {
			/* Fast-path Job Scheduling on PM IRQ */
			int js;
			/* Log timelining information that a change in state has completed */
			kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);

			spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
			/* A simplified check to ensure the last context hasn't exited
			 * after dropping the PM lock whilst doing a PM IRQ: any bits set
			 * in 'submit_allowed' indicate that we have a context in the
			 * runpool (which can't leave whilst we hold this lock). It is
			 * sometimes zero even when we have a context in the runpool, but
			 * that's no problem because we'll be unable to submit jobs
			 * anyway */
			if (kbdev->js_data.runpool_irq.submit_allowed)
				for (js = 0; js < kbdev->gpu_props.num_job_slots; ++js) {
					mali_bool needs_retry;
					s8 submitted_count = 0;
					needs_retry = kbasep_js_try_run_next_job_on_slot_irq_nolock(kbdev, js, &submitted_count);
					/* Don't need to retry outside of IRQ context - this can
					 * only happen if we submitted too many in one IRQ, such
					 * that they were completing faster than we could
					 * submit. In this case, a job IRQ will fire to cause more
					 * work to be submitted in some way */
					CSTD_UNUSED(needs_retry);
				}
			spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
		}
	}
	KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_DONE, NULL, NULL, 0u, val);
}

/*
 * Device trace functions
 */
#if KBASE_TRACE_ENABLE != 0

STATIC mali_error kbasep_trace_init(kbase_device *kbdev)
{
	void *rbuf;

	rbuf = kmalloc(sizeof(kbase_trace) * KBASE_TRACE_SIZE, GFP_KERNEL);

	if (!rbuf)
		return MALI_ERROR_FUNCTION_FAILED;

	kbdev->trace_rbuf = rbuf;
	spin_lock_init(&kbdev->trace_lock);
	kbasep_trace_debugfs_init(kbdev);
	return MALI_ERROR_NONE;
}

STATIC void kbasep_trace_term(kbase_device *kbdev)
{
	debugfs_remove(kbdev->trace_dentry);
	kbdev->trace_dentry = NULL;
	kfree(kbdev->trace_rbuf);
}

void kbasep_trace_format_msg(kbase_trace *trace_msg, char *buffer, int len)
{
	s32 written = 0;

	/* Initial part of message */
	written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d.%.6d,%d,%d,%s,%p,", (int)trace_msg->timestamp.tv_sec, (int)(trace_msg->timestamp.tv_nsec / 1000), trace_msg->thread_id, trace_msg->cpu, kbasep_trace_code_string[trace_msg->code], trace_msg->ctx), 0);

	if (trace_msg->katom != MALI_FALSE) {
		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "atom %d (ud: 0x%llx 0x%llx)", trace_msg->atom_number, trace_msg->atom_udata[0], trace_msg->atom_udata[1]), 0);
	}

	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ",%.8llx,", trace_msg->gpu_addr), 0);

	/* NOTE: Could add function callbacks to handle different message types */
	/* Jobslot present */
	if ((trace_msg->flags & KBASE_TRACE_FLAG_JOBSLOT) != MALI_FALSE)
		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->jobslot), 0);

	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);

	/* Refcount present */
	if ((trace_msg->flags & KBASE_TRACE_FLAG_REFCOUNT) != MALI_FALSE)
		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->refcount), 0);

	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);

	/* Rest of message */
	written += MAX(snprintf(buffer + written, MAX(len - written, 0), "0x%.8lx", trace_msg->info_val), 0);
}

void kbasep_trace_dump_msg(kbase_trace *trace_msg)
{
	char buffer[DEBUG_MESSAGE_SIZE];

	kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
	KBASE_DEBUG_PRINT(KBASE_CORE, "%s", buffer);
}


void kbasep_trace_add(kbase_device *kbdev, kbase_trace_code code, void *ctx, kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val)
{
	unsigned long irqflags;
	kbase_trace *trace_msg;

	spin_lock_irqsave(&kbdev->trace_lock, irqflags);

	trace_msg = &kbdev->trace_rbuf[kbdev->trace_next_in];

	/* Fill the message */
	trace_msg->thread_id = task_pid_nr(current);
	trace_msg->cpu = task_cpu(current);

	getnstimeofday(&trace_msg->timestamp);

	trace_msg->code = code;
	trace_msg->ctx = ctx;

	if (NULL == katom) {
		trace_msg->katom = MALI_FALSE;
	} else {
		trace_msg->katom = MALI_TRUE;
		trace_msg->atom_number = kbase_jd_atom_id(katom->kctx, katom);
		trace_msg->atom_udata[0] = katom->udata.blob[0];
		trace_msg->atom_udata[1] = katom->udata.blob[1];
	}

	trace_msg->gpu_addr = gpu_addr;
	trace_msg->jobslot = jobslot;
	trace_msg->refcount = MIN((unsigned int)refcount, 0xFF);
	trace_msg->info_val = info_val;
	trace_msg->flags = flags;

	/* Update the ringbuffer indices */
	kbdev->trace_next_in = (kbdev->trace_next_in + 1) & KBASE_TRACE_MASK;
	if (kbdev->trace_next_in == kbdev->trace_first_out)
		kbdev->trace_first_out = (kbdev->trace_first_out + 1) & KBASE_TRACE_MASK;

	/* Done */
	spin_unlock_irqrestore(&kbdev->trace_lock, irqflags);
}
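
/*
 * Note on the index arithmetic above: KBASE_TRACE_SIZE is assumed to be
 * a power of two, with KBASE_TRACE_MASK == KBASE_TRACE_SIZE - 1, so
 * (index + 1) & KBASE_TRACE_MASK wraps without a divide. When next_in
 * catches up with first_out the oldest entry is silently overwritten,
 * i.e. the ringbuffer keeps the most recent KBASE_TRACE_SIZE - 1
 * messages.
 */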

void kbasep_trace_clear(kbase_device *kbdev)
{
	unsigned long flags;

	spin_lock_irqsave(&kbdev->trace_lock, flags);
	kbdev->trace_first_out = kbdev->trace_next_in;
	spin_unlock_irqrestore(&kbdev->trace_lock, flags);
}

void kbasep_trace_dump(kbase_device *kbdev)
{
	unsigned long flags;
	u32 start;
	u32 end;

	KBASE_DEBUG_PRINT(KBASE_CORE, "Dumping trace:\nsecs,nthread,cpu,code,ctx,katom,gpu_addr,jobslot,refcount,info_val");
	spin_lock_irqsave(&kbdev->trace_lock, flags);
	start = kbdev->trace_first_out;
	end = kbdev->trace_next_in;

	while (start != end) {
		kbase_trace *trace_msg = &kbdev->trace_rbuf[start];

		kbasep_trace_dump_msg(trace_msg);

		start = (start + 1) & KBASE_TRACE_MASK;
	}
	KBASE_DEBUG_PRINT(KBASE_CORE, "TRACE_END");

	spin_unlock_irqrestore(&kbdev->trace_lock, flags);

	KBASE_TRACE_CLEAR(kbdev);
}

STATIC void kbasep_trace_hook_wrapper(void *param)
{
	kbase_device *kbdev = (kbase_device *) param;

	kbasep_trace_dump(kbdev);
}

#ifdef CONFIG_DEBUG_FS

struct trace_seq_state {
	kbase_trace trace_buf[KBASE_TRACE_SIZE];
	u32 start;
	u32 end;
};

void *kbasep_trace_seq_start(struct seq_file *s, loff_t *pos)
{
	struct trace_seq_state *state = s->private;
	int i;

	i = (state->start + *pos) & KBASE_TRACE_MASK;
	if (i >= state->end)
		return NULL;

	return &state->trace_buf[i];
}

void kbasep_trace_seq_stop(struct seq_file *s, void *data)
{
}

void *kbasep_trace_seq_next(struct seq_file *s, void *data, loff_t *pos)
{
	struct trace_seq_state *state = s->private;
	int i;

	(*pos)++;

	i = (state->start + *pos) & KBASE_TRACE_MASK;
	if (i >= state->end)
		return NULL;

	return &state->trace_buf[i];
}

int kbasep_trace_seq_show(struct seq_file *s, void *data)
{
	kbase_trace *trace_msg = data;
	char buffer[DEBUG_MESSAGE_SIZE];

	kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
	seq_printf(s, "%s\n", buffer);
	return 0;
}

static const struct seq_operations kbasep_trace_seq_ops = {
	.start = kbasep_trace_seq_start,
	.next = kbasep_trace_seq_next,
	.stop = kbasep_trace_seq_stop,
	.show = kbasep_trace_seq_show,
};

static int kbasep_trace_debugfs_open(struct inode *inode, struct file *file)
{
	kbase_device *kbdev = inode->i_private;
	unsigned long flags;
	struct trace_seq_state *state;

	state = __seq_open_private(file, &kbasep_trace_seq_ops, sizeof(*state));
	if (!state)
		return -ENOMEM;

	spin_lock_irqsave(&kbdev->trace_lock, flags);
	state->start = kbdev->trace_first_out;
	state->end = kbdev->trace_next_in;
	memcpy(state->trace_buf, kbdev->trace_rbuf, sizeof(state->trace_buf));
	spin_unlock_irqrestore(&kbdev->trace_lock, flags);

	return 0;
}

static const struct file_operations kbasep_trace_debugfs_fops = {
	.open = kbasep_trace_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

STATIC void kbasep_trace_debugfs_init(kbase_device *kbdev)
{
	kbdev->trace_dentry = debugfs_create_file("mali_trace", S_IRUGO, NULL, kbdev, &kbasep_trace_debugfs_fops);
}
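
/*
 * Usage note: the file is created at the debugfs root, so with debugfs
 * mounted in the usual location the ringbuffer snapshot can be read
 * from userspace with e.g.:
 *
 *	cat /sys/kernel/debug/mali_trace
 */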
#else
STATIC void kbasep_trace_debugfs_init(kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}
#endif				/* CONFIG_DEBUG_FS */

#else				/* KBASE_TRACE_ENABLE != 0 */

STATIC mali_error kbasep_trace_init(kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
	return MALI_ERROR_NONE;
}

STATIC void kbasep_trace_term(kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}

STATIC void kbasep_trace_hook_wrapper(void *param)
{
	CSTD_UNUSED(param);
}

void kbasep_trace_add(kbase_device *kbdev, kbase_trace_code code, void *ctx, kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val)
{
	CSTD_UNUSED(kbdev);
	CSTD_UNUSED(code);
	CSTD_UNUSED(ctx);
	CSTD_UNUSED(katom);
	CSTD_UNUSED(gpu_addr);
	CSTD_UNUSED(flags);
	CSTD_UNUSED(refcount);
	CSTD_UNUSED(jobslot);
	CSTD_UNUSED(info_val);
}

void kbasep_trace_clear(kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}

void kbasep_trace_dump(kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}
#endif				/* KBASE_TRACE_ENABLE != 0 */

void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value)
{
	switch (control) {
	case FBDUMP_CONTROL_ENABLE:
		/* fall through */
	case FBDUMP_CONTROL_RATE:
		/* fall through */
	case SW_COUNTER_ENABLE:
		/* fall through */
	case FBDUMP_CONTROL_RESIZE_FACTOR:
		kbdev->kbase_profiling_controls[control] = value;
		break;
	default:
		KBASE_DEBUG_PRINT_ERROR(KBASE_DEV, "Profiling control %d not found\n", control);
		break;
	}
}

u32 kbase_get_profiling_control(struct kbase_device *kbdev, u32 control)
{
	u32 ret_value = 0;

	switch (control) {
	case FBDUMP_CONTROL_ENABLE:
		/* fall through */
	case FBDUMP_CONTROL_RATE:
		/* fall through */
	case SW_COUNTER_ENABLE:
		/* fall through */
	case FBDUMP_CONTROL_RESIZE_FACTOR:
		ret_value = kbdev->kbase_profiling_controls[control];
		break;
	default:
		KBASE_DEBUG_PRINT_ERROR(KBASE_DEV, "Profiling control %d not found\n", control);
		break;
	}

	return ret_value;
}

/*
 * Called by gator to control the production of profiling information
 * at runtime.
 */
void _mali_profiling_control(u32 action, u32 value)
{
	struct kbase_device *kbdev = NULL;

	/* find the first device, i.e. call with -1 */
	kbdev = kbase_find_device(-1);

	if (NULL != kbdev)
		kbase_set_profiling_control(kbdev, action, value);
}

KBASE_EXPORT_SYMBOL(_mali_profiling_control);
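
/*
 * Illustrative gator-side calls (hypothetical values, assuming the
 * control semantics above): enabling framebuffer dumping and setting a
 * dump rate would look like
 *
 *	_mali_profiling_control(FBDUMP_CONTROL_ENABLE, 1);
 *	_mali_profiling_control(FBDUMP_CONTROL_RATE, 30);
 *
 * Unrecognised controls are rejected by kbase_set_profiling_control()
 * with an error print.
 */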