3 * (C) COPYRIGHT ARM Limited. All rights reserved.
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
10 * A copy of the licence is included with the program, and can also be obtained
11 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12 * Boston, MA 02110-1301, USA.
21 * @file mali_kbase_device.c
22 * Base kernel device APIs
25 #include <linux/debugfs.h>
26 #include <linux/seq_file.h>
27 #include <linux/kernel.h>
28 #include <linux/module.h>
30 #include <kbase/src/common/mali_kbase.h>
31 #include <kbase/src/common/mali_kbase_defs.h>
32 #include <kbase/src/common/mali_kbase_hw.h>
34 #include <kbase/src/mali_kbase_profiling_gator_api.h>
36 /* NOTE: Magic - 0x45435254 (TRCE in ASCII).
37 * Supports tracing feature provided in the base module.
38 * Please keep it in sync with the value of base module.
40 #define TRACE_BUFFER_HEADER_SPECIAL 0x45435254
42 #if defined(CONFIG_MALI_PLATFORM_VEXPRESS) || defined(CONFIG_MALI_PLATFORM_VEXPRESS_VIRTEX7_40MHZ)
43 #ifdef CONFIG_MALI_PLATFORM_FAKE
44 extern kbase_attribute config_attributes_hw_issue_8408[];
45 #endif /* CONFIG_MALI_PLATFORM_FAKE */
46 #endif /* CONFIG_MALI_PLATFORM_VEXPRESS || CONFIG_MALI_PLATFORM_VEXPRESS_VIRTEX7_40MHZ */
48 #if KBASE_TRACE_ENABLE != 0
/* Printable name for every kbase_trace_code value, generated by x-macro
 * expansion of mali_kbase_trace_defs.h (each code expands to its own name
 * string). Indexed directly by kbase_trace_code. */
STATIC CONST char *kbasep_trace_code_string[] = {
	/* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
	 * THIS MUST BE USED AT THE START OF THE ARRAY */
#define KBASE_TRACE_CODE_MAKE_CODE(X) # X
#include "mali_kbase_trace_defs.h"
#undef KBASE_TRACE_CODE_MAKE_CODE
	/* NOTE(review): the closing "};" of this array is truncated from this chunk */
58 #define DEBUG_MESSAGE_SIZE 256
60 STATIC mali_error kbasep_trace_init(kbase_device *kbdev);
61 STATIC void kbasep_trace_term(kbase_device *kbdev);
62 STATIC void kbasep_trace_hook_wrapper(void *param);
63 #if KBASE_TRACE_ENABLE != 0
64 STATIC void kbasep_trace_debugfs_init(kbase_device *kbdev);
67 void kbasep_as_do_poke(struct work_struct *work);
68 enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *data);
69 void kbasep_reset_timeout_worker(struct work_struct *data);
71 kbase_device *kbase_device_alloc(void)
73 return kzalloc(sizeof(kbase_device), GFP_KERNEL);
/**
 * One-time initialisation of a kbase_device prior to registration.
 *
 * Sets up the platform context, reads GPU properties and HW issue/feature
 * masks (with register access temporarily enabled), creates per-address-space
 * page-fault workqueues (plus MMU "poking" machinery for BASE_HW_ISSUE_8316),
 * instrumentation state, the reset worker/timer and the trace buffer.
 *
 * NOTE(review): this chunk of the file is missing several original lines
 * (opening/closing braces, goto targets and error-path statements), so the
 * error-handling flow below is only partially visible - hedged comments mark
 * where lines appear truncated.
 *
 * @param kbdev device to initialise (from kbase_device_alloc())
 * @return MALI_ERROR_NONE on success, MALI_ERROR_FUNCTION_FAILED otherwise
 */
mali_error kbase_device_init(kbase_device * const kbdev)
	int i;			/* i used after the for loop, don't reuse ! */

	spin_lock_init(&kbdev->mmu_mask_change);

	/* Initialize platform specific context */
	if (MALI_FALSE == kbasep_platform_device_init(kbdev))
		/* NOTE(review): failure branch truncated here - presumably jumps to the fail path */

	/* Ensure we can access the GPU registers */
	kbase_pm_register_access_enable(kbdev);

	/* Find out GPU properties based on the GPU feature registers */
	kbase_gpuprops_set(kbdev);

	/* Get the list of workarounds for issues on the current HW (identified by the GPU_ID register) */
	if (MALI_ERROR_NONE != kbase_hw_set_issues_mask(kbdev)) {
		/* Undo the register-access enable before bailing out */
		kbase_pm_register_access_disable(kbdev);

	/* Set the list of features available on the current HW (identified by the GPU_ID register) */
	kbase_hw_set_features_mask(kbdev);

	kbdev->nr_hw_address_spaces = kbdev->gpu_props.num_address_spaces;

	/* We're done accessing the GPU registers for now. */
	kbase_pm_register_access_disable(kbdev);

	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
		/* Workqueue names are built per address-space index */
		const char format[] = "mali_mmu%d";
		char name[sizeof(format)];
		const char poke_format[] = "mali_mmu%d_poker";	/* BASE_HW_ISSUE_8316 */
		char poke_name[sizeof(poke_format)];	/* BASE_HW_ISSUE_8316 */

		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
			if (0 > snprintf(poke_name, sizeof(poke_name), poke_format, i))
				/* NOTE(review): error branch truncated here */

		if (0 > snprintf(name, sizeof(name), format, i))
			/* NOTE(review): error branch truncated here */

		kbdev->as[i].number = i;
		kbdev->as[i].fault_addr = 0ULL;

		/* Ordered, single-threaded workqueue for page-fault handling on this AS */
		kbdev->as[i].pf_wq = alloc_workqueue(name, 0, 1);
		if (NULL == kbdev->as[i].pf_wq)
			/* NOTE(review): error branch truncated here */

		mutex_init(&kbdev->as[i].transaction_mutex);

		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
			struct hrtimer *poking_timer = &kbdev->as[i].poke_timer;

			/* Dedicated workqueue used to periodically "poke" the MMU
			 * as a workaround for HW issue 8316 */
			kbdev->as[i].poke_wq = alloc_workqueue(poke_name, 0, 1);
			if (NULL == kbdev->as[i].poke_wq) {
				/* Unwind this AS's page-fault workqueue before failing */
				destroy_workqueue(kbdev->as[i].pf_wq);

			/* The work item outlives this function - it must not be stack-allocated */
			KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&kbdev->as[i].poke_work));
			INIT_WORK(&kbdev->as[i].poke_work, kbasep_as_do_poke);

			hrtimer_init(poking_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

			poking_timer->function = kbasep_as_poke_timer_callback;

			kbdev->as[i].poke_refcount = 0;
			kbdev->as[i].poke_state = 0u;

	/* don't change i after this point */

	spin_lock_init(&kbdev->hwcnt.lock);

	kbdev->hwcnt.state = KBASE_INSTR_STATE_DISABLED;
	init_waitqueue_head(&kbdev->reset_wait);
	init_waitqueue_head(&kbdev->hwcnt.wait);
	init_waitqueue_head(&kbdev->hwcnt.cache_clean_wait);
	INIT_WORK(&kbdev->hwcnt.cache_clean_work, kbasep_cache_clean_worker);
	kbdev->hwcnt.triggered = 0;

	kbdev->hwcnt.cache_clean_wq = alloc_workqueue("Mali cache cleaning workqueue",
	/* NOTE(review): the remaining alloc_workqueue() arguments are truncated here */
	if (NULL == kbdev->hwcnt.cache_clean_wq)
		/* NOTE(review): error branch truncated here */

	kbdev->reset_workq = alloc_workqueue("Mali reset workqueue", 0, 1);
	if (NULL == kbdev->reset_workq)
		goto free_cache_clean_workq;

	/* The reset work item also outlives this function */
	KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&kbdev->reset_work));
	INIT_WORK(&kbdev->reset_work, kbasep_reset_timeout_worker);

	hrtimer_init(&kbdev->reset_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	kbdev->reset_timer.function = kbasep_reset_timer_callback;

	if (kbasep_trace_init(kbdev) != MALI_ERROR_NONE)
		goto free_reset_workq;

	mutex_init(&kbdev->cacheclean_lock);
	atomic_set(&kbdev->keep_gpu_powered_count, 0);

#ifdef CONFIG_MALI_TRACE_TIMELINE
	/* Reset timeline bookkeeping; note i is reused with different bounds */
	for (i = 0; i < BASE_JM_SUBMIT_SLOTS; ++i)
		kbdev->timeline.slot_atoms_submitted[i] = 0;

	for (i = 0; i <= KBASEP_TIMELINE_PM_EVENT_LAST; ++i)
		atomic_set(&kbdev->timeline.pm_event_uid[i], 0);
#endif				/* CONFIG_MALI_TRACE_TIMELINE */

	/* fbdump profiling controls set to 0 - fbdump not enabled until changed by gator */
	for (i = 0; i < FBDUMP_CONTROL_MAX; i++)
		kbdev->kbase_profiling_controls[i] = 0;

	/* Dump the trace ring buffer whenever a KBASE_DEBUG assert fires */
	kbase_debug_assert_register_hook(&kbasep_trace_hook_wrapper, kbdev);

#if defined(CONFIG_MALI_PLATFORM_VEXPRESS) || defined(CONFIG_MALI_PLATFORM_VEXPRESS_VIRTEX7_40MHZ)
#ifdef CONFIG_MALI_PLATFORM_FAKE
	/* BASE_HW_ISSUE_8408 requires a configuration with different timeouts for
	 * the vexpress platform */
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
		kbdev->config_attributes = config_attributes_hw_issue_8408;
#endif				/* CONFIG_MALI_PLATFORM_FAKE */
#endif				/* CONFIG_MALI_PLATFORM_VEXPRESS || CONFIG_MALI_PLATFORM_VEXPRESS_VIRTEX7_40MHZ */

	return MALI_ERROR_NONE;

	/* NOTE(review): the "free_reset_workq:" label appears truncated here */
	destroy_workqueue(kbdev->reset_workq);
 free_cache_clean_workq:
	destroy_workqueue(kbdev->hwcnt.cache_clean_wq);
	/* NOTE(review): the per-AS workqueue unwind loop header appears truncated here */
	destroy_workqueue(kbdev->as[i].pf_wq);
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
		destroy_workqueue(kbdev->as[i].poke_wq);

	kbasep_platform_device_term(kbdev);

	return MALI_ERROR_FUNCTION_FAILED;
/**
 * Tear down a kbase_device initialised by kbase_device_init().
 *
 * Unregisters the assert-dump hook, frees tracing, destroys the reset,
 * cache-clean and per-address-space workqueues, then releases the
 * platform-specific context.
 *
 * NOTE(review): the opening brace, the declaration of the loop counter i,
 * and some closing braces / #endif lines are truncated from this chunk.
 */
void kbase_device_term(kbase_device *kbdev)
	KBASE_DEBUG_ASSERT(kbdev);

#if KBASE_TRACE_ENABLE != 0
	/* Stop dumping the trace buffer on asserts before it is freed below */
	kbase_debug_assert_register_hook(NULL, NULL);
	/* NOTE(review): matching #endif appears truncated here */

	kbasep_trace_term(kbdev);

	destroy_workqueue(kbdev->reset_workq);
	destroy_workqueue(kbdev->hwcnt.cache_clean_wq);

	/* Mirror of the creation loop in kbase_device_init() */
	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
		destroy_workqueue(kbdev->as[i].pf_wq);
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
			destroy_workqueue(kbdev->as[i].poke_wq);

	kbasep_platform_device_term(kbdev);
/* Release a kbase_device allocated by kbase_device_alloc(); call only after
 * kbase_device_term(). NOTE(review): the function body is truncated from
 * this chunk - presumably just kfree(kbdev); verify against the full file. */
void kbase_device_free(kbase_device *kbdev)
/**
 * Install a caller-supplied register-trace buffer into the context.
 *
 * The first word is a magic number; entries are stored as 64-bit pairs,
 * hence the wrap offset of size / 8 (see
 * kbase_device_trace_register_access() for the entry layout).
 *
 * NOTE(review): the opening brace, the flags declaration, the tb[1] header
 * initialisation and the "kctx->jctx.tb = tb" assignment appear truncated
 * from this chunk.
 *
 * @param kctx context to attach the buffer to
 * @param tb   buffer, at least @size bytes, owned by the caller
 * @param size buffer size in bytes
 */
void kbase_device_trace_buffer_install(kbase_context *kctx, u32 *tb, size_t size)
	KBASE_DEBUG_ASSERT(kctx);
	KBASE_DEBUG_ASSERT(tb);

	/* set up the header */
	/* magic number in the first 4 bytes */
	tb[0] = TRACE_BUFFER_HEADER_SPECIAL;
	/* Store (write offset = 0, wrap counter = 0, transaction active = no)
	 * write offset 0 means never written.
	 * Offsets 1 to (wrap_offset - 1) used to store values when trace started
	 */
	/* NOTE(review): the tb[1] header-word initialisation appears truncated here */

	/* install trace buffer */
	spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
	kctx->jctx.tb_wrap_offset = size / 8;
	/* NOTE(review): the "kctx->jctx.tb = tb;" assignment appears truncated here */
	spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
273 void kbase_device_trace_buffer_uninstall(kbase_context *kctx)
276 KBASE_DEBUG_ASSERT(kctx);
277 spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
278 kctx->jctx.tb = NULL;
279 kctx->jctx.tb_wrap_offset = 0;
280 spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
/**
 * Append one register access record to the context's trace buffer.
 *
 * Entries are 64-bit pairs: word 0 packs the register offset with bit 0
 * reused as the direction flag (1 = write, 0 = read); word 1 is the value.
 * The header word packs (write offset << 16) | (wrap count << 1) |
 * transaction-active bit, all manipulated under tb_lock.
 *
 * NOTE(review): several lines are truncated from this chunk - the flags /
 * header_word / wrap_count / write_offset declarations, the header-word
 * load, the transaction-in-progress store, the offset increment and the
 * wrap-handling body.
 */
void kbase_device_trace_register_access(kbase_context *kctx, kbase_reg_access_type type, u16 reg_offset, u32 reg_value)
	spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
		u32 *tb = kctx->jctx.tb;

		/* The transaction-active bit must be clear on entry */
		KBASE_DEBUG_ASSERT(0 == (header_word & 0x1));

		wrap_count = (header_word >> 1) & 0x7FFF;
		write_offset = (header_word >> 16) & 0xFFFF;

		/* mark as transaction in progress */

		/* calculate new offset */
		if (write_offset == kctx->jctx.tb_wrap_offset) {
		wrap_count &= 0x7FFF;	/* 15bit wrap counter */

		/* store the trace entry at the selected offset */
		tb[write_offset * 2 + 0] = (reg_offset & ~0x3) | ((type == REG_WRITE) ? 0x1 : 0x0);
		tb[write_offset * 2 + 1] = reg_value;

		/* new header word */
		header_word = (write_offset << 16) | (wrap_count << 1) | 0x0;	/* transaction complete */
	spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
324 void kbase_reg_write(kbase_device *kbdev, u16 offset, u32 value, kbase_context *kctx)
326 KBASE_DEBUG_ASSERT(kbdev->pm.gpu_powered);
327 KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
328 KBASE_DEBUG_PRINT_INFO(KBASE_CORE, "w: reg %04x val %08x", offset, value);
329 kbase_os_reg_write(kbdev, offset, value);
330 if (kctx && kctx->jctx.tb)
331 kbase_device_trace_register_access(kctx, REG_WRITE, offset, value);
334 KBASE_EXPORT_TEST_API(kbase_reg_write)
336 u32 kbase_reg_read(kbase_device *kbdev, u16 offset, kbase_context *kctx)
339 KBASE_DEBUG_ASSERT(kbdev->pm.gpu_powered);
340 KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
341 val = kbase_os_reg_read(kbdev, offset);
342 KBASE_DEBUG_PRINT_INFO(KBASE_CORE, "r: reg %04x val %08x", offset, val);
343 if (kctx && kctx->jctx.tb)
344 kbase_device_trace_register_access(kctx, REG_READ, offset, val);
348 KBASE_EXPORT_TEST_API(kbase_reg_read)
/**
 * Read and report the GPU fault status and address registers.
 *
 * @param kbdev    device that raised the fault IRQ
 * @param multiple non-zero when the MULTIPLE_GPU_FAULTS bit was set,
 *                 i.e. some faults were lost before being reported
 *
 * NOTE(review): the opening brace, status/address declarations and the
 * "if (multiple)" guard around the final warning appear truncated from
 * this chunk.
 */
void kbase_report_gpu_fault(kbase_device *kbdev, int multiple)
	status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS), NULL);
	/* 64-bit fault address assembled from the HI/LO register pair */
	address = (u64) kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTADDRESS_HI), NULL) << 32;
	address |= kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTADDRESS_LO), NULL);

	KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "GPU Fault 0x%08x (%s) at 0x%016llx", status, kbase_exception_name(status), address);
	/* NOTE(review): presumably guarded by "if (multiple)" - truncated here */
	KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "There were multiple GPU faults - some have not been reported\n");
/**
 * GPU (non-job) interrupt handler.
 *
 * Dispatches fault, reset-complete, counter-sample and cache-clean events
 * from the IRQ status word @val, clears the IRQ, then re-checks power
 * transitions - deliberately after the clear, so a completion interrupt
 * raised by further transitions cannot be missed.
 *
 * NOTE(review): some lines are truncated from this chunk - e.g. the
 * GPU_FAULT guard around kbase_report_gpu_fault(), the declarations of
 * flags and js, and several closing braces.
 */
void kbase_gpu_interrupt(kbase_device *kbdev, u32 val)
	KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ, NULL, NULL, 0u, val);

	/* NOTE(review): presumably guarded by "if (val & GPU_FAULT)" - truncated here */
	kbase_report_gpu_fault(kbdev, val & MULTIPLE_GPU_FAULTS);

	if (val & RESET_COMPLETED)
		kbase_pm_reset_done(kbdev);

	if (val & PRFCNT_SAMPLE_COMPLETED)
		kbase_instr_hwcnt_sample_done(kbdev);

	if (val & CLEAN_CACHES_COMPLETED)
		kbase_clean_caches_done(kbdev);

	KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u, val);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val, NULL);

	/* kbase_pm_check_transitions must be called after the IRQ has been cleared. This is because it might trigger
	 * further power transitions and we don't want to miss the interrupt raised to notify us that these further
	 * transitions have finished.
	 */
	if (val & POWER_CHANGED_ALL) {
		mali_bool cores_are_available;

		KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START);
		spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
		cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
		spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
		KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END);

		if (cores_are_available) {
			/* Fast-path Job Scheduling on PM IRQ */

			/* Log timelining information that a change in state has completed */
			kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);

			spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
			/* A simplified check to ensure the last context hasn't exited
			 * after dropping the PM lock whilst doing a PM IRQ: any bits set
			 * in 'submit_allowed' indicate that we have a context in the
			 * runpool (which can't leave whilst we hold this lock). It is
			 * sometimes zero even when we have a context in the runpool, but
			 * that's no problem because we'll be unable to submit jobs
			 */
			if (kbdev->js_data.runpool_irq.submit_allowed)
				for (js = 0; js < kbdev->gpu_props.num_job_slots; ++js) {
					mali_bool needs_retry;
					s8 submitted_count = 0;

					needs_retry = kbasep_js_try_run_next_job_on_slot_irq_nolock(kbdev, js, &submitted_count);
					/* Don't need to retry outside of IRQ context - this can
					 * only happen if we submitted too many in one IRQ, such
					 * that they were completing faster than we could
					 * submit. In this case, a job IRQ will fire to cause more
					 * work to be submitted in some way */
					CSTD_UNUSED(needs_retry);
			spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);

	KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_DONE, NULL, NULL, 0u, val);
429 * Device trace functions
431 #if KBASE_TRACE_ENABLE != 0
/**
 * Allocate the KBASE_TRACE_SIZE-entry trace ring buffer, initialise the
 * trace lock, and create the debugfs entry.
 *
 * NOTE(review): the opening brace, the rbuf declaration and the
 * allocation-failure guard around the FAILED return appear truncated from
 * this chunk.
 *
 * @return MALI_ERROR_NONE on success, MALI_ERROR_FUNCTION_FAILED if the
 *         ring buffer cannot be allocated
 */
STATIC mali_error kbasep_trace_init(kbase_device *kbdev)
	rbuf = kmalloc(sizeof(kbase_trace) * KBASE_TRACE_SIZE, GFP_KERNEL);

	/* NOTE(review): "if (!rbuf)" guard appears truncated here */
		return MALI_ERROR_FUNCTION_FAILED;

	kbdev->trace_rbuf = rbuf;
	spin_lock_init(&kbdev->trace_lock);
	kbasep_trace_debugfs_init(kbdev);
	return MALI_ERROR_NONE;
448 STATIC void kbasep_trace_term(kbase_device *kbdev)
450 debugfs_remove(kbdev->trace_dentry);
451 kbdev->trace_dentry= NULL;
452 kfree(kbdev->trace_rbuf);
/**
 * Format one trace record into @buffer as a comma-separated line.
 *
 * Each snprintf result is clamped to >= 0 via MAX so a conversion error
 * cannot move the write cursor backwards, and the remaining length is
 * clamped to >= 0 so a full buffer degrades to zero-length writes rather
 * than overflowing.
 *
 * NOTE(review): the opening brace, the "written" declaration and the
 * else-branch/closing braces around the katom section appear truncated
 * from this chunk.
 */
void kbasep_trace_format_msg(kbase_trace *trace_msg, char *buffer, int len)
	/* Initial part of message */
	written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d.%.6d,%d,%d,%s,%p,", (int)trace_msg->timestamp.tv_sec, (int)(trace_msg->timestamp.tv_nsec / 1000), trace_msg->thread_id, trace_msg->cpu, kbasep_trace_code_string[trace_msg->code], trace_msg->ctx), 0);

	/* Atom details are only present when the record carries an atom */
	if (trace_msg->katom != MALI_FALSE) {
		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "atom %d (ud: 0x%llx 0x%llx)", trace_msg->atom_number, trace_msg->atom_udata[0], trace_msg->atom_udata[1]), 0);

	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ",%.8llx,", trace_msg->gpu_addr), 0);

	/* NOTE: Could add function callbacks to handle different message types */
	/* Jobslot present */
	if ((trace_msg->flags & KBASE_TRACE_FLAG_JOBSLOT) != MALI_FALSE)
		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->jobslot), 0);

	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);

	/* Refcount present */
	if ((trace_msg->flags & KBASE_TRACE_FLAG_REFCOUNT) != MALI_FALSE)
		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->refcount), 0);

	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);

	/* Rest of message */
	written += MAX(snprintf(buffer + written, MAX(len - written, 0), "0x%.8lx", trace_msg->info_val), 0);
486 void kbasep_trace_dump_msg(kbase_trace *trace_msg)
488 char buffer[DEBUG_MESSAGE_SIZE];
490 kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
491 KBASE_DEBUG_PRINT(KBASE_CORE, "%s", buffer);
/**
 * Append one record to the device trace ring buffer.
 *
 * Runs under trace_lock with IRQs disabled, so it is callable from any
 * context. When the buffer fills, the oldest record is overwritten
 * (first_out is advanced past it).
 *
 * NOTE(review): the opening brace, the "if (katom)" guard around the atom
 * fields and its closing brace appear truncated from this chunk.
 */
void kbasep_trace_add(kbase_device *kbdev, kbase_trace_code code, void *ctx, kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val)
	unsigned long irqflags;
	kbase_trace *trace_msg;

	spin_lock_irqsave(&kbdev->trace_lock, irqflags);

	/* Claim the slot at the current write index */
	trace_msg = &kbdev->trace_rbuf[kbdev->trace_next_in];

	/* Fill the message */
	trace_msg->thread_id = task_pid_nr(current);
	trace_msg->cpu = task_cpu(current);

	getnstimeofday(&trace_msg->timestamp);

	trace_msg->code = code;
	trace_msg->ctx = ctx;

	trace_msg->katom = MALI_FALSE;
	/* NOTE(review): the following four lines presumably sit inside an
	 * "if (katom)" guard that is truncated from this chunk */
	trace_msg->katom = MALI_TRUE;
	trace_msg->atom_number = kbase_jd_atom_id(katom->kctx, katom);
	trace_msg->atom_udata[0] = katom->udata.blob[0];
	trace_msg->atom_udata[1] = katom->udata.blob[1];

	trace_msg->gpu_addr = gpu_addr;
	trace_msg->jobslot = jobslot;
	/* Saturate the refcount to fit the record's narrow field */
	trace_msg->refcount = MIN((unsigned int)refcount, 0xFF);
	trace_msg->info_val = info_val;
	trace_msg->flags = flags;

	/* Update the ringbuffer indices */
	kbdev->trace_next_in = (kbdev->trace_next_in + 1) & KBASE_TRACE_MASK;
	if (kbdev->trace_next_in == kbdev->trace_first_out)
		kbdev->trace_first_out = (kbdev->trace_first_out + 1) & KBASE_TRACE_MASK;

	spin_unlock_irqrestore(&kbdev->trace_lock, irqflags);
537 void kbasep_trace_clear(kbase_device *kbdev)
540 spin_lock_irqsave(&kbdev->trace_lock, flags);
541 kbdev->trace_first_out = kbdev->trace_next_in;
542 spin_unlock_irqrestore(&kbdev->trace_lock, flags);
/**
 * Print every pending record in the trace ring buffer, then clear it.
 *
 * Holds trace_lock for the whole dump, so writers are blocked until the
 * dump completes.
 *
 * NOTE(review): the opening brace, the declarations of flags/start/end and
 * the while-loop's closing brace appear truncated from this chunk.
 */
void kbasep_trace_dump(kbase_device *kbdev)
	KBASE_DEBUG_PRINT(KBASE_CORE, "Dumping trace:\nsecs,nthread,cpu,code,ctx,katom,gpu_addr,jobslot,refcount,info_val");
	spin_lock_irqsave(&kbdev->trace_lock, flags);
	start = kbdev->trace_first_out;
	end = kbdev->trace_next_in;

	/* Walk [first_out, next_in) with ring wrap-around */
	while (start != end) {
		kbase_trace *trace_msg = &kbdev->trace_rbuf[start];
		kbasep_trace_dump_msg(trace_msg);

		start = (start + 1) & KBASE_TRACE_MASK;
	KBASE_DEBUG_PRINT(KBASE_CORE, "TRACE_END");

	spin_unlock_irqrestore(&kbdev->trace_lock, flags);

	KBASE_TRACE_CLEAR(kbdev);
569 STATIC void kbasep_trace_hook_wrapper(void *param)
571 kbase_device *kbdev = (kbase_device *) param;
572 kbasep_trace_dump(kbdev);
575 #ifdef CONFIG_DEBUG_FS
/* Per-open snapshot of the trace ring buffer, iterated by the seq_file
 * callbacks below. NOTE(review): the start/end index members and closing
 * "};" appear truncated from this chunk. */
struct trace_seq_state {
	kbase_trace trace_buf[KBASE_TRACE_SIZE];
/* seq_file "start" callback: map sequence position *pos to an index in the
 * snapshot ring and return the record there. NOTE(review): the declaration
 * of i and both return statements appear truncated from this chunk. */
void *kbasep_trace_seq_start(struct seq_file *s, loff_t *pos)
	struct trace_seq_state *state = s->private;

	i = (state->start + *pos) & KBASE_TRACE_MASK;
	if (i >= state->end)
/* seq_file "stop" callback - nothing to release here; the snapshot is freed
 * by seq_release_private(). (Empty body truncated from this chunk.) */
void kbasep_trace_seq_stop(struct seq_file *s, void *data)
/* seq_file "next" callback: advance to the following snapshot record,
 * masked into the ring. NOTE(review): the *pos increment, the end-of-data
 * check and the declaration of i appear truncated from this chunk. */
void *kbasep_trace_seq_next(struct seq_file *s, void *data, loff_t *pos)
	struct trace_seq_state *state = s->private;

	i = (state->start + *pos) & KBASE_TRACE_MASK;

	return &state->trace_buf[i];
/* seq_file "show" callback: format one snapshot record and emit it as a
 * line. NOTE(review): the opening/closing braces and terminating
 * "return 0;" appear truncated from this chunk. */
int kbasep_trace_seq_show(struct seq_file *s, void *data)
	kbase_trace *trace_msg = data;
	char buffer[DEBUG_MESSAGE_SIZE];

	kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
	seq_printf(s, "%s\n", buffer);
/* Iterator callbacks backing the mali_trace debugfs file.
 * NOTE(review): the closing "};" appears truncated from this chunk. */
static const struct seq_operations kbasep_trace_seq_ops = {
	.start = kbasep_trace_seq_start,
	.next = kbasep_trace_seq_next,
	.stop = kbasep_trace_seq_stop,
	.show = kbasep_trace_seq_show,
/* Open callback for the mali_trace debugfs file: allocate per-open state
 * via __seq_open_private() and copy the whole ring buffer plus its indices
 * under trace_lock, so the reader iterates a consistent snapshot.
 * NOTE(review): the flags declaration, the allocation-failure check on
 * state, and the "return 0;" appear truncated from this chunk. */
static int kbasep_trace_debugfs_open(struct inode *inode, struct file *file)
	kbase_device *kbdev = inode->i_private;

	struct trace_seq_state *state;

	state = __seq_open_private(file, &kbasep_trace_seq_ops, sizeof(*state));

	/* Snapshot the ring buffer atomically with respect to writers */
	spin_lock_irqsave(&kbdev->trace_lock, flags);
	state->start = kbdev->trace_first_out;
	state->end = kbdev->trace_next_in;
	memcpy(state->trace_buf, kbdev->trace_rbuf, sizeof(state->trace_buf));
	spin_unlock_irqrestore(&kbdev->trace_lock, flags);
/* File operations for the mali_trace debugfs entry; reading goes through
 * the seq_file machinery. NOTE(review): the .read/.llseek initialisers and
 * the closing "};" appear truncated from this chunk. */
static const struct file_operations kbasep_trace_debugfs_fops = {
	.open = kbasep_trace_debugfs_open,
	.release = seq_release_private,
656 STATIC void kbasep_trace_debugfs_init(kbase_device *kbdev)
658 kbdev->trace_dentry = debugfs_create_file("mali_trace", S_IRUGO, NULL, kbdev, &kbasep_trace_debugfs_fops);
/* CONFIG_DEBUG_FS disabled: no trace debugfs entry is created.
 * (Empty stub body truncated from this chunk.) */
STATIC void kbasep_trace_debugfs_init(kbase_device *kbdev)
665 #endif /* CONFIG_DEBUG_FS */
667 #else /* KBASE_TRACE_ENABLE != 0 */
/* KBASE_TRACE_ENABLE == 0: tracing is compiled out and every entry point
 * becomes a no-op stub. NOTE(review): braces and several CSTD_UNUSED()
 * lines for the unused parameters are truncated from this chunk. */
STATIC mali_error kbasep_trace_init(kbase_device *kbdev)
	/* Nothing to allocate - always succeeds */
	return MALI_ERROR_NONE;

STATIC void kbasep_trace_term(kbase_device *kbdev)

STATIC void kbasep_trace_hook_wrapper(void *param)

/* No-op: parameters are marked unused to keep the signature identical to
 * the tracing-enabled variant */
void kbasep_trace_add(kbase_device *kbdev, kbase_trace_code code, void *ctx, kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val)
	CSTD_UNUSED(gpu_addr);
	CSTD_UNUSED(refcount);
	CSTD_UNUSED(jobslot);
	CSTD_UNUSED(info_val);

void kbasep_trace_clear(kbase_device *kbdev)

void kbasep_trace_dump(kbase_device *kbdev)
706 #endif /* KBASE_TRACE_ENABLE != 0 */
/**
 * Set one of the gator-controlled profiling values.
 *
 * All valid controls fall through to a single stored assignment; any other
 * control id is reported as an error.
 *
 * NOTE(review): the opening brace, the "switch (control)" header, break
 * statements and the default label appear truncated from this chunk.
 */
void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value)
	case FBDUMP_CONTROL_ENABLE:
		/* fall through */
	case FBDUMP_CONTROL_RATE:
		/* fall through */
	case SW_COUNTER_ENABLE:
		/* fall through */
	case FBDUMP_CONTROL_RESIZE_FACTOR:
		kbdev->kbase_profiling_controls[control] = value;
		KBASE_DEBUG_PRINT_ERROR(KBASE_DEV, "Profiling control %d not found\n", control);
/**
 * Read back one of the gator-controlled profiling values.
 *
 * All valid controls fall through to a single stored read; any other
 * control id is reported as an error.
 *
 * NOTE(review): the opening brace, the ret_value declaration, the
 * "switch (control)" header, break statements, default label and the final
 * return appear truncated from this chunk.
 */
u32 kbase_get_profiling_control(struct kbase_device *kbdev, u32 control)
	case FBDUMP_CONTROL_ENABLE:
		/* fall through */
	case FBDUMP_CONTROL_RATE:
		/* fall through */
	case SW_COUNTER_ENABLE:
		/* fall through */
	case FBDUMP_CONTROL_RESIZE_FACTOR:
		ret_value = kbdev->kbase_profiling_controls[control];
		KBASE_DEBUG_PRINT_ERROR(KBASE_DEV, "Profiling control %d not found\n", control);
/*
 * Called by gator to control the production of
 * profiling information at runtime.
 */
void _mali_profiling_control(u32 action, u32 value)
	struct kbase_device *kbdev = NULL;

	/* find the first i.e. call with -1 */
	kbdev = kbase_find_device(-1);

	/* NOTE(review): a NULL check on kbdev appears truncated from this
	 * chunk before the call below */
	kbase_set_profiling_control(kbdev, action, value);

KBASE_EXPORT_SYMBOL(_mali_profiling_control);