/*
 *
 * (C) COPYRIGHT ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */
/**
 * @file mali_kbase_device.c
 * Base kernel device APIs
 */
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include <mali_kbase.h>
#include <mali_kbase_defs.h>
#include <mali_kbase_hw.h>

#include <mali_kbase_profiling_gator_api.h>
/* NOTE: Magic - 0x45435254 (TRCE in ASCII).
 * Supports tracing feature provided in the base module.
 * Please keep it in sync with the value of base module.
 */
#define TRACE_BUFFER_HEADER_SPECIAL 0x45435254
#if defined(CONFIG_MALI_PLATFORM_VEXPRESS) || defined(CONFIG_MALI_PLATFORM_VEXPRESS_VIRTEX7_40MHZ)
#ifdef CONFIG_MALI_PLATFORM_FAKE
extern kbase_attribute config_attributes_hw_issue_8408[];
#endif /* CONFIG_MALI_PLATFORM_FAKE */
#endif /* CONFIG_MALI_PLATFORM_VEXPRESS || CONFIG_MALI_PLATFORM_VEXPRESS_VIRTEX7_40MHZ */
#if KBASE_TRACE_ENABLE != 0
STATIC CONST char *kbasep_trace_code_string[] = {
	/* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
	 * THIS MUST BE USED AT THE START OF THE ARRAY */
#define KBASE_TRACE_CODE_MAKE_CODE(X) # X
#include "mali_kbase_trace_defs.h"
#undef KBASE_TRACE_CODE_MAKE_CODE
};
#endif /* KBASE_TRACE_ENABLE != 0 */
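/*
 * The array above is built with an "X-macro": mali_kbase_trace_defs.h lists
 * every trace code as KBASE_TRACE_CODE_MAKE_CODE(name), and defining that
 * macro as a stringiser (# X) just before the #include turns each entry into
 * its string form. For illustration only (using codes that appear later in
 * this file), the expansion looks roughly like:
 *
 *	STATIC CONST char *kbasep_trace_code_string[] = {
 *		"CORE_GPU_IRQ",
 *		"CORE_GPU_IRQ_CLEAR",
 *		"CORE_GPU_IRQ_DONE",
 *		...
 *	};
 *
 * which presumably stays index-aligned with the enum generated from the same
 * header elsewhere in the driver.
 */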
#define DEBUG_MESSAGE_SIZE 256
STATIC mali_error kbasep_trace_init(kbase_device *kbdev);
STATIC void kbasep_trace_term(kbase_device *kbdev);
STATIC void kbasep_trace_hook_wrapper(void *param);
#if KBASE_TRACE_ENABLE != 0
STATIC void kbasep_trace_debugfs_init(kbase_device *kbdev);
#endif

void kbasep_as_do_poke(struct work_struct *work);
enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *data);
void kbasep_reset_timeout_worker(struct work_struct *data);
kbase_device *kbase_device_alloc(void)
{
	return kzalloc(sizeof(kbase_device), GFP_KERNEL);
}
mali_error kbase_device_init(kbase_device * const kbdev)
{
	int i;	/* i used after the for loop, don't reuse ! */

	spin_lock_init(&kbdev->mmu_mask_change);

	/* Initialize platform specific context */
	if (MALI_FALSE == kbasep_platform_device_init(kbdev))

	/* Ensure we can access the GPU registers */
	kbase_pm_register_access_enable(kbdev);

	/* Find out GPU properties based on the GPU feature registers */
	kbase_gpuprops_set(kbdev);

	/* Get the list of workarounds for issues on the current HW (identified by the GPU_ID register) */
	if (MALI_ERROR_NONE != kbase_hw_set_issues_mask(kbdev)) {
		kbase_pm_register_access_disable(kbdev);
	/* Set the list of features available on the current HW (identified by the GPU_ID register) */
	kbase_hw_set_features_mask(kbdev);

	kbdev->nr_hw_address_spaces = kbdev->gpu_props.num_address_spaces;

	/* We're done accessing the GPU registers for now. */
	kbase_pm_register_access_disable(kbdev);

	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
		const char format[] = "mali_mmu%d";
		char name[sizeof(format)];
		const char poke_format[] = "mali_mmu%d_poker";	/* BASE_HW_ISSUE_8316 */
		char poke_name[sizeof(poke_format)];	/* BASE_HW_ISSUE_8316 */
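		/* The name buffers are sized from their format strings: the "%d"
		 * placeholder leaves room for a two-digit address-space index plus
		 * the terminator, and the snprintf() calls below only check for a
		 * negative (error) return value. */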
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
			if (0 > snprintf(poke_name, sizeof(poke_name), poke_format, i))

		if (0 > snprintf(name, sizeof(name), format, i))

		kbdev->as[i].number = i;
		kbdev->as[i].fault_addr = 0ULL;

		kbdev->as[i].pf_wq = alloc_workqueue(name, 0, 1);
		if (NULL == kbdev->as[i].pf_wq)

		mutex_init(&kbdev->as[i].transaction_mutex);

		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
			struct hrtimer *poking_timer = &kbdev->as[i].poke_timer;

			kbdev->as[i].poke_wq = alloc_workqueue(poke_name, 0, 1);
			if (NULL == kbdev->as[i].poke_wq) {
				destroy_workqueue(kbdev->as[i].pf_wq);

			KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&kbdev->as[i].poke_work));
			INIT_WORK(&kbdev->as[i].poke_work, kbasep_as_do_poke);

			hrtimer_init(poking_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

			poking_timer->function = kbasep_as_poke_timer_callback;

			kbdev->as[i].poke_refcount = 0;
			kbdev->as[i].poke_state = 0u;
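			/* Everything above is the per-address-space infrastructure for
			 * the BASE_HW_ISSUE_8316 poking workaround: a dedicated
			 * single-threaded workqueue, a work item running
			 * kbasep_as_do_poke() and an hrtimer. The refcount and state
			 * presumably start at zero to mean "no poking in progress". */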
	/* don't change i after this point */

	spin_lock_init(&kbdev->hwcnt.lock);

	kbdev->hwcnt.state = KBASE_INSTR_STATE_DISABLED;
	init_waitqueue_head(&kbdev->reset_wait);
	init_waitqueue_head(&kbdev->hwcnt.wait);
	init_waitqueue_head(&kbdev->hwcnt.cache_clean_wait);
	INIT_WORK(&kbdev->hwcnt.cache_clean_work, kbasep_cache_clean_worker);
	kbdev->hwcnt.triggered = 0;

	kbdev->hwcnt.cache_clean_wq = alloc_workqueue("Mali cache cleaning workqueue",
	if (NULL == kbdev->hwcnt.cache_clean_wq)

	kbdev->reset_workq = alloc_workqueue("Mali reset workqueue", 0, 1);
	if (NULL == kbdev->reset_workq)
		goto free_cache_clean_workq;

	KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&kbdev->reset_work));
	INIT_WORK(&kbdev->reset_work, kbasep_reset_timeout_worker);

	hrtimer_init(&kbdev->reset_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	kbdev->reset_timer.function = kbasep_reset_timer_callback;

	if (kbasep_trace_init(kbdev) != MALI_ERROR_NONE)
		goto free_reset_workq;

	mutex_init(&kbdev->cacheclean_lock);
	atomic_set(&kbdev->keep_gpu_powered_count, 0);

#ifdef CONFIG_MALI_TRACE_TIMELINE
	for (i = 0; i < BASE_JM_SUBMIT_SLOTS; ++i)
		kbdev->timeline.slot_atoms_submitted[i] = 0;

	for (i = 0; i <= KBASEP_TIMELINE_PM_EVENT_LAST; ++i)
		atomic_set(&kbdev->timeline.pm_event_uid[i], 0);
#endif /* CONFIG_MALI_TRACE_TIMELINE */

	/* fbdump profiling controls set to 0 - fbdump not enabled until changed by gator */
	for (i = 0; i < FBDUMP_CONTROL_MAX; i++)
		kbdev->kbase_profiling_controls[i] = 0;

	kbase_debug_assert_register_hook(&kbasep_trace_hook_wrapper, kbdev);

#if defined(CONFIG_MALI_PLATFORM_VEXPRESS) || defined(CONFIG_MALI_PLATFORM_VEXPRESS_VIRTEX7_40MHZ)
#ifdef CONFIG_MALI_PLATFORM_FAKE
	/* BASE_HW_ISSUE_8408 requires a configuration with different timeouts for
	 * the vexpress platform */
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
		kbdev->config_attributes = config_attributes_hw_issue_8408;
#endif /* CONFIG_MALI_PLATFORM_FAKE */
#endif /* CONFIG_MALI_PLATFORM_VEXPRESS || CONFIG_MALI_PLATFORM_VEXPRESS_VIRTEX7_40MHZ */

	return MALI_ERROR_NONE;
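	/* The error paths below unwind in the reverse order of the allocations
	 * above: the reset workqueue, then the cache-clean workqueue, then the
	 * per-address-space workqueues, and finally the platform context. */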
	destroy_workqueue(kbdev->reset_workq);
free_cache_clean_workq:
	destroy_workqueue(kbdev->hwcnt.cache_clean_wq);

	destroy_workqueue(kbdev->as[i].pf_wq);
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
		destroy_workqueue(kbdev->as[i].poke_wq);

	kbasep_platform_device_term(kbdev);

	return MALI_ERROR_FUNCTION_FAILED;
}
void kbase_device_term(kbase_device *kbdev)
{
	int i;

	KBASE_DEBUG_ASSERT(kbdev);

#if KBASE_TRACE_ENABLE != 0
	kbase_debug_assert_register_hook(NULL, NULL);
#endif

	kbasep_trace_term(kbdev);

	destroy_workqueue(kbdev->reset_workq);
	destroy_workqueue(kbdev->hwcnt.cache_clean_wq);

	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
		destroy_workqueue(kbdev->as[i].pf_wq);
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
			destroy_workqueue(kbdev->as[i].poke_wq);
	}

	kbasep_platform_device_term(kbdev);
}
void kbase_device_free(kbase_device *kbdev)
{
	kfree(kbdev);
}
void kbase_device_trace_buffer_install(kbase_context *kctx, u32 *tb, size_t size)
{
	unsigned long flags;
	KBASE_DEBUG_ASSERT(kctx);
	KBASE_DEBUG_ASSERT(tb);

	/* set up the header */
	/* magic number in the first 4 bytes */
	tb[0] = TRACE_BUFFER_HEADER_SPECIAL;
	/* Store (write offset = 0, wrap counter = 0, transaction active = no)
	 * write offset 0 means never written.
	 * Offsets 1 to (wrap_offset - 1) used to store values when trace started
	 */
	tb[1] = 0;

	/* install trace buffer */
	spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
	kctx->jctx.tb_wrap_offset = size / 8;
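	/* Each trace entry is two u32 words (8 bytes), so the wrap offset is
	 * expressed in entries rather than bytes; see
	 * kbase_device_trace_register_access() below, which indexes
	 * tb[write_offset * 2]. */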
	kctx->jctx.tb = tb;
	spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
}
void kbase_device_trace_buffer_uninstall(kbase_context *kctx)
{
	unsigned long flags;
	KBASE_DEBUG_ASSERT(kctx);
	spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
	kctx->jctx.tb = NULL;
	kctx->jctx.tb_wrap_offset = 0;
	spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
}
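/*
 * Layout of the trace buffer header word tb[1], as implied by the code below:
 *   bit  0       - transaction-in-progress flag
 *   bits 1..15   - 15-bit wrap counter
 *   bits 16..31  - write offset, counted in entries
 * Each entry holds the register offset (with bit 0 set for a write) followed
 * by the value that was read or written.
 */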
void kbase_device_trace_register_access(kbase_context *kctx, kbase_reg_access_type type, u16 reg_offset, u32 reg_value)
{
	unsigned long flags;

	spin_lock_irqsave(&kctx->jctx.tb_lock, flags);

	u32 *tb = kctx->jctx.tb;

	KBASE_DEBUG_ASSERT(0 == (header_word & 0x1));

	wrap_count = (header_word >> 1) & 0x7FFF;
	write_offset = (header_word >> 16) & 0xFFFF;

	/* mark as transaction in progress */

	/* calculate new offset */

	if (write_offset == kctx->jctx.tb_wrap_offset) {

	wrap_count &= 0x7FFF;	/* 15bit wrap counter */

	/* store the trace entry at the selected offset */
	tb[write_offset * 2 + 0] = (reg_offset & ~0x3) | ((type == REG_WRITE) ? 0x1 : 0x0);
	tb[write_offset * 2 + 1] = reg_value;

	/* new header word */
	header_word = (write_offset << 16) | (wrap_count << 1) | 0x0;	/* transaction complete */

	spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
void kbase_reg_write(kbase_device *kbdev, u16 offset, u32 value, kbase_context *kctx)
{
	KBASE_DEBUG_ASSERT(kbdev->pm.gpu_powered);
	KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
	KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
	dev_dbg(kbdev->dev, "w: reg %04x val %08x", offset, value);
	kbase_os_reg_write(kbdev, offset, value);
	if (kctx && kctx->jctx.tb)
		kbase_device_trace_register_access(kctx, REG_WRITE, offset, value);
}

KBASE_EXPORT_TEST_API(kbase_reg_write)
u32 kbase_reg_read(kbase_device *kbdev, u16 offset, kbase_context *kctx)
{
	u32 val;
	KBASE_DEBUG_ASSERT(kbdev->pm.gpu_powered);
	KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
	KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
	val = kbase_os_reg_read(kbdev, offset);
	dev_dbg(kbdev->dev, "r: reg %04x val %08x", offset, val);
	if (kctx && kctx->jctx.tb)
		kbase_device_trace_register_access(kctx, REG_READ, offset, val);
	return val;
}

KBASE_EXPORT_TEST_API(kbase_reg_read)
void kbase_report_gpu_fault(kbase_device *kbdev, int multiple)
{
	u32 status;
	u64 address;

	status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS), NULL);
	address = (u64) kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTADDRESS_HI), NULL) << 32;
	address |= kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTADDRESS_LO), NULL);

	dev_warn(kbdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx", status & 0xFF, kbase_exception_name(status), address);
	if (multiple)
		dev_warn(kbdev->dev, "There were multiple GPU faults - some have not been reported\n");
}
void kbase_gpu_interrupt(kbase_device *kbdev, u32 val)
{
	KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ, NULL, NULL, 0u, val);
	if (val & GPU_FAULT)
		kbase_report_gpu_fault(kbdev, val & MULTIPLE_GPU_FAULTS);

	if (val & RESET_COMPLETED)
		kbase_pm_reset_done(kbdev);

	if (val & PRFCNT_SAMPLE_COMPLETED)
		kbase_instr_hwcnt_sample_done(kbdev);

	if (val & CLEAN_CACHES_COMPLETED)
		kbase_clean_caches_done(kbdev);

	KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u, val);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val, NULL);
	/* kbase_pm_check_transitions must be called after the IRQ has been cleared. This is because it might trigger
	 * further power transitions and we don't want to miss the interrupt raised to notify us that these further
	 * transitions have finished.
	 */
	if (val & POWER_CHANGED_ALL) {
		mali_bool cores_are_available;
		unsigned long flags;

		KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START);
		spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
		cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
		spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
		KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END);

		if (cores_are_available) {
			/* Fast-path Job Scheduling on PM IRQ */
			int js;

			/* Log timelining information that a change in state has completed */
			kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);

			spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
			/* A simplified check to ensure the last context hasn't exited
			 * after dropping the PM lock whilst doing a PM IRQ: any bits set
			 * in 'submit_allowed' indicate that we have a context in the
			 * runpool (which can't leave whilst we hold this lock). It is
			 * sometimes zero even when we have a context in the runpool, but
			 * that's no problem because we'll be unable to submit jobs
			 * anyway */
			if (kbdev->js_data.runpool_irq.submit_allowed)
				for (js = 0; js < kbdev->gpu_props.num_job_slots; ++js) {
					mali_bool needs_retry;
					s8 submitted_count = 0;

					needs_retry = kbasep_js_try_run_next_job_on_slot_irq_nolock(kbdev, js, &submitted_count);
					/* Don't need to retry outside of IRQ context - this can
					 * only happen if we submitted too many in one IRQ, such
					 * that they were completing faster than we could
					 * submit. In this case, a job IRQ will fire to cause more
					 * work to be submitted in some way */
					CSTD_UNUSED(needs_retry);
				}
			spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
		}
	}
	KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_DONE, NULL, NULL, 0u, val);
}
/*
 * Device trace functions
 */
#if KBASE_TRACE_ENABLE != 0
STATIC mali_error kbasep_trace_init(kbase_device *kbdev)
{
	void *rbuf;

	rbuf = kmalloc(sizeof(kbase_trace) * KBASE_TRACE_SIZE, GFP_KERNEL);

	if (!rbuf)
		return MALI_ERROR_FUNCTION_FAILED;

	kbdev->trace_rbuf = rbuf;
	spin_lock_init(&kbdev->trace_lock);
	kbasep_trace_debugfs_init(kbdev);
	return MALI_ERROR_NONE;
}
STATIC void kbasep_trace_term(kbase_device *kbdev)
{
	debugfs_remove(kbdev->trace_dentry);
	kbdev->trace_dentry = NULL;
	kfree(kbdev->trace_rbuf);
}
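/* Format one trace message into 'buffer'. snprintf() may return a negative
 * value on error, so every partial write below is clamped with MAX(..., 0),
 * and the remaining-space argument is clamped the same way so that 'written'
 * can never run past 'len'. */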
void kbasep_trace_format_msg(kbase_trace *trace_msg, char *buffer, int len)
{
	s32 written = 0;

	/* Initial part of message */
	written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d.%.6d,%d,%d,%s,%p,", (int)trace_msg->timestamp.tv_sec, (int)(trace_msg->timestamp.tv_nsec / 1000), trace_msg->thread_id, trace_msg->cpu, kbasep_trace_code_string[trace_msg->code], trace_msg->ctx), 0);

	if (trace_msg->katom != MALI_FALSE) {
		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "atom %d (ud: 0x%llx 0x%llx)", trace_msg->atom_number, trace_msg->atom_udata[0], trace_msg->atom_udata[1]), 0);
	}

	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ",%.8llx,", trace_msg->gpu_addr), 0);

	/* NOTE: Could add function callbacks to handle different message types */
	/* Jobslot present */
	if ((trace_msg->flags & KBASE_TRACE_FLAG_JOBSLOT) != MALI_FALSE)
		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->jobslot), 0);

	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);

	/* Refcount present */
	if ((trace_msg->flags & KBASE_TRACE_FLAG_REFCOUNT) != MALI_FALSE)
		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->refcount), 0);

	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);

	/* Rest of message */
	written += MAX(snprintf(buffer + written, MAX(len - written, 0), "0x%.8lx", trace_msg->info_val), 0);
}
void kbasep_trace_dump_msg(kbase_device *kbdev, kbase_trace *trace_msg)
{
	char buffer[DEBUG_MESSAGE_SIZE];

	kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
	dev_dbg(kbdev->dev, "%s", buffer);
}
void kbasep_trace_add(kbase_device *kbdev, kbase_trace_code code, void *ctx, kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val)
{
	unsigned long irqflags;
	kbase_trace *trace_msg;

	spin_lock_irqsave(&kbdev->trace_lock, irqflags);

	trace_msg = &kbdev->trace_rbuf[kbdev->trace_next_in];

	/* Fill the message */
	trace_msg->thread_id = task_pid_nr(current);
	trace_msg->cpu = task_cpu(current);

	getnstimeofday(&trace_msg->timestamp);

	trace_msg->code = code;
	trace_msg->ctx = ctx;

	if (NULL == katom) {
		trace_msg->katom = MALI_FALSE;
	} else {
		trace_msg->katom = MALI_TRUE;
		trace_msg->atom_number = kbase_jd_atom_id(katom->kctx, katom);
		trace_msg->atom_udata[0] = katom->udata.blob[0];
		trace_msg->atom_udata[1] = katom->udata.blob[1];
	}

	trace_msg->gpu_addr = gpu_addr;
	trace_msg->jobslot = jobslot;
	trace_msg->refcount = MIN((unsigned int)refcount, 0xFF);
	trace_msg->info_val = info_val;
	trace_msg->flags = flags;

	/* Update the ringbuffer indices */
	kbdev->trace_next_in = (kbdev->trace_next_in + 1) & KBASE_TRACE_MASK;
	if (kbdev->trace_next_in == kbdev->trace_first_out)
		kbdev->trace_first_out = (kbdev->trace_first_out + 1) & KBASE_TRACE_MASK;
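	/* trace_next_in catching up with trace_first_out means the ring buffer
	 * is full, so the oldest entry is dropped by advancing trace_first_out.
	 * The bit-mask wrap works because KBASE_TRACE_MASK is presumably
	 * KBASE_TRACE_SIZE - 1 with KBASE_TRACE_SIZE a power of two. */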
	spin_unlock_irqrestore(&kbdev->trace_lock, irqflags);
}
void kbasep_trace_clear(kbase_device *kbdev)
{
	unsigned long flags;
	spin_lock_irqsave(&kbdev->trace_lock, flags);
	kbdev->trace_first_out = kbdev->trace_next_in;
	spin_unlock_irqrestore(&kbdev->trace_lock, flags);
}
void kbasep_trace_dump(kbase_device *kbdev)
{
	unsigned long flags;
	u32 start;
	u32 end;

	dev_dbg(kbdev->dev, "Dumping trace:\nsecs,nthread,cpu,code,ctx,katom,gpu_addr,jobslot,refcount,info_val");
	spin_lock_irqsave(&kbdev->trace_lock, flags);
	start = kbdev->trace_first_out;
	end = kbdev->trace_next_in;

	while (start != end) {
		kbase_trace *trace_msg = &kbdev->trace_rbuf[start];
		kbasep_trace_dump_msg(kbdev, trace_msg);

		start = (start + 1) & KBASE_TRACE_MASK;
	}
	dev_dbg(kbdev->dev, "TRACE_END");

	spin_unlock_irqrestore(&kbdev->trace_lock, flags);

	KBASE_TRACE_CLEAR(kbdev);
}
STATIC void kbasep_trace_hook_wrapper(void *param)
{
	kbase_device *kbdev = (kbase_device *) param;
	kbasep_trace_dump(kbdev);
}
#ifdef CONFIG_DEBUG_FS
struct trace_seq_state {
	kbase_trace trace_buf[KBASE_TRACE_SIZE];
	u32 start;
	u32 end;
};
void *kbasep_trace_seq_start(struct seq_file *s, loff_t *pos)
{
	struct trace_seq_state *state = s->private;
	s64 i;

	if (*pos > KBASE_TRACE_SIZE)
		return NULL;
	i = state->start + *pos;
	if ((state->end >= state->start && i >= state->end) ||
			i >= state->end + KBASE_TRACE_SIZE)
		return NULL;

	i &= KBASE_TRACE_MASK;

	return &state->trace_buf[i];
}
void kbasep_trace_seq_stop(struct seq_file *s, void *data)
{
}
void *kbasep_trace_seq_next(struct seq_file *s, void *data, loff_t *pos)
{
	struct trace_seq_state *state = s->private;
	s64 i;

	(*pos)++;

	i = (state->start + *pos) & KBASE_TRACE_MASK;
	if (i == state->end)
		return NULL;

	return &state->trace_buf[i];
}
int kbasep_trace_seq_show(struct seq_file *s, void *data)
{
	kbase_trace *trace_msg = data;
	char buffer[DEBUG_MESSAGE_SIZE];

	kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
	seq_printf(s, "%s\n", buffer);
	return 0;
}
static const struct seq_operations kbasep_trace_seq_ops = {
	.start = kbasep_trace_seq_start,
	.next = kbasep_trace_seq_next,
	.stop = kbasep_trace_seq_stop,
	.show = kbasep_trace_seq_show,
};
static int kbasep_trace_debugfs_open(struct inode *inode, struct file *file)
{
	kbase_device *kbdev = inode->i_private;
	unsigned long flags;

	struct trace_seq_state *state;

	state = __seq_open_private(file, &kbasep_trace_seq_ops, sizeof(*state));
	if (!state)
		return -ENOMEM;

	spin_lock_irqsave(&kbdev->trace_lock, flags);
	state->start = kbdev->trace_first_out;
	state->end = kbdev->trace_next_in;
	memcpy(state->trace_buf, kbdev->trace_rbuf, sizeof(state->trace_buf));
	spin_unlock_irqrestore(&kbdev->trace_lock, flags);

	return 0;
}
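/* kbasep_trace_debugfs_open() snapshots the whole ring buffer under
 * trace_lock into the private seq_file state, so a debugfs read walks a
 * consistent copy while tracing carries on against the live buffer. */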
static const struct file_operations kbasep_trace_debugfs_fops = {
	.open = kbasep_trace_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
STATIC void kbasep_trace_debugfs_init(kbase_device *kbdev)
{
	kbdev->trace_dentry = debugfs_create_file("mali_trace", S_IRUGO,
			kbdev->mali_debugfs_directory, kbdev,
			&kbasep_trace_debugfs_fops);
}
#else
STATIC void kbasep_trace_debugfs_init(kbase_device *kbdev)
{
}
#endif /* CONFIG_DEBUG_FS */
#else /* KBASE_TRACE_ENABLE != 0 */
STATIC mali_error kbasep_trace_init(kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
	return MALI_ERROR_NONE;
}
STATIC void kbasep_trace_term(kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}
STATIC void kbasep_trace_hook_wrapper(void *param)
{
	CSTD_UNUSED(param);
}
void kbasep_trace_add(kbase_device *kbdev, kbase_trace_code code, void *ctx, kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val)
{
	CSTD_UNUSED(kbdev);
	CSTD_UNUSED(code);
	CSTD_UNUSED(ctx);
	CSTD_UNUSED(katom);
	CSTD_UNUSED(gpu_addr);
	CSTD_UNUSED(flags);
	CSTD_UNUSED(refcount);
	CSTD_UNUSED(jobslot);
	CSTD_UNUSED(info_val);
}
void kbasep_trace_clear(kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}
void kbasep_trace_dump(kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}
#endif /* KBASE_TRACE_ENABLE != 0 */
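/* The grouped case labels in the two functions below act as a whitelist:
 * only the known FBDUMP / software-counter controls reach the
 * kbase_profiling_controls[] array, and anything else is rejected with an
 * error message. */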
void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value)
{
	switch (control) {
	case FBDUMP_CONTROL_ENABLE:
		/* fall through */
	case FBDUMP_CONTROL_RATE:
		/* fall through */
	case SW_COUNTER_ENABLE:
		/* fall through */
	case FBDUMP_CONTROL_RESIZE_FACTOR:
		kbdev->kbase_profiling_controls[control] = value;
		break;
	default:
		dev_err(kbdev->dev, "Profiling control %d not found\n", control);
		break;
	}
}
u32 kbase_get_profiling_control(struct kbase_device *kbdev, u32 control)
{
	u32 ret_value = 0;

	switch (control) {
	case FBDUMP_CONTROL_ENABLE:
		/* fall through */
	case FBDUMP_CONTROL_RATE:
		/* fall through */
	case SW_COUNTER_ENABLE:
		/* fall through */
	case FBDUMP_CONTROL_RESIZE_FACTOR:
		ret_value = kbdev->kbase_profiling_controls[control];
		break;
	default:
		dev_err(kbdev->dev, "Profiling control %d not found\n", control);
		break;
	}

	return ret_value;
}
/*
 * Called by gator to control the production of
 * profiling information at runtime
 */
void _mali_profiling_control(u32 action, u32 value)
{
	struct kbase_device *kbdev = NULL;

	/* find the first i.e. call with -1 */
	kbdev = kbase_find_device(-1);

	if (NULL != kbdev)
		kbase_set_profiling_control(kbdev, action, value);
}

KBASE_EXPORT_SYMBOL(_mali_profiling_control);