/*
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */
#define ENABLE_DEBUG_LOG
#include "platform/rk/custom_log.h"

#include <mali_kbase.h>
#include <mali_kbase_config_defaults.h>
#include <mali_kbase_uku.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_gator.h>
#include <mali_kbase_mem_linux.h>
#ifdef CONFIG_MALI_DEVFREQ
#include <backend/gpu/mali_kbase_devfreq.h>
#endif /* CONFIG_MALI_DEVFREQ */
#ifdef CONFIG_MALI_NO_MALI
#include "mali_kbase_model_linux.h"
#endif /* CONFIG_MALI_NO_MALI */
#include "mali_kbase_mem_profile_debugfs_buf_size.h"
#include "mali_kbase_debug_mem_view.h"
#include "mali_kbase_mem.h"
#include "mali_kbase_mem_pool_debugfs.h"
#if !MALI_CUSTOMER_RELEASE
#include "mali_kbase_regs_dump_debugfs.h"
#endif /* !MALI_CUSTOMER_RELEASE */
#include "mali_kbase_regs_history_debugfs.h"
#include <mali_kbase_hwaccess_backend.h>
#include <mali_kbase_hwaccess_jm.h>
#include <backend/gpu/mali_kbase_device_internal.h>

#ifdef CONFIG_KDS
#include <linux/kds.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#endif /* CONFIG_KDS */

#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/list.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/compat.h> /* is_compat_task */
#include <linux/mman.h>
#include <linux/version.h>
#ifdef CONFIG_MALI_PLATFORM_DEVICETREE
#include <linux/pm_runtime.h>
#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */
#include <mali_kbase_hw.h>
#include <platform/mali_kbase_platform_common.h>
#ifdef CONFIG_MALI_PLATFORM_FAKE
#include <platform/mali_kbase_platform_fake.h>
#endif /* CONFIG_MALI_PLATFORM_FAKE */
#ifdef CONFIG_SYNC
#include <mali_kbase_sync.h>
#endif /* CONFIG_SYNC */
#ifdef CONFIG_PM_DEVFREQ
#include <linux/devfreq.h>
#endif /* CONFIG_PM_DEVFREQ */
#include <linux/clk.h>
#include <linux/delay.h>

#include <mali_kbase_config.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
#include <linux/pm_opp.h>
#else
#include <linux/opp.h>
#endif

#include <mali_kbase_tlstream.h>

#include <mali_kbase_as_fault_debugfs.h>

/* GPU IRQ Tags */
#define JOB_IRQ_TAG 0
#define MMU_IRQ_TAG 1
#define GPU_IRQ_TAG 2

#if MALI_UNIT_TEST
static struct kbase_exported_test_data shared_kernel_test_data;
EXPORT_SYMBOL(shared_kernel_test_data);
#endif /* MALI_UNIT_TEST */
/** rk_ext: version of the RK extensions in this mali kernel module, a.k.a. rk_ko_ver. */
#define ROCKCHIP_VERSION (13)

static int kbase_dev_nr;

static DEFINE_MUTEX(kbase_dev_list_lock);
static LIST_HEAD(kbase_dev_list);

#define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
static inline void __compile_time_asserts(void)
{
	CSTD_COMPILE_TIME_ASSERT(sizeof(KERNEL_SIDE_DDK_VERSION_STRING) <= KBASE_GET_VERSION_BUFFER_SIZE);
}
121 static void kbase_create_timeline_objects(struct kbase_context *kctx)
123 struct kbase_device *kbdev = kctx->kbdev;
126 struct kbasep_kctx_list_element *element;
128 /* Create LPU objects. */
129 for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
131 &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
132 kbase_tlstream_tl_summary_new_lpu(lpu, lpu_id, *lpu);
135 /* Create Address Space objects. */
136 for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
137 kbase_tlstream_tl_summary_new_as(&kbdev->as[as_nr], as_nr);
139 /* Create GPU object and make it retain all LPUs and address spaces. */
140 kbase_tlstream_tl_summary_new_gpu(
142 kbdev->gpu_props.props.raw_props.gpu_id,
143 kbdev->gpu_props.num_cores);
145 for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
147 &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
148 kbase_tlstream_tl_summary_lifelink_lpu_gpu(lpu, kbdev);
150 for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
151 kbase_tlstream_tl_summary_lifelink_as_gpu(
155 /* Create object for each known context. */
156 mutex_lock(&kbdev->kctx_list_lock);
157 list_for_each_entry(element, &kbdev->kctx_list, link) {
158 kbase_tlstream_tl_summary_new_ctx(
160 (u32)(element->kctx->id),
161 (u32)(element->kctx->tgid));
/* Before releasing the lock, reset body stream buffers.
 * This will prevent context creation messages from being directed to
 * both the summary and body streams. */
kbase_tlstream_reset_body_streams();
mutex_unlock(&kbdev->kctx_list_lock);
/* Static objects are placed into the summary packet, which needs to be
 * transmitted first. Flush all streams to make them available to
 * user space. */
kbase_tlstream_flush_streams();
174 static void kbase_api_handshake(struct uku_version_check_args *version)
176 switch (version->major) {
177 #ifdef BASE_LEGACY_UK6_SUPPORT
179 /* We are backwards compatible with version 6,
180 * so pretend to be the old version */
184 #endif /* BASE_LEGACY_UK6_SUPPORT */
185 #ifdef BASE_LEGACY_UK7_SUPPORT
187 /* We are backwards compatible with version 7,
188 * so pretend to be the old version */
192 #endif /* BASE_LEGACY_UK7_SUPPORT */
193 #ifdef BASE_LEGACY_UK8_SUPPORT
195 /* We are backwards compatible with version 8,
196 * so pretend to be the old version */
200 #endif /* BASE_LEGACY_UK8_SUPPORT */
201 #ifdef BASE_LEGACY_UK9_SUPPORT
203 /* We are backwards compatible with version 9,
204 * so pretend to be the old version */
#endif /* BASE_LEGACY_UK9_SUPPORT */
209 case BASE_UK_VERSION_MAJOR:
210 /* set minor to be the lowest common */
211 version->minor = min_t(int, BASE_UK_VERSION_MINOR,
212 (int)version->minor);
215 /* We return our actual version regardless if it
216 * matches the version returned by userspace -
217 * userspace can bail if it can't handle this
219 version->major = BASE_UK_VERSION_MAJOR;
220 version->minor = BASE_UK_VERSION_MINOR;
* enum mali_error - Mali error codes shared with userspace
*
* This is a subset of the common Mali errors that can be returned to userspace.
* Values of matching user and kernel space enumerators MUST be the same.
* MALI_ERROR_NONE is guaranteed to be 0.
234 MALI_ERROR_OUT_OF_GPU_MEMORY,
235 MALI_ERROR_OUT_OF_MEMORY,
236 MALI_ERROR_FUNCTION_FAILED,
240 inited_mem = (1u << 0),
241 inited_js = (1u << 1),
242 inited_pm_runtime_init = (1u << 2),
243 #ifdef CONFIG_MALI_DEVFREQ
244 inited_devfreq = (1u << 3),
245 #endif /* CONFIG_MALI_DEVFREQ */
246 inited_tlstream = (1u << 4),
247 inited_backend_early = (1u << 5),
248 inited_backend_late = (1u << 6),
249 inited_device = (1u << 7),
250 inited_vinstr = (1u << 8),
251 #ifndef CONFIG_MALI_PRFCNT_SET_SECONDARY
252 inited_ipa = (1u << 9),
253 #endif /* CONFIG_MALI_PRFCNT_SET_SECONDARY */
254 inited_job_fault = (1u << 10),
255 inited_misc_register = (1u << 11),
256 inited_get_device = (1u << 12),
257 inited_sysfs_group = (1u << 13),
258 inited_dev_list = (1u << 14),
259 inited_debugfs = (1u << 15),
260 inited_gpu_device = (1u << 16),
261 inited_registers_map = (1u << 17),
262 inited_io_history = (1u << 18),
263 inited_power_control = (1u << 19),
264 inited_buslogger = (1u << 20)
268 #ifdef CONFIG_MALI_DEBUG
269 #define INACTIVE_WAIT_MS (5000)
271 void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive)
273 kbdev->driver_inactive = inactive;
274 wake_up(&kbdev->driver_inactive_wait);
276 /* Wait for any running IOCTLs to complete */
278 msleep(INACTIVE_WAIT_MS);
280 KBASE_EXPORT_TEST_API(kbase_set_driver_inactive);
281 #endif /* CONFIG_MALI_DEBUG */
283 static int kbase_dispatch(struct kbase_context *kctx, void * const args, u32 args_size)
285 struct kbase_device *kbdev;
286 union uk_header *ukh = args;
290 KBASE_DEBUG_ASSERT(ukh != NULL);
294 ukh->ret = MALI_ERROR_NONE; /* Be optimistic */
296 #ifdef CONFIG_MALI_DEBUG
297 wait_event(kbdev->driver_inactive_wait,
298 kbdev->driver_inactive == false);
299 #endif /* CONFIG_MALI_DEBUG */
301 if (UKP_FUNC_ID_CHECK_VERSION == id) {
302 struct uku_version_check_args *version_check;
304 if (args_size != sizeof(struct uku_version_check_args)) {
305 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
308 version_check = (struct uku_version_check_args *)args;
309 kbase_api_handshake(version_check);
310 /* save the proposed version number for later use */
311 kctx->api_version = KBASE_API_VERSION(version_check->major,
312 version_check->minor);
313 ukh->ret = MALI_ERROR_NONE;
317 /* block calls until version handshake */
318 if (kctx->api_version == 0)
321 if (!atomic_read(&kctx->setup_complete)) {
322 struct kbase_uk_set_flags *kbase_set_flags;
324 /* setup pending, try to signal that we'll do the setup,
325 * if setup was already in progress, err this call
327 if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0)
330 /* if unexpected call, will stay stuck in setup mode
331 * (is it the only call we accept?)
333 if (id != KBASE_FUNC_SET_FLAGS)
336 kbase_set_flags = (struct kbase_uk_set_flags *)args;
338 /* if not matching the expected call, stay in setup mode */
339 if (sizeof(*kbase_set_flags) != args_size)
342 /* if bad flags, will stay stuck in setup mode */
343 if (kbase_context_set_create_flags(kctx,
344 kbase_set_flags->create_flags) != 0)
345 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
347 atomic_set(&kctx->setup_complete, 1);
351 /* setup complete, perform normal operation */
353 case KBASE_FUNC_MEM_JIT_INIT:
355 struct kbase_uk_mem_jit_init *jit_init = args;
357 if (sizeof(*jit_init) != args_size)
360 if (kbase_region_tracker_init_jit(kctx,
362 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
365 case KBASE_FUNC_MEM_ALLOC:
367 struct kbase_uk_mem_alloc *mem = args;
368 struct kbase_va_region *reg;
370 if (sizeof(*mem) != args_size)
373 #if defined(CONFIG_64BIT)
374 if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
375 /* force SAME_VA if a 64-bit client */
376 mem->flags |= BASE_MEM_SAME_VA;
380 reg = kbase_mem_alloc(kctx, mem->va_pages,
381 mem->commit_pages, mem->extent,
382 &mem->flags, &mem->gpu_va,
385 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
388 case KBASE_FUNC_MEM_IMPORT: {
389 struct kbase_uk_mem_import *mem_import = args;
390 void __user *phandle;
392 if (sizeof(*mem_import) != args_size)
395 if (kbase_ctx_flag(kctx, KCTX_COMPAT))
396 phandle = compat_ptr(mem_import->phandle.compat_value);
399 phandle = mem_import->phandle.value;
401 if (mem_import->type == BASE_MEM_IMPORT_TYPE_INVALID) {
402 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
406 if (kbase_mem_import(kctx,
407 (enum base_mem_import_type)
411 &mem_import->va_pages,
412 &mem_import->flags)) {
413 mem_import->type = BASE_MEM_IMPORT_TYPE_INVALID;
414 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
418 case KBASE_FUNC_MEM_ALIAS: {
419 struct kbase_uk_mem_alias *alias = args;
420 struct base_mem_aliasing_info __user *user_ai;
421 struct base_mem_aliasing_info *ai;
423 if (sizeof(*alias) != args_size)
426 if (alias->nents > 2048) {
427 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
431 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
436 if (kbase_ctx_flag(kctx, KCTX_COMPAT))
437 user_ai = compat_ptr(alias->ai.compat_value);
440 user_ai = alias->ai.value;
442 ai = vmalloc(sizeof(*ai) * alias->nents);
445 ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
449 if (copy_from_user(ai, user_ai,
450 sizeof(*ai) * alias->nents)) {
451 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
455 alias->gpu_va = kbase_mem_alias(kctx, &alias->flags,
459 if (!alias->gpu_va) {
460 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
468 case KBASE_FUNC_MEM_COMMIT:
470 struct kbase_uk_mem_commit *commit = args;
472 if (sizeof(*commit) != args_size)
475 if (commit->gpu_addr & ~PAGE_MASK) {
476 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_COMMIT: commit->gpu_addr: passed parameter is invalid");
477 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
481 if (kbase_mem_commit(kctx, commit->gpu_addr,
483 (base_backing_threshold_status *)
484 &commit->result_subcode) != 0)
485 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
490 case KBASE_FUNC_MEM_QUERY:
492 struct kbase_uk_mem_query *query = args;
494 if (sizeof(*query) != args_size)
497 if (query->gpu_addr & ~PAGE_MASK) {
498 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->gpu_addr: passed parameter is invalid");
499 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
502 if (query->query != KBASE_MEM_QUERY_COMMIT_SIZE &&
503 query->query != KBASE_MEM_QUERY_VA_SIZE &&
504 query->query != KBASE_MEM_QUERY_FLAGS) {
505 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->query = %lld unknown", (unsigned long long)query->query);
506 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
510 if (kbase_mem_query(kctx, query->gpu_addr,
511 query->query, &query->value) != 0)
512 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
514 ukh->ret = MALI_ERROR_NONE;
519 case KBASE_FUNC_MEM_FLAGS_CHANGE:
521 struct kbase_uk_mem_flags_change *fc = args;
523 if (sizeof(*fc) != args_size)
526 if ((fc->gpu_va & ~PAGE_MASK) && (fc->gpu_va >= PAGE_SIZE)) {
527 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FLAGS_CHANGE: mem->gpu_va: passed parameter is invalid");
528 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
532 if (kbase_mem_flags_change(kctx, fc->gpu_va,
533 fc->flags, fc->mask) != 0)
534 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
538 case KBASE_FUNC_MEM_FREE:
540 struct kbase_uk_mem_free *mem = args;
542 if (sizeof(*mem) != args_size)
545 if ((mem->gpu_addr & ~PAGE_MASK) && (mem->gpu_addr >= PAGE_SIZE)) {
546 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FREE: mem->gpu_addr: passed parameter is invalid");
547 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
551 if (kbase_mem_free(kctx, mem->gpu_addr) != 0)
552 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
556 case KBASE_FUNC_JOB_SUBMIT:
558 struct kbase_uk_job_submit *job = args;
560 if (sizeof(*job) != args_size)
563 #ifdef BASE_LEGACY_UK6_SUPPORT
564 if (kbase_jd_submit(kctx, job, 0) != 0)
566 if (kbase_jd_submit(kctx, job) != 0)
567 #endif /* BASE_LEGACY_UK6_SUPPORT */
568 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
572 #ifdef BASE_LEGACY_UK6_SUPPORT
573 case KBASE_FUNC_JOB_SUBMIT_UK6:
575 struct kbase_uk_job_submit *job = args;
577 if (sizeof(*job) != args_size)
580 if (kbase_jd_submit(kctx, job, 1) != 0)
581 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
586 case KBASE_FUNC_SYNC:
588 struct kbase_uk_sync_now *sn = args;
590 if (sizeof(*sn) != args_size)
593 if (sn->sset.basep_sset.mem_handle.basep.handle & ~PAGE_MASK) {
594 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_SYNC: sn->sset.basep_sset.mem_handle: passed parameter is invalid");
595 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
599 #ifndef CONFIG_MALI_COH_USER
600 if (kbase_sync_now(kctx, &sn->sset) != 0)
601 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
606 case KBASE_FUNC_DISJOINT_QUERY:
608 struct kbase_uk_disjoint_query *dquery = args;
610 if (sizeof(*dquery) != args_size)
613 /* Get the disjointness counter value. */
614 dquery->counter = kbase_disjoint_event_get(kctx->kbdev);
618 case KBASE_FUNC_POST_TERM:
620 kbase_event_close(kctx);
624 case KBASE_FUNC_HWCNT_SETUP:
626 struct kbase_uk_hwcnt_setup *setup = args;
628 if (sizeof(*setup) != args_size)
631 mutex_lock(&kctx->vinstr_cli_lock);
632 if (kbase_vinstr_legacy_hwc_setup(kbdev->vinstr_ctx,
633 &kctx->vinstr_cli, setup) != 0)
634 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
635 mutex_unlock(&kctx->vinstr_cli_lock);
639 case KBASE_FUNC_HWCNT_DUMP:
642 mutex_lock(&kctx->vinstr_cli_lock);
643 if (kbase_vinstr_hwc_dump(kctx->vinstr_cli,
644 BASE_HWCNT_READER_EVENT_MANUAL) != 0)
645 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
646 mutex_unlock(&kctx->vinstr_cli_lock);
650 case KBASE_FUNC_HWCNT_CLEAR:
653 mutex_lock(&kctx->vinstr_cli_lock);
654 if (kbase_vinstr_hwc_clear(kctx->vinstr_cli) != 0)
655 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
656 mutex_unlock(&kctx->vinstr_cli_lock);
660 case KBASE_FUNC_HWCNT_READER_SETUP:
662 struct kbase_uk_hwcnt_reader_setup *setup = args;
664 if (sizeof(*setup) != args_size)
667 mutex_lock(&kctx->vinstr_cli_lock);
668 if (kbase_vinstr_hwcnt_reader_setup(kbdev->vinstr_ctx,
670 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
671 mutex_unlock(&kctx->vinstr_cli_lock);
675 case KBASE_FUNC_GPU_PROPS_REG_DUMP:
677 struct kbase_uk_gpuprops *setup = args;
679 if (sizeof(*setup) != args_size)
682 if (kbase_gpuprops_uk_get_props(kctx, setup) != 0)
683 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
686 case KBASE_FUNC_FIND_CPU_OFFSET:
688 struct kbase_uk_find_cpu_offset *find = args;
690 if (sizeof(*find) != args_size)
693 if (find->gpu_addr & ~PAGE_MASK) {
694 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_FIND_CPU_OFFSET: find->gpu_addr: passed parameter is invalid");
698 if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX) {
699 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
703 err = kbasep_find_enclosing_cpu_mapping_offset(
706 (uintptr_t) find->cpu_addr,
711 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
715 case KBASE_FUNC_GET_VERSION:
717 struct kbase_uk_get_ddk_version *get_version = (struct kbase_uk_get_ddk_version *)args;
719 if (sizeof(*get_version) != args_size)
722 /* version buffer size check is made in compile time assert */
723 memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
724 get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
725 get_version->rk_version = ROCKCHIP_VERSION;
729 case KBASE_FUNC_STREAM_CREATE:
732 struct kbase_uk_stream_create *screate = (struct kbase_uk_stream_create *)args;
734 if (sizeof(*screate) != args_size)
737 if (strnlen(screate->name, sizeof(screate->name)) >= sizeof(screate->name)) {
738 /* not NULL terminated */
739 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
743 if (kbase_stream_create(screate->name, &screate->fd) != 0)
744 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
746 ukh->ret = MALI_ERROR_NONE;
747 #else /* CONFIG_SYNC */
748 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
749 #endif /* CONFIG_SYNC */
752 case KBASE_FUNC_FENCE_VALIDATE:
755 struct kbase_uk_fence_validate *fence_validate = (struct kbase_uk_fence_validate *)args;
757 if (sizeof(*fence_validate) != args_size)
760 if (kbase_fence_validate(fence_validate->fd) != 0)
761 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
763 ukh->ret = MALI_ERROR_NONE;
764 #endif /* CONFIG_SYNC */
768 case KBASE_FUNC_SET_TEST_DATA:
771 struct kbase_uk_set_test_data *set_data = args;
773 shared_kernel_test_data = set_data->test_data;
774 shared_kernel_test_data.kctx.value = (void __user *)kctx;
775 shared_kernel_test_data.mm.value = (void __user *)current->mm;
776 ukh->ret = MALI_ERROR_NONE;
777 #endif /* MALI_UNIT_TEST */
781 case KBASE_FUNC_INJECT_ERROR:
783 #ifdef CONFIG_MALI_ERROR_INJECT
785 struct kbase_error_params params = ((struct kbase_uk_error_params *)args)->params;
788 spin_lock_irqsave(&kbdev->reg_op_lock, flags);
789 if (job_atom_inject_error(¶ms) != 0)
790 ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
792 ukh->ret = MALI_ERROR_NONE;
793 spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
795 #endif /* CONFIG_MALI_ERROR_INJECT */
799 case KBASE_FUNC_MODEL_CONTROL:
801 #ifdef CONFIG_MALI_NO_MALI
803 struct kbase_model_control_params params =
804 ((struct kbase_uk_model_control_params *)args)->params;
807 spin_lock_irqsave(&kbdev->reg_op_lock, flags);
808 if (gpu_model_control(kbdev->model, ¶ms) != 0)
809 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
811 ukh->ret = MALI_ERROR_NONE;
812 spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
814 #endif /* CONFIG_MALI_NO_MALI */
818 #ifdef BASE_LEGACY_UK8_SUPPORT
819 case KBASE_FUNC_KEEP_GPU_POWERED:
821 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_KEEP_GPU_POWERED: function is deprecated and disabled\n");
822 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
825 #endif /* BASE_LEGACY_UK8_SUPPORT */
827 case KBASE_FUNC_GET_PROFILING_CONTROLS:
829 struct kbase_uk_profiling_controls *controls =
830 (struct kbase_uk_profiling_controls *)args;
833 if (sizeof(*controls) != args_size)
836 for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
837 controls->profiling_controls[i] = kbase_get_profiling_control(kbdev, i);
842 /* used only for testing purposes; these controls are to be set by gator through gator API */
843 case KBASE_FUNC_SET_PROFILING_CONTROLS:
845 struct kbase_uk_profiling_controls *controls =
846 (struct kbase_uk_profiling_controls *)args;
849 if (sizeof(*controls) != args_size)
852 for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
853 _mali_profiling_control(i, controls->profiling_controls[i]);
858 case KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD:
860 struct kbase_uk_debugfs_mem_profile_add *add_data =
861 (struct kbase_uk_debugfs_mem_profile_add *)args;
863 char __user *user_buf;
865 if (sizeof(*add_data) != args_size)
868 if (add_data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
869 dev_err(kbdev->dev, "buffer too big\n");
874 if (kbase_ctx_flag(kctx, KCTX_COMPAT))
875 user_buf = compat_ptr(add_data->buf.compat_value);
878 user_buf = add_data->buf.value;
880 buf = kmalloc(add_data->len, GFP_KERNEL);
881 if (ZERO_OR_NULL_PTR(buf))
884 if (0 != copy_from_user(buf, user_buf, add_data->len)) {
885 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
890 if (kbasep_mem_profile_debugfs_insert(kctx, buf,
892 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
900 #ifdef CONFIG_MALI_NO_MALI
901 case KBASE_FUNC_SET_PRFCNT_VALUES:
904 struct kbase_uk_prfcnt_values *params =
905 ((struct kbase_uk_prfcnt_values *)args);
906 gpu_model_set_dummy_prfcnt_sample(params->data,
911 #endif /* CONFIG_MALI_NO_MALI */
912 #ifdef BASE_LEGACY_UK10_4_SUPPORT
913 case KBASE_FUNC_TLSTREAM_ACQUIRE_V10_4:
915 struct kbase_uk_tlstream_acquire_v10_4 *tlstream_acquire
918 if (sizeof(*tlstream_acquire) != args_size)
921 if (0 != kbase_tlstream_acquire(
923 &tlstream_acquire->fd, 0)) {
924 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
925 } else if (0 <= tlstream_acquire->fd) {
/* Summary stream was cleared during acquire.
 * Create static timeline objects that will be
 * read by the client. */
kbase_create_timeline_objects(kctx);
933 #endif /* BASE_LEGACY_UK10_4_SUPPORT */
934 case KBASE_FUNC_TLSTREAM_ACQUIRE:
936 struct kbase_uk_tlstream_acquire *tlstream_acquire =
939 if (sizeof(*tlstream_acquire) != args_size)
942 if (tlstream_acquire->flags & ~BASE_TLSTREAM_FLAGS_MASK)
945 if (0 != kbase_tlstream_acquire(
947 &tlstream_acquire->fd,
948 tlstream_acquire->flags)) {
949 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
950 } else if (0 <= tlstream_acquire->fd) {
/* Summary stream was cleared during acquire.
 * Create static timeline objects that will be
 * read by the client. */
kbase_create_timeline_objects(kctx);
958 case KBASE_FUNC_TLSTREAM_FLUSH:
960 struct kbase_uk_tlstream_flush *tlstream_flush =
963 if (sizeof(*tlstream_flush) != args_size)
966 kbase_tlstream_flush_streams();
970 case KBASE_FUNC_TLSTREAM_TEST:
972 struct kbase_uk_tlstream_test *tlstream_test = args;
974 if (sizeof(*tlstream_test) != args_size)
978 tlstream_test->tpw_count,
979 tlstream_test->msg_delay,
980 tlstream_test->msg_count,
981 tlstream_test->aux_msg);
984 case KBASE_FUNC_TLSTREAM_STATS:
986 struct kbase_uk_tlstream_stats *tlstream_stats = args;
988 if (sizeof(*tlstream_stats) != args_size)
991 kbase_tlstream_stats(
992 &tlstream_stats->bytes_collected,
993 &tlstream_stats->bytes_generated);
996 #endif /* MALI_UNIT_TEST */
998 case KBASE_FUNC_GET_CONTEXT_ID:
1000 struct kbase_uk_context_id *info = args;
1002 info->id = kctx->id;
1006 case KBASE_FUNC_SOFT_EVENT_UPDATE:
1008 struct kbase_uk_soft_event_update *update = args;
1010 if (sizeof(*update) != args_size)
1013 if (((update->new_status != BASE_JD_SOFT_EVENT_SET) &&
1014 (update->new_status != BASE_JD_SOFT_EVENT_RESET)) ||
1015 (update->flags != 0))
1018 if (kbase_soft_event_update(kctx, update->evt,
1019 update->new_status))
1020 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
1026 dev_err(kbdev->dev, "unknown ioctl %u\n", id);
1033 dev_err(kbdev->dev, "Wrong syscall size (%d) for %08x\n", args_size, id);
1038 static struct kbase_device *to_kbase_device(struct device *dev)
1040 return dev_get_drvdata(dev);
1043 static int assign_irqs(struct platform_device *pdev)
1045 struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
1051 /* 3 IRQ resources */
1052 for (i = 0; i < 3; i++) {
1053 struct resource *irq_res;
1056 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
1058 dev_err(kbdev->dev, "No IRQ resource at index %d\n", i);
1063 if (!strcmp(irq_res->name, "JOB")) {
1064 irqtag = JOB_IRQ_TAG;
1065 } else if (!strcmp(irq_res->name, "MMU")) {
1066 irqtag = MMU_IRQ_TAG;
1067 } else if (!strcmp(irq_res->name, "GPU")) {
1068 irqtag = GPU_IRQ_TAG;
1070 dev_err(&pdev->dev, "Invalid irq res name: '%s'\n",
1076 #endif /* CONFIG_OF */
1077 kbdev->irqs[irqtag].irq = irq_res->start;
1078 kbdev->irqs[irqtag].flags = irq_res->flags & IRQF_TRIGGER_MASK;
1085 * API to acquire device list mutex and
1086 * return pointer to the device list head
1088 const struct list_head *kbase_dev_list_get(void)
1090 mutex_lock(&kbase_dev_list_lock);
1091 return &kbase_dev_list;
1093 KBASE_EXPORT_TEST_API(kbase_dev_list_get);
1095 /* API to release the device list mutex */
1096 void kbase_dev_list_put(const struct list_head *dev_list)
1098 mutex_unlock(&kbase_dev_list_lock);
1100 KBASE_EXPORT_TEST_API(kbase_dev_list_put);
1102 /* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */
1103 struct kbase_device *kbase_find_device(int minor)
1105 struct kbase_device *kbdev = NULL;
1106 struct list_head *entry;
1107 const struct list_head *dev_list = kbase_dev_list_get();
1109 list_for_each(entry, dev_list) {
1110 struct kbase_device *tmp;
1112 tmp = list_entry(entry, struct kbase_device, entry);
1113 if (tmp->mdev.minor == minor || minor == -1) {
1115 get_device(kbdev->dev);
1119 kbase_dev_list_put(dev_list);
1123 EXPORT_SYMBOL(kbase_find_device);
1125 void kbase_release_device(struct kbase_device *kbdev)
1127 put_device(kbdev->dev);
1129 EXPORT_SYMBOL(kbase_release_device);
1131 #if KERNEL_VERSION(4, 4, 0) > LINUX_VERSION_CODE
* Kernel versions before v4.6 don't have
* kstrtobool_from_user().
1136 static int kstrtobool_from_user(const char __user *s, size_t count, bool *res)
1140 count = min(sizeof(buf), count);
1142 if (copy_from_user(buf, s, count))
1146 return strtobool(buf, res);
1150 static ssize_t write_ctx_infinite_cache(struct file *f, const char __user *ubuf, size_t size, loff_t *off)
1152 struct kbase_context *kctx = f->private_data;
1156 err = kstrtobool_from_user(ubuf, size, &value);
1161 kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);
1163 kbase_ctx_flag_clear(kctx, KCTX_INFINITE_CACHE);
1168 static ssize_t read_ctx_infinite_cache(struct file *f, char __user *ubuf, size_t size, loff_t *off)
1170 struct kbase_context *kctx = f->private_data;
1175 value = kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE);
1177 count = scnprintf(buf, sizeof(buf), "%s\n", value ? "Y" : "N");
1179 return simple_read_from_buffer(ubuf, size, off, buf, count);
1182 static const struct file_operations kbase_infinite_cache_fops = {
1183 .open = simple_open,
1184 .write = write_ctx_infinite_cache,
1185 .read = read_ctx_infinite_cache,
1188 static int kbase_open(struct inode *inode, struct file *filp)
1190 struct kbase_device *kbdev = NULL;
1191 struct kbase_context *kctx;
1193 #ifdef CONFIG_DEBUG_FS
1197 kbdev = kbase_find_device(iminor(inode));
1202 kctx = kbase_create_context(kbdev, is_compat_task());
1208 init_waitqueue_head(&kctx->event_queue);
1209 filp->private_data = kctx;
1212 if (kbdev->infinite_cache_active_default)
1213 kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);
1215 #ifdef CONFIG_DEBUG_FS
1216 snprintf(kctx_name, 64, "%d_%d", kctx->tgid, kctx->id);
1218 kctx->kctx_dentry = debugfs_create_dir(kctx_name,
1219 kbdev->debugfs_ctx_directory);
1221 if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
#ifdef CONFIG_MALI_COH_USER
/* if the cache is completely coherent at the hardware level, then remove the
 * infinite cache control support from debugfs.
 */
#else
debugfs_create_file("infinite_cache", 0644, kctx->kctx_dentry,
kctx, &kbase_infinite_cache_fops);
#endif /* CONFIG_MALI_COH_USER */
1235 mutex_init(&kctx->mem_profile_lock);
1237 kbasep_jd_debugfs_ctx_init(kctx);
1238 kbase_debug_mem_view_init(filp);
1240 kbase_debug_job_fault_context_init(kctx);
1242 kbase_mem_pool_debugfs_init(kctx->kctx_dentry, &kctx->mem_pool);
1244 kbase_jit_debugfs_init(kctx);
1245 #endif /* CONFIG_DEBUG_FS */
1247 dev_dbg(kbdev->dev, "created base context\n");
1250 struct kbasep_kctx_list_element *element;
1252 element = kzalloc(sizeof(*element), GFP_KERNEL);
1254 mutex_lock(&kbdev->kctx_list_lock);
1255 element->kctx = kctx;
1256 list_add(&element->link, &kbdev->kctx_list);
1257 kbase_tlstream_tl_new_ctx(
1259 (u32)(element->kctx->id),
1260 (u32)(element->kctx->tgid));
1261 mutex_unlock(&kbdev->kctx_list_lock);
1263 /* we don't treat this as a fail - just warn about it */
1264 dev_warn(kbdev->dev, "couldn't add kctx to kctx_list\n");
1270 kbase_release_device(kbdev);
1274 static int kbase_release(struct inode *inode, struct file *filp)
1276 struct kbase_context *kctx = filp->private_data;
1277 struct kbase_device *kbdev = kctx->kbdev;
1278 struct kbasep_kctx_list_element *element, *tmp;
1279 bool found_element = false;
1281 kbase_tlstream_tl_del_ctx(kctx);
1283 #ifdef CONFIG_DEBUG_FS
1284 debugfs_remove_recursive(kctx->kctx_dentry);
1285 kbasep_mem_profile_debugfs_remove(kctx);
1286 kbase_debug_job_fault_context_term(kctx);
1289 mutex_lock(&kbdev->kctx_list_lock);
1290 list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
1291 if (element->kctx == kctx) {
1292 list_del(&element->link);
1294 found_element = true;
1297 mutex_unlock(&kbdev->kctx_list_lock);
1299 dev_warn(kbdev->dev, "kctx not in kctx_list\n");
1301 filp->private_data = NULL;
1303 mutex_lock(&kctx->vinstr_cli_lock);
1304 /* If this client was performing hwcnt dumping and did not explicitly
1305 * detach itself, remove it from the vinstr core now */
1306 if (kctx->vinstr_cli) {
1307 struct kbase_uk_hwcnt_setup setup;
1309 setup.dump_buffer = 0llu;
1310 kbase_vinstr_legacy_hwc_setup(
1311 kbdev->vinstr_ctx, &kctx->vinstr_cli, &setup);
1313 mutex_unlock(&kctx->vinstr_cli_lock);
1315 kbase_destroy_context(kctx);
1317 dev_dbg(kbdev->dev, "deleted base context\n");
1318 kbase_release_device(kbdev);
1322 #define CALL_MAX_SIZE 536
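/*
 * Sizing note for kbase_ioctl() below: with CALL_MAX_SIZE = 536, the staging
 * buffer is declared as u64 msg[(536 + 7) >> 3], i.e. 67 u64 entries giving
 * 536 bytes of 8-byte-aligned storage, enough for the largest UK call
 * structure that can be copied in from user space.
 */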
1324 static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1326 u64 msg[(CALL_MAX_SIZE + 7) >> 3] = { 0xdeadbeefdeadbeefull }; /* alignment fixup */
1327 u32 size = _IOC_SIZE(cmd);
1328 struct kbase_context *kctx = filp->private_data;
1330 if (size > CALL_MAX_SIZE)
1333 if (0 != copy_from_user(&msg, (void __user *)arg, size)) {
1334 dev_err(kctx->kbdev->dev, "failed to copy ioctl argument into kernel space\n");
1338 if (kbase_dispatch(kctx, &msg, size) != 0)
1341 if (0 != copy_to_user((void __user *)arg, &msg, size)) {
1342 dev_err(kctx->kbdev->dev, "failed to copy results of UK call back to user space\n");
1348 static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
1350 struct kbase_context *kctx = filp->private_data;
1351 struct base_jd_event_v2 uevent;
1354 if (count < sizeof(uevent))
1358 while (kbase_event_dequeue(kctx, &uevent)) {
1362 if (filp->f_flags & O_NONBLOCK)
1365 if (wait_event_interruptible(kctx->event_queue,
1366 kbase_event_pending(kctx)) != 0)
1367 return -ERESTARTSYS;
1369 if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
1375 if (copy_to_user(buf, &uevent, sizeof(uevent)) != 0)
1378 buf += sizeof(uevent);
1380 count -= sizeof(uevent);
1381 } while (count >= sizeof(uevent));
1384 return out_count * sizeof(uevent);
1387 static unsigned int kbase_poll(struct file *filp, poll_table *wait)
1389 struct kbase_context *kctx = filp->private_data;
1391 poll_wait(filp, &kctx->event_queue, wait);
1392 if (kbase_event_pending(kctx))
1393 return POLLIN | POLLRDNORM;
1398 void kbase_event_wakeup(struct kbase_context *kctx)
1400 KBASE_DEBUG_ASSERT(kctx);
1402 wake_up_interruptible(&kctx->event_queue);
1405 KBASE_EXPORT_TEST_API(kbase_event_wakeup);
1407 static int kbase_check_flags(int flags)
1409 /* Enforce that the driver keeps the O_CLOEXEC flag so that execve() always
1410 * closes the file descriptor in a child process.
1412 if (0 == (flags & O_CLOEXEC))
1419 /* The following function is taken from the kernel and just
1420 * renamed. As it's not exported to modules we must copy-paste it here.
1423 static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info
1426 struct mm_struct *mm = current->mm;
1427 struct vm_area_struct *vma;
1428 unsigned long length, low_limit, high_limit, gap_start, gap_end;
1430 /* Adjust search length to account for worst case alignment overhead */
1431 length = info->length + info->align_mask;
1432 if (length < info->length)
1436 * Adjust search limits by the desired length.
1437 * See implementation comment at top of unmapped_area().
1439 gap_end = info->high_limit;
1440 if (gap_end < length)
1442 high_limit = gap_end - length;
1444 if (info->low_limit > high_limit)
1446 low_limit = info->low_limit + length;
1448 /* Check highest gap, which does not precede any rbtree node */
1449 gap_start = mm->highest_vm_end;
1450 if (gap_start <= high_limit)
1453 /* Check if rbtree root looks promising */
1454 if (RB_EMPTY_ROOT(&mm->mm_rb))
1456 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1457 if (vma->rb_subtree_gap < length)
1461 /* Visit right subtree if it looks promising */
1462 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
1463 if (gap_start <= high_limit && vma->vm_rb.rb_right) {
1464 struct vm_area_struct *right =
1465 rb_entry(vma->vm_rb.rb_right,
1466 struct vm_area_struct, vm_rb);
1467 if (right->rb_subtree_gap >= length) {
1474 /* Check if current node has a suitable gap */
1475 gap_end = vma->vm_start;
1476 if (gap_end < low_limit)
1478 if (gap_start <= high_limit && gap_end - gap_start >= length)
1481 /* Visit left subtree if it looks promising */
1482 if (vma->vm_rb.rb_left) {
1483 struct vm_area_struct *left =
1484 rb_entry(vma->vm_rb.rb_left,
1485 struct vm_area_struct, vm_rb);
1486 if (left->rb_subtree_gap >= length) {
1492 /* Go back up the rbtree to find next candidate node */
1494 struct rb_node *prev = &vma->vm_rb;
1495 if (!rb_parent(prev))
1497 vma = rb_entry(rb_parent(prev),
1498 struct vm_area_struct, vm_rb);
1499 if (prev == vma->vm_rb.rb_right) {
1500 gap_start = vma->vm_prev ?
1501 vma->vm_prev->vm_end : 0;
1508 /* We found a suitable gap. Clip it with the original high_limit. */
1509 if (gap_end > info->high_limit)
1510 gap_end = info->high_limit;
1513 /* Compute highest gap address at the desired alignment */
1514 gap_end -= info->length;
1515 gap_end -= (gap_end - info->align_offset) & info->align_mask;
1517 VM_BUG_ON(gap_end < info->low_limit);
1518 VM_BUG_ON(gap_end < gap_start);
1523 static unsigned long kbase_get_unmapped_area(struct file *filp,
1524 const unsigned long addr, const unsigned long len,
1525 const unsigned long pgoff, const unsigned long flags)
/* based on get_unmapped_area, but simplified slightly because some
 * values are known in advance */
1529 struct kbase_context *kctx = filp->private_data;
1530 struct mm_struct *mm = current->mm;
1531 struct vm_unmapped_area_info info;
1533 /* err on fixed address */
1534 if ((flags & MAP_FIXED) || addr)
1538 if (len > TASK_SIZE - SZ_2M)
1541 if (kbase_ctx_flag(kctx, KCTX_COMPAT))
1542 return current->mm->get_unmapped_area(filp, addr, len, pgoff,
1545 if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA)) {
1546 info.high_limit = kctx->same_va_end << PAGE_SHIFT;
1547 info.align_mask = 0;
1548 info.align_offset = 0;
1550 info.high_limit = min_t(unsigned long, mm->mmap_base,
1551 (kctx->same_va_end << PAGE_SHIFT));
1553 info.align_offset = SZ_2M;
1554 info.align_mask = SZ_2M - 1;
1556 info.align_mask = 0;
1557 info.align_offset = 0;
1563 info.low_limit = SZ_2M;
1564 return kbase_unmapped_area_topdown(&info);
1568 static const struct file_operations kbase_fops = {
1569 .owner = THIS_MODULE,
1571 .release = kbase_release,
1574 .unlocked_ioctl = kbase_ioctl,
1575 .compat_ioctl = kbase_ioctl,
1577 .check_flags = kbase_check_flags,
1579 .get_unmapped_area = kbase_get_unmapped_area,
1583 #ifndef CONFIG_MALI_NO_MALI
1584 void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value)
1586 writel(value, kbdev->reg + offset);
1589 u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset)
1591 return readl(kbdev->reg + offset);
1593 #endif /* !CONFIG_MALI_NO_MALI */
1595 /** Show callback for the @c power_policy sysfs file.
1597 * This function is called to get the contents of the @c power_policy sysfs
1598 * file. This is a list of the available policies with the currently active one
1599 * surrounded by square brackets.
1601 * @param dev The device this sysfs file is for
1602 * @param attr The attributes of the sysfs file
1603 * @param buf The output buffer for the sysfs file contents
1605 * @return The number of bytes output to @c buf.
1607 static ssize_t show_policy(struct device *dev, struct device_attribute *attr, char *const buf)
1609 struct kbase_device *kbdev;
1610 const struct kbase_pm_policy *current_policy;
1611 const struct kbase_pm_policy *const *policy_list;
1616 kbdev = to_kbase_device(dev);
1621 current_policy = kbase_pm_get_policy(kbdev);
1623 policy_count = kbase_pm_list_policies(&policy_list);
1625 for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
1626 if (policy_list[i] == current_policy)
1627 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
1629 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
1632 if (ret < PAGE_SIZE - 1) {
1633 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
1635 buf[PAGE_SIZE - 2] = '\n';
1636 buf[PAGE_SIZE - 1] = '\0';
1637 ret = PAGE_SIZE - 1;
1643 /** Store callback for the @c power_policy sysfs file.
1645 * This function is called when the @c power_policy sysfs file is written to.
1646 * It matches the requested policy against the available policies and if a
1647 * matching policy is found calls @ref kbase_pm_set_policy to change the
1650 * @param dev The device with sysfs file is for
1651 * @param attr The attributes of the sysfs file
1652 * @param buf The value written to the sysfs file
1653 * @param count The number of bytes written to the sysfs file
1655 * @return @c count if the function succeeded. An error code on failure.
1657 static ssize_t set_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1659 struct kbase_device *kbdev;
1660 const struct kbase_pm_policy *new_policy = NULL;
1661 const struct kbase_pm_policy *const *policy_list;
1665 kbdev = to_kbase_device(dev);
1670 policy_count = kbase_pm_list_policies(&policy_list);
1672 for (i = 0; i < policy_count; i++) {
1673 if (sysfs_streq(policy_list[i]->name, buf)) {
1674 new_policy = policy_list[i];
1680 dev_err(dev, "power_policy: policy not found\n");
1684 kbase_pm_set_policy(kbdev, new_policy);
1689 /** The sysfs file @c power_policy.
1691 * This is used for obtaining information about the available policies,
1692 * determining which policy is currently active, and changing the active
1695 static DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
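/*
 * Illustrative use from user space (the sysfs path below is an assumption
 * about where the GPU platform device attributes are exposed, not part of
 * this driver):
 *
 *   $ cat /sys/class/misc/mali0/device/power_policy
 *   [demand] coarse_demand always_on
 *   $ echo always_on > /sys/class/misc/mali0/device/power_policy
 *
 * The policy names come from kbase_pm_list_policies(); the bracketed entry is
 * the currently active policy, matching the format produced by show_policy().
 */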
1697 /** Show callback for the @c core_availability_policy sysfs file.
1699 * This function is called to get the contents of the @c core_availability_policy
1700 * sysfs file. This is a list of the available policies with the currently
1701 * active one surrounded by square brackets.
1703 * @param dev The device this sysfs file is for
1704 * @param attr The attributes of the sysfs file
1705 * @param buf The output buffer for the sysfs file contents
1707 * @return The number of bytes output to @c buf.
1709 static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char * const buf)
1711 struct kbase_device *kbdev;
1712 const struct kbase_pm_ca_policy *current_policy;
1713 const struct kbase_pm_ca_policy *const *policy_list;
1718 kbdev = to_kbase_device(dev);
1723 current_policy = kbase_pm_ca_get_policy(kbdev);
1725 policy_count = kbase_pm_ca_list_policies(&policy_list);
1727 for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
1728 if (policy_list[i] == current_policy)
1729 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
1731 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
1734 if (ret < PAGE_SIZE - 1) {
1735 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
1737 buf[PAGE_SIZE - 2] = '\n';
1738 buf[PAGE_SIZE - 1] = '\0';
1739 ret = PAGE_SIZE - 1;
1745 /** Store callback for the @c core_availability_policy sysfs file.
1747 * This function is called when the @c core_availability_policy sysfs file is
1748 * written to. It matches the requested policy against the available policies
1749 * and if a matching policy is found calls @ref kbase_pm_set_policy to change
1752 * @param dev The device with sysfs file is for
1753 * @param attr The attributes of the sysfs file
1754 * @param buf The value written to the sysfs file
1755 * @param count The number of bytes written to the sysfs file
1757 * @return @c count if the function succeeded. An error code on failure.
1759 static ssize_t set_ca_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1761 struct kbase_device *kbdev;
1762 const struct kbase_pm_ca_policy *new_policy = NULL;
1763 const struct kbase_pm_ca_policy *const *policy_list;
1767 kbdev = to_kbase_device(dev);
1772 policy_count = kbase_pm_ca_list_policies(&policy_list);
1774 for (i = 0; i < policy_count; i++) {
1775 if (sysfs_streq(policy_list[i]->name, buf)) {
1776 new_policy = policy_list[i];
1782 dev_err(dev, "core_availability_policy: policy not found\n");
1786 kbase_pm_ca_set_policy(kbdev, new_policy);
1791 /** The sysfs file @c core_availability_policy
1793 * This is used for obtaining information about the available policies,
1794 * determining which policy is currently active, and changing the active
1797 static DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_policy);
1799 /** Show callback for the @c core_mask sysfs file.
1801 * This function is called to get the contents of the @c core_mask sysfs
1804 * @param dev The device this sysfs file is for
1805 * @param attr The attributes of the sysfs file
1806 * @param buf The output buffer for the sysfs file contents
1808 * @return The number of bytes output to @c buf.
1810 static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char * const buf)
1812 struct kbase_device *kbdev;
1815 kbdev = to_kbase_device(dev);
1820 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1821 "Current core mask (JS0) : 0x%llX\n",
1822 kbdev->pm.debug_core_mask[0]);
1823 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1824 "Current core mask (JS1) : 0x%llX\n",
1825 kbdev->pm.debug_core_mask[1]);
1826 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1827 "Current core mask (JS2) : 0x%llX\n",
1828 kbdev->pm.debug_core_mask[2]);
1829 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1830 "Available core mask : 0x%llX\n",
1831 kbdev->gpu_props.props.raw_props.shader_present);
1836 /** Store callback for the @c core_mask sysfs file.
1838 * This function is called when the @c core_mask sysfs file is written to.
1840 * @param dev The device with sysfs file is for
1841 * @param attr The attributes of the sysfs file
1842 * @param buf The value written to the sysfs file
1843 * @param count The number of bytes written to the sysfs file
1845 * @return @c count if the function succeeded. An error code on failure.
1847 static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1849 struct kbase_device *kbdev;
1850 u64 new_core_mask[3];
1853 kbdev = to_kbase_device(dev);
1858 items = sscanf(buf, "%llx %llx %llx",
1859 &new_core_mask[0], &new_core_mask[1],
1863 new_core_mask[1] = new_core_mask[2] = new_core_mask[0];
1865 if (items == 1 || items == 3) {
1866 u64 shader_present =
1867 kbdev->gpu_props.props.raw_props.shader_present;
1868 u64 group0_core_mask =
1869 kbdev->gpu_props.props.coherency_info.group[0].
1872 if ((new_core_mask[0] & shader_present) != new_core_mask[0] ||
1873 !(new_core_mask[0] & group0_core_mask) ||
1874 (new_core_mask[1] & shader_present) !=
1876 !(new_core_mask[1] & group0_core_mask) ||
1877 (new_core_mask[2] & shader_present) !=
1879 !(new_core_mask[2] & group0_core_mask)) {
1880 dev_err(dev, "power_policy: invalid core specification\n");
1884 if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] ||
1885 kbdev->pm.debug_core_mask[1] !=
1887 kbdev->pm.debug_core_mask[2] !=
1889 unsigned long flags;
1891 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
1893 kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0],
1894 new_core_mask[1], new_core_mask[2]);
1896 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1902 dev_err(kbdev->dev, "Couldn't process set_core_mask write operation.\n"
1903 "Use format <core_mask>\n"
1904 "or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
1908 /** The sysfs file @c core_mask.
1910 * This is used to restrict shader core availability for debugging purposes.
1911 * Reading it will show the current core mask and the mask of cores available.
1912 * Writing to it will set the current core mask.
1914 static DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask);
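/*
 * Illustrative use from user space (path is an assumption, as above): write
 * either a single mask applied to all three job slots, or one mask per slot:
 *
 *   $ echo 0xf > /sys/class/misc/mali0/device/core_mask
 *   $ echo 0xf 0x3 0x3 > /sys/class/misc/mali0/device/core_mask
 *
 * Each mask must be a subset of the "Available core mask" reported on read
 * and must contain at least one core of coherency group 0, as checked in
 * set_core_mask() above.
 */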
1917 * set_soft_job_timeout() - Store callback for the soft_job_timeout sysfs
1920 * @dev: The device this sysfs file is for.
1921 * @attr: The attributes of the sysfs file.
1922 * @buf: The value written to the sysfs file.
1923 * @count: The number of bytes written to the sysfs file.
1925 * This allows setting the timeout for software jobs. Waiting soft event wait
1926 * jobs will be cancelled after this period expires, while soft fence wait jobs
1927 * will print debug information if the fence debug feature is enabled.
1929 * This is expressed in milliseconds.
1931 * Return: count if the function succeeded. An error code on failure.
1933 static ssize_t set_soft_job_timeout(struct device *dev,
1934 struct device_attribute *attr,
1935 const char *buf, size_t count)
1937 struct kbase_device *kbdev;
1938 int soft_job_timeout_ms;
1940 kbdev = to_kbase_device(dev);
1944 if ((kstrtoint(buf, 0, &soft_job_timeout_ms) != 0) ||
1945 (soft_job_timeout_ms <= 0))
1948 atomic_set(&kbdev->js_data.soft_job_timeout_ms,
1949 soft_job_timeout_ms);
1955 * show_soft_job_timeout() - Show callback for the soft_job_timeout sysfs
1958 * This will return the timeout for the software jobs.
1960 * @dev: The device this sysfs file is for.
1961 * @attr: The attributes of the sysfs file.
1962 * @buf: The output buffer for the sysfs file contents.
1964 * Return: The number of bytes output to buf.
1966 static ssize_t show_soft_job_timeout(struct device *dev,
1967 struct device_attribute *attr,
1970 struct kbase_device *kbdev;
1972 kbdev = to_kbase_device(dev);
1976 return scnprintf(buf, PAGE_SIZE, "%i\n",
1977 atomic_read(&kbdev->js_data.soft_job_timeout_ms));
1980 static DEVICE_ATTR(soft_job_timeout, S_IRUGO | S_IWUSR,
1981 show_soft_job_timeout, set_soft_job_timeout);
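/*
 * Illustrative use from user space (path is an assumption): the timeout is
 * read and written in milliseconds, and only positive integers are accepted
 * by set_soft_job_timeout():
 *
 *   $ echo 5000 > /sys/class/misc/mali0/device/soft_job_timeout
 *   $ cat /sys/class/misc/mali0/device/soft_job_timeout
 *   5000
 */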
1983 static u32 timeout_ms_to_ticks(struct kbase_device *kbdev, long timeout_ms,
1984 int default_ticks, u32 old_ticks)
1986 if (timeout_ms > 0) {
1987 u64 ticks = timeout_ms * 1000000ULL;
1988 do_div(ticks, kbdev->js_data.scheduling_period_ns);
1992 } else if (timeout_ms < 0) {
1993 return default_ticks;
/** Store callback for the @c js_timeouts sysfs file.
 *
 * This function is called when the @c js_timeouts sysfs file is written to.
 * The file contains eight values separated by whitespace. The values are
 * basically the same as JS_SOFT_STOP_TICKS, JS_SOFT_STOP_TICKS_CL,
 * JS_HARD_STOP_TICKS_SS, JS_HARD_STOP_TICKS_CL, JS_HARD_STOP_TICKS_DUMPING,
 * JS_RESET_TICKS_SS, JS_RESET_TICKS_CL, JS_RESET_TICKS_DUMPING
 * configuration values (in that order), with the difference that the
 * js_timeouts values are expressed in MILLISECONDS.
 *
 * The js_timeouts sysfs file allows the values currently in
 * use by the job scheduler to be overridden. Note that a value needs to
 * be other than 0 for it to override the current job scheduler value.
2012 * @param dev The device with sysfs file is for
2013 * @param attr The attributes of the sysfs file
2014 * @param buf The value written to the sysfs file
2015 * @param count The number of bytes written to the sysfs file
2017 * @return @c count if the function succeeded. An error code on failure.
2019 static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2021 struct kbase_device *kbdev;
2023 long js_soft_stop_ms;
2024 long js_soft_stop_ms_cl;
2025 long js_hard_stop_ms_ss;
2026 long js_hard_stop_ms_cl;
2027 long js_hard_stop_ms_dumping;
2028 long js_reset_ms_ss;
2029 long js_reset_ms_cl;
2030 long js_reset_ms_dumping;
2032 kbdev = to_kbase_device(dev);
2036 items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld",
2037 &js_soft_stop_ms, &js_soft_stop_ms_cl,
2038 &js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
2039 &js_hard_stop_ms_dumping, &js_reset_ms_ss,
2040 &js_reset_ms_cl, &js_reset_ms_dumping);
2043 struct kbasep_js_device_data *js_data = &kbdev->js_data;
2044 unsigned long flags;
2046 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2048 #define UPDATE_TIMEOUT(ticks_name, ms_name, default) do {\
2049 js_data->ticks_name = timeout_ms_to_ticks(kbdev, ms_name, \
2050 default, js_data->ticks_name); \
2051 dev_dbg(kbdev->dev, "Overriding " #ticks_name \
2052 " with %lu ticks (%lu ms)\n", \
2053 (unsigned long)js_data->ticks_name, \
2057 UPDATE_TIMEOUT(soft_stop_ticks, js_soft_stop_ms,
2058 DEFAULT_JS_SOFT_STOP_TICKS);
2059 UPDATE_TIMEOUT(soft_stop_ticks_cl, js_soft_stop_ms_cl,
2060 DEFAULT_JS_SOFT_STOP_TICKS_CL);
2061 UPDATE_TIMEOUT(hard_stop_ticks_ss, js_hard_stop_ms_ss,
2062 kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408) ?
2063 DEFAULT_JS_HARD_STOP_TICKS_SS_8408 :
2064 DEFAULT_JS_HARD_STOP_TICKS_SS);
2065 UPDATE_TIMEOUT(hard_stop_ticks_cl, js_hard_stop_ms_cl,
2066 DEFAULT_JS_HARD_STOP_TICKS_CL);
2067 UPDATE_TIMEOUT(hard_stop_ticks_dumping,
2068 js_hard_stop_ms_dumping,
2069 DEFAULT_JS_HARD_STOP_TICKS_DUMPING);
2070 UPDATE_TIMEOUT(gpu_reset_ticks_ss, js_reset_ms_ss,
2071 kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408) ?
2072 DEFAULT_JS_RESET_TICKS_SS_8408 :
2073 DEFAULT_JS_RESET_TICKS_SS);
2074 UPDATE_TIMEOUT(gpu_reset_ticks_cl, js_reset_ms_cl,
2075 DEFAULT_JS_RESET_TICKS_CL);
2076 UPDATE_TIMEOUT(gpu_reset_ticks_dumping, js_reset_ms_dumping,
2077 DEFAULT_JS_RESET_TICKS_DUMPING);
2079 kbase_js_set_timeouts(kbdev);
2081 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2086 dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n"
2087 "Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> <hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> <reset_ms_dumping>\n"
2088 "Write 0 for no change, -1 to restore default timeout\n");
2092 static unsigned long get_js_timeout_in_ms(
2093 u32 scheduling_period_ns,
2096 u64 ms = (u64)ticks * scheduling_period_ns;
2098 do_div(ms, 1000000UL);
2102 /** Show callback for the @c js_timeouts sysfs file.
2104 * This function is called to get the contents of the @c js_timeouts sysfs
* file. It returns the last values written to the js_timeouts sysfs file.
* If the file has not been written yet, the values shown are the current
* settings in use.
2108 * @param dev The device this sysfs file is for
2109 * @param attr The attributes of the sysfs file
2110 * @param buf The output buffer for the sysfs file contents
2112 * @return The number of bytes output to @c buf.
2114 static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *attr, char * const buf)
2116 struct kbase_device *kbdev;
2118 unsigned long js_soft_stop_ms;
2119 unsigned long js_soft_stop_ms_cl;
2120 unsigned long js_hard_stop_ms_ss;
2121 unsigned long js_hard_stop_ms_cl;
2122 unsigned long js_hard_stop_ms_dumping;
2123 unsigned long js_reset_ms_ss;
2124 unsigned long js_reset_ms_cl;
2125 unsigned long js_reset_ms_dumping;
2126 u32 scheduling_period_ns;
2128 kbdev = to_kbase_device(dev);
2132 scheduling_period_ns = kbdev->js_data.scheduling_period_ns;
2134 #define GET_TIMEOUT(name) get_js_timeout_in_ms(\
2135 scheduling_period_ns, \
2136 kbdev->js_data.name)
2138 js_soft_stop_ms = GET_TIMEOUT(soft_stop_ticks);
2139 js_soft_stop_ms_cl = GET_TIMEOUT(soft_stop_ticks_cl);
2140 js_hard_stop_ms_ss = GET_TIMEOUT(hard_stop_ticks_ss);
2141 js_hard_stop_ms_cl = GET_TIMEOUT(hard_stop_ticks_cl);
2142 js_hard_stop_ms_dumping = GET_TIMEOUT(hard_stop_ticks_dumping);
2143 js_reset_ms_ss = GET_TIMEOUT(gpu_reset_ticks_ss);
2144 js_reset_ms_cl = GET_TIMEOUT(gpu_reset_ticks_cl);
2145 js_reset_ms_dumping = GET_TIMEOUT(gpu_reset_ticks_dumping);
2149 ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
2150 js_soft_stop_ms, js_soft_stop_ms_cl,
2151 js_hard_stop_ms_ss, js_hard_stop_ms_cl,
2152 js_hard_stop_ms_dumping, js_reset_ms_ss,
2153 js_reset_ms_cl, js_reset_ms_dumping);
2155 if (ret >= PAGE_SIZE) {
2156 buf[PAGE_SIZE - 2] = '\n';
2157 buf[PAGE_SIZE - 1] = '\0';
2158 ret = PAGE_SIZE - 1;
/** The sysfs file @c js_timeouts.
 *
 * This is used to override the current job scheduler values for
 * JS_SOFT_STOP_TICKS
 * JS_SOFT_STOP_TICKS_CL
 * JS_HARD_STOP_TICKS_SS
 * JS_HARD_STOP_TICKS_CL
 * JS_HARD_STOP_TICKS_DUMPING
 * JS_RESET_TICKS_SS
 * JS_RESET_TICKS_CL
 * JS_RESET_TICKS_DUMPING.
2176 static DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts);
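/*
 * Illustrative use from user space (path is an assumption): eight millisecond
 * values in the order documented for set_js_timeouts() above; 0 leaves a
 * timeout unchanged and -1 restores its default:
 *
 *   $ echo 500 500 5000 5000 15000 15000 15000 15000 > \
 *         /sys/class/misc/mali0/device/js_timeouts
 *   $ echo 0 0 0 0 0 0 0 -1 > /sys/class/misc/mali0/device/js_timeouts
 */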
2178 static u32 get_new_js_timeout(
2181 u32 new_scheduling_period_ns)
2183 u64 ticks = (u64)old_period * (u64)old_ticks;
2184 do_div(ticks, new_scheduling_period_ns);
return ticks ? ticks : 1;
2189 * set_js_scheduling_period - Store callback for the js_scheduling_period sysfs
2191 * @dev: The device the sysfs file is for
2192 * @attr: The attributes of the sysfs file
2193 * @buf: The value written to the sysfs file
2194 * @count: The number of bytes written to the sysfs file
2196 * This function is called when the js_scheduling_period sysfs file is written
* to. It checks the data written, and if valid updates the js_scheduling_period
* value.
2200 * Return: @c count if the function succeeded. An error code on failure.
2202 static ssize_t set_js_scheduling_period(struct device *dev,
2203 struct device_attribute *attr, const char *buf, size_t count)
2205 struct kbase_device *kbdev;
2207 unsigned int js_scheduling_period;
2208 u32 new_scheduling_period_ns;
2210 struct kbasep_js_device_data *js_data;
2211 unsigned long flags;
2213 kbdev = to_kbase_device(dev);
2217 js_data = &kbdev->js_data;
2219 ret = kstrtouint(buf, 0, &js_scheduling_period);
2220 if (ret || !js_scheduling_period) {
2221 dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n"
2222 "Use format <js_scheduling_period_ms>\n");
2226 new_scheduling_period_ns = js_scheduling_period * 1000000;
2228 /* Update scheduling timeouts */
2229 mutex_lock(&js_data->runpool_mutex);
2230 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2232 /* If no contexts have been scheduled since js_timeouts was last written
2233 * to, the new timeouts might not have been latched yet. So check if an
2234 * update is pending and use the new values if necessary. */
2236 /* Use previous 'new' scheduling period as a base if present. */
2237 old_period = js_data->scheduling_period_ns;
2239 #define SET_TIMEOUT(name) \
2240 (js_data->name = get_new_js_timeout(\
2242 kbdev->js_data.name, \
2243 new_scheduling_period_ns))
2245 SET_TIMEOUT(soft_stop_ticks);
2246 SET_TIMEOUT(soft_stop_ticks_cl);
2247 SET_TIMEOUT(hard_stop_ticks_ss);
2248 SET_TIMEOUT(hard_stop_ticks_cl);
2249 SET_TIMEOUT(hard_stop_ticks_dumping);
2250 SET_TIMEOUT(gpu_reset_ticks_ss);
2251 SET_TIMEOUT(gpu_reset_ticks_cl);
2252 SET_TIMEOUT(gpu_reset_ticks_dumping);
2256 js_data->scheduling_period_ns = new_scheduling_period_ns;
2258 kbase_js_set_timeouts(kbdev);
2260 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2261 mutex_unlock(&js_data->runpool_mutex);
2263 dev_dbg(kbdev->dev, "JS scheduling period: %dms\n",
2264 js_scheduling_period);
2270 * show_js_scheduling_period - Show callback for the js_scheduling_period sysfs
2272 * @dev: The device this sysfs file is for.
2273 * @attr: The attributes of the sysfs file.
2274 * @buf: The output buffer to receive the GPU information.
* This function is called to get the current period used for JS scheduling.
2279 * Return: The number of bytes output to buf.
2281 static ssize_t show_js_scheduling_period(struct device *dev,
2282 struct device_attribute *attr, char * const buf)
2284 struct kbase_device *kbdev;
2288 kbdev = to_kbase_device(dev);
2292 period = kbdev->js_data.scheduling_period_ns;
2294 ret = scnprintf(buf, PAGE_SIZE, "%d\n",
2300 static DEVICE_ATTR(js_scheduling_period, S_IRUGO | S_IWUSR,
2301 show_js_scheduling_period, set_js_scheduling_period);
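/*
 * Illustrative usage (editorial example; the sysfs path assumes the first
 * device registers as "mali0" and may differ per platform):
 *
 *   echo 100 > /sys/class/misc/mali0/device/js_scheduling_period
 *   cat /sys/class/misc/mali0/device/js_scheduling_period
 *
 * The value is interpreted in milliseconds; all job scheduler tick counts
 * are rescaled so the existing timeouts keep roughly the same wall-clock
 * duration under the new period.
 */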
2303 #if !MALI_CUSTOMER_RELEASE
2304 /** Store callback for the @c force_replay sysfs file.
2306 * @param dev The device this sysfs file is for
2307 * @param attr The attributes of the sysfs file
2308 * @param buf The value written to the sysfs file
2309 * @param count The number of bytes written to the sysfs file
2311 * @return @c count if the function succeeded. An error code on failure.
2313 static ssize_t set_force_replay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2315 struct kbase_device *kbdev;
2317 kbdev = to_kbase_device(dev);
2321 if (!strncmp("limit=", buf, MIN(6, count))) {
2322 int force_replay_limit;
2323 int items = sscanf(buf, "limit=%u", &force_replay_limit);
2326 kbdev->force_replay_random = false;
2327 kbdev->force_replay_limit = force_replay_limit;
2328 kbdev->force_replay_count = 0;
2332 } else if (!strncmp("random_limit", buf, MIN(12, count))) {
2333 kbdev->force_replay_random = true;
2334 kbdev->force_replay_count = 0;
2337 } else if (!strncmp("norandom_limit", buf, MIN(14, count))) {
2338 kbdev->force_replay_random = false;
2339 kbdev->force_replay_limit = KBASEP_FORCE_REPLAY_DISABLED;
2340 kbdev->force_replay_count = 0;
2343 } else if (!strncmp("core_req=", buf, MIN(9, count))) {
2344 unsigned int core_req;
2345 int items = sscanf(buf, "core_req=%x", &core_req);
2348 kbdev->force_replay_core_req = (base_jd_core_req)core_req;
2353 dev_err(kbdev->dev, "Couldn't process force_replay write operation.\nPossible settings: limit=<limit>, random_limit, norandom_limit, core_req=<core_req>\n");
2357 /** Show callback for the @c force_replay sysfs file.
2359 * This function is called to get the contents of the @c force_replay sysfs
2360 * file. It returns the last set value written to the force_replay sysfs file.
2361 * If the file didn't get written yet, the values will be 0.
2363 * @param dev The device this sysfs file is for
2364 * @param attr The attributes of the sysfs file
2365 * @param buf The output buffer for the sysfs file contents
2367 * @return The number of bytes output to @c buf.
2369 static ssize_t show_force_replay(struct device *dev,
2370 struct device_attribute *attr, char * const buf)
2372 struct kbase_device *kbdev;
2375 kbdev = to_kbase_device(dev);
2379 if (kbdev->force_replay_random)
2380 ret = scnprintf(buf, PAGE_SIZE,
2381 "limit=0\nrandom_limit\ncore_req=%x\n",
2382 kbdev->force_replay_core_req);
2384 ret = scnprintf(buf, PAGE_SIZE,
2385 "limit=%u\nnorandom_limit\ncore_req=%x\n",
2386 kbdev->force_replay_limit,
2387 kbdev->force_replay_core_req);
2389 if (ret >= PAGE_SIZE) {
2390 buf[PAGE_SIZE - 2] = '\n';
2391 buf[PAGE_SIZE - 1] = '\0';
2392 ret = PAGE_SIZE - 1;
2398 /** The sysfs file @c force_replay.
2401 static DEVICE_ATTR(force_replay, S_IRUGO | S_IWUSR, show_force_replay,
2402 set_force_replay);
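/*
 * Illustrative usage (editorial example; the accepted formats follow the
 * parser above):
 *
 *   echo "limit=32" > force_replay         force a replay every 32 jobs
 *   echo "random_limit" > force_replay     replay at random intervals
 *   echo "norandom_limit" > force_replay   disable forced replays
 *   echo "core_req=10" > force_replay      core requirements to match (hex)
 */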
2403 #endif /* !MALI_CUSTOMER_RELEASE */
2405 #ifdef CONFIG_MALI_DEBUG
2406 static ssize_t set_js_softstop_always(struct device *dev,
2407 struct device_attribute *attr, const char *buf, size_t count)
2409 struct kbase_device *kbdev;
2411 int softstop_always;
2413 kbdev = to_kbase_device(dev);
2417 ret = kstrtoint(buf, 0, &softstop_always);
2418 if (ret || ((softstop_always != 0) && (softstop_always != 1))) {
2419 dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n"
2420 "Use format <soft_stop_always>\n");
2424 kbdev->js_data.softstop_always = (bool) softstop_always;
2425 dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n",
2426 (kbdev->js_data.softstop_always) ?
2427 "Enabled" : "Disabled");
2431 static ssize_t show_js_softstop_always(struct device *dev,
2432 struct device_attribute *attr, char * const buf)
2434 struct kbase_device *kbdev;
2437 kbdev = to_kbase_device(dev);
2441 ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);
2443 if (ret >= PAGE_SIZE) {
2444 buf[PAGE_SIZE - 2] = '\n';
2445 buf[PAGE_SIZE - 1] = '\0';
2446 ret = PAGE_SIZE - 1;
2453 * By default, soft-stops are disabled when only a single context is present. The ability to
2454 * enable soft-stop when only a single context is present can be used for debug and unit-testing purposes.
2455 * (see the CL t6xx_stress_1 unit test for an example of where this feature is used.)
2457 static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
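/*
 * Illustrative usage (editorial example):
 *
 *   echo 1 > js_softstop_always   enable soft-stop with a single context
 *   echo 0 > js_softstop_always   restore the default behaviour
 */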
2458 #endif /* CONFIG_MALI_DEBUG */
2460 #ifdef CONFIG_MALI_DEBUG
2461 typedef void (kbasep_debug_command_func) (struct kbase_device *);
2463 enum kbasep_debug_command_code {
2464 KBASEP_DEBUG_COMMAND_DUMPTRACE,
2466 /* This must be the last enum */
2467 KBASEP_DEBUG_COMMAND_COUNT
2470 struct kbasep_debug_command {
2472 kbasep_debug_command_func *func;
2475 /** Debug commands supported by the driver */
2476 static const struct kbasep_debug_command debug_commands[] = {
2479 .func = &kbasep_trace_dump,
2483 /** Show callback for the @c debug_command sysfs file.
2485 * This function is called to get the contents of the @c debug_command sysfs
2486 * file. This is a list of the available debug commands, separated by newlines.
2488 * @param dev The device this sysfs file is for
2489 * @param attr The attributes of the sysfs file
2490 * @param buf The output buffer for the sysfs file contents
2492 * @return The number of bytes output to @c buf.
2494 static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char * const buf)
2496 struct kbase_device *kbdev;
2500 kbdev = to_kbase_device(dev);
2505 for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
2506 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);
2508 if (ret >= PAGE_SIZE) {
2509 buf[PAGE_SIZE - 2] = '\n';
2510 buf[PAGE_SIZE - 1] = '\0';
2511 ret = PAGE_SIZE - 1;
2517 /** Store callback for the @c debug_command sysfs file.
2519 * This function is called when the @c debug_command sysfs file is written to.
2520 * It matches the requested command against the available commands, and if
2521 * a matching command is found calls the associated function from
2522 * @ref debug_commands to issue the command.
2524 * @param dev The device this sysfs file is for
2525 * @param attr The attributes of the sysfs file
2526 * @param buf The value written to the sysfs file
2527 * @param count The number of bytes written to the sysfs file
2529 * @return @c count if the function succeeded. An error code on failure.
2531 static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2533 struct kbase_device *kbdev;
2536 kbdev = to_kbase_device(dev);
2541 for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
2542 if (sysfs_streq(debug_commands[i].str, buf)) {
2543 debug_commands[i].func(kbdev);
2548 /* Debug Command not found */
2549 dev_err(dev, "debug_command: command not known\n");
2553 /** The sysfs file @c debug_command.
2555 * This is used to issue general debug commands to the device driver.
2556 * Reading it will produce a list of debug commands, separated by newlines.
2557 * Writing to it with one of those commands will issue said command.
2559 static DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug);
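/*
 * Illustrative usage (editorial example; the available command names are
 * whatever a read of the file lists):
 *
 *   cat debug_command                 list the supported debug commands
 *   echo <command> > debug_command    issue one of the listed commands
 */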
2560 #endif /* CONFIG_MALI_DEBUG */
2563 * kbase_show_gpuinfo - Show callback for the gpuinfo sysfs entry.
2564 * @dev: The device this sysfs file is for.
2565 * @attr: The attributes of the sysfs file.
2566 * @buf: The output buffer to receive the GPU information.
2568 * This function is called to get a description of the present Mali
2569 * GPU via the gpuinfo sysfs entry. This includes the GPU family, the
2570 * number of cores, the hardware version and the raw product id. For
2571 * example:
2573 * Mali-T60x MP4 r0p0 0x6956
2575 * Return: The number of bytes output to buf.
2577 static ssize_t kbase_show_gpuinfo(struct device *dev,
2578 struct device_attribute *attr, char *buf)
2580 static const struct gpu_product_id_name {
2583 } gpu_product_id_names[] = {
2584 { .id = GPU_ID_PI_T60X, .name = "Mali-T60x" },
2585 { .id = GPU_ID_PI_T62X, .name = "Mali-T62x" },
2586 { .id = GPU_ID_PI_T72X, .name = "Mali-T72x" },
2587 { .id = GPU_ID_PI_T76X, .name = "Mali-T76x" },
2588 { .id = GPU_ID_PI_T82X, .name = "Mali-T82x" },
2589 { .id = GPU_ID_PI_T83X, .name = "Mali-T83x" },
2590 { .id = GPU_ID_PI_T86X, .name = "Mali-T86x" },
2591 { .id = GPU_ID_PI_TFRX, .name = "Mali-T88x" },
2592 { .id = GPU_ID2_PRODUCT_TMIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
2593 .name = "Mali-G71" },
2594 { .id = GPU_ID2_PRODUCT_THEX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
2595 .name = "Mali-THEx" },
2597 const char *product_name = "(Unknown Mali GPU)";
2598 struct kbase_device *kbdev;
2600 unsigned product_id, product_id_mask;
2604 kbdev = to_kbase_device(dev);
2608 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
2609 product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
2610 is_new_format = GPU_ID_IS_NEW_FORMAT(product_id);
2612 product_id_mask = (is_new_format ?
2613 GPU_ID2_PRODUCT_MODEL :
2614 GPU_ID_VERSION_PRODUCT_ID) >>
2615 GPU_ID_VERSION_PRODUCT_ID_SHIFT;
2617 for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
2618 const struct gpu_product_id_name *p = &gpu_product_id_names[i];
2620 if ((GPU_ID_IS_NEW_FORMAT(p->id) == is_new_format) &&
2621 (p->id & product_id_mask) ==
2622 (product_id & product_id_mask)) {
2623 product_name = p->name;
2628 return scnprintf(buf, PAGE_SIZE, "%s MP%d r%dp%d 0x%04X\n",
2629 product_name, kbdev->gpu_props.num_cores,
2630 (gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT,
2631 (gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT,
2634 static DEVICE_ATTR(gpuinfo, S_IRUGO, kbase_show_gpuinfo, NULL);
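/*
 * Illustrative output (editorial example; the exact values depend on the
 * GPU present):
 *
 *   $ cat gpuinfo
 *   Mali-T76x MP4 r1p0 0x0750
 *
 * i.e. product name, core count, r<major>p<minor> hardware revision and the
 * raw product id, as described above.
 */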
2637 * set_dvfs_period - Store callback for the dvfs_period sysfs file.
2638 * @dev: The device this sysfs file is for
2639 * @attr: The attributes of the sysfs file
2640 * @buf: The value written to the sysfs file
2641 * @count: The number of bytes written to the sysfs file
2643 * This function is called when the dvfs_period sysfs file is written to. It
2644 * checks the data written, and if valid updates the DVFS period variable,
2646 * Return: @c count if the function succeeded. An error code on failure.
2648 static ssize_t set_dvfs_period(struct device *dev,
2649 struct device_attribute *attr, const char *buf, size_t count)
2651 struct kbase_device *kbdev;
2655 kbdev = to_kbase_device(dev);
2659 ret = kstrtoint(buf, 0, &dvfs_period);
2660 if (ret || dvfs_period <= 0) {
2661 dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n"
2662 "Use format <dvfs_period_ms>\n");
2666 kbdev->pm.dvfs_period = dvfs_period;
2667 dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period);
2673 * show_dvfs_period - Show callback for the dvfs_period sysfs entry.
2674 * @dev: The device this sysfs file is for.
2675 * @attr: The attributes of the sysfs file.
2676 * @buf: The output buffer to receive the current DVFS period.
2678 * This function is called to get the current period used for the DVFS sample
2681 * Return: The number of bytes output to buf.
2683 static ssize_t show_dvfs_period(struct device *dev,
2684 struct device_attribute *attr, char * const buf)
2686 struct kbase_device *kbdev;
2689 kbdev = to_kbase_device(dev);
2693 ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period);
2698 static DEVICE_ATTR(dvfs_period, S_IRUGO | S_IWUSR, show_dvfs_period,
2699 set_dvfs_period);
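/*
 * Illustrative usage (editorial example):
 *
 *   echo 100 > dvfs_period   sample GPU utilisation for DVFS every 100 ms
 */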
2702 * set_pm_poweroff - Store callback for the pm_poweroff sysfs file.
2703 * @dev: The device this sysfs file is for
2704 * @attr: The attributes of the sysfs file
2705 * @buf: The value written to the sysfs file
2706 * @count: The number of bytes written to the sysfs file
2708 * This function is called when the pm_poweroff sysfs file is written to.
2710 * This file contains three values separated by whitespace. The values
2711 * are gpu_poweroff_time (the period of the poweroff timer, in ns),
2712 * poweroff_shader_ticks (the number of poweroff timer ticks before an idle
2713 * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer
2714 * ticks before the GPU is powered off), in that order.
2716 * Return: @c count if the function succeeded. An error code on failure.
2718 static ssize_t set_pm_poweroff(struct device *dev,
2719 struct device_attribute *attr, const char *buf, size_t count)
2721 struct kbase_device *kbdev;
2723 s64 gpu_poweroff_time;
2724 int poweroff_shader_ticks, poweroff_gpu_ticks;
2726 kbdev = to_kbase_device(dev);
2730 items = sscanf(buf, "%llu %u %u", &gpu_poweroff_time,
2731 &poweroff_shader_ticks,
2732 &poweroff_gpu_ticks);
2734 dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n"
2735 "Use format <gpu_poweroff_time_ns> <poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
2739 kbdev->pm.gpu_poweroff_time = HR_TIMER_DELAY_NSEC(gpu_poweroff_time);
2740 kbdev->pm.poweroff_shader_ticks = poweroff_shader_ticks;
2741 kbdev->pm.poweroff_gpu_ticks = poweroff_gpu_ticks;
2747 * show_pm_poweroff - Show callback for the pm_poweroff sysfs entry.
2748 * @dev: The device this sysfs file is for.
2749 * @attr: The attributes of the sysfs file.
2750 * @buf: The output buffer to receive the poweroff timer settings.
2752 * This function is called to get the current poweroff timer period and tick counts.
2755 * Return: The number of bytes output to buf.
2757 static ssize_t show_pm_poweroff(struct device *dev,
2758 struct device_attribute *attr, char * const buf)
2760 struct kbase_device *kbdev;
2763 kbdev = to_kbase_device(dev);
2767 ret = scnprintf(buf, PAGE_SIZE, "%llu %u %u\n",
2768 ktime_to_ns(kbdev->pm.gpu_poweroff_time),
2769 kbdev->pm.poweroff_shader_ticks,
2770 kbdev->pm.poweroff_gpu_ticks);
2775 static DEVICE_ATTR(pm_poweroff, S_IRUGO | S_IWUSR, show_pm_poweroff,
2776 set_pm_poweroff);
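/*
 * Illustrative usage (editorial example; the three fields are the poweroff
 * timer period in ns, the shader poweroff tick count and the GPU poweroff
 * tick count, as described above):
 *
 *   echo "400000 2 2" > pm_poweroff
 */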
2779 * set_reset_timeout - Store callback for the reset_timeout sysfs file.
2780 * @dev: The device this sysfs file is for
2781 * @attr: The attributes of the sysfs file
2782 * @buf: The value written to the sysfs file
2783 * @count: The number of bytes written to the sysfs file
2785 * This function is called when the reset_timeout sysfs file is written to. It
2786 * checks the data written, and if valid updates the reset timeout.
2788 * Return: @c count if the function succeeded. An error code on failure.
2790 static ssize_t set_reset_timeout(struct device *dev,
2791 struct device_attribute *attr, const char *buf, size_t count)
2793 struct kbase_device *kbdev;
2797 kbdev = to_kbase_device(dev);
2801 ret = kstrtoint(buf, 0, &reset_timeout);
2802 if (ret || reset_timeout <= 0) {
2803 dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n"
2804 "Use format <reset_timeout_ms>\n");
2808 kbdev->reset_timeout_ms = reset_timeout;
2809 dev_dbg(kbdev->dev, "Reset timeout: %dms\n", reset_timeout);
2815 * show_reset_timeout - Show callback for the reset_timeout sysfs entry.
2816 * @dev: The device this sysfs file is for.
2817 * @attr: The attributes of the sysfs file.
2818 * @buf: The output buffer to receive the current reset timeout.
2820 * This function is called to get the current reset timeout.
2822 * Return: The number of bytes output to buf.
2824 static ssize_t show_reset_timeout(struct device *dev,
2825 struct device_attribute *attr, char * const buf)
2827 struct kbase_device *kbdev;
2830 kbdev = to_kbase_device(dev);
2834 ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms);
2839 static DEVICE_ATTR(reset_timeout, S_IRUGO | S_IWUSR, show_reset_timeout,
2840 set_reset_timeout);
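/*
 * Illustrative usage (editorial example):
 *
 *   echo 3000 > reset_timeout   wait 3000 ms for a hung GPU before resetting
 */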
2844 static ssize_t show_mem_pool_size(struct device *dev,
2845 struct device_attribute *attr, char * const buf)
2847 struct kbase_device *kbdev;
2850 kbdev = to_kbase_device(dev);
2854 ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
2855 kbase_mem_pool_size(&kbdev->mem_pool));
2860 static ssize_t set_mem_pool_size(struct device *dev,
2861 struct device_attribute *attr, const char *buf, size_t count)
2863 struct kbase_device *kbdev;
2867 kbdev = to_kbase_device(dev);
2871 err = kstrtoul(buf, 0, (unsigned long *)&new_size);
2875 kbase_mem_pool_trim(&kbdev->mem_pool, new_size);
2880 static DEVICE_ATTR(mem_pool_size, S_IRUGO | S_IWUSR, show_mem_pool_size,
2881 set_mem_pool_size);
2883 static ssize_t show_mem_pool_max_size(struct device *dev,
2884 struct device_attribute *attr, char * const buf)
2886 struct kbase_device *kbdev;
2889 kbdev = to_kbase_device(dev);
2893 ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
2894 kbase_mem_pool_max_size(&kbdev->mem_pool));
2899 static ssize_t set_mem_pool_max_size(struct device *dev,
2900 struct device_attribute *attr, const char *buf, size_t count)
2902 struct kbase_device *kbdev;
2903 size_t new_max_size;
2906 kbdev = to_kbase_device(dev);
2910 err = kstrtoul(buf, 0, (unsigned long *)&new_max_size);
2914 kbase_mem_pool_set_max_size(&kbdev->mem_pool, new_max_size);
2919 static DEVICE_ATTR(mem_pool_max_size, S_IRUGO | S_IWUSR, show_mem_pool_max_size,
2920 set_mem_pool_max_size);
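/*
 * Illustrative usage (editorial example; both values are expressed in pages,
 * matching the mem_pool helpers above):
 *
 *   cat mem_pool_size               pages currently held in the pool
 *   echo 0 > mem_pool_size          trim the pool down to zero pages
 *   echo 16384 > mem_pool_max_size  cap the pool at 16384 pages
 */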
2923 static int kbasep_protected_mode_enter(struct kbase_device *kbdev)
2925 kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
2926 GPU_COMMAND_SET_PROTECTED_MODE, NULL);
2930 static bool kbasep_protected_mode_supported(struct kbase_device *kbdev)
2935 static struct kbase_protected_ops kbasep_protected_ops = {
2936 .protected_mode_enter = kbasep_protected_mode_enter,
2937 .protected_mode_reset = NULL,
2938 .protected_mode_supported = kbasep_protected_mode_supported,
2941 static void kbasep_protected_mode_init(struct kbase_device *kbdev)
2943 kbdev->protected_ops = NULL;
2945 if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
2946 /* Use native protected ops */
2947 kbdev->protected_ops = &kbasep_protected_ops;
2949 #ifdef PROTECTED_CALLBACKS
2951 kbdev->protected_ops = PROTECTED_CALLBACKS;
2954 if (kbdev->protected_ops)
2955 kbdev->protected_mode_support =
2956 kbdev->protected_ops->protected_mode_supported(kbdev);
2958 kbdev->protected_mode_support = false;
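/*
 * Editorial summary of the selection above: native protected-mode ops are
 * used when the hardware advertises BASE_HW_FEATURE_PROTECTED_MODE;
 * otherwise a platform may provide its own handlers via PROTECTED_CALLBACKS.
 * With no ops available, protected mode is reported as unsupported.
 */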
2961 #ifdef CONFIG_MALI_NO_MALI
2962 static int kbase_common_reg_map(struct kbase_device *kbdev)
2966 static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
2969 #else /* CONFIG_MALI_NO_MALI */
2970 static int kbase_common_reg_map(struct kbase_device *kbdev)
2974 if (!request_mem_region(kbdev->reg_start, kbdev->reg_size, dev_name(kbdev->dev))) {
2975 dev_err(kbdev->dev, "Register window unavailable\n");
2980 kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size);
2982 dev_err(kbdev->dev, "Can't remap register window\n");
2990 release_mem_region(kbdev->reg_start, kbdev->reg_size);
2995 static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
2998 iounmap(kbdev->reg);
2999 release_mem_region(kbdev->reg_start, kbdev->reg_size);
3001 kbdev->reg_start = 0;
3002 kbdev->reg_size = 0;
3005 #endif /* CONFIG_MALI_NO_MALI */
3007 static int registers_map(struct kbase_device * const kbdev)
3010 /* the first memory resource is the physical address of the GPU
3012 struct platform_device *pdev = to_platform_device(kbdev->dev);
3013 struct resource *reg_res;
3016 reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3018 dev_err(kbdev->dev, "Invalid register resource\n");
3022 kbdev->reg_start = reg_res->start;
3023 kbdev->reg_size = resource_size(reg_res);
3025 err = kbase_common_reg_map(kbdev);
3027 dev_err(kbdev->dev, "Failed to map registers\n");
3034 static void registers_unmap(struct kbase_device *kbdev)
3036 kbase_common_reg_unmap(kbdev);
3039 static int power_control_init(struct platform_device *pdev)
3041 struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
3047 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
3048 && defined(CONFIG_REGULATOR)
3049 kbdev->regulator = regulator_get_optional(kbdev->dev, "mali");
3050 if (IS_ERR_OR_NULL(kbdev->regulator)) {
3051 err = PTR_ERR(kbdev->regulator);
3052 kbdev->regulator = NULL;
3053 if (err == -EPROBE_DEFER) {
3054 dev_err(&pdev->dev, "Failed to get regulator\n");
3057 dev_info(kbdev->dev,
3058 "Continuing without Mali regulator control\n");
3059 /* Allow probe to continue without regulator */
3061 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
3063 kbdev->clock = clk_get(kbdev->dev, "clk_mali");
3064 if (IS_ERR_OR_NULL(kbdev->clock)) {
3065 err = PTR_ERR(kbdev->clock);
3066 kbdev->clock = NULL;
3067 if (err == -EPROBE_DEFER) {
3068 dev_err(&pdev->dev, "Failed to get clock\n");
3071 dev_info(kbdev->dev, "Continuing without Mali clock control\n");
3072 /* Allow probe to continue without clock. */
3074 err = clk_prepare(kbdev->clock);
3077 "Failed to prepare and enable clock (%d)\n",
3083 mutex_init(&kbdev->mutex_for_clk);
3084 kbdev->is_power_off = false;
3086 #if defined(CONFIG_OF) && defined(CONFIG_PM_OPP)
3087 /* Register the OPPs if they are available in device tree */
3088 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) \
3089 || defined(LSK_OPPV2_BACKPORT)
3090 err = dev_pm_opp_of_add_table(kbdev->dev);
3091 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
3092 err = of_init_opp_table(kbdev->dev);
3095 #endif /* LINUX_VERSION_CODE */
3097 dev_dbg(kbdev->dev, "OPP table not found\n");
3098 #endif /* CONFIG_OF && CONFIG_PM_OPP */
3104 if (kbdev->clock != NULL) {
3105 clk_put(kbdev->clock);
3106 kbdev->clock = NULL;
3109 #ifdef CONFIG_REGULATOR
3110 if (kbdev->regulator) {
3111 regulator_put(kbdev->regulator);
3112 kbdev->regulator = NULL;
3119 static void power_control_term(struct kbase_device *kbdev)
3121 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
3122 dev_pm_opp_of_remove_table(kbdev->dev);
3123 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
3124 of_free_opp_table(kbdev->dev);
3128 clk_unprepare(kbdev->clock);
3129 clk_put(kbdev->clock);
3130 kbdev->clock = NULL;
3133 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
3134 && defined(CONFIG_REGULATOR)
3135 if (kbdev->regulator) {
3136 regulator_put(kbdev->regulator);
3137 kbdev->regulator = NULL;
3139 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
3142 #ifdef CONFIG_DEBUG_FS
3144 #if KBASE_GPU_RESET_EN
3145 #include <mali_kbase_hwaccess_jm.h>
3147 static void trigger_quirks_reload(struct kbase_device *kbdev)
3149 kbase_pm_context_active(kbdev);
3150 if (kbase_prepare_to_reset_gpu(kbdev))
3151 kbase_reset_gpu(kbdev);
3152 kbase_pm_context_idle(kbdev);
3155 #define MAKE_QUIRK_ACCESSORS(type) \
3156 static int type##_quirks_set(void *data, u64 val) \
3158 struct kbase_device *kbdev; \
3159 kbdev = (struct kbase_device *)data; \
3160 kbdev->hw_quirks_##type = (u32)val; \
3161 trigger_quirks_reload(kbdev); \
3165 static int type##_quirks_get(void *data, u64 *val) \
3167 struct kbase_device *kbdev;\
3168 kbdev = (struct kbase_device *)data;\
3169 *val = kbdev->hw_quirks_##type;\
3172 DEFINE_SIMPLE_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get,\
3173 type##_quirks_set, "%llu\n")
3175 MAKE_QUIRK_ACCESSORS(sc);
3176 MAKE_QUIRK_ACCESSORS(tiler);
3177 MAKE_QUIRK_ACCESSORS(mmu);
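/*
 * For reference (editorial note): each MAKE_QUIRK_ACCESSORS(x) invocation
 * above expands to x_quirks_set(), x_quirks_get() and, via
 * DEFINE_SIMPLE_ATTRIBUTE, a fops_x_quirks file operations object. For
 * example, MAKE_QUIRK_ACCESSORS(sc) provides the fops_sc_quirks used for the
 * "quirks_sc" debugfs file created in kbase_device_debugfs_init().
 */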
3179 #endif /* KBASE_GPU_RESET_EN */
3182 * debugfs_protected_debug_mode_read - "protected_debug_mode" debugfs read
3183 * @file: File object to read is for
3184 * @buf: User buffer to populate with data
3185 * @len: Length of user buffer
3186 * @ppos: Offset within file object
3188 * Retrieves the current status of protected debug mode
3189 * (0 = disabled, 1 = enabled)
3191 * Return: Number of bytes added to user buffer
3193 static ssize_t debugfs_protected_debug_mode_read(struct file *file,
3194 char __user *buf, size_t len, loff_t *ppos)
3196 struct kbase_device *kbdev = (struct kbase_device *)file->private_data;
3200 kbase_pm_context_active(kbdev);
3201 gpu_status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS), NULL);
3202 kbase_pm_context_idle(kbdev);
3204 if (gpu_status & GPU_DBGEN)
3205 ret_val = simple_read_from_buffer(buf, len, ppos, "1\n", 2);
3207 ret_val = simple_read_from_buffer(buf, len, ppos, "0\n", 2);
3213 * struct fops_protected_debug_mode - "protected_debug_mode" debugfs fops
3215 * Contains the file operations for the "protected_debug_mode" debugfs file
3217 static const struct file_operations fops_protected_debug_mode = {
3218 .open = simple_open,
3219 .read = debugfs_protected_debug_mode_read,
3220 .llseek = default_llseek,
3223 static int kbase_device_debugfs_init(struct kbase_device *kbdev)
3225 struct dentry *debugfs_ctx_defaults_directory;
3228 kbdev->mali_debugfs_directory = debugfs_create_dir(kbdev->devname,
3230 if (!kbdev->mali_debugfs_directory) {
3231 dev_err(kbdev->dev, "Couldn't create mali debugfs directory\n");
3236 kbdev->debugfs_ctx_directory = debugfs_create_dir("ctx",
3237 kbdev->mali_debugfs_directory);
3238 if (!kbdev->debugfs_ctx_directory) {
3239 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx directory\n");
3244 debugfs_ctx_defaults_directory = debugfs_create_dir("defaults",
3245 kbdev->debugfs_ctx_directory);
3246 if (!debugfs_ctx_defaults_directory) {
3247 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx defaults directory\n");
3252 #if !MALI_CUSTOMER_RELEASE
3253 kbasep_regs_dump_debugfs_init(kbdev);
3254 #endif /* !MALI_CUSTOMER_RELEASE */
3255 kbasep_regs_history_debugfs_init(kbdev);
3257 kbase_debug_job_fault_debugfs_init(kbdev);
3258 kbasep_gpu_memory_debugfs_init(kbdev);
3259 kbase_as_fault_debugfs_init(kbdev);
3260 #if KBASE_GPU_RESET_EN
3261 debugfs_create_file("quirks_sc", 0644,
3262 kbdev->mali_debugfs_directory, kbdev,
3264 debugfs_create_file("quirks_tiler", 0644,
3265 kbdev->mali_debugfs_directory, kbdev,
3266 &fops_tiler_quirks);
3267 debugfs_create_file("quirks_mmu", 0644,
3268 kbdev->mali_debugfs_directory, kbdev,
3270 #endif /* KBASE_GPU_RESET_EN */
3272 #ifndef CONFIG_MALI_COH_USER
3273 debugfs_create_bool("infinite_cache", 0644,
3274 debugfs_ctx_defaults_directory,
3275 (bool*)&(kbdev->infinite_cache_active_default));
3276 #endif /* CONFIG_MALI_COH_USER */
3278 debugfs_create_size_t("mem_pool_max_size", 0644,
3279 debugfs_ctx_defaults_directory,
3280 &kbdev->mem_pool_max_size_default);
3282 if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
3283 debugfs_create_file("protected_debug_mode", S_IRUGO,
3284 kbdev->mali_debugfs_directory, kbdev,
3285 &fops_protected_debug_mode);
3288 #if KBASE_TRACE_ENABLE
3289 kbasep_trace_debugfs_init(kbdev);
3290 #endif /* KBASE_TRACE_ENABLE */
3292 #ifdef CONFIG_MALI_TRACE_TIMELINE
3293 kbasep_trace_timeline_debugfs_init(kbdev);
3294 #endif /* CONFIG_MALI_TRACE_TIMELINE */
3299 debugfs_remove_recursive(kbdev->mali_debugfs_directory);
3303 static void kbase_device_debugfs_term(struct kbase_device *kbdev)
3305 debugfs_remove_recursive(kbdev->mali_debugfs_directory);
3308 #else /* CONFIG_DEBUG_FS */
3309 static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
3314 static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }
3315 #endif /* CONFIG_DEBUG_FS */
3317 static void kbase_device_coherency_init(struct kbase_device *kbdev, u32 gpu_id)
3320 u32 supported_coherency_bitmap =
3321 kbdev->gpu_props.props.raw_props.coherency_mode;
3322 const void *coherency_override_dts;
3323 u32 override_coherency;
3324 #endif /* CONFIG_OF */
3326 kbdev->system_coherency = COHERENCY_NONE;
3328 /* device tree may override the coherency */
3330 coherency_override_dts = of_get_property(kbdev->dev->of_node,
3333 if (coherency_override_dts) {
3335 override_coherency = be32_to_cpup(coherency_override_dts);
3337 if ((override_coherency <= COHERENCY_NONE) &&
3338 (supported_coherency_bitmap &
3339 COHERENCY_FEATURE_BIT(override_coherency))) {
3341 kbdev->system_coherency = override_coherency;
3343 dev_info(kbdev->dev,
3344 "Using coherency mode %u set from dtb",
3345 override_coherency);
3347 dev_warn(kbdev->dev,
3348 "Ignoring unsupported coherency mode %u set from dtb",
3349 override_coherency);
3352 #endif /* CONFIG_OF */
3354 kbdev->gpu_props.props.raw_props.coherency_mode =
3355 kbdev->system_coherency;
3358 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3360 /* Callback used by the kbase bus logger client, to initiate a GPU reset
3361 * when the bus log is restarted. GPU reset is used as reference point
3362 * in HW bus log analyses.
3364 static void kbase_logging_started_cb(void *data)
3366 struct kbase_device *kbdev = (struct kbase_device *)data;
3368 if (kbase_prepare_to_reset_gpu(kbdev))
3369 kbase_reset_gpu(kbdev);
3370 dev_info(kbdev->dev, "KBASE - Bus logger restarted\n");
3374 static struct attribute *kbase_attrs[] = {
3375 #ifdef CONFIG_MALI_DEBUG
3376 &dev_attr_debug_command.attr,
3377 &dev_attr_js_softstop_always.attr,
3379 #if !MALI_CUSTOMER_RELEASE
3380 &dev_attr_force_replay.attr,
3382 &dev_attr_js_timeouts.attr,
3383 &dev_attr_soft_job_timeout.attr,
3384 &dev_attr_gpuinfo.attr,
3385 &dev_attr_dvfs_period.attr,
3386 &dev_attr_pm_poweroff.attr,
3387 &dev_attr_reset_timeout.attr,
3388 &dev_attr_js_scheduling_period.attr,
3389 &dev_attr_power_policy.attr,
3390 &dev_attr_core_availability_policy.attr,
3391 &dev_attr_core_mask.attr,
3392 &dev_attr_mem_pool_size.attr,
3393 &dev_attr_mem_pool_max_size.attr,
3397 static const struct attribute_group kbase_attr_group = {
3398 .attrs = kbase_attrs,
3401 static int kbase_platform_device_remove(struct platform_device *pdev)
3403 struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
3404 const struct list_head *dev_list;
3409 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3410 if (kbdev->inited_subsys & inited_buslogger) {
3411 bl_core_client_unregister(kbdev->buslogger);
3412 kbdev->inited_subsys &= ~inited_buslogger;
3416 if (kbdev->inited_subsys & inited_sysfs_group) {
3417 sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
3418 kbdev->inited_subsys &= ~inited_sysfs_group;
3421 if (kbdev->inited_subsys & inited_dev_list) {
3422 dev_list = kbase_dev_list_get();
3423 list_del(&kbdev->entry);
3424 kbase_dev_list_put(dev_list);
3425 kbdev->inited_subsys &= ~inited_dev_list;
3428 if (kbdev->inited_subsys & inited_misc_register) {
3429 misc_deregister(&kbdev->mdev);
3430 kbdev->inited_subsys &= ~inited_misc_register;
3433 if (kbdev->inited_subsys & inited_get_device) {
3434 put_device(kbdev->dev);
3435 kbdev->inited_subsys &= ~inited_get_device;
3438 if (kbdev->inited_subsys & inited_debugfs) {
3439 kbase_device_debugfs_term(kbdev);
3440 kbdev->inited_subsys &= ~inited_debugfs;
3443 if (kbdev->inited_subsys & inited_job_fault) {
3444 kbase_debug_job_fault_dev_term(kbdev);
3445 kbdev->inited_subsys &= ~inited_job_fault;
3448 #ifndef CONFIG_MALI_PRFCNT_SET_SECONDARY
3449 if (kbdev->inited_subsys & inited_ipa) {
3450 kbase_ipa_term(kbdev->ipa_ctx);
3451 kbdev->inited_subsys &= ~inited_ipa;
3453 #endif /* CONFIG_MALI_PRFCNT_SET_SECONDARY */
3455 if (kbdev->inited_subsys & inited_vinstr) {
3456 kbase_vinstr_term(kbdev->vinstr_ctx);
3457 kbdev->inited_subsys &= ~inited_vinstr;
3460 #ifdef CONFIG_MALI_DEVFREQ
3461 if (kbdev->inited_subsys & inited_devfreq) {
3462 kbase_devfreq_term(kbdev);
3463 kbdev->inited_subsys &= ~inited_devfreq;
3467 if (kbdev->inited_subsys & inited_backend_late) {
3468 kbase_backend_late_term(kbdev);
3469 kbdev->inited_subsys &= ~inited_backend_late;
3472 if (kbdev->inited_subsys & inited_tlstream) {
3473 kbase_tlstream_term();
3474 kbdev->inited_subsys &= ~inited_tlstream;
3477 /* Bring job and mem sys to a halt before we continue termination */
3479 if (kbdev->inited_subsys & inited_js)
3480 kbasep_js_devdata_halt(kbdev);
3482 if (kbdev->inited_subsys & inited_mem)
3483 kbase_mem_halt(kbdev);
3485 if (kbdev->inited_subsys & inited_js) {
3486 kbasep_js_devdata_term(kbdev);
3487 kbdev->inited_subsys &= ~inited_js;
3490 if (kbdev->inited_subsys & inited_mem) {
3491 kbase_mem_term(kbdev);
3492 kbdev->inited_subsys &= ~inited_mem;
3495 if (kbdev->inited_subsys & inited_pm_runtime_init) {
3496 kbdev->pm.callback_power_runtime_term(kbdev);
3497 kbdev->inited_subsys &= ~inited_pm_runtime_init;
3500 if (kbdev->inited_subsys & inited_device) {
3501 kbase_device_term(kbdev);
3502 kbdev->inited_subsys &= ~inited_device;
3505 if (kbdev->inited_subsys & inited_backend_early) {
3506 kbase_backend_early_term(kbdev);
3507 kbdev->inited_subsys &= ~inited_backend_early;
3510 if (kbdev->inited_subsys & inited_io_history) {
3511 kbase_io_history_term(&kbdev->io_history);
3512 kbdev->inited_subsys &= ~inited_io_history;
3515 if (kbdev->inited_subsys & inited_power_control) {
3516 power_control_term(kbdev);
3517 kbdev->inited_subsys &= ~inited_power_control;
3520 if (kbdev->inited_subsys & inited_registers_map) {
3521 registers_unmap(kbdev);
3522 kbdev->inited_subsys &= ~inited_registers_map;
3525 #ifdef CONFIG_MALI_NO_MALI
3526 if (kbdev->inited_subsys & inited_gpu_device) {
3527 gpu_device_destroy(kbdev);
3528 kbdev->inited_subsys &= ~inited_gpu_device;
3530 #endif /* CONFIG_MALI_NO_MALI */
3532 if (kbdev->inited_subsys != 0)
3533 dev_err(kbdev->dev, "Missing sub system termination\n");
3535 kbase_device_free(kbdev);
3540 extern void kbase_platform_rk_shutdown(struct kbase_device *kbdev);
3541 static void kbase_platform_device_shutdown(struct platform_device *pdev)
3543 struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
3545 kbase_platform_rk_shutdown(kbdev);
3548 /* Number of register accesses for the buffer that we allocate during
3549 * initialization time. The buffer size can be changed later via debugfs. */
3550 #define KBASEP_DEFAULT_REGISTER_HISTORY_SIZE ((u16)512)
3552 static int kbase_platform_device_probe(struct platform_device *pdev)
3554 struct kbase_device *kbdev;
3555 struct mali_base_gpu_core_props *core_props;
3557 const struct list_head *dev_list;
3561 err = kbase_platform_early_init();
3563 dev_err(&pdev->dev, "Early platform initialization failed\n");
3564 kbase_platform_device_remove(pdev);
3569 kbdev = kbase_device_alloc();
3571 dev_err(&pdev->dev, "Allocate device failed\n");
3572 kbase_platform_device_remove(pdev);
3576 kbdev->dev = &pdev->dev;
3577 dev_set_drvdata(kbdev->dev, kbdev);
3579 #ifdef CONFIG_MALI_NO_MALI
3580 err = gpu_device_create(kbdev);
3582 dev_err(&pdev->dev, "Dummy model initialization failed\n");
3583 kbase_platform_device_remove(pdev);
3586 kbdev->inited_subsys |= inited_gpu_device;
3587 #endif /* CONFIG_MALI_NO_MALI */
3589 err = assign_irqs(pdev);
3591 dev_err(&pdev->dev, "IRQ search failed\n");
3592 kbase_platform_device_remove(pdev);
3596 err = registers_map(kbdev);
3598 dev_err(&pdev->dev, "Register map failed\n");
3599 kbase_platform_device_remove(pdev);
3602 kbdev->inited_subsys |= inited_registers_map;
3604 err = power_control_init(pdev);
3606 dev_err(&pdev->dev, "Power control initialization failed\n");
3607 kbase_platform_device_remove(pdev);
3610 kbdev->inited_subsys |= inited_power_control;
3612 err = kbase_io_history_init(&kbdev->io_history,
3613 KBASEP_DEFAULT_REGISTER_HISTORY_SIZE);
3615 dev_err(&pdev->dev, "Register access history initialization failed\n");
3616 kbase_platform_device_remove(pdev);
3619 kbdev->inited_subsys |= inited_io_history;
3621 err = kbase_backend_early_init(kbdev);
3623 dev_err(kbdev->dev, "Early backend initialization failed\n");
3624 kbase_platform_device_remove(pdev);
3627 kbdev->inited_subsys |= inited_backend_early;
3629 scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name,
3632 kbase_disjoint_init(kbdev);
3634 /* obtain min/max configured gpu frequencies */
3635 core_props = &(kbdev->gpu_props.props.core_props);
3636 core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN;
3637 core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
3639 kbdev->gpu_props.irq_throttle_time_us = DEFAULT_IRQ_THROTTLE_TIME_US;
3641 err = kbase_device_init(kbdev);
3643 dev_err(kbdev->dev, "Device initialization failed (%d)\n", err);
3644 kbase_platform_device_remove(pdev);
3647 kbdev->inited_subsys |= inited_device;
3649 if (kbdev->pm.callback_power_runtime_init) {
3650 err = kbdev->pm.callback_power_runtime_init(kbdev);
3653 "Runtime PM initialization failed\n");
3654 kbase_platform_device_remove(pdev);
3657 kbdev->inited_subsys |= inited_pm_runtime_init;
3660 err = kbase_mem_init(kbdev);
3662 dev_err(kbdev->dev, "Memory subsystem initialization failed\n");
3663 kbase_platform_device_remove(pdev);
3666 kbdev->inited_subsys |= inited_mem;
3668 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
3669 gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
3670 gpu_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
3672 kbase_device_coherency_init(kbdev, gpu_id);
3674 kbasep_protected_mode_init(kbdev);
3676 err = kbasep_js_devdata_init(kbdev);
3678 dev_err(kbdev->dev, "Job JS devdata initialization failed\n");
3679 kbase_platform_device_remove(pdev);
3682 kbdev->inited_subsys |= inited_js;
3684 err = kbase_tlstream_init();
3686 dev_err(kbdev->dev, "Timeline stream initialization failed\n");
3687 kbase_platform_device_remove(pdev);
3690 kbdev->inited_subsys |= inited_tlstream;
3692 err = kbase_backend_late_init(kbdev);
3694 dev_err(kbdev->dev, "Late backend initialization failed\n");
3695 kbase_platform_device_remove(pdev);
3698 kbdev->inited_subsys |= inited_backend_late;
3700 #ifdef CONFIG_MALI_DEVFREQ
3701 err = kbase_devfreq_init(kbdev);
3703 dev_err(kbdev->dev, "Fevfreq initialization failed\n");
3704 kbase_platform_device_remove(pdev);
3707 kbdev->inited_subsys |= inited_devfreq;
3708 #endif /* CONFIG_MALI_DEVFREQ */
3710 kbdev->vinstr_ctx = kbase_vinstr_init(kbdev);
3711 if (!kbdev->vinstr_ctx) {
3713 "Virtual instrumentation initialization failed\n");
3714 kbase_platform_device_remove(pdev);
3717 kbdev->inited_subsys |= inited_vinstr;
3719 #ifndef CONFIG_MALI_PRFCNT_SET_SECONDARY
3720 kbdev->ipa_ctx = kbase_ipa_init(kbdev);
3721 if (!kbdev->ipa_ctx) {
3722 dev_err(kbdev->dev, "IPA initialization failed\n");
3723 kbase_platform_device_remove(pdev);
3727 kbdev->inited_subsys |= inited_ipa;
3728 #endif /* CONFIG_MALI_PRFCNT_SET_SECONDARY */
3730 err = kbase_debug_job_fault_dev_init(kbdev);
3732 dev_err(kbdev->dev, "Job fault debug initialization failed\n");
3733 kbase_platform_device_remove(pdev);
3736 kbdev->inited_subsys |= inited_job_fault;
3738 err = kbase_device_debugfs_init(kbdev);
3740 dev_err(kbdev->dev, "DebugFS initialization failed");
3741 kbase_platform_device_remove(pdev);
3744 kbdev->inited_subsys |= inited_debugfs;
3746 /* initialize the kctx list */
3747 mutex_init(&kbdev->kctx_list_lock);
3748 INIT_LIST_HEAD(&kbdev->kctx_list);
3750 kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
3751 kbdev->mdev.name = kbdev->devname;
3752 kbdev->mdev.fops = &kbase_fops;
3753 kbdev->mdev.parent = get_device(kbdev->dev);
3754 kbdev->inited_subsys |= inited_get_device;
3756 err = misc_register(&kbdev->mdev);
3758 dev_err(kbdev->dev, "Misc device registration failed for %s\n",
3760 kbase_platform_device_remove(pdev);
3763 kbdev->inited_subsys |= inited_misc_register;
3765 dev_list = kbase_dev_list_get();
3766 list_add(&kbdev->entry, &kbase_dev_list);
3767 kbase_dev_list_put(dev_list);
3768 kbdev->inited_subsys |= inited_dev_list;
3770 err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
3772 dev_err(&pdev->dev, "SysFS group creation failed\n");
3773 kbase_platform_device_remove(pdev);
3776 kbdev->inited_subsys |= inited_sysfs_group;
3778 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3779 err = bl_core_client_register(kbdev->devname,
3780 kbase_logging_started_cb,
3781 kbdev, &kbdev->buslogger,
3784 kbdev->inited_subsys |= inited_buslogger;
3785 bl_core_set_threshold(kbdev->buslogger, 1024*1024*1024);
3787 dev_warn(kbdev->dev, "Bus log client registration failed\n");
3792 dev_info(kbdev->dev,
3793 "Probed as %s\n", dev_name(kbdev->mdev.this_device));
3800 #undef KBASEP_DEFAULT_REGISTER_HISTORY_SIZE
3803 /** Suspend callback from the OS.
3805 * This is called by Linux when the device should suspend.
3807 * @param dev The device to suspend
3809 * @return A standard Linux error code
3811 static int kbase_device_suspend(struct device *dev)
3813 struct kbase_device *kbdev = to_kbase_device(dev);
3818 #if defined(CONFIG_PM_DEVFREQ) && \
3819 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3820 devfreq_suspend_device(kbdev->devfreq);
3823 kbase_pm_suspend(kbdev);
3827 /** Resume callback from the OS.
3829 * This is called by Linux when the device should resume from suspension.
3831 * @param dev The device to resume
3833 * @return A standard Linux error code
3835 static int kbase_device_resume(struct device *dev)
3837 struct kbase_device *kbdev = to_kbase_device(dev);
3842 kbase_pm_resume(kbdev);
3844 #if defined(CONFIG_PM_DEVFREQ) && \
3845 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3846 devfreq_resume_device(kbdev->devfreq);
3851 /** Runtime suspend callback from the OS.
3853 * This is called by Linux when the device should prepare for a condition in which it will
3854 * not be able to communicate with the CPU(s) and RAM due to power management.
3856 * @param dev The device to suspend
3858 * @return A standard Linux error code
3860 #ifdef KBASE_PM_RUNTIME
3861 static int kbase_device_runtime_suspend(struct device *dev)
3863 struct kbase_device *kbdev = to_kbase_device(dev);
3868 #if defined(CONFIG_PM_DEVFREQ) && \
3869 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3870 devfreq_suspend_device(kbdev->devfreq);
3873 if (kbdev->pm.backend.callback_power_runtime_off) {
3874 kbdev->pm.backend.callback_power_runtime_off(kbdev);
3875 dev_dbg(dev, "runtime suspend\n");
3879 #endif /* KBASE_PM_RUNTIME */
3881 /** Runtime resume callback from the OS.
3883 * This is called by Linux when the device should go into a fully active state.
3885 * @param dev The device to suspend
3887 * @return A standard Linux error code
3890 #ifdef KBASE_PM_RUNTIME
3891 static int kbase_device_runtime_resume(struct device *dev)
3894 struct kbase_device *kbdev = to_kbase_device(dev);
3899 if (kbdev->pm.backend.callback_power_runtime_on) {
3900 ret = kbdev->pm.backend.callback_power_runtime_on(kbdev);
3901 dev_dbg(dev, "runtime resume\n");
3904 #if defined(CONFIG_PM_DEVFREQ) && \
3905 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3906 devfreq_resume_device(kbdev->devfreq);
3911 #endif /* KBASE_PM_RUNTIME */
3914 #ifdef KBASE_PM_RUNTIME
3916 * kbase_device_runtime_idle - Runtime idle callback from the OS.
3917 * @dev: The device to suspend
3919 * This is called by Linux when the device appears to be inactive and it might
3920 * be placed into a low power state.
3922 * Return: 0 if device can be suspended, non-zero to avoid runtime autosuspend,
3923 * otherwise a standard Linux error code
3925 static int kbase_device_runtime_idle(struct device *dev)
3927 struct kbase_device *kbdev = to_kbase_device(dev);
3932 /* Use platform specific implementation if it exists. */
3933 if (kbdev->pm.backend.callback_power_runtime_idle)
3934 return kbdev->pm.backend.callback_power_runtime_idle(kbdev);
3938 #endif /* KBASE_PM_RUNTIME */
3940 /** The power management operations for the platform driver.
3942 static const struct dev_pm_ops kbase_pm_ops = {
3943 .suspend = kbase_device_suspend,
3944 .resume = kbase_device_resume,
3945 #ifdef KBASE_PM_RUNTIME
3946 .runtime_suspend = kbase_device_runtime_suspend,
3947 .runtime_resume = kbase_device_runtime_resume,
3948 .runtime_idle = kbase_device_runtime_idle,
3949 #endif /* KBASE_PM_RUNTIME */
3953 static const struct of_device_id kbase_dt_ids[] = {
3954 { .compatible = "arm,malit7xx" },
3955 { .compatible = "arm,mali-midgard" },
3958 MODULE_DEVICE_TABLE(of, kbase_dt_ids);
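/*
 * Illustrative device-tree node matched by the table above (editorial
 * example; register address, interrupts and clock references are
 * placeholders and must follow the platform's Midgard GPU binding):
 *
 *	gpu: gpu@ffa30000 {
 *		compatible = "arm,mali-midgard";
 *		reg = <0xffa30000 0x10000>;
 *		interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
 *			     <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
 *			     <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
 *		interrupt-names = "job", "mmu", "gpu";
 *		clocks = <&cru ACLK_GPU>;
 *		clock-names = "clk_mali";
 *	};
 */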
3961 static struct platform_driver kbase_platform_driver = {
3962 .probe = kbase_platform_device_probe,
3963 .remove = kbase_platform_device_remove,
3964 .shutdown = kbase_platform_device_shutdown,
3966 .name = kbase_drv_name,
3967 .owner = THIS_MODULE,
3968 .pm = &kbase_pm_ops,
3969 .of_match_table = of_match_ptr(kbase_dt_ids),
3974 * The driver will not provide a shortcut to create the Mali platform device
3975 * anymore when using Device Tree.
3978 module_platform_driver(kbase_platform_driver);
3981 static int __init rockchip_gpu_init_driver(void)
3983 return platform_driver_register(&kbase_platform_driver);
3985 late_initcall(rockchip_gpu_init_driver);
3987 static int __init kbase_driver_init(void)
3991 ret = kbase_platform_early_init();
3995 #ifdef CONFIG_MALI_PLATFORM_FAKE
3996 ret = kbase_platform_fake_register();
4000 ret = platform_driver_register(&kbase_platform_driver);
4001 #ifdef CONFIG_MALI_PLATFORM_FAKE
4003 kbase_platform_fake_unregister();
4008 static void __exit kbase_driver_exit(void)
4010 platform_driver_unregister(&kbase_platform_driver);
4011 #ifdef CONFIG_MALI_PLATFORM_FAKE
4012 kbase_platform_fake_unregister();
4016 module_init(kbase_driver_init);
4017 module_exit(kbase_driver_exit);
4019 #endif /* CONFIG_OF */
4021 MODULE_LICENSE("GPL");
4022 MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \
4023 __stringify(BASE_UK_VERSION_MAJOR) "." \
4024 __stringify(BASE_UK_VERSION_MINOR) ")");
4026 #if defined(CONFIG_MALI_GATOR_SUPPORT) || defined(CONFIG_MALI_SYSTEM_TRACE)
4027 #define CREATE_TRACE_POINTS
4030 #ifdef CONFIG_MALI_GATOR_SUPPORT
4031 /* Create the trace points (otherwise we just get code to call a tracepoint) */
4032 #include "mali_linux_trace.h"
4034 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_job_slots_event);
4035 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_status);
4036 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_on);
4037 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_off);
4038 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_page_fault_insert_pages);
4039 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_in_use);
4040 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_released);
4041 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_total_alloc_pages_change);
4043 void kbase_trace_mali_pm_status(u32 event, u64 value)
4045 trace_mali_pm_status(event, value);
4048 void kbase_trace_mali_pm_power_off(u32 event, u64 value)
4050 trace_mali_pm_power_off(event, value);
4053 void kbase_trace_mali_pm_power_on(u32 event, u64 value)
4055 trace_mali_pm_power_on(event, value);
4058 void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id)
4060 trace_mali_job_slots_event(event, (kctx != NULL ? kctx->tgid : 0), (kctx != NULL ? kctx->pid : 0), atom_id);
4063 void kbase_trace_mali_page_fault_insert_pages(int event, u32 value)
4065 trace_mali_page_fault_insert_pages(event, value);
4068 void kbase_trace_mali_mmu_as_in_use(int event)
4070 trace_mali_mmu_as_in_use(event);
4073 void kbase_trace_mali_mmu_as_released(int event)
4075 trace_mali_mmu_as_released(event);
4078 void kbase_trace_mali_total_alloc_pages_change(long long int event)
4080 trace_mali_total_alloc_pages_change(event);
4082 #endif /* CONFIG_MALI_GATOR_SUPPORT */
4083 #ifdef CONFIG_MALI_SYSTEM_TRACE
4084 #include "mali_linux_kbase_trace.h"