4 * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
6 * This program is free software and is provided to you under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation, and any use by you of this program is subject to the terms
11 * A copy of the licence is included with the program, and can also be obtained
12 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
13 * Boston, MA 02110-1301, USA.
19 #include <mali_kbase.h>
20 #include <mali_kbase_hwaccess_gpuprops.h>
21 #include <mali_kbase_config_defaults.h>
22 #include <mali_kbase_uku.h>
23 #include <mali_midg_regmap.h>
24 #include <mali_kbase_instr.h>
25 #include <mali_kbase_gator.h>
26 #include <backend/gpu/mali_kbase_js_affinity.h>
27 #include <mali_kbase_mem_linux.h>
28 #ifdef CONFIG_MALI_DEVFREQ
29 #include <backend/gpu/mali_kbase_devfreq.h>
30 #endif /* CONFIG_MALI_DEVFREQ */
31 #ifdef CONFIG_MALI_NO_MALI
32 #include "mali_kbase_model_linux.h"
33 #endif /* CONFIG_MALI_NO_MALI */
34 #include "mali_kbase_mem_profile_debugfs_buf_size.h"
35 #include "mali_kbase_debug_mem_view.h"
36 #include "mali_kbase_mem.h"
37 #include "mali_kbase_mem_pool_debugfs.h"
38 #if !MALI_CUSTOMER_RELEASE
39 #include "mali_kbase_regs_dump_debugfs.h"
40 #endif /* !MALI_CUSTOMER_RELEASE */
41 #include <mali_kbase_hwaccess_backend.h>
42 #include <mali_kbase_hwaccess_jm.h>
43 #include <backend/gpu/mali_kbase_device_internal.h>
46 #include <linux/kds.h>
47 #include <linux/anon_inodes.h>
48 #include <linux/syscalls.h>
49 #endif /* CONFIG_KDS */
51 #include <linux/module.h>
52 #include <linux/init.h>
53 #include <linux/poll.h>
54 #include <linux/kernel.h>
55 #include <linux/errno.h>
57 #include <linux/platform_device.h>
58 #include <linux/miscdevice.h>
59 #include <linux/list.h>
60 #include <linux/semaphore.h>
62 #include <linux/uaccess.h>
63 #include <linux/interrupt.h>
66 #include <linux/compat.h> /* is_compat_task */
67 #include <linux/version.h>
68 #ifdef CONFIG_MALI_PLATFORM_DEVICETREE
69 #include <linux/pm_runtime.h>
70 #endif /* CONFIG_MALI_PLATFORM_DEVICETREE */
71 #include <mali_kbase_hw.h>
72 #include <platform/mali_kbase_platform_common.h>
73 #ifdef CONFIG_MALI_PLATFORM_FAKE
74 #include <platform/mali_kbase_platform_fake.h>
75 #endif /* CONFIG_MALI_PLATFORM_FAKE */
77 #include <mali_kbase_sync.h>
78 #endif /* CONFIG_SYNC */
79 #ifdef CONFIG_PM_DEVFREQ
80 #include <linux/devfreq.h>
81 #endif /* CONFIG_PM_DEVFREQ */
82 #include <linux/clk.h>
83 #include <linux/delay.h>
85 #include <mali_kbase_config.h>
87 #ifdef CONFIG_MACH_MANTA
88 #include <plat/devs.h>
91 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
92 #include <linux/pm_opp.h>
94 #include <linux/opp.h>
97 #if defined(CONFIG_MALI_MIPE_ENABLED)
98 #include <mali_kbase_tlstream.h>
102 #define JOB_IRQ_TAG 0
103 #define MMU_IRQ_TAG 1
104 #define GPU_IRQ_TAG 2
107 static struct kbase_exported_test_data shared_kernel_test_data;
108 EXPORT_SYMBOL(shared_kernel_test_data);
109 #endif /* MALI_UNIT_TEST */
111 #define KBASE_DRV_NAME "mali"
112 /** rk_ext: version of the rk_ext interface in this mali_ko (a.k.a. rk_ko_ver). */
113 #define ROCKCHIP_VERSION (13)
115 static const char kbase_drv_name[] = KBASE_DRV_NAME;
117 static int kbase_dev_nr;
119 static DEFINE_MUTEX(kbase_dev_list_lock);
120 static LIST_HEAD(kbase_dev_list);
122 #define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
123 static inline void __compile_time_asserts(void)
125 CSTD_COMPILE_TIME_ASSERT(sizeof(KERNEL_SIDE_DDK_VERSION_STRING) <= KBASE_GET_VERSION_BUFFER_SIZE);
130 struct kbasep_kds_resource_set_file_data {
131 struct kds_resource_set *lock;
134 static int kds_resource_release(struct inode *inode, struct file *file);
136 static const struct file_operations kds_resource_fops = {
137 .release = kds_resource_release
140 struct kbase_kds_resource_list_data {
141 struct kds_resource **kds_resources;
142 unsigned long *kds_access_bitmap;
146 static int kds_resource_release(struct inode *inode, struct file *file)
148 struct kbasep_kds_resource_set_file_data *data;
150 data = (struct kbasep_kds_resource_set_file_data *)file->private_data;
152 if (NULL != data->lock)
153 kds_resource_set_release(&data->lock);
160 static int kbasep_kds_allocate_resource_list_data(struct kbase_context *kctx, struct base_external_resource *ext_res, int num_elems, struct kbase_kds_resource_list_data *resources_list)
162 struct base_external_resource *res = ext_res;
165 /* assume we have to wait for all */
167 KBASE_DEBUG_ASSERT(0 != num_elems);
168 resources_list->kds_resources = kmalloc_array(num_elems,
169 sizeof(struct kds_resource *), GFP_KERNEL);
171 if (NULL == resources_list->kds_resources)
174 KBASE_DEBUG_ASSERT(0 != num_elems);
175 resources_list->kds_access_bitmap = kzalloc(
176 sizeof(unsigned long) *
177 ((num_elems + BITS_PER_LONG - 1) / BITS_PER_LONG),
180 if (NULL == resources_list->kds_access_bitmap) {
181 kfree(resources_list->kds_resources);
185 kbase_gpu_vm_lock(kctx);
186 for (res_id = 0; res_id < num_elems; res_id++, res++) {
188 struct kbase_va_region *reg;
189 struct kds_resource *kds_res = NULL;
191 exclusive = res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE;
192 reg = kbase_region_tracker_find_region_enclosing_address(kctx, res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
194 /* did we find a matching region object? */
195 if (NULL == reg || (reg->flags & KBASE_REG_FREE))
198 /* no need to check reg->alloc as only regions with an alloc have
199 * a size, and kbase_region_tracker_find_region_enclosing_address
200 * only returns regions with size > 0 */
201 switch (reg->gpu_alloc->type) {
202 #if defined(CONFIG_UMP) && defined(CONFIG_KDS)
203 case KBASE_MEM_TYPE_IMPORTED_UMP:
204 kds_res = ump_dd_kds_resource_get(reg->gpu_alloc->imported.ump_handle);
206 #endif /* defined(CONFIG_UMP) && defined(CONFIG_KDS) */
211 /* no kds resource for the region? */
215 resources_list->kds_resources[res_id] = kds_res;
218 set_bit(res_id, resources_list->kds_access_bitmap);
220 kbase_gpu_vm_unlock(kctx);
222 /* did the loop run to completion? */
223 if (res_id == num_elems)
226 /* Clean up as the resource list is not valid. */
227 kfree(resources_list->kds_resources);
228 kfree(resources_list->kds_access_bitmap);
233 static bool kbasep_validate_kbase_pointer(
234 struct kbase_context *kctx, union kbase_pointer *p)
236 if (kctx->is_compat) {
237 if (p->compat_value == 0)
240 if (NULL == p->value)
246 static int kbase_external_buffer_lock(struct kbase_context *kctx,
247 struct kbase_uk_ext_buff_kds_data *args, u32 args_size)
249 struct base_external_resource *ext_res_copy;
250 size_t ext_resource_size;
253 struct base_external_resource __user *ext_res_user;
254 int __user *file_desc_usr;
255 struct kbasep_kds_resource_set_file_data *fdata;
256 struct kbase_kds_resource_list_data resource_list_data;
258 if (args_size != sizeof(struct kbase_uk_ext_buff_kds_data))
261 /* Check user space has provided valid data */
262 if (!kbasep_validate_kbase_pointer(kctx, &args->external_resource) ||
263 !kbasep_validate_kbase_pointer(kctx, &args->file_descriptor) ||
264 (0 == args->num_res) ||
265 (args->num_res > KBASE_MAXIMUM_EXT_RESOURCES))
268 ext_resource_size = sizeof(struct base_external_resource) * args->num_res;
270 KBASE_DEBUG_ASSERT(0 != ext_resource_size);
271 ext_res_copy = kmalloc(ext_resource_size, GFP_KERNEL);
276 if (kctx->is_compat) {
277 ext_res_user = compat_ptr(args->external_resource.compat_value);
278 file_desc_usr = compat_ptr(args->file_descriptor.compat_value);
280 #endif /* CONFIG_COMPAT */
281 ext_res_user = args->external_resource.value;
282 file_desc_usr = args->file_descriptor.value;
285 #endif /* CONFIG_COMPAT */
287 /* Copy the external resources to lock from user space */
288 if (copy_from_user(ext_res_copy, ext_res_user, ext_resource_size))
291 /* Allocate data to be stored in the file */
292 fdata = kmalloc(sizeof(*fdata), GFP_KERNEL);
299 /* Parse given elements and create resource and access lists */
300 ret = kbasep_kds_allocate_resource_list_data(kctx,
301 ext_res_copy, args->num_res, &resource_list_data);
307 fd = anon_inode_getfd("kds_ext", &kds_resource_fops, fdata, 0);
309 err = copy_to_user(file_desc_usr, &fd, sizeof(fd));
311 /* If the file descriptor was valid and we successfully copied
312 * it to user space, then we can try and lock the requested
315 if ((fd >= 0) && (0 == err)) {
316 struct kds_resource_set *lock;
318 lock = kds_waitall(args->num_res,
319 resource_list_data.kds_access_bitmap,
320 resource_list_data.kds_resources,
325 } else if (IS_ERR(lock)) {
335 kfree(resource_list_data.kds_resources);
336 kfree(resource_list_data.kds_access_bitmap);
340 /* If the file was opened successfully then close it which will
341 * clean up the file data, otherwise we clean up the file data
354 #endif /* CONFIG_KDS */
356 #ifdef CONFIG_MALI_MIPE_ENABLED
357 static void kbase_create_timeline_objects(struct kbase_context *kctx)
359 struct kbase_device *kbdev = kctx->kbdev;
362 struct kbasep_kctx_list_element *element;
364 /* Create LPU objects. */
365 for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
367 &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
368 kbase_tlstream_tl_summary_new_lpu(lpu, lpu_id, *lpu);
371 /* Create Address Space objects. */
372 for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
373 kbase_tlstream_tl_summary_new_as(&kbdev->as[as_nr], as_nr);
375 /* Create GPU object and make it retain all LPUs and address spaces. */
376 kbase_tlstream_tl_summary_new_gpu(
378 kbdev->gpu_props.props.raw_props.gpu_id,
379 kbdev->gpu_props.num_cores);
381 for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
383 &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
384 kbase_tlstream_tl_summary_lifelink_lpu_gpu(lpu, kbdev);
386 for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
387 kbase_tlstream_tl_summary_lifelink_as_gpu(
391 /* Create object for each known context. */
392 mutex_lock(&kbdev->kctx_list_lock);
393 list_for_each_entry(element, &kbdev->kctx_list, link) {
394 kbase_tlstream_tl_summary_new_ctx(
396 (u32)(element->kctx->id));
398 /* Before releasing the lock, reset body stream buffers.
399 * This will prevent context creation messages from being directed to
400 * both the summary and body streams. */
401 kbase_tlstream_reset_body_streams();
402 mutex_unlock(&kbdev->kctx_list_lock);
403 /* Static objects are placed into the summary packet, which needs to be
404 * transmitted first. Flush all streams to make it available to
406 kbase_tlstream_flush_streams();
410 static void kbase_api_handshake(struct uku_version_check_args *version)
412 switch (version->major) {
413 #ifdef BASE_LEGACY_UK6_SUPPORT
415 /* We are backwards compatible with version 6,
416 * so pretend to be the old version */
420 #endif /* BASE_LEGACY_UK6_SUPPORT */
421 #ifdef BASE_LEGACY_UK7_SUPPORT
423 /* We are backwards compatible with version 7,
424 * so pretend to be the old version */
428 #endif /* BASE_LEGACY_UK7_SUPPORT */
429 #ifdef BASE_LEGACY_UK8_SUPPORT
431 /* We are backwards compatible with version 8,
432 * so pretend to be the old version */
436 #endif /* BASE_LEGACY_UK8_SUPPORT */
437 #ifdef BASE_LEGACY_UK9_SUPPORT
439 /* We are backwards compatible with version 9,
440 * so pretend to be the old version */
444 #endif /* BASE_LEGACY_UK9_SUPPORT */
445 case BASE_UK_VERSION_MAJOR:
446 /* set minor to be the lowest common */
447 version->minor = min_t(int, BASE_UK_VERSION_MINOR,
448 (int)version->minor);
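/* Worked example (hypothetical version numbers): if the kernel implements
 * UK 10.4 and userspace proposes 10.2, the reply is 10.2; if userspace
 * proposes 10.6, the minor is capped and the reply is 10.4. For a
 * non-matching major, the default case below returns the kernel's own
 * version unchanged. */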
451 /* We return our actual version regardless of whether it
452 * matches the version reported by userspace -
453 * userspace can bail if it can't handle this
455 version->major = BASE_UK_VERSION_MAJOR;
456 version->minor = BASE_UK_VERSION_MINOR;
462 * enum mali_error - Mali error codes shared with userspace
464 * This is a subset of the common Mali errors that can be returned to userspace.
465 * Values of matching user and kernel space enumerators MUST be the same.
466 * MALI_ERROR_NONE is guaranteed to be 0.
470 MALI_ERROR_OUT_OF_GPU_MEMORY,
471 MALI_ERROR_OUT_OF_MEMORY,
472 MALI_ERROR_FUNCTION_FAILED,
475 #ifdef CONFIG_MALI_DEBUG
476 #define INACTIVE_WAIT_MS (5000)
478 void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive)
480 kbdev->driver_inactive = inactive;
481 wake_up(&kbdev->driver_inactive_wait);
483 /* Wait for any running IOCTLs to complete */
485 msleep(INACTIVE_WAIT_MS);
487 KBASE_EXPORT_TEST_API(kbase_set_driver_inactive);
488 #endif /* CONFIG_MALI_DEBUG */
490 static int kbase_dispatch(struct kbase_context *kctx, void * const args, u32 args_size)
492 struct kbase_device *kbdev;
493 union uk_header *ukh = args;
497 KBASE_DEBUG_ASSERT(ukh != NULL);
501 ukh->ret = MALI_ERROR_NONE; /* Be optimistic */
503 #ifdef CONFIG_MALI_DEBUG
504 wait_event(kbdev->driver_inactive_wait,
505 kbdev->driver_inactive == false);
506 #endif /* CONFIG_MALI_DEBUG */
508 if (UKP_FUNC_ID_CHECK_VERSION == id) {
509 struct uku_version_check_args *version_check;
511 if (args_size != sizeof(struct uku_version_check_args)) {
512 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
515 version_check = (struct uku_version_check_args *)args;
516 kbase_api_handshake(version_check);
517 /* save the proposed version number for later use */
518 kctx->api_version = KBASE_API_VERSION(version_check->major,
519 version_check->minor);
520 ukh->ret = MALI_ERROR_NONE;
524 /* block calls until version handshake */
525 if (kctx->api_version == 0)
528 if (!atomic_read(&kctx->setup_complete)) {
529 struct kbase_uk_set_flags *kbase_set_flags;
531 /* setup pending: try to signal that we'll do the setup;
532 * if setup was already in progress, fail this call
534 if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0)
537 /* if this is an unexpected call, the context will stay stuck in setup
538 * mode (SET_FLAGS is the only call accepted here)
540 if (id != KBASE_FUNC_SET_FLAGS)
543 kbase_set_flags = (struct kbase_uk_set_flags *)args;
545 /* if the args size doesn't match the expected call, stay in setup mode */
546 if (sizeof(*kbase_set_flags) != args_size)
549 /* if bad flags, will stay stuck in setup mode */
550 if (kbase_context_set_create_flags(kctx,
551 kbase_set_flags->create_flags) != 0)
552 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
554 atomic_set(&kctx->setup_complete, 1);
558 /* setup complete, perform normal operation */
560 case KBASE_FUNC_MEM_ALLOC:
562 struct kbase_uk_mem_alloc *mem = args;
563 struct kbase_va_region *reg;
565 if (sizeof(*mem) != args_size)
568 reg = kbase_mem_alloc(kctx, mem->va_pages,
569 mem->commit_pages, mem->extent,
570 &mem->flags, &mem->gpu_va,
573 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
576 case KBASE_FUNC_MEM_IMPORT: {
577 struct kbase_uk_mem_import *mem_import = args;
578 void __user *phandle;
580 if (sizeof(*mem_import) != args_size)
584 phandle = compat_ptr(mem_import->phandle.compat_value);
587 phandle = mem_import->phandle.value;
589 if (mem_import->type == BASE_MEM_IMPORT_TYPE_INVALID) {
590 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
594 if (kbase_mem_import(kctx, mem_import->type, phandle,
596 &mem_import->va_pages,
597 &mem_import->flags)) {
598 mem_import->type = BASE_MEM_IMPORT_TYPE_INVALID;
599 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
603 case KBASE_FUNC_MEM_ALIAS: {
604 struct kbase_uk_mem_alias *alias = args;
605 struct base_mem_aliasing_info __user *user_ai;
606 struct base_mem_aliasing_info *ai;
608 if (sizeof(*alias) != args_size)
611 if (alias->nents > 2048) {
612 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
616 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
622 user_ai = compat_ptr(alias->ai.compat_value);
625 user_ai = alias->ai.value;
627 ai = vmalloc(sizeof(*ai) * alias->nents);
630 ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
634 if (copy_from_user(ai, user_ai,
635 sizeof(*ai) * alias->nents)) {
636 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
640 alias->gpu_va = kbase_mem_alias(kctx, &alias->flags,
644 if (!alias->gpu_va) {
645 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
653 case KBASE_FUNC_MEM_COMMIT:
655 struct kbase_uk_mem_commit *commit = args;
657 if (sizeof(*commit) != args_size)
660 if (commit->gpu_addr & ~PAGE_MASK) {
661 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_COMMIT: commit->gpu_addr: passed parameter is invalid");
662 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
666 if (kbase_mem_commit(kctx, commit->gpu_addr,
668 (base_backing_threshold_status *)
669 &commit->result_subcode) != 0)
670 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
675 case KBASE_FUNC_MEM_QUERY:
677 struct kbase_uk_mem_query *query = args;
679 if (sizeof(*query) != args_size)
682 if (query->gpu_addr & ~PAGE_MASK) {
683 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->gpu_addr: passed parameter is invalid");
684 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
687 if (query->query != KBASE_MEM_QUERY_COMMIT_SIZE &&
688 query->query != KBASE_MEM_QUERY_VA_SIZE &&
689 query->query != KBASE_MEM_QUERY_FLAGS) {
690 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->query = %lld unknown", (unsigned long long)query->query);
691 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
695 if (kbase_mem_query(kctx, query->gpu_addr,
696 query->query, &query->value) != 0)
697 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
699 ukh->ret = MALI_ERROR_NONE;
704 case KBASE_FUNC_MEM_FLAGS_CHANGE:
706 struct kbase_uk_mem_flags_change *fc = args;
708 if (sizeof(*fc) != args_size)
711 if ((fc->gpu_va & ~PAGE_MASK) && (fc->gpu_va >= PAGE_SIZE)) {
712 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FLAGS_CHANGE: mem->gpu_va: passed parameter is invalid");
713 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
717 if (kbase_mem_flags_change(kctx, fc->gpu_va,
718 fc->flags, fc->mask) != 0)
719 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
723 case KBASE_FUNC_MEM_FREE:
725 struct kbase_uk_mem_free *mem = args;
727 if (sizeof(*mem) != args_size)
730 if ((mem->gpu_addr & ~PAGE_MASK) && (mem->gpu_addr >= PAGE_SIZE)) {
731 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FREE: mem->gpu_addr: passed parameter is invalid");
732 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
736 if (kbase_mem_free(kctx, mem->gpu_addr) != 0)
737 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
741 case KBASE_FUNC_JOB_SUBMIT:
743 struct kbase_uk_job_submit *job = args;
745 if (sizeof(*job) != args_size)
748 #ifdef BASE_LEGACY_UK6_SUPPORT
749 if (kbase_jd_submit(kctx, job, 0) != 0)
751 if (kbase_jd_submit(kctx, job) != 0)
752 #endif /* BASE_LEGACY_UK6_SUPPORT */
753 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
757 #ifdef BASE_LEGACY_UK6_SUPPORT
758 case KBASE_FUNC_JOB_SUBMIT_UK6:
760 struct kbase_uk_job_submit *job = args;
762 if (sizeof(*job) != args_size)
765 if (kbase_jd_submit(kctx, job, 1) != 0)
766 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
771 case KBASE_FUNC_SYNC:
773 struct kbase_uk_sync_now *sn = args;
775 if (sizeof(*sn) != args_size)
778 if (sn->sset.basep_sset.mem_handle & ~PAGE_MASK) {
779 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_SYNC: sn->sset.basep_sset.mem_handle: passed parameter is invalid");
780 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
784 #ifndef CONFIG_MALI_COH_USER
785 if (kbase_sync_now(kctx, &sn->sset) != 0)
786 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
791 case KBASE_FUNC_DISJOINT_QUERY:
793 struct kbase_uk_disjoint_query *dquery = args;
795 if (sizeof(*dquery) != args_size)
798 /* Get the disjointness counter value. */
799 dquery->counter = kbase_disjoint_event_get(kctx->kbdev);
803 case KBASE_FUNC_POST_TERM:
805 kbase_event_close(kctx);
809 case KBASE_FUNC_HWCNT_SETUP:
811 struct kbase_uk_hwcnt_setup *setup = args;
813 if (sizeof(*setup) != args_size)
816 mutex_lock(&kctx->vinstr_cli_lock);
817 if (kbase_vinstr_legacy_hwc_setup(kbdev->vinstr_ctx,
818 &kctx->vinstr_cli, setup) != 0)
819 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
820 mutex_unlock(&kctx->vinstr_cli_lock);
824 case KBASE_FUNC_HWCNT_DUMP:
827 mutex_lock(&kctx->vinstr_cli_lock);
828 if (kbase_vinstr_hwc_dump(kctx->vinstr_cli,
829 BASE_HWCNT_READER_EVENT_MANUAL) != 0)
830 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
831 mutex_unlock(&kctx->vinstr_cli_lock);
835 case KBASE_FUNC_HWCNT_CLEAR:
838 mutex_lock(&kctx->vinstr_cli_lock);
839 if (kbase_vinstr_hwc_clear(kctx->vinstr_cli) != 0)
840 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
841 mutex_unlock(&kctx->vinstr_cli_lock);
845 case KBASE_FUNC_HWCNT_READER_SETUP:
847 struct kbase_uk_hwcnt_reader_setup *setup = args;
849 if (sizeof(*setup) != args_size)
852 mutex_lock(&kctx->vinstr_cli_lock);
853 if (kbase_vinstr_hwcnt_reader_setup(kbdev->vinstr_ctx,
855 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
856 mutex_unlock(&kctx->vinstr_cli_lock);
860 case KBASE_FUNC_GPU_PROPS_REG_DUMP:
862 struct kbase_uk_gpuprops *setup = args;
864 if (sizeof(*setup) != args_size)
867 if (kbase_gpuprops_uk_get_props(kctx, setup) != 0)
868 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
871 case KBASE_FUNC_FIND_CPU_OFFSET:
873 struct kbase_uk_find_cpu_offset *find = args;
875 if (sizeof(*find) != args_size)
878 if (find->gpu_addr & ~PAGE_MASK) {
879 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_FIND_CPU_OFFSET: find->gpu_addr: passed parameter is invalid");
883 if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX) {
884 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
888 err = kbasep_find_enclosing_cpu_mapping_offset(
891 (uintptr_t) find->cpu_addr,
896 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
900 case KBASE_FUNC_GET_VERSION:
902 struct kbase_uk_get_ddk_version *get_version = (struct kbase_uk_get_ddk_version *)args;
904 if (sizeof(*get_version) != args_size)
907 /* the version buffer size check is done by a compile-time assert */
908 memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
909 get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
910 get_version->rk_version = ROCKCHIP_VERSION;
914 case KBASE_FUNC_STREAM_CREATE:
917 struct kbase_uk_stream_create *screate = (struct kbase_uk_stream_create *)args;
919 if (sizeof(*screate) != args_size)
922 if (strnlen(screate->name, sizeof(screate->name)) >= sizeof(screate->name)) {
923 /* not NULL terminated */
924 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
928 if (kbase_stream_create(screate->name, &screate->fd) != 0)
929 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
931 ukh->ret = MALI_ERROR_NONE;
932 #else /* CONFIG_SYNC */
933 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
934 #endif /* CONFIG_SYNC */
937 case KBASE_FUNC_FENCE_VALIDATE:
940 struct kbase_uk_fence_validate *fence_validate = (struct kbase_uk_fence_validate *)args;
942 if (sizeof(*fence_validate) != args_size)
945 if (kbase_fence_validate(fence_validate->fd) != 0)
946 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
948 ukh->ret = MALI_ERROR_NONE;
949 #endif /* CONFIG_SYNC */
953 case KBASE_FUNC_EXT_BUFFER_LOCK:
956 ret = kbase_external_buffer_lock(kctx,
957 (struct kbase_uk_ext_buff_kds_data *)args,
961 ukh->ret = MALI_ERROR_NONE;
964 ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
967 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
969 #endif /* CONFIG_KDS */
973 case KBASE_FUNC_SET_TEST_DATA:
976 struct kbase_uk_set_test_data *set_data = args;
978 shared_kernel_test_data = set_data->test_data;
979 shared_kernel_test_data.kctx.value = (void __user *)kctx;
980 shared_kernel_test_data.mm.value = (void __user *)current->mm;
981 ukh->ret = MALI_ERROR_NONE;
982 #endif /* MALI_UNIT_TEST */
986 case KBASE_FUNC_INJECT_ERROR:
988 #ifdef CONFIG_MALI_ERROR_INJECT
990 struct kbase_error_params params = ((struct kbase_uk_error_params *)args)->params;
993 spin_lock_irqsave(&kbdev->reg_op_lock, flags);
994 if (job_atom_inject_error(&params) != 0)
995 ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
997 ukh->ret = MALI_ERROR_NONE;
998 spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
1000 #endif /* CONFIG_MALI_ERROR_INJECT */
1004 case KBASE_FUNC_MODEL_CONTROL:
1006 #ifdef CONFIG_MALI_NO_MALI
1007 unsigned long flags;
1008 struct kbase_model_control_params params =
1009 ((struct kbase_uk_model_control_params *)args)->params;
1012 spin_lock_irqsave(&kbdev->reg_op_lock, flags);
1013 if (gpu_model_control(kbdev->model, &params) != 0)
1014 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
1016 ukh->ret = MALI_ERROR_NONE;
1017 spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
1019 #endif /* CONFIG_MALI_NO_MALI */
1023 #ifdef BASE_LEGACY_UK8_SUPPORT
1024 case KBASE_FUNC_KEEP_GPU_POWERED:
1026 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_KEEP_GPU_POWERED: function is deprecated and disabled\n");
1027 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
1030 #endif /* BASE_LEGACY_UK8_SUPPORT */
1032 case KBASE_FUNC_GET_PROFILING_CONTROLS:
1034 struct kbase_uk_profiling_controls *controls =
1035 (struct kbase_uk_profiling_controls *)args;
1038 if (sizeof(*controls) != args_size)
1041 for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
1042 controls->profiling_controls[i] = kbase_get_profiling_control(kbdev, i);
1047 /* used only for testing purposes; these controls are to be set by gator through the gator API */
1048 case KBASE_FUNC_SET_PROFILING_CONTROLS:
1050 struct kbase_uk_profiling_controls *controls =
1051 (struct kbase_uk_profiling_controls *)args;
1054 if (sizeof(*controls) != args_size)
1057 for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
1058 _mali_profiling_control(i, controls->profiling_controls[i]);
1063 case KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD:
1065 struct kbase_uk_debugfs_mem_profile_add *add_data =
1066 (struct kbase_uk_debugfs_mem_profile_add *)args;
1068 char __user *user_buf;
1070 if (sizeof(*add_data) != args_size)
1073 if (add_data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
1074 dev_err(kbdev->dev, "buffer too big");
1078 #ifdef CONFIG_COMPAT
1079 if (kctx->is_compat)
1080 user_buf = compat_ptr(add_data->buf.compat_value);
1083 user_buf = add_data->buf.value;
1085 buf = kmalloc(add_data->len, GFP_KERNEL);
1089 if (0 != copy_from_user(buf, user_buf, add_data->len)) {
1090 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
1095 if (kbasep_mem_profile_debugfs_insert(kctx, buf,
1097 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
1105 #ifdef CONFIG_MALI_NO_MALI
1106 case KBASE_FUNC_SET_PRFCNT_VALUES:
1109 struct kbase_uk_prfcnt_values *params =
1110 ((struct kbase_uk_prfcnt_values *)args);
1111 gpu_model_set_dummy_prfcnt_sample(params->data,
1116 #endif /* CONFIG_MALI_NO_MALI */
1118 #ifdef CONFIG_MALI_MIPE_ENABLED
1119 case KBASE_FUNC_TLSTREAM_ACQUIRE:
1121 struct kbase_uk_tlstream_acquire *tlstream_acquire =
1124 if (sizeof(*tlstream_acquire) != args_size)
1127 if (0 != kbase_tlstream_acquire(
1129 &tlstream_acquire->fd)) {
1130 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
1131 } else if (0 <= tlstream_acquire->fd) {
1132 /* Summary stream was cleared during acquire.
1133 * Create static timeline objects that will be
1134 * read by the client. */
1135 kbase_create_timeline_objects(kctx);
1139 case KBASE_FUNC_TLSTREAM_FLUSH:
1141 struct kbase_uk_tlstream_flush *tlstream_flush =
1144 if (sizeof(*tlstream_flush) != args_size)
1147 kbase_tlstream_flush_streams();
1151 case KBASE_FUNC_TLSTREAM_TEST:
1153 struct kbase_uk_tlstream_test *tlstream_test = args;
1155 if (sizeof(*tlstream_test) != args_size)
1158 kbase_tlstream_test(
1159 tlstream_test->tpw_count,
1160 tlstream_test->msg_delay,
1161 tlstream_test->msg_count,
1162 tlstream_test->aux_msg);
1165 case KBASE_FUNC_TLSTREAM_STATS:
1167 struct kbase_uk_tlstream_stats *tlstream_stats = args;
1169 if (sizeof(*tlstream_stats) != args_size)
1172 kbase_tlstream_stats(
1173 &tlstream_stats->bytes_collected,
1174 &tlstream_stats->bytes_generated);
1177 #endif /* MALI_UNIT_TEST */
1178 #endif /* CONFIG_MALI_MIPE_ENABLED */
1180 case KBASE_FUNC_GET_CONTEXT_ID:
1182 struct kbase_uk_context_id *info = args;
1184 info->id = kctx->id;
1189 dev_err(kbdev->dev, "unknown ioctl %u", id);
1196 dev_err(kbdev->dev, "Wrong syscall size (%d) for %08x\n", args_size, id);
1201 static struct kbase_device *to_kbase_device(struct device *dev)
1203 return dev_get_drvdata(dev);
1207 * API to acquire the device list mutex and
1208 * return a pointer to the device list head
1210 const struct list_head *kbase_dev_list_get(void)
1212 mutex_lock(&kbase_dev_list_lock);
1213 return &kbase_dev_list;
1215 KBASE_EXPORT_TEST_API(kbase_dev_list_get);
1217 /* API to release the device list mutex */
1218 void kbase_dev_list_put(const struct list_head *dev_list)
1220 mutex_unlock(&kbase_dev_list_lock);
1222 KBASE_EXPORT_TEST_API(kbase_dev_list_put);
1224 /* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */
1225 struct kbase_device *kbase_find_device(int minor)
1227 struct kbase_device *kbdev = NULL;
1228 struct list_head *entry;
1229 const struct list_head *dev_list = kbase_dev_list_get();
1231 list_for_each(entry, dev_list) {
1232 struct kbase_device *tmp;
1234 tmp = list_entry(entry, struct kbase_device, entry);
1235 if (tmp->mdev.minor == minor || minor == -1) {
1237 get_device(kbdev->dev);
1241 kbase_dev_list_put(dev_list);
1245 EXPORT_SYMBOL(kbase_find_device);
1247 void kbase_release_device(struct kbase_device *kbdev)
1249 put_device(kbdev->dev);
1251 EXPORT_SYMBOL(kbase_release_device);
1253 static int kbase_open(struct inode *inode, struct file *filp)
1255 struct kbase_device *kbdev = NULL;
1256 struct kbase_context *kctx;
1258 #ifdef CONFIG_DEBUG_FS
1262 kbdev = kbase_find_device(iminor(inode));
1267 kctx = kbase_create_context(kbdev, is_compat_task());
1273 init_waitqueue_head(&kctx->event_queue);
1274 filp->private_data = kctx;
1277 kctx->infinite_cache_active = kbdev->infinite_cache_active_default;
1279 #ifdef CONFIG_DEBUG_FS
1280 snprintf(kctx_name, 64, "%d_%d", kctx->tgid, kctx->id);
1282 kctx->kctx_dentry = debugfs_create_dir(kctx_name,
1283 kbdev->debugfs_ctx_directory);
1285 if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
1290 #ifdef CONFIG_MALI_COH_USER
1291 /* if cache is completely coherent at hardware level, then remove the
1292 * infinite cache control support from debugfs.
1295 debugfs_create_bool("infinite_cache", 0644, kctx->kctx_dentry,
1296 &kctx->infinite_cache_active);
1297 #endif /* CONFIG_MALI_COH_USER */
1299 mutex_init(&kctx->mem_profile_lock);
1301 kbasep_jd_debugfs_ctx_add(kctx);
1302 kbase_debug_mem_view_init(filp);
1304 kbase_debug_job_fault_context_init(kctx);
1306 kbase_mem_pool_debugfs_add(kctx->kctx_dentry, &kctx->mem_pool);
1308 #endif /* CONFIG_DEBUG_FS */
1310 dev_dbg(kbdev->dev, "created base context\n");
1313 struct kbasep_kctx_list_element *element;
1315 element = kzalloc(sizeof(*element), GFP_KERNEL);
1317 mutex_lock(&kbdev->kctx_list_lock);
1318 element->kctx = kctx;
1319 list_add(&element->link, &kbdev->kctx_list);
1320 #ifdef CONFIG_MALI_MIPE_ENABLED
1321 kbase_tlstream_tl_new_ctx(
1323 (u32)(element->kctx->id));
1325 mutex_unlock(&kbdev->kctx_list_lock);
1327 /* we don't treat this as a fail - just warn about it */
1328 dev_warn(kbdev->dev, "couldn't add kctx to kctx_list\n");
1334 kbase_release_device(kbdev);
1338 static int kbase_release(struct inode *inode, struct file *filp)
1340 struct kbase_context *kctx = filp->private_data;
1341 struct kbase_device *kbdev = kctx->kbdev;
1342 struct kbasep_kctx_list_element *element, *tmp;
1343 bool found_element = false;
1345 #ifdef CONFIG_MALI_MIPE_ENABLED
1346 kbase_tlstream_tl_del_ctx(kctx);
1349 #ifdef CONFIG_DEBUG_FS
1350 debugfs_remove_recursive(kctx->kctx_dentry);
1351 kbasep_mem_profile_debugfs_remove(kctx);
1352 kbase_debug_job_fault_context_term(kctx);
1355 mutex_lock(&kbdev->kctx_list_lock);
1356 list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
1357 if (element->kctx == kctx) {
1358 list_del(&element->link);
1360 found_element = true;
1363 mutex_unlock(&kbdev->kctx_list_lock);
1365 dev_warn(kbdev->dev, "kctx not in kctx_list\n");
1367 filp->private_data = NULL;
1369 mutex_lock(&kctx->vinstr_cli_lock);
1370 /* If this client was performing hwcnt dumping and did not explicitly
1371 * detach itself, remove it from the vinstr core now */
1372 if (kctx->vinstr_cli) {
1373 struct kbase_uk_hwcnt_setup setup;
1375 setup.dump_buffer = 0llu;
1376 kbase_vinstr_legacy_hwc_setup(
1377 kbdev->vinstr_ctx, &kctx->vinstr_cli, &setup);
1379 mutex_unlock(&kctx->vinstr_cli_lock);
1381 kbase_destroy_context(kctx);
1383 dev_dbg(kbdev->dev, "deleted base context\n");
1384 kbase_release_device(kbdev);
1388 #define CALL_MAX_SIZE 536
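/*
 * Rough sketch of the UK call convention implemented below (illustrative, not
 * normative): userspace fills one of the kbase_uk_* argument structures, whose
 * leading uk_header carries the KBASE_FUNC_* id, and issues an ioctl on its
 * /dev/mali<N> file descriptor. kbase_ioctl() copies at most CALL_MAX_SIZE
 * bytes into a kernel buffer, kbase_dispatch() switches on the function id and
 * records a mali_error in ukh->ret, and the buffer is copied back to userspace.
 *
 * Hypothetical userspace usage (field names abbreviated for illustration):
 *
 *	struct kbase_uk_mem_free args = { .header.id = KBASE_FUNC_MEM_FREE,
 *	                                  .gpu_addr = va };
 *	ioctl(mali_fd, cmd, &args);	/* cmd encodes sizeof(args) */
 *	err = args.header.ret;		/* mali_error result code */
 */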
1390 static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1392 u64 msg[(CALL_MAX_SIZE + 7) >> 3] = { 0xdeadbeefdeadbeefull }; /* alignment fixup */
1393 u32 size = _IOC_SIZE(cmd);
1394 struct kbase_context *kctx = filp->private_data;
1396 if (size > CALL_MAX_SIZE)
1399 if (0 != copy_from_user(&msg, (void __user *)arg, size)) {
1400 dev_err(kctx->kbdev->dev, "failed to copy ioctl argument into kernel space\n");
1404 if (kbase_dispatch(kctx, &msg, size) != 0)
1407 if (0 != copy_to_user((void __user *)arg, &msg, size)) {
1408 dev_err(kctx->kbdev->dev, "failed to copy results of UK call back to user space\n");
1414 static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
1416 struct kbase_context *kctx = filp->private_data;
1417 struct base_jd_event_v2 uevent;
1420 if (count < sizeof(uevent))
1424 while (kbase_event_dequeue(kctx, &uevent)) {
1428 if (filp->f_flags & O_NONBLOCK)
1431 if (wait_event_interruptible(kctx->event_queue,
1432 kbase_event_pending(kctx)) != 0)
1433 return -ERESTARTSYS;
1435 if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
1441 if (copy_to_user(buf, &uevent, sizeof(uevent)) != 0)
1444 buf += sizeof(uevent);
1446 count -= sizeof(uevent);
1447 } while (count >= sizeof(uevent));
1450 return out_count * sizeof(uevent);
1453 static unsigned int kbase_poll(struct file *filp, poll_table *wait)
1455 struct kbase_context *kctx = filp->private_data;
1457 poll_wait(filp, &kctx->event_queue, wait);
1458 if (kbase_event_pending(kctx))
1459 return POLLIN | POLLRDNORM;
1464 void kbase_event_wakeup(struct kbase_context *kctx)
1466 KBASE_DEBUG_ASSERT(kctx);
1468 wake_up_interruptible(&kctx->event_queue);
1471 KBASE_EXPORT_TEST_API(kbase_event_wakeup);
1473 static int kbase_check_flags(int flags)
1475 /* Enforce that the driver keeps the O_CLOEXEC flag so that execve() always
1476 * closes the file descriptor in a child process.
1478 if (0 == (flags & O_CLOEXEC))
1484 static unsigned long kbase_get_unmapped_area(struct file *filp,
1485 const unsigned long addr, const unsigned long len,
1486 const unsigned long pgoff, const unsigned long flags)
1489 /* based on get_unmapped_area, but simplified slightly because some
1490 * values are known in advance */
1491 struct kbase_context *kctx = filp->private_data;
1493 if (!kctx->is_compat && !addr &&
1494 kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA)) {
1495 struct mm_struct *mm = current->mm;
1496 struct vm_area_struct *vma;
1497 unsigned long low_limit, high_limit, gap_start, gap_end;
1499 /* The hardware has a smaller VA space than userspace; ensure the page
1500 * comes from a VA which can be used on the GPU */
1502 gap_end = (1UL<<33);
1505 high_limit = gap_end - len;
1506 low_limit = PAGE_SIZE + len;
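/* What follows is, roughly, the classic top-down VMA gap search: walk the
 * mm rbtree using rb_subtree_gap to skip subtrees that cannot hold a gap of
 * at least len bytes, preferring higher addresses, and clamp the result so
 * the mapping ends below 2^33 (the GPU VA limit checked above). */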
1508 gap_start = mm->highest_vm_end;
1509 if (gap_start <= high_limit)
1512 if (RB_EMPTY_ROOT(&mm->mm_rb))
1514 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1515 if (vma->rb_subtree_gap < len)
1519 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
1520 if (gap_start <= high_limit && vma->vm_rb.rb_right) {
1521 struct vm_area_struct *right =
1522 rb_entry(vma->vm_rb.rb_right,
1523 struct vm_area_struct, vm_rb);
1524 if (right->rb_subtree_gap >= len) {
1530 gap_end = vma->vm_start;
1531 if (gap_end < low_limit)
1533 if (gap_start <= high_limit &&
1534 gap_end - gap_start >= len)
1537 if (vma->vm_rb.rb_left) {
1538 struct vm_area_struct *left =
1539 rb_entry(vma->vm_rb.rb_left,
1540 struct vm_area_struct, vm_rb);
1542 if (left->rb_subtree_gap >= len) {
1548 struct rb_node *prev = &vma->vm_rb;
1550 if (!rb_parent(prev))
1552 vma = rb_entry(rb_parent(prev),
1553 struct vm_area_struct, vm_rb);
1554 if (prev == vma->vm_rb.rb_right) {
1555 gap_start = vma->vm_prev ?
1556 vma->vm_prev->vm_end : 0;
1563 if (gap_end > (1UL<<33))
1564 gap_end = (1UL<<33);
1569 VM_BUG_ON(gap_end < PAGE_SIZE);
1570 VM_BUG_ON(gap_end < gap_start);
1574 /* No special requirements - fall back to the default version */
1575 return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
1578 static const struct file_operations kbase_fops = {
1579 .owner = THIS_MODULE,
1581 .release = kbase_release,
1584 .unlocked_ioctl = kbase_ioctl,
1585 .compat_ioctl = kbase_ioctl,
1587 .check_flags = kbase_check_flags,
1588 .get_unmapped_area = kbase_get_unmapped_area,
1591 #ifndef CONFIG_MALI_NO_MALI
1592 void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value)
1594 writel(value, kbdev->reg + offset);
1597 u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset)
1599 return readl(kbdev->reg + offset);
1601 #endif /* !CONFIG_MALI_NO_MALI */
1604 /** Show callback for the @c power_policy sysfs file.
1606 * This function is called to get the contents of the @c power_policy sysfs
1607 * file. This is a list of the available policies with the currently active one
1608 * surrounded by square brackets.
1610 * @param dev The device this sysfs file is for
1611 * @param attr The attributes of the sysfs file
1612 * @param buf The output buffer for the sysfs file contents
1614 * @return The number of bytes output to @c buf.
1616 static ssize_t show_policy(struct device *dev, struct device_attribute *attr, char *const buf)
1618 struct kbase_device *kbdev;
1619 const struct kbase_pm_policy *current_policy;
1620 const struct kbase_pm_policy *const *policy_list;
1625 kbdev = to_kbase_device(dev);
1630 current_policy = kbase_pm_get_policy(kbdev);
1632 policy_count = kbase_pm_list_policies(&policy_list);
1634 for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
1635 if (policy_list[i] == current_policy)
1636 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
1638 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
1641 if (ret < PAGE_SIZE - 1) {
1642 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
1644 buf[PAGE_SIZE - 2] = '\n';
1645 buf[PAGE_SIZE - 1] = '\0';
1646 ret = PAGE_SIZE - 1;
1652 /** Store callback for the @c power_policy sysfs file.
1654 * This function is called when the @c power_policy sysfs file is written to.
1655 * It matches the requested policy against the available policies and if a
1656 * matching policy is found calls @ref kbase_pm_set_policy to change the
1659 * @param dev The device this sysfs file is for
1660 * @param attr The attributes of the sysfs file
1661 * @param buf The value written to the sysfs file
1662 * @param count The number of bytes written to the sysfs file
1664 * @return @c count if the function succeeded. An error code on failure.
1666 static ssize_t set_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1668 struct kbase_device *kbdev;
1669 const struct kbase_pm_policy *new_policy = NULL;
1670 const struct kbase_pm_policy *const *policy_list;
1674 kbdev = to_kbase_device(dev);
1679 policy_count = kbase_pm_list_policies(&policy_list);
1681 for (i = 0; i < policy_count; i++) {
1682 if (sysfs_streq(policy_list[i]->name, buf)) {
1683 new_policy = policy_list[i];
1689 dev_err(dev, "power_policy: policy not found\n");
1693 kbase_pm_set_policy(kbdev, new_policy);
1698 /** The sysfs file @c power_policy.
1700 * This is used for obtaining information about the available policies,
1701 * determining which policy is currently active, and changing the active
1704 static DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
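/*
 * Example power_policy interaction (illustrative; the policy names and sysfs
 * path depend on the build and platform):
 *
 *	# cat /sys/class/misc/mali0/device/power_policy
 *	[demand] coarse_demand always_on
 *	# echo always_on > /sys/class/misc/mali0/device/power_policy
 *
 * Reading lists every registered policy with the active one in brackets (see
 * show_policy() above); writing one of the listed names selects it.
 */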
1706 /** Show callback for the @c core_availability_policy sysfs file.
1708 * This function is called to get the contents of the @c core_availability_policy
1709 * sysfs file. This is a list of the available policies with the currently
1710 * active one surrounded by square brackets.
1712 * @param dev The device this sysfs file is for
1713 * @param attr The attributes of the sysfs file
1714 * @param buf The output buffer for the sysfs file contents
1716 * @return The number of bytes output to @c buf.
1718 static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char * const buf)
1720 struct kbase_device *kbdev;
1721 const struct kbase_pm_ca_policy *current_policy;
1722 const struct kbase_pm_ca_policy *const *policy_list;
1727 kbdev = to_kbase_device(dev);
1732 current_policy = kbase_pm_ca_get_policy(kbdev);
1734 policy_count = kbase_pm_ca_list_policies(&policy_list);
1736 for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
1737 if (policy_list[i] == current_policy)
1738 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
1740 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
1743 if (ret < PAGE_SIZE - 1) {
1744 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
1746 buf[PAGE_SIZE - 2] = '\n';
1747 buf[PAGE_SIZE - 1] = '\0';
1748 ret = PAGE_SIZE - 1;
1754 /** Store callback for the @c core_availability_policy sysfs file.
1756 * This function is called when the @c core_availability_policy sysfs file is
1757 * written to. It matches the requested policy against the available policies
1758 * and if a matching policy is found calls @ref kbase_pm_ca_set_policy to change
1761 * @param dev The device this sysfs file is for
1762 * @param attr The attributes of the sysfs file
1763 * @param buf The value written to the sysfs file
1764 * @param count The number of bytes written to the sysfs file
1766 * @return @c count if the function succeeded. An error code on failure.
1768 static ssize_t set_ca_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1770 struct kbase_device *kbdev;
1771 const struct kbase_pm_ca_policy *new_policy = NULL;
1772 const struct kbase_pm_ca_policy *const *policy_list;
1776 kbdev = to_kbase_device(dev);
1781 policy_count = kbase_pm_ca_list_policies(&policy_list);
1783 for (i = 0; i < policy_count; i++) {
1784 if (sysfs_streq(policy_list[i]->name, buf)) {
1785 new_policy = policy_list[i];
1791 dev_err(dev, "core_availability_policy: policy not found\n");
1795 kbase_pm_ca_set_policy(kbdev, new_policy);
1800 /** The sysfs file @c core_availability_policy
1802 * This is used for obtaining information about the available policies,
1803 * determining which policy is currently active, and changing the active
1806 static DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_policy);
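/*
 * core_availability_policy follows the same read/write pattern as power_policy
 * above (illustrative): reading lists the available core-availability policies
 * with the active one in square brackets, and writing one of the listed names
 * (e.g. with echo) selects it via kbase_pm_ca_set_policy().
 */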
1808 /** Show callback for the @c core_mask sysfs file.
1810 * This function is called to get the contents of the @c core_mask sysfs
1813 * @param dev The device this sysfs file is for
1814 * @param attr The attributes of the sysfs file
1815 * @param buf The output buffer for the sysfs file contents
1817 * @return The number of bytes output to @c buf.
1819 static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char * const buf)
1821 struct kbase_device *kbdev;
1824 kbdev = to_kbase_device(dev);
1829 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1830 "Current core mask (JS0) : 0x%llX\n",
1831 kbdev->pm.debug_core_mask[0]);
1832 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1833 "Current core mask (JS1) : 0x%llX\n",
1834 kbdev->pm.debug_core_mask[1]);
1835 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1836 "Current core mask (JS2) : 0x%llX\n",
1837 kbdev->pm.debug_core_mask[2]);
1838 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1839 "Available core mask : 0x%llX\n",
1840 kbdev->gpu_props.props.raw_props.shader_present);
1845 /** Store callback for the @c core_mask sysfs file.
1847 * This function is called when the @c core_mask sysfs file is written to.
1849 * @param dev The device this sysfs file is for
1850 * @param attr The attributes of the sysfs file
1851 * @param buf The value written to the sysfs file
1852 * @param count The number of bytes written to the sysfs file
1854 * @return @c count if the function succeeded. An error code on failure.
1856 static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1858 struct kbase_device *kbdev;
1859 u64 new_core_mask[3];
1862 kbdev = to_kbase_device(dev);
1867 items = sscanf(buf, "%llx %llx %llx",
1868 &new_core_mask[0], &new_core_mask[1],
1872 new_core_mask[1] = new_core_mask[2] = new_core_mask[0];
1874 if (items == 1 || items == 3) {
1875 u64 shader_present =
1876 kbdev->gpu_props.props.raw_props.shader_present;
1877 u64 group0_core_mask =
1878 kbdev->gpu_props.props.coherency_info.group[0].
1881 if ((new_core_mask[0] & shader_present) != new_core_mask[0] ||
1882 !(new_core_mask[0] & group0_core_mask) ||
1883 (new_core_mask[1] & shader_present) !=
1885 !(new_core_mask[1] & group0_core_mask) ||
1886 (new_core_mask[2] & shader_present) !=
1888 !(new_core_mask[2] & group0_core_mask)) {
1889 dev_err(dev, "power_policy: invalid core specification\n");
1893 if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] ||
1894 kbdev->pm.debug_core_mask[1] !=
1896 kbdev->pm.debug_core_mask[2] !=
1898 unsigned long flags;
1900 spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
1902 kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0],
1903 new_core_mask[1], new_core_mask[2]);
1905 spin_unlock_irqrestore(&kbdev->pm.power_change_lock,
1912 dev_err(kbdev->dev, "Couldn't process set_core_mask write operation.\n"
1913 "Use format <core_mask>\n"
1914 "or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
1918 /** The sysfs file @c core_mask.
1920 * This is used to restrict shader core availability for debugging purposes.
1921 * Reading it will show the current core mask and the mask of cores available.
1922 * Writing to it will set the current core mask.
1924 static DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask);
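/*
 * Example core_mask usage (illustrative mask values; each mask must be a
 * subset of the shader_present mask and intersect core group 0, as enforced
 * in set_core_mask() above):
 *
 *	# cat /sys/class/misc/mali0/device/core_mask
 *	Current core mask (JS0) : 0xF
 *	Current core mask (JS1) : 0xF
 *	Current core mask (JS2) : 0xF
 *	Available core mask : 0xF
 *	# echo 0x3 > core_mask			# one mask applied to all job slots
 *	# echo 0x3 0xF 0xF > core_mask		# per-slot masks for JS0 JS1 JS2
 */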
1926 /** Store callback for the @c js_timeouts sysfs file.
1928 * This function is called when the @c js_timeouts sysfs file is written to.
1929 * The file contains eight values separated by whitespace. The values are
1930 * basically the same as the JS_SOFT_STOP_TICKS, JS_SOFT_STOP_TICKS_CL,
1931 * JS_HARD_STOP_TICKS_SS, JS_HARD_STOP_TICKS_CL, JS_HARD_STOP_TICKS_DUMPING,
1932 * JS_RESET_TICKS_SS, JS_RESET_TICKS_CL and JS_RESET_TICKS_DUMPING configuration
1933 * values (in that order), with the difference that the js_timeouts values are
1934 * expressed in MILLISECONDS.
1935 * The js_timeouts sysfs file allows the current values in
1936 * use by the job scheduler to be overridden. Note that a value needs to
1937 * be other than 0 for it to override the current job scheduler value.
1939 * @param dev The device this sysfs file is for
1940 * @param attr The attributes of the sysfs file
1941 * @param buf The value written to the sysfs file
1942 * @param count The number of bytes written to the sysfs file
1944 * @return @c count if the function succeeded. An error code on failure.
1946 static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1948 struct kbase_device *kbdev;
1950 long js_soft_stop_ms;
1951 long js_soft_stop_ms_cl;
1952 long js_hard_stop_ms_ss;
1953 long js_hard_stop_ms_cl;
1954 long js_hard_stop_ms_dumping;
1955 long js_reset_ms_ss;
1956 long js_reset_ms_cl;
1957 long js_reset_ms_dumping;
1959 kbdev = to_kbase_device(dev);
1963 items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld",
1964 &js_soft_stop_ms, &js_soft_stop_ms_cl,
1965 &js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
1966 &js_hard_stop_ms_dumping, &js_reset_ms_ss,
1967 &js_reset_ms_cl, &js_reset_ms_dumping);
1972 if (js_soft_stop_ms >= 0) {
1973 ticks = js_soft_stop_ms * 1000000ULL;
1974 do_div(ticks, kbdev->js_data.scheduling_period_ns);
1975 kbdev->js_soft_stop_ticks = ticks;
1977 kbdev->js_soft_stop_ticks = -1;
1980 if (js_soft_stop_ms_cl >= 0) {
1981 ticks = js_soft_stop_ms_cl * 1000000ULL;
1982 do_div(ticks, kbdev->js_data.scheduling_period_ns);
1983 kbdev->js_soft_stop_ticks_cl = ticks;
1985 kbdev->js_soft_stop_ticks_cl = -1;
1988 if (js_hard_stop_ms_ss >= 0) {
1989 ticks = js_hard_stop_ms_ss * 1000000ULL;
1990 do_div(ticks, kbdev->js_data.scheduling_period_ns);
1991 kbdev->js_hard_stop_ticks_ss = ticks;
1993 kbdev->js_hard_stop_ticks_ss = -1;
1996 if (js_hard_stop_ms_cl >= 0) {
1997 ticks = js_hard_stop_ms_cl * 1000000ULL;
1998 do_div(ticks, kbdev->js_data.scheduling_period_ns);
1999 kbdev->js_hard_stop_ticks_cl = ticks;
2001 kbdev->js_hard_stop_ticks_cl = -1;
2004 if (js_hard_stop_ms_dumping >= 0) {
2005 ticks = js_hard_stop_ms_dumping * 1000000ULL;
2006 do_div(ticks, kbdev->js_data.scheduling_period_ns);
2007 kbdev->js_hard_stop_ticks_dumping = ticks;
2009 kbdev->js_hard_stop_ticks_dumping = -1;
2012 if (js_reset_ms_ss >= 0) {
2013 ticks = js_reset_ms_ss * 1000000ULL;
2014 do_div(ticks, kbdev->js_data.scheduling_period_ns);
2015 kbdev->js_reset_ticks_ss = ticks;
2017 kbdev->js_reset_ticks_ss = -1;
2020 if (js_reset_ms_cl >= 0) {
2021 ticks = js_reset_ms_cl * 1000000ULL;
2022 do_div(ticks, kbdev->js_data.scheduling_period_ns);
2023 kbdev->js_reset_ticks_cl = ticks;
2025 kbdev->js_reset_ticks_cl = -1;
2028 if (js_reset_ms_dumping >= 0) {
2029 ticks = js_reset_ms_dumping * 1000000ULL;
2030 do_div(ticks, kbdev->js_data.scheduling_period_ns);
2031 kbdev->js_reset_ticks_dumping = ticks;
2033 kbdev->js_reset_ticks_dumping = -1;
2036 kbdev->js_timeouts_updated = true;
2038 dev_dbg(kbdev->dev, "Overriding JS_SOFT_STOP_TICKS with %lu ticks (%lu ms)\n",
2039 (unsigned long)kbdev->js_soft_stop_ticks,
2041 dev_dbg(kbdev->dev, "Overriding JS_SOFT_STOP_TICKS_CL with %lu ticks (%lu ms)\n",
2042 (unsigned long)kbdev->js_soft_stop_ticks_cl,
2043 js_soft_stop_ms_cl);
2044 dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_SS with %lu ticks (%lu ms)\n",
2045 (unsigned long)kbdev->js_hard_stop_ticks_ss,
2046 js_hard_stop_ms_ss);
2047 dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_CL with %lu ticks (%lu ms)\n",
2048 (unsigned long)kbdev->js_hard_stop_ticks_cl,
2049 js_hard_stop_ms_cl);
2050 dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_DUMPING with %lu ticks (%lu ms)\n",
2052 kbdev->js_hard_stop_ticks_dumping,
2053 js_hard_stop_ms_dumping);
2054 dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_SS with %lu ticks (%lu ms)\n",
2055 (unsigned long)kbdev->js_reset_ticks_ss,
2057 dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_CL with %lu ticks (%lu ms)\n",
2058 (unsigned long)kbdev->js_reset_ticks_cl,
2060 dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_DUMPING with %lu ticks (%lu ms)\n",
2061 (unsigned long)kbdev->js_reset_ticks_dumping,
2062 js_reset_ms_dumping);
2067 dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n"
2068 "Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> <hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> <reset_ms_dumping>\n"
2069 "Write 0 for no change, -1 to restore default timeout\n");
2073 /** Show callback for the @c js_timeouts sysfs file.
2075 * This function is called to get the contents of the @c js_timeouts sysfs
2076 * file. It returns the last values written to the js_timeouts sysfs file.
2077 * If the file has not been written yet, the values will be the current settings in
2079 * @param dev The device this sysfs file is for
2080 * @param attr The attributes of the sysfs file
2081 * @param buf The output buffer for the sysfs file contents
2083 * @return The number of bytes output to @c buf.
2085 static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *attr, char * const buf)
2087 struct kbase_device *kbdev;
2090 unsigned long js_soft_stop_ms;
2091 unsigned long js_soft_stop_ms_cl;
2092 unsigned long js_hard_stop_ms_ss;
2093 unsigned long js_hard_stop_ms_cl;
2094 unsigned long js_hard_stop_ms_dumping;
2095 unsigned long js_reset_ms_ss;
2096 unsigned long js_reset_ms_cl;
2097 unsigned long js_reset_ms_dumping;
2098 unsigned long ticks;
2099 u32 scheduling_period_ns;
2101 kbdev = to_kbase_device(dev);
2105 /* If no contexts have been scheduled since js_timeouts was last written
2106 * to, the new timeouts might not have been latched yet. So check if an
2107 * update is pending and use the new values if necessary. */
2108 if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns > 0)
2109 scheduling_period_ns = kbdev->js_scheduling_period_ns;
2111 scheduling_period_ns = kbdev->js_data.scheduling_period_ns;
2113 if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks > 0)
2114 ticks = kbdev->js_soft_stop_ticks;
2116 ticks = kbdev->js_data.soft_stop_ticks;
2117 ms = (u64)ticks * scheduling_period_ns;
2118 do_div(ms, 1000000UL);
2119 js_soft_stop_ms = (unsigned long)ms;
2121 if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks_cl > 0)
2122 ticks = kbdev->js_soft_stop_ticks_cl;
2124 ticks = kbdev->js_data.soft_stop_ticks_cl;
2125 ms = (u64)ticks * scheduling_period_ns;
2126 do_div(ms, 1000000UL);
2127 js_soft_stop_ms_cl = (unsigned long)ms;
2129 if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_ss > 0)
2130 ticks = kbdev->js_hard_stop_ticks_ss;
2132 ticks = kbdev->js_data.hard_stop_ticks_ss;
2133 ms = (u64)ticks * scheduling_period_ns;
2134 do_div(ms, 1000000UL);
2135 js_hard_stop_ms_ss = (unsigned long)ms;
2137 if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_cl > 0)
2138 ticks = kbdev->js_hard_stop_ticks_cl;
2140 ticks = kbdev->js_data.hard_stop_ticks_cl;
2141 ms = (u64)ticks * scheduling_period_ns;
2142 do_div(ms, 1000000UL);
2143 js_hard_stop_ms_cl = (unsigned long)ms;
2145 if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_dumping > 0)
2146 ticks = kbdev->js_hard_stop_ticks_dumping;
2148 ticks = kbdev->js_data.hard_stop_ticks_dumping;
2149 ms = (u64)ticks * scheduling_period_ns;
2150 do_div(ms, 1000000UL);
2151 js_hard_stop_ms_dumping = (unsigned long)ms;
2153 if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_ss > 0)
2154 ticks = kbdev->js_reset_ticks_ss;
2156 ticks = kbdev->js_data.gpu_reset_ticks_ss;
2157 ms = (u64)ticks * scheduling_period_ns;
2158 do_div(ms, 1000000UL);
2159 js_reset_ms_ss = (unsigned long)ms;
2161 if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_cl > 0)
2162 ticks = kbdev->js_reset_ticks_cl;
2164 ticks = kbdev->js_data.gpu_reset_ticks_cl;
2165 ms = (u64)ticks * scheduling_period_ns;
2166 do_div(ms, 1000000UL);
2167 js_reset_ms_cl = (unsigned long)ms;
2169 if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_dumping > 0)
2170 ticks = kbdev->js_reset_ticks_dumping;
2172 ticks = kbdev->js_data.gpu_reset_ticks_dumping;
2173 ms = (u64)ticks * scheduling_period_ns;
2174 do_div(ms, 1000000UL);
2175 js_reset_ms_dumping = (unsigned long)ms;
2177 ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
2178 js_soft_stop_ms, js_soft_stop_ms_cl,
2179 js_hard_stop_ms_ss, js_hard_stop_ms_cl,
2180 js_hard_stop_ms_dumping, js_reset_ms_ss,
2181 js_reset_ms_cl, js_reset_ms_dumping);
2183 if (ret >= PAGE_SIZE) {
2184 buf[PAGE_SIZE - 2] = '\n';
2185 buf[PAGE_SIZE - 1] = '\0';
2186 ret = PAGE_SIZE - 1;
2192 /** The sysfs file @c js_timeouts.
2194 * This is used to override the current job scheduler values for
2195 * JS_SOFT_STOP_TICKS
2196 * JS_SOFT_STOP_TICKS_CL
2197 * JS_HARD_STOP_TICKS_SS
2198 * JS_HARD_STOP_TICKS_CL
2199 * JS_HARD_STOP_TICKS_DUMPING
2202 * JS_RESET_TICKS_DUMPING.
2204 static DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts);
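/*
 * Example js_timeouts usage (illustrative numbers; all eight values are in
 * milliseconds, in the order documented in set_js_timeouts() above):
 *
 *	# echo "500 500 2000 2000 3000 4000 4000 6000" > js_timeouts
 *	# echo "0 0 0 0 0 0 0 -1" > js_timeouts
 *
 * As the driver's own help text says, 0 leaves a timeout unchanged and -1
 * restores its default; reading the file back reports the effective values.
 */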
2207 * set_js_scheduling_period - Store callback for the js_scheduling_period sysfs
2209 * @dev: The device the sysfs file is for
2210 * @attr: The attributes of the sysfs file
2211 * @buf: The value written to the sysfs file
2212 * @count: The number of bytes written to the sysfs file
2214 * This function is called when the js_scheduling_period sysfs file is written
2215 * to. It checks the data written, and if valid updates the js_scheduling_period
2218 * Return: @c count if the function succeeded. An error code on failure.
2220 static ssize_t set_js_scheduling_period(struct device *dev,
2221 struct device_attribute *attr, const char *buf, size_t count)
2223 struct kbase_device *kbdev;
2225 unsigned int js_scheduling_period;
2226 u32 new_scheduling_period_ns;
2230 kbdev = to_kbase_device(dev);
2234 ret = kstrtouint(buf, 0, &js_scheduling_period);
2235 if (ret || !js_scheduling_period) {
2236 dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n"
2237 "Use format <js_scheduling_period_ms>\n");
2241 new_scheduling_period_ns = js_scheduling_period * 1000000;
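/* The overrides below are rescaled so the absolute timeouts are kept
 * roughly constant: new_ticks = old_ticks * old_period / new_period,
 * rounded down but never below one tick. Hypothetical example: a
 * 30-tick soft stop at a 100 ms period (3000 ms) becomes 60 ticks
 * once the period is changed to 50 ms. */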
2243 /* Update scheduling timeouts */
2244 mutex_lock(&kbdev->js_data.runpool_mutex);
2246 /* If no contexts have been scheduled since js_timeouts was last written
2247 * to, the new timeouts might not have been latched yet. So check if an
2248 * update is pending and use the new values if necessary. */
	/* Use previous 'new' scheduling period as a base if present. */
	if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns)
		old_period = kbdev->js_scheduling_period_ns;
	else
		old_period = kbdev->js_data.scheduling_period_ns;

	if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks > 0)
		ticks = (u64)kbdev->js_soft_stop_ticks * old_period;
	else
		ticks = (u64)kbdev->js_data.soft_stop_ticks *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_soft_stop_ticks = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks_cl > 0)
		ticks = (u64)kbdev->js_soft_stop_ticks_cl * old_period;
	else
		ticks = (u64)kbdev->js_data.soft_stop_ticks_cl *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_soft_stop_ticks_cl = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_ss > 0)
		ticks = (u64)kbdev->js_hard_stop_ticks_ss * old_period;
	else
		ticks = (u64)kbdev->js_data.hard_stop_ticks_ss *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_hard_stop_ticks_ss = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_cl > 0)
		ticks = (u64)kbdev->js_hard_stop_ticks_cl * old_period;
	else
		ticks = (u64)kbdev->js_data.hard_stop_ticks_cl *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_hard_stop_ticks_cl = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_dumping > 0)
		ticks = (u64)kbdev->js_hard_stop_ticks_dumping * old_period;
	else
		ticks = (u64)kbdev->js_data.hard_stop_ticks_dumping *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_hard_stop_ticks_dumping = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_ss > 0)
		ticks = (u64)kbdev->js_reset_ticks_ss * old_period;
	else
		ticks = (u64)kbdev->js_data.gpu_reset_ticks_ss *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_reset_ticks_ss = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_cl > 0)
		ticks = (u64)kbdev->js_reset_ticks_cl * old_period;
	else
		ticks = (u64)kbdev->js_data.gpu_reset_ticks_cl *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_reset_ticks_cl = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_dumping > 0)
		ticks = (u64)kbdev->js_reset_ticks_dumping * old_period;
	else
		ticks = (u64)kbdev->js_data.gpu_reset_ticks_dumping *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_reset_ticks_dumping = ticks ? ticks : 1;
2320 kbdev->js_scheduling_period_ns = new_scheduling_period_ns;
2321 kbdev->js_timeouts_updated = true;
2323 mutex_unlock(&kbdev->js_data.runpool_mutex);
2325 dev_dbg(kbdev->dev, "JS scheduling period: %dms\n",
2326 js_scheduling_period);
2332 * show_js_scheduling_period - Show callback for the js_scheduling_period sysfs
2334 * @dev: The device this sysfs file is for.
2335 * @attr: The attributes of the sysfs file.
 * @buf: The output buffer for the scheduling period value.
 *
2338 * This function is called to get the current period used for the JS scheduling
2341 * Return: The number of bytes output to buf.
2343 static ssize_t show_js_scheduling_period(struct device *dev,
2344 struct device_attribute *attr, char * const buf)
2346 struct kbase_device *kbdev;
2350 kbdev = to_kbase_device(dev);
2354 if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns > 0)
2355 period = kbdev->js_scheduling_period_ns;
2357 period = kbdev->js_data.scheduling_period_ns;
	ret = scnprintf(buf, PAGE_SIZE, "%d\n", period / 1000000);

	return ret;
}
2365 static DEVICE_ATTR(js_scheduling_period, S_IRUGO | S_IWUSR,
2366 show_js_scheduling_period, set_js_scheduling_period);
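/*
 * Worked example (illustrative, not from the original source): with the
 * current scheduling period at 100 ms and a soft-stop threshold of 5 ticks
 * (i.e. 500 ms), writing 50 to js_scheduling_period rescales the threshold to
 * (5 * 100000000) / 50000000 = 10 ticks, so the soft-stop still triggers after
 * roughly the same wall-clock time; thresholds that would round down to zero
 * are clamped to 1 tick, as done in set_js_scheduling_period() above.
 */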
2368 #if !MALI_CUSTOMER_RELEASE
2369 /** Store callback for the @c force_replay sysfs file.
 * @param dev The device this sysfs file is for
2372 * @param attr The attributes of the sysfs file
2373 * @param buf The value written to the sysfs file
2374 * @param count The number of bytes written to the sysfs file
2376 * @return @c count if the function succeeded. An error code on failure.
2378 static ssize_t set_force_replay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2380 struct kbase_device *kbdev;
2382 kbdev = to_kbase_device(dev);
2386 if (!strncmp("limit=", buf, MIN(6, count))) {
2387 int force_replay_limit;
2388 int items = sscanf(buf, "limit=%u", &force_replay_limit);
2391 kbdev->force_replay_random = false;
2392 kbdev->force_replay_limit = force_replay_limit;
2393 kbdev->force_replay_count = 0;
2397 } else if (!strncmp("random_limit", buf, MIN(12, count))) {
2398 kbdev->force_replay_random = true;
2399 kbdev->force_replay_count = 0;
2402 } else if (!strncmp("norandom_limit", buf, MIN(14, count))) {
2403 kbdev->force_replay_random = false;
2404 kbdev->force_replay_limit = KBASEP_FORCE_REPLAY_DISABLED;
2405 kbdev->force_replay_count = 0;
2408 } else if (!strncmp("core_req=", buf, MIN(9, count))) {
2409 unsigned int core_req;
2410 int items = sscanf(buf, "core_req=%x", &core_req);
2413 kbdev->force_replay_core_req = (base_jd_core_req)core_req;
2418 dev_err(kbdev->dev, "Couldn't process force_replay write operation.\nPossible settings: limit=<limit>, random_limit, norandom_limit, core_req=<core_req>\n");
2422 /** Show callback for the @c force_replay sysfs file.
2424 * This function is called to get the contents of the @c force_replay sysfs
2425 * file. It returns the last set value written to the force_replay sysfs file.
2426 * If the file didn't get written yet, the values will be 0.
2428 * @param dev The device this sysfs file is for
2429 * @param attr The attributes of the sysfs file
2430 * @param buf The output buffer for the sysfs file contents
2432 * @return The number of bytes output to @c buf.
2434 static ssize_t show_force_replay(struct device *dev,
2435 struct device_attribute *attr, char * const buf)
2437 struct kbase_device *kbdev;
2440 kbdev = to_kbase_device(dev);
2444 if (kbdev->force_replay_random)
2445 ret = scnprintf(buf, PAGE_SIZE,
2446 "limit=0\nrandom_limit\ncore_req=%x\n",
2447 kbdev->force_replay_core_req);
2449 ret = scnprintf(buf, PAGE_SIZE,
2450 "limit=%u\nnorandom_limit\ncore_req=%x\n",
2451 kbdev->force_replay_limit,
2452 kbdev->force_replay_core_req);
2454 if (ret >= PAGE_SIZE) {
2455 buf[PAGE_SIZE - 2] = '\n';
2456 buf[PAGE_SIZE - 1] = '\0';
2457 ret = PAGE_SIZE - 1;
2463 /** The sysfs file @c force_replay.
static DEVICE_ATTR(force_replay, S_IRUGO | S_IWUSR, show_force_replay,
		set_force_replay);
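/*
 * Illustrative usage (not part of the original source): on non-customer
 * builds, writes of the form "limit=<n>", "random_limit", "norandom_limit" or
 * "core_req=<hex>" select the replay-forcing behaviour handled by
 * set_force_replay() above, and a read reports the currently active setting.
 */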
2468 #endif /* !MALI_CUSTOMER_RELEASE */
2470 #ifdef CONFIG_MALI_DEBUG
2471 static ssize_t set_js_softstop_always(struct device *dev,
2472 struct device_attribute *attr, const char *buf, size_t count)
2474 struct kbase_device *kbdev;
2476 int softstop_always;
2478 kbdev = to_kbase_device(dev);
2482 ret = kstrtoint(buf, 0, &softstop_always);
2483 if (ret || ((softstop_always != 0) && (softstop_always != 1))) {
2484 dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n"
2485 "Use format <soft_stop_always>\n");
2489 kbdev->js_data.softstop_always = (bool) softstop_always;
2490 dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n",
2491 (kbdev->js_data.softstop_always) ?
2492 "Enabled" : "Disabled");
2496 static ssize_t show_js_softstop_always(struct device *dev,
2497 struct device_attribute *attr, char * const buf)
2499 struct kbase_device *kbdev;
2502 kbdev = to_kbase_device(dev);
2506 ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);
2508 if (ret >= PAGE_SIZE) {
2509 buf[PAGE_SIZE - 2] = '\n';
2510 buf[PAGE_SIZE - 1] = '\0';
2511 ret = PAGE_SIZE - 1;
2518 * By default, soft-stops are disabled when only a single context is present. The ability to
2519 * enable soft-stop when only a single context is present can be used for debug and unit-testing purposes.
2520 * (see CL t6xx_stress_1 unit-test as an example whereby this feature is used.)
2522 static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
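/*
 * Illustrative usage (not part of the original source):
 *   echo 1 > /sys/class/misc/mali0/device/js_softstop_always
 * enables soft-stops even when only a single context is present, and echo 0
 * restores the default behaviour; the path assumes a "mali0" misc device.
 */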
2523 #endif /* CONFIG_MALI_DEBUG */
2525 #ifdef CONFIG_MALI_DEBUG
2526 typedef void (kbasep_debug_command_func) (struct kbase_device *);
2528 enum kbasep_debug_command_code {
2529 KBASEP_DEBUG_COMMAND_DUMPTRACE,
2531 /* This must be the last enum */
2532 KBASEP_DEBUG_COMMAND_COUNT
struct kbasep_debug_command {
	char *str;
	kbasep_debug_command_func *func;
};

/** Debug commands supported by the driver */
static const struct kbasep_debug_command debug_commands[] = {
	{
		.str = "dumptrace",
		.func = &kbasep_trace_dump,
	}
};
2548 /** Show callback for the @c debug_command sysfs file.
2550 * This function is called to get the contents of the @c debug_command sysfs
2551 * file. This is a list of the available debug commands, separated by newlines.
2553 * @param dev The device this sysfs file is for
2554 * @param attr The attributes of the sysfs file
2555 * @param buf The output buffer for the sysfs file contents
2557 * @return The number of bytes output to @c buf.
2559 static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char * const buf)
2561 struct kbase_device *kbdev;
2565 kbdev = to_kbase_device(dev);
2570 for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
2571 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);
2573 if (ret >= PAGE_SIZE) {
2574 buf[PAGE_SIZE - 2] = '\n';
2575 buf[PAGE_SIZE - 1] = '\0';
2576 ret = PAGE_SIZE - 1;
2582 /** Store callback for the @c debug_command sysfs file.
2584 * This function is called when the @c debug_command sysfs file is written to.
2585 * It matches the requested command against the available commands, and if
2586 * a matching command is found calls the associated function from
2587 * @ref debug_commands to issue the command.
 * @param dev The device this sysfs file is for
2590 * @param attr The attributes of the sysfs file
2591 * @param buf The value written to the sysfs file
2592 * @param count The number of bytes written to the sysfs file
2594 * @return @c count if the function succeeded. An error code on failure.
2596 static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2598 struct kbase_device *kbdev;
2601 kbdev = to_kbase_device(dev);
2606 for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
2607 if (sysfs_streq(debug_commands[i].str, buf)) {
2608 debug_commands[i].func(kbdev);
2613 /* Debug Command not found */
2614 dev_err(dev, "debug_command: command not known\n");
2618 /** The sysfs file @c debug_command.
2620 * This is used to issue general debug commands to the device driver.
2621 * Reading it will produce a list of debug commands, separated by newlines.
2622 * Writing to it with one of those commands will issue said command.
2624 static DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug);
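/*
 * Illustrative usage (not part of the original source): reading the file
 * lists the supported commands ("dumptrace" being the only one defined above),
 * and writing a listed command back, e.g.
 *   echo dumptrace > /sys/class/misc/mali0/device/debug_command
 * invokes the matching handler (here kbasep_trace_dump()).
 */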
2625 #endif /* CONFIG_MALI_DEBUG */
2628 * kbase_show_gpuinfo - Show callback for the gpuinfo sysfs entry.
2629 * @dev: The device this sysfs file is for.
2630 * @attr: The attributes of the sysfs file.
2631 * @buf: The output buffer to receive the GPU information.
2633 * This function is called to get a description of the present Mali
2634 * GPU via the gpuinfo sysfs entry. This includes the GPU family, the
 * number of cores, the hardware version and the raw product id. For
 * example:
 *
 *   Mali-T60x MP4 r0p0 0x6956
 *
2640 * Return: The number of bytes output to buf.
2642 static ssize_t kbase_show_gpuinfo(struct device *dev,
2643 struct device_attribute *attr, char *buf)
2645 static const struct gpu_product_id_name {
2648 } gpu_product_id_names[] = {
2649 { .id = GPU_ID_PI_T60X, .name = "Mali-T60x" },
2650 { .id = GPU_ID_PI_T62X, .name = "Mali-T62x" },
2651 { .id = GPU_ID_PI_T72X, .name = "Mali-T72x" },
2652 { .id = GPU_ID_PI_T76X, .name = "Mali-T76x" },
2653 { .id = GPU_ID_PI_T82X, .name = "Mali-T82x" },
2654 { .id = GPU_ID_PI_T83X, .name = "Mali-T83x" },
2655 { .id = GPU_ID_PI_T86X, .name = "Mali-T86x" },
2656 { .id = GPU_ID_PI_TFRX, .name = "Mali-T88x" },
2658 const char *product_name = "(Unknown Mali GPU)";
2659 struct kbase_device *kbdev;
2661 unsigned product_id, product_id_mask;
2665 kbdev = to_kbase_device(dev);
2669 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
2670 product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
2671 is_new_format = GPU_ID_IS_NEW_FORMAT(product_id);
	product_id_mask =
		(is_new_format ?
			GPU_ID2_PRODUCT_MODEL :
			GPU_ID_VERSION_PRODUCT_ID) >>
		GPU_ID_VERSION_PRODUCT_ID_SHIFT;
2678 for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
2679 const struct gpu_product_id_name *p = &gpu_product_id_names[i];
2681 if ((GPU_ID_IS_NEW_FORMAT(p->id) == is_new_format) &&
2682 (p->id & product_id_mask) ==
2683 (product_id & product_id_mask)) {
2684 product_name = p->name;
2689 return scnprintf(buf, PAGE_SIZE, "%s MP%d r%dp%d 0x%04X\n",
2690 product_name, kbdev->gpu_props.num_cores,
2691 (gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT,
		(gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT,
		product_id);
}
2695 static DEVICE_ATTR(gpuinfo, S_IRUGO, kbase_show_gpuinfo, NULL);
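/*
 * Example output (illustrative values): "Mali-T76x MP4 r0p1 0x0750", i.e.
 * product name, core count, version r<major>p<minor> and the raw product id;
 * unrecognised ids are reported as "(Unknown Mali GPU)".
 */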
2698 * set_dvfs_period - Store callback for the dvfs_period sysfs file.
 * @dev: The device this sysfs file is for
2700 * @attr: The attributes of the sysfs file
2701 * @buf: The value written to the sysfs file
2702 * @count: The number of bytes written to the sysfs file
2704 * This function is called when the dvfs_period sysfs file is written to. It
 * checks the data written, and if valid updates the DVFS period variable.
2707 * Return: @c count if the function succeeded. An error code on failure.
2709 static ssize_t set_dvfs_period(struct device *dev,
2710 struct device_attribute *attr, const char *buf, size_t count)
2712 struct kbase_device *kbdev;
2716 kbdev = to_kbase_device(dev);
2720 ret = kstrtoint(buf, 0, &dvfs_period);
2721 if (ret || dvfs_period <= 0) {
2722 dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n"
2723 "Use format <dvfs_period_ms>\n");
2727 kbdev->pm.dvfs_period = dvfs_period;
2728 dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period);
2734 * show_dvfs_period - Show callback for the dvfs_period sysfs entry.
2735 * @dev: The device this sysfs file is for.
2736 * @attr: The attributes of the sysfs file.
 * @buf: The output buffer for the DVFS period value.
 *
 * This function is called to get the current period used for the DVFS sample
 * timer.
 *
2742 * Return: The number of bytes output to buf.
2744 static ssize_t show_dvfs_period(struct device *dev,
2745 struct device_attribute *attr, char * const buf)
2747 struct kbase_device *kbdev;
2750 kbdev = to_kbase_device(dev);
2754 ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period);
static DEVICE_ATTR(dvfs_period, S_IRUGO | S_IWUSR, show_dvfs_period,
		set_dvfs_period);
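/*
 * Illustrative usage (not part of the original source):
 *   echo 100 > /sys/class/misc/mali0/device/dvfs_period
 * sets a 100 ms DVFS sampling period, and a read returns the current value in
 * milliseconds; values <= 0 are rejected by set_dvfs_period() above.
 */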
2763 * set_pm_poweroff - Store callback for the pm_poweroff sysfs file.
 * @dev: The device this sysfs file is for
2765 * @attr: The attributes of the sysfs file
2766 * @buf: The value written to the sysfs file
2767 * @count: The number of bytes written to the sysfs file
2769 * This function is called when the pm_poweroff sysfs file is written to.
2771 * This file contains three values separated by whitespace. The values
2772 * are gpu_poweroff_time (the period of the poweroff timer, in ns),
2773 * poweroff_shader_ticks (the number of poweroff timer ticks before an idle
2774 * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer
2775 * ticks before the GPU is powered off), in that order.
2777 * Return: @c count if the function succeeded. An error code on failure.
2779 static ssize_t set_pm_poweroff(struct device *dev,
2780 struct device_attribute *attr, const char *buf, size_t count)
	struct kbase_device *kbdev;
	int items;
	u64 gpu_poweroff_time;
	int poweroff_shader_ticks, poweroff_gpu_ticks;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	items = sscanf(buf, "%llu %d %d", &gpu_poweroff_time,
			&poweroff_shader_ticks,
			&poweroff_gpu_ticks);
	if (items != 3) {
		dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n"
				"Use format <gpu_poweroff_time_ns> <poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
		return -EINVAL;
	}
2800 kbdev->pm.gpu_poweroff_time = HR_TIMER_DELAY_NSEC(gpu_poweroff_time);
2801 kbdev->pm.poweroff_shader_ticks = poweroff_shader_ticks;
2802 kbdev->pm.poweroff_gpu_ticks = poweroff_gpu_ticks;
2808 * show_pm_poweroff - Show callback for the pm_poweroff sysfs entry.
2809 * @dev: The device this sysfs file is for.
2810 * @attr: The attributes of the sysfs file.
 * @buf: The output buffer for the power-off parameters.
 *
 * This function is called to get the current power-off timer period and the
 * shader/GPU power-off tick counts.
 *
2816 * Return: The number of bytes output to buf.
2818 static ssize_t show_pm_poweroff(struct device *dev,
2819 struct device_attribute *attr, char * const buf)
2821 struct kbase_device *kbdev;
2824 kbdev = to_kbase_device(dev);
2828 ret = scnprintf(buf, PAGE_SIZE, "%llu %u %u\n",
2829 ktime_to_ns(kbdev->pm.gpu_poweroff_time),
2830 kbdev->pm.poweroff_shader_ticks,
2831 kbdev->pm.poweroff_gpu_ticks);
static DEVICE_ATTR(pm_poweroff, S_IRUGO | S_IWUSR, show_pm_poweroff,
		set_pm_poweroff);
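/*
 * Illustrative usage (example values only, not from the original source):
 *   echo "500000 3 8" > /sys/class/misc/mali0/device/pm_poweroff
 * arms the power-off timer with a 500 us period and powers down idle shader
 * cores after 3 ticks and the whole GPU after 8 ticks, matching the three
 * fields described above.
 */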
2840 * set_reset_timeout - Store callback for the reset_timeout sysfs file.
 * @dev: The device this sysfs file is for
2842 * @attr: The attributes of the sysfs file
2843 * @buf: The value written to the sysfs file
2844 * @count: The number of bytes written to the sysfs file
2846 * This function is called when the reset_timeout sysfs file is written to. It
2847 * checks the data written, and if valid updates the reset timeout.
2849 * Return: @c count if the function succeeded. An error code on failure.
2851 static ssize_t set_reset_timeout(struct device *dev,
2852 struct device_attribute *attr, const char *buf, size_t count)
2854 struct kbase_device *kbdev;
2858 kbdev = to_kbase_device(dev);
2862 ret = kstrtoint(buf, 0, &reset_timeout);
2863 if (ret || reset_timeout <= 0) {
2864 dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n"
2865 "Use format <reset_timeout_ms>\n");
2869 kbdev->reset_timeout_ms = reset_timeout;
2870 dev_dbg(kbdev->dev, "Reset timeout: %dms\n", reset_timeout);
2876 * show_reset_timeout - Show callback for the reset_timeout sysfs entry.
2877 * @dev: The device this sysfs file is for.
2878 * @attr: The attributes of the sysfs file.
 * @buf: The output buffer for the reset timeout value.
 *
2881 * This function is called to get the current reset timeout.
2883 * Return: The number of bytes output to buf.
2885 static ssize_t show_reset_timeout(struct device *dev,
2886 struct device_attribute *attr, char * const buf)
2888 struct kbase_device *kbdev;
2891 kbdev = to_kbase_device(dev);
2895 ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms);
static DEVICE_ATTR(reset_timeout, S_IRUGO | S_IWUSR, show_reset_timeout,
		set_reset_timeout);
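/*
 * Illustrative usage (not part of the original source):
 *   echo 800 > /sys/class/misc/mali0/device/reset_timeout
 * updates kbdev->reset_timeout_ms to 800 ms; the value is consumed elsewhere
 * in the driver when waiting for the GPU to reset.
 */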
2905 static ssize_t show_mem_pool_size(struct device *dev,
2906 struct device_attribute *attr, char * const buf)
2908 struct kbase_device *kbdev;
2911 kbdev = to_kbase_device(dev);
2915 ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
2916 kbase_mem_pool_size(&kbdev->mem_pool));
2921 static ssize_t set_mem_pool_size(struct device *dev,
2922 struct device_attribute *attr, const char *buf, size_t count)
	struct kbase_device *kbdev;
	size_t new_size;
	int err;

	kbdev = to_kbase_device(dev);

	err = kstrtoul(buf, 0, (unsigned long *)&new_size);
	if (err)
		return err;
2936 kbase_mem_pool_trim(&kbdev->mem_pool, new_size);
static DEVICE_ATTR(mem_pool_size, S_IRUGO | S_IWUSR, show_mem_pool_size,
		set_mem_pool_size);
2944 static ssize_t show_mem_pool_max_size(struct device *dev,
2945 struct device_attribute *attr, char * const buf)
2947 struct kbase_device *kbdev;
2950 kbdev = to_kbase_device(dev);
2954 ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
2955 kbase_mem_pool_max_size(&kbdev->mem_pool));
2960 static ssize_t set_mem_pool_max_size(struct device *dev,
2961 struct device_attribute *attr, const char *buf, size_t count)
2963 struct kbase_device *kbdev;
2964 size_t new_max_size;
2967 kbdev = to_kbase_device(dev);
2971 err = kstrtoul(buf, 0, (unsigned long *)&new_max_size);
2975 kbase_mem_pool_set_max_size(&kbdev->mem_pool, new_max_size);
2980 static DEVICE_ATTR(mem_pool_max_size, S_IRUGO | S_IWUSR, show_mem_pool_max_size,
2981 set_mem_pool_max_size);
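/*
 * Note (clarification, not part of the original source): writing
 * mem_pool_size trims the global page pool to the requested number of pages
 * immediately via kbase_mem_pool_trim(), whereas mem_pool_max_size only caps
 * how large the pool may grow; e.g. "echo 0 > .../mem_pool_size" asks for all
 * pooled pages to be released.
 */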
2985 static int kbasep_secure_mode_init(struct kbase_device *kbdev)
2988 #ifdef SECURE_CALLBACKS
2989 kbdev->secure_ops = SECURE_CALLBACKS;
2990 kbdev->secure_mode_support = false;
2992 if (kbdev->secure_ops) {
2995 /* Make sure secure mode is disabled on startup */
2996 err = kbdev->secure_ops->secure_mode_disable(kbdev);
2998 /* secure_mode_disable() returns -EINVAL if not supported */
2999 kbdev->secure_mode_support = (err != -EINVAL);
3006 #ifdef CONFIG_MALI_NO_MALI
3007 static int kbase_common_reg_map(struct kbase_device *kbdev)
3011 static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
3014 #else /* CONFIG_MALI_NO_MALI */
3015 static int kbase_common_reg_map(struct kbase_device *kbdev)
3019 if (!request_mem_region(kbdev->reg_start, kbdev->reg_size, dev_name(kbdev->dev))) {
3020 dev_err(kbdev->dev, "Register window unavailable\n");
3025 kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size);
3027 dev_err(kbdev->dev, "Can't remap register window\n");
3035 release_mem_region(kbdev->reg_start, kbdev->reg_size);
3040 static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
3042 iounmap(kbdev->reg);
3043 release_mem_region(kbdev->reg_start, kbdev->reg_size);
3045 #endif /* CONFIG_MALI_NO_MALI */
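/*
 * Note (clarification, not part of the original source): on real hardware the
 * register window is claimed with request_mem_region() and mapped with
 * ioremap(), and kbase_common_reg_unmap() undoes both; the CONFIG_MALI_NO_MALI
 * build stubs these out because the dummy GPU model provides no MMIO region.
 */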
3048 #ifdef CONFIG_DEBUG_FS
3050 #if KBASE_GPU_RESET_EN
3051 #include <mali_kbase_hwaccess_jm.h>
3053 static void trigger_quirks_reload(struct kbase_device *kbdev)
3055 kbase_pm_context_active(kbdev);
3056 if (kbase_prepare_to_reset_gpu(kbdev))
3057 kbase_reset_gpu(kbdev);
3058 kbase_pm_context_idle(kbdev);
3061 #define MAKE_QUIRK_ACCESSORS(type) \
3062 static int type##_quirks_set(void *data, u64 val) \
3064 struct kbase_device *kbdev; \
3065 kbdev = (struct kbase_device *)data; \
3066 kbdev->hw_quirks_##type = (u32)val; \
3067 trigger_quirks_reload(kbdev); \
3071 static int type##_quirks_get(void *data, u64 *val) \
3073 struct kbase_device *kbdev;\
3074 kbdev = (struct kbase_device *)data;\
3075 *val = kbdev->hw_quirks_##type;\
3078 DEFINE_SIMPLE_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get,\
3079 type##_quirks_set, "%llu\n")
3081 MAKE_QUIRK_ACCESSORS(sc);
3082 MAKE_QUIRK_ACCESSORS(tiler);
3083 MAKE_QUIRK_ACCESSORS(mmu);
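/*
 * For reference (expansion sketch, not part of the original source),
 * MAKE_QUIRK_ACCESSORS(sc) above generates roughly:
 *
 *   static int sc_quirks_set(void *data, u64 val)
 *   {
 *       struct kbase_device *kbdev = data;
 *
 *       kbdev->hw_quirks_sc = (u32)val;
 *       trigger_quirks_reload(kbdev);  // reset so the new value is latched
 *       return 0;
 *   }
 *
 * plus the matching sc_quirks_get() and a fops_sc_quirks attribute, wired to
 * the quirks_sc debugfs file created in kbase_device_debugfs_init() below.
 */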
3085 #endif /* KBASE_GPU_RESET_EN */
3087 static int kbasep_secure_mode_seq_show(struct seq_file *m, void *p)
3089 struct kbase_device *kbdev = m->private;
3091 if (!kbdev->secure_mode_support)
3092 seq_puts(m, "unsupported\n");
3094 seq_printf(m, "%s\n", kbdev->secure_mode ? "Y" : "N");
3099 static int kbasep_secure_mode_debugfs_open(struct inode *in, struct file *file)
3101 return single_open(file, kbasep_secure_mode_seq_show, in->i_private);
3104 static const struct file_operations kbasep_secure_mode_debugfs_fops = {
3105 .open = kbasep_secure_mode_debugfs_open,
3107 .llseek = seq_lseek,
3108 .release = single_release,
3111 static int kbase_device_debugfs_init(struct kbase_device *kbdev)
3113 struct dentry *debugfs_ctx_defaults_directory;
3116 kbdev->mali_debugfs_directory = debugfs_create_dir(kbdev->devname,
3118 if (!kbdev->mali_debugfs_directory) {
3119 dev_err(kbdev->dev, "Couldn't create mali debugfs directory\n");
3124 kbdev->debugfs_ctx_directory = debugfs_create_dir("ctx",
3125 kbdev->mali_debugfs_directory);
3126 if (!kbdev->debugfs_ctx_directory) {
3127 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx directory\n");
3132 debugfs_ctx_defaults_directory = debugfs_create_dir("defaults",
3133 kbdev->debugfs_ctx_directory);
3134 if (!debugfs_ctx_defaults_directory) {
3135 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx defaults directory\n");
3140 #if !MALI_CUSTOMER_RELEASE
3141 kbasep_regs_dump_debugfs_add(kbdev);
3142 #endif /* !MALI_CUSTOMER_RELEASE */
3144 kbase_debug_job_fault_debugfs_init(kbdev);
3145 kbasep_gpu_memory_debugfs_init(kbdev);
3146 #if KBASE_GPU_RESET_EN
3147 debugfs_create_file("quirks_sc", 0644,
			kbdev->mali_debugfs_directory, kbdev,
			&fops_sc_quirks);
3150 debugfs_create_file("quirks_tiler", 0644,
3151 kbdev->mali_debugfs_directory, kbdev,
3152 &fops_tiler_quirks);
3153 debugfs_create_file("quirks_mmu", 0644,
			kbdev->mali_debugfs_directory, kbdev,
			&fops_mmu_quirks);
3156 #endif /* KBASE_GPU_RESET_EN */
3158 #ifndef CONFIG_MALI_COH_USER
3159 debugfs_create_bool("infinite_cache", 0644,
3160 debugfs_ctx_defaults_directory,
3161 &kbdev->infinite_cache_active_default);
3162 #endif /* CONFIG_MALI_COH_USER */
3164 debugfs_create_size_t("mem_pool_max_size", 0644,
3165 debugfs_ctx_defaults_directory,
3166 &kbdev->mem_pool_max_size_default);
3168 #if KBASE_TRACE_ENABLE
3169 kbasep_trace_debugfs_init(kbdev);
3170 #endif /* KBASE_TRACE_ENABLE */
3172 #ifdef CONFIG_MALI_TRACE_TIMELINE
3173 kbasep_trace_timeline_debugfs_init(kbdev);
3174 #endif /* CONFIG_MALI_TRACE_TIMELINE */
3176 debugfs_create_file("secure_mode", S_IRUGO,
3177 kbdev->mali_debugfs_directory, kbdev,
3178 &kbasep_secure_mode_debugfs_fops);
3183 debugfs_remove_recursive(kbdev->mali_debugfs_directory);
3187 static void kbase_device_debugfs_term(struct kbase_device *kbdev)
3189 debugfs_remove_recursive(kbdev->mali_debugfs_directory);
3192 #else /* CONFIG_DEBUG_FS */
3193 static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
3198 static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }
3199 #endif /* CONFIG_DEBUG_FS */
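/*
 * Resulting debugfs layout (illustrative, assuming debugfs is mounted at
 * /sys/kernel/debug): <debugfs>/mali0/ holds quirks_sc, quirks_tiler,
 * quirks_mmu and secure_mode, plus a ctx/defaults/ directory containing the
 * per-context defaults such as mem_pool_max_size and (where enabled)
 * infinite_cache.
 */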
3201 static void kbase_device_coherency_init(struct kbase_device *kbdev, u32 gpu_id)
#ifdef CONFIG_OF
	u32 supported_coherency_bitmap =
3205 kbdev->gpu_props.props.raw_props.coherency_mode;
3206 const void *coherency_override_dts;
3207 u32 override_coherency;
3208 #endif /* CONFIG_OF */
3210 kbdev->system_coherency = COHERENCY_NONE;
3212 /* device tree may override the coherency */
#ifdef CONFIG_OF
	coherency_override_dts = of_get_property(kbdev->dev->of_node,
						"system-coherency",
						NULL);
3217 if (coherency_override_dts) {
3219 override_coherency = be32_to_cpup(coherency_override_dts);
3221 if ((override_coherency <= COHERENCY_NONE) &&
3222 (supported_coherency_bitmap &
3223 COHERENCY_FEATURE_BIT(override_coherency))) {
3225 kbdev->system_coherency = override_coherency;
3227 dev_info(kbdev->dev,
3228 "Using coherency mode %u set from dtb",
3229 override_coherency);
3231 dev_warn(kbdev->dev,
3232 "Ignoring unsupported coherency mode %u set from dtb",
3233 override_coherency);
3236 #endif /* CONFIG_OF */
3238 kbdev->gpu_props.props.raw_props.coherency_mode =
3239 kbdev->system_coherency;
3242 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3244 /* Callback used by the kbase bus logger client, to initiate a GPU reset
3245 * when the bus log is restarted. GPU reset is used as reference point
3246 * in HW bus log analyses.
3248 static void kbase_logging_started_cb(void *data)
3250 struct kbase_device *kbdev = (struct kbase_device *)data;
3252 if (kbase_prepare_to_reset_gpu(kbdev))
3253 kbase_reset_gpu(kbdev);
3254 dev_info(kbdev->dev, "KBASE - Bus logger restarted\n");
3259 static int kbase_common_device_init(struct kbase_device *kbdev)
3262 struct mali_base_gpu_core_props *core_props;
3264 inited_mem = (1u << 0),
3265 inited_js = (1u << 1),
3266 inited_pm_runtime_init = (1u << 6),
3267 #ifdef CONFIG_MALI_DEVFREQ
3268 inited_devfreq = (1u << 9),
3269 #endif /* CONFIG_MALI_DEVFREQ */
3270 #ifdef CONFIG_MALI_MIPE_ENABLED
3271 inited_tlstream = (1u << 10),
3272 #endif /* CONFIG_MALI_MIPE_ENABLED */
3273 inited_backend_early = (1u << 11),
3274 inited_backend_late = (1u << 12),
3275 inited_device = (1u << 13),
3276 inited_vinstr = (1u << 19),
3277 inited_ipa = (1u << 20),
3278 inited_job_fault = (1u << 21)
3283 #if defined(CONFIG_MALI_PLATFORM_VEXPRESS)
3284 u32 ve_logic_tile = 0;
3285 #endif /* CONFIG_MALI_PLATFORM_VEXPRESS */
3287 dev_set_drvdata(kbdev->dev, kbdev);
3289 err = kbase_backend_early_init(kbdev);
3292 inited |= inited_backend_early;
3294 scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name,
3297 kbase_disjoint_init(kbdev);
3299 /* obtain min/max configured gpu frequencies */
3300 core_props = &(kbdev->gpu_props.props.core_props);
3302 /* For versatile express platforms, min and max values of GPU frequency
3303 * depend on the type of the logic tile; these values may not be known
3304 * at the build time so in some cases a platform config file with wrong
 * GPU frequency values may be included; to ensure the correct value of
3306 * min and max GPU frequency is obtained, the type of the logic tile is
3307 * read from the corresponding register on the platform and frequency
3308 * values assigned accordingly.*/
3309 #if defined(CONFIG_MALI_PLATFORM_VEXPRESS)
3310 ve_logic_tile = kbase_get_platform_logic_tile_type();
3312 switch (ve_logic_tile) {
3314 /* Virtex 6, HBI0217 */
3315 core_props->gpu_freq_khz_min = VE_VIRTEX6_GPU_FREQ_MIN;
3316 core_props->gpu_freq_khz_max = VE_VIRTEX6_GPU_FREQ_MAX;
3319 /* Virtex 7, HBI0247 */
3320 core_props->gpu_freq_khz_min = VE_VIRTEX7_GPU_FREQ_MIN;
3321 core_props->gpu_freq_khz_max = VE_VIRTEX7_GPU_FREQ_MAX;
3324 /* all other logic tiles, i.e., Virtex 5 HBI0192
3325 * or unsuccessful reading from the platform -
3326 * fall back to the config_platform default */
3327 core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN;
3328 core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
3332 core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN;
3333 core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
3334 #endif /* CONFIG_MALI_PLATFORM_VEXPRESS */
3336 kbdev->gpu_props.irq_throttle_time_us = DEFAULT_IRQ_THROTTLE_TIME_US;
3338 err = kbase_device_init(kbdev);
3340 dev_err(kbdev->dev, "Can't initialize device (%d)\n", err);
3344 inited |= inited_device;
3346 if (kbdev->pm.callback_power_runtime_init) {
3347 err = kbdev->pm.callback_power_runtime_init(kbdev);
3351 inited |= inited_pm_runtime_init;
3354 err = kbase_mem_init(kbdev);
3358 inited |= inited_mem;
3360 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
3361 gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
3362 gpu_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
3364 kbase_device_coherency_init(kbdev, gpu_id);
3366 err = kbasep_secure_mode_init(kbdev);
3370 err = kbasep_js_devdata_init(kbdev);
3374 inited |= inited_js;
3376 #ifdef CONFIG_MALI_MIPE_ENABLED
3377 err = kbase_tlstream_init();
3379 dev_err(kbdev->dev, "Couldn't initialize timeline stream\n");
3382 inited |= inited_tlstream;
3383 #endif /* CONFIG_MALI_MIPE_ENABLED */
3385 err = kbase_backend_late_init(kbdev);
3388 inited |= inited_backend_late;
3390 #ifdef CONFIG_MALI_DEVFREQ
3391 err = kbase_devfreq_init(kbdev);
3393 dev_err(kbdev->dev, "Couldn't initialize devfreq\n");
3396 inited |= inited_devfreq;
3397 #endif /* CONFIG_MALI_DEVFREQ */
3399 kbdev->vinstr_ctx = kbase_vinstr_init(kbdev);
3400 if (!kbdev->vinstr_ctx) {
3401 dev_err(kbdev->dev, "Can't initialize virtual instrumentation core\n");
3405 inited |= inited_vinstr;
3407 kbdev->ipa_ctx = kbase_ipa_init(kbdev);
3408 if (!kbdev->ipa_ctx) {
3409 dev_err(kbdev->dev, "Can't initialize IPA\n");
3413 inited |= inited_ipa;
3415 err = kbase_debug_job_fault_dev_init(kbdev);
3419 inited |= inited_job_fault;
3421 err = kbase_device_debugfs_init(kbdev);
	/* initialise the kctx list */
3426 mutex_init(&kbdev->kctx_list_lock);
3427 INIT_LIST_HEAD(&kbdev->kctx_list);
3429 kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
3430 kbdev->mdev.name = kbdev->devname;
3431 kbdev->mdev.fops = &kbase_fops;
3432 kbdev->mdev.parent = get_device(kbdev->dev);
3434 err = misc_register(&kbdev->mdev);
3436 dev_err(kbdev->dev, "Couldn't register misc dev %s\n", kbdev->devname);
3441 const struct list_head *dev_list = kbase_dev_list_get();
3443 list_add(&kbdev->entry, &kbase_dev_list);
3444 kbase_dev_list_put(dev_list);
3447 dev_info(kbdev->dev, "Probed as %s\n", dev_name(kbdev->mdev.this_device));
3454 put_device(kbdev->dev);
3455 kbase_device_debugfs_term(kbdev);
3457 if (inited & inited_job_fault)
3458 kbase_debug_job_fault_dev_term(kbdev);
3459 if (inited & inited_ipa)
3460 kbase_ipa_term(kbdev->ipa_ctx);
3461 if (inited & inited_vinstr)
3462 kbase_vinstr_term(kbdev->vinstr_ctx);
3463 #ifdef CONFIG_MALI_DEVFREQ
3464 if (inited & inited_devfreq)
3465 kbase_devfreq_term(kbdev);
3466 #endif /* CONFIG_MALI_DEVFREQ */
3467 if (inited & inited_backend_late)
3468 kbase_backend_late_term(kbdev);
3469 #ifdef CONFIG_MALI_MIPE_ENABLED
3470 if (inited & inited_tlstream)
3471 kbase_tlstream_term();
3472 #endif /* CONFIG_MALI_MIPE_ENABLED */
3474 if (inited & inited_js)
3475 kbasep_js_devdata_halt(kbdev);
3477 if (inited & inited_mem)
3478 kbase_mem_halt(kbdev);
3480 if (inited & inited_js)
3481 kbasep_js_devdata_term(kbdev);
3483 if (inited & inited_mem)
3484 kbase_mem_term(kbdev);
3486 if (inited & inited_pm_runtime_init) {
3487 if (kbdev->pm.callback_power_runtime_term)
3488 kbdev->pm.callback_power_runtime_term(kbdev);
3491 if (inited & inited_device)
3492 kbase_device_term(kbdev);
3494 if (inited & inited_backend_early)
3495 kbase_backend_early_term(kbdev);
3501 static struct attribute *kbase_attrs[] = {
3502 #ifdef CONFIG_MALI_DEBUG
3503 &dev_attr_debug_command.attr,
3504 &dev_attr_js_softstop_always.attr,
3506 #if !MALI_CUSTOMER_RELEASE
3507 &dev_attr_force_replay.attr,
3509 &dev_attr_js_timeouts.attr,
3510 &dev_attr_gpuinfo.attr,
3511 &dev_attr_dvfs_period.attr,
3512 &dev_attr_pm_poweroff.attr,
3513 &dev_attr_reset_timeout.attr,
3514 &dev_attr_js_scheduling_period.attr,
3515 &dev_attr_power_policy.attr,
3516 &dev_attr_core_availability_policy.attr,
3517 &dev_attr_core_mask.attr,
3518 &dev_attr_mem_pool_size.attr,
3519 &dev_attr_mem_pool_max_size.attr,
3523 static const struct attribute_group kbase_attr_group = {
3524 .attrs = kbase_attrs,
3527 static int kbase_common_device_remove(struct kbase_device *kbdev);
3529 static int kbase_platform_device_probe(struct platform_device *pdev)
3531 struct kbase_device *kbdev;
3532 struct resource *reg_res;
3536 printk(KERN_INFO "arm_release_ver of this mali_ko is '%s', rk_ko_ver is '%d', built at '%s', on '%s'.",
3543 err = kbase_platform_early_init();
3545 dev_err(&pdev->dev, "Early platform initialization failed\n");
3550 kbdev = kbase_device_alloc();
3552 dev_err(&pdev->dev, "Can't allocate device\n");
3556 #ifdef CONFIG_MALI_NO_MALI
3557 err = gpu_device_create(kbdev);
3559 dev_err(&pdev->dev, "Can't initialize dummy model\n");
3562 #endif /* CONFIG_MALI_NO_MALI */
3564 kbdev->dev = &pdev->dev;
3565 /* 3 IRQ resources */
3566 for (i = 0; i < 3; i++) {
3567 struct resource *irq_res;
3570 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
3572 dev_err(kbdev->dev, "No IRQ resource at index %d\n", i);
3574 goto out_platform_irq;
3578 if (!strcmp(irq_res->name, "JOB")) {
3579 irqtag = JOB_IRQ_TAG;
3580 } else if (!strcmp(irq_res->name, "MMU")) {
3581 irqtag = MMU_IRQ_TAG;
3582 } else if (!strcmp(irq_res->name, "GPU")) {
3583 irqtag = GPU_IRQ_TAG;
3585 dev_err(&pdev->dev, "Invalid irq res name: '%s'\n",
3592 #endif /* CONFIG_OF */
3593 kbdev->irqs[irqtag].irq = irq_res->start;
3594 kbdev->irqs[irqtag].flags = (irq_res->flags & IRQF_TRIGGER_MASK);
	/* the first memory resource is the physical address of the GPU
	 * registers */
3598 reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3600 dev_err(kbdev->dev, "Invalid register resource\n");
3602 goto out_platform_mem;
3605 kbdev->reg_start = reg_res->start;
3606 kbdev->reg_size = resource_size(reg_res);
3608 err = kbase_common_reg_map(kbdev);
3612 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
3613 && defined(CONFIG_REGULATOR)
3614 kbdev->regulator = regulator_get_optional(kbdev->dev, "mali");
3615 if (IS_ERR_OR_NULL(kbdev->regulator)) {
3616 err = PTR_ERR(kbdev->regulator);
3618 kbdev->regulator = NULL;
3619 if (err == -EPROBE_DEFER)
3621 dev_info(kbdev->dev, "Continuing without Mali regulator control\n");
3622 /* Allow probe to continue without regulator */
3624 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
3626 #ifdef CONFIG_MALI_PLATFORM_DEVICETREE
3627 pm_runtime_enable(kbdev->dev);
3630 kbdev->clock = clk_get(kbdev->dev, "clk_mali");
3631 if (IS_ERR_OR_NULL(kbdev->clock)) {
3632 err = PTR_ERR(kbdev->clock);
3634 kbdev->clock = NULL;
3635 if (err == -EPROBE_DEFER)
3636 goto out_clock_prepare;
3637 dev_info(kbdev->dev, "Continuing without Mali clock control\n");
3638 /* Allow probe to continue without clock. */
3640 err = clk_prepare_enable(kbdev->clock);
3643 "Failed to prepare and enable clock (%d)\n", err);
3644 goto out_clock_prepare;
3648 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) && defined(CONFIG_OF) \
3649 && defined(CONFIG_PM_OPP)
3650 /* Register the OPPs if they are available in device tree */
3651 if (of_init_opp_table(kbdev->dev) < 0)
3652 dev_dbg(kbdev->dev, "OPP table not found\n");
3656 err = kbase_common_device_init(kbdev);
3658 dev_err(kbdev->dev, "Failed kbase_common_device_init\n");
3659 goto out_common_init;
3662 err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
3664 dev_err(&pdev->dev, "Failed to create sysfs entries\n");
3668 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3669 err = bl_core_client_register(kbdev->devname,
3670 kbase_logging_started_cb,
3671 kbdev, &kbdev->buslogger,
3674 dev_err(kbdev->dev, "Couldn't register bus log client\n");
3675 goto out_bl_core_register;
3678 bl_core_set_threshold(kbdev->buslogger, 1024*1024*1024);
3682 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3683 out_bl_core_register:
3684 sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
3688 kbase_common_device_remove(kbdev);
3690 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
3691 of_free_opp_table(kbdev->dev);
3693 clk_disable_unprepare(kbdev->clock);
3695 clk_put(kbdev->clock);
3696 #ifdef CONFIG_MALI_PLATFORM_DEVICETREE
3697 pm_runtime_disable(kbdev->dev);
3699 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
3700 && defined(CONFIG_REGULATOR)
3702 regulator_put(kbdev->regulator);
3703 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
3704 kbase_common_reg_unmap(kbdev);
3711 #ifdef CONFIG_MALI_NO_MALI
3712 gpu_device_destroy(kbdev);
3714 #endif /* CONFIG_MALI_NO_MALI */
3715 kbase_device_free(kbdev);
3720 static int kbase_common_device_remove(struct kbase_device *kbdev)
3722 kbase_debug_job_fault_dev_term(kbdev);
3723 kbase_ipa_term(kbdev->ipa_ctx);
3724 kbase_vinstr_term(kbdev->vinstr_ctx);
3725 sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
3727 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3728 if (kbdev->buslogger)
3729 bl_core_client_unregister(kbdev->buslogger);
3732 #ifdef CONFIG_DEBUG_FS
3733 debugfs_remove_recursive(kbdev->mali_debugfs_directory);
3735 #ifdef CONFIG_MALI_DEVFREQ
3736 kbase_devfreq_term(kbdev);
3739 kbase_backend_late_term(kbdev);
3741 if (kbdev->pm.callback_power_runtime_term)
3742 kbdev->pm.callback_power_runtime_term(kbdev);
3743 #ifdef CONFIG_MALI_PLATFORM_DEVICETREE
3744 pm_runtime_disable(kbdev->dev);
3747 #ifdef CONFIG_MALI_MIPE_ENABLED
3748 kbase_tlstream_term();
3749 #endif /* CONFIG_MALI_MIPE_ENABLED */
3751 kbasep_js_devdata_halt(kbdev);
3752 kbase_mem_halt(kbdev);
3754 kbasep_js_devdata_term(kbdev);
3755 kbase_mem_term(kbdev);
3756 kbase_backend_early_term(kbdev);
3759 const struct list_head *dev_list = kbase_dev_list_get();
3761 list_del(&kbdev->entry);
3762 kbase_dev_list_put(dev_list);
3764 misc_deregister(&kbdev->mdev);
3765 put_device(kbdev->dev);
3766 kbase_common_reg_unmap(kbdev);
3767 kbase_device_term(kbdev);
3769 clk_disable_unprepare(kbdev->clock);
3770 clk_put(kbdev->clock);
3771 kbdev->clock = NULL;
3773 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
3774 && defined(CONFIG_REGULATOR)
3775 regulator_put(kbdev->regulator);
3776 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
3777 #ifdef CONFIG_MALI_NO_MALI
3778 gpu_device_destroy(kbdev);
3779 #endif /* CONFIG_MALI_NO_MALI */
3780 kbase_device_free(kbdev);
3785 static int kbase_platform_device_remove(struct platform_device *pdev)
3787 struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
3792 return kbase_common_device_remove(kbdev);
3795 /** Suspend callback from the OS.
3797 * This is called by Linux when the device should suspend.
3799 * @param dev The device to suspend
3801 * @return A standard Linux error code
3803 static int kbase_device_suspend(struct device *dev)
3805 struct kbase_device *kbdev = to_kbase_device(dev);
3810 #if defined(CONFIG_PM_DEVFREQ) && \
3811 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3812 devfreq_suspend_device(kbdev->devfreq);
3815 kbase_pm_suspend(kbdev);
3819 /** Resume callback from the OS.
3821 * This is called by Linux when the device should resume from suspension.
3823 * @param dev The device to resume
3825 * @return A standard Linux error code
3827 static int kbase_device_resume(struct device *dev)
3829 struct kbase_device *kbdev = to_kbase_device(dev);
3834 kbase_pm_resume(kbdev);
3836 #if defined(CONFIG_PM_DEVFREQ) && \
3837 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3838 devfreq_resume_device(kbdev->devfreq);
3843 /** Runtime suspend callback from the OS.
3845 * This is called by Linux when the device should prepare for a condition in which it will
3846 * not be able to communicate with the CPU(s) and RAM due to power management.
3848 * @param dev The device to suspend
3850 * @return A standard Linux error code
3852 #ifdef KBASE_PM_RUNTIME
3853 static int kbase_device_runtime_suspend(struct device *dev)
3855 struct kbase_device *kbdev = to_kbase_device(dev);
3860 #if defined(CONFIG_PM_DEVFREQ) && \
3861 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3862 devfreq_suspend_device(kbdev->devfreq);
3865 if (kbdev->pm.backend.callback_power_runtime_off) {
3866 kbdev->pm.backend.callback_power_runtime_off(kbdev);
3867 dev_dbg(dev, "runtime suspend\n");
3871 #endif /* KBASE_PM_RUNTIME */
3873 /** Runtime resume callback from the OS.
3875 * This is called by Linux when the device should go into a fully active state.
 * @param dev The device to resume
3879 * @return A standard Linux error code
3882 #ifdef KBASE_PM_RUNTIME
3883 int kbase_device_runtime_resume(struct device *dev)
3886 struct kbase_device *kbdev = to_kbase_device(dev);
3891 if (kbdev->pm.backend.callback_power_runtime_on) {
3892 ret = kbdev->pm.backend.callback_power_runtime_on(kbdev);
3893 dev_dbg(dev, "runtime resume\n");
3896 #if defined(CONFIG_PM_DEVFREQ) && \
3897 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3898 devfreq_resume_device(kbdev->devfreq);
3903 #endif /* KBASE_PM_RUNTIME */
3906 #ifdef KBASE_PM_RUNTIME
3908 * kbase_device_runtime_idle - Runtime idle callback from the OS.
3909 * @dev: The device to suspend
3911 * This is called by Linux when the device appears to be inactive and it might
3912 * be placed into a low power state.
3914 * Return: 0 if device can be suspended, non-zero to avoid runtime autosuspend,
3915 * otherwise a standard Linux error code
3917 static int kbase_device_runtime_idle(struct device *dev)
3919 struct kbase_device *kbdev = to_kbase_device(dev);
3924 /* Use platform specific implementation if it exists. */
3925 if (kbdev->pm.backend.callback_power_runtime_idle)
3926 return kbdev->pm.backend.callback_power_runtime_idle(kbdev);
3930 #endif /* KBASE_PM_RUNTIME */
3932 /** The power management operations for the platform driver.
3934 static const struct dev_pm_ops kbase_pm_ops = {
3935 .suspend = kbase_device_suspend,
3936 .resume = kbase_device_resume,
3937 #ifdef KBASE_PM_RUNTIME
3938 .runtime_suspend = kbase_device_runtime_suspend,
3939 .runtime_resume = kbase_device_runtime_resume,
3940 .runtime_idle = kbase_device_runtime_idle,
3941 #endif /* KBASE_PM_RUNTIME */
3945 static const struct of_device_id kbase_dt_ids[] = {
3946 { .compatible = "arm,malit7xx" },
3947 { .compatible = "arm,mali-midgard" },
3950 MODULE_DEVICE_TABLE(of, kbase_dt_ids);
3953 static struct platform_driver kbase_platform_driver = {
3954 .probe = kbase_platform_device_probe,
3955 .remove = kbase_platform_device_remove,
3957 .name = kbase_drv_name,
3958 .owner = THIS_MODULE,
3959 .pm = &kbase_pm_ops,
3960 .of_match_table = of_match_ptr(kbase_dt_ids),
 * When using Device Tree, the driver no longer provides a shortcut to create
 * the Mali platform device.
3969 module_platform_driver(kbase_platform_driver);
3972 static int __init rockchip_gpu_init_driver(void)
3974 return platform_driver_register(&kbase_platform_driver);
3976 late_initcall(rockchip_gpu_init_driver);
3978 static int __init kbase_driver_init(void)
3982 ret = kbase_platform_early_init();
3986 #ifndef CONFIG_MACH_MANTA
3987 #ifdef CONFIG_MALI_PLATFORM_FAKE
3988 ret = kbase_platform_fake_register();
3993 ret = platform_driver_register(&kbase_platform_driver);
3994 #ifndef CONFIG_MACH_MANTA
3995 #ifdef CONFIG_MALI_PLATFORM_FAKE
3997 kbase_platform_fake_unregister();
4003 static void __exit kbase_driver_exit(void)
4005 platform_driver_unregister(&kbase_platform_driver);
4006 #ifndef CONFIG_MACH_MANTA
4007 #ifdef CONFIG_MALI_PLATFORM_FAKE
4008 kbase_platform_fake_unregister();
4013 module_init(kbase_driver_init);
4014 module_exit(kbase_driver_exit);
4016 #endif /* CONFIG_OF */
4018 MODULE_LICENSE("GPL");
4019 MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \
4020 __stringify(BASE_UK_VERSION_MAJOR) "." \
4021 __stringify(BASE_UK_VERSION_MINOR) ")");
4023 #if defined(CONFIG_MALI_GATOR_SUPPORT) || defined(CONFIG_MALI_SYSTEM_TRACE)
4024 #define CREATE_TRACE_POINTS
4027 #ifdef CONFIG_MALI_GATOR_SUPPORT
4028 /* Create the trace points (otherwise we just get code to call a tracepoint) */
4029 #include "mali_linux_trace.h"
4031 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_job_slots_event);
4032 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_status);
4033 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_on);
4034 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_off);
4035 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_page_fault_insert_pages);
4036 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_in_use);
4037 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_released);
4038 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_total_alloc_pages_change);
4039 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_sw_counter);
4041 void kbase_trace_mali_pm_status(u32 event, u64 value)
4043 trace_mali_pm_status(event, value);
4046 void kbase_trace_mali_pm_power_off(u32 event, u64 value)
4048 trace_mali_pm_power_off(event, value);
4051 void kbase_trace_mali_pm_power_on(u32 event, u64 value)
4053 trace_mali_pm_power_on(event, value);
4056 void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id)
4058 trace_mali_job_slots_event(event, (kctx != NULL ? kctx->tgid : 0), (kctx != NULL ? kctx->pid : 0), atom_id);
4061 void kbase_trace_mali_page_fault_insert_pages(int event, u32 value)
4063 trace_mali_page_fault_insert_pages(event, value);
4066 void kbase_trace_mali_mmu_as_in_use(int event)
4068 trace_mali_mmu_as_in_use(event);
4071 void kbase_trace_mali_mmu_as_released(int event)
4073 trace_mali_mmu_as_released(event);
4076 void kbase_trace_mali_total_alloc_pages_change(long long int event)
4078 trace_mali_total_alloc_pages_change(event);
4080 #endif /* CONFIG_MALI_GATOR_SUPPORT */
4081 #ifdef CONFIG_MALI_SYSTEM_TRACE
4082 #include "mali_linux_kbase_trace.h"