4 * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
6 * This program is free software and is provided to you under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation, and any use by you of this program is subject to the terms
11 * A copy of the licence is included with the program, and can also be obtained
12 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
13 * Boston, MA 02110-1301, USA.
17 #define ENABLE_DEBUG_LOG
18 #include "platform/rk/custom_log.h"
20 #include <mali_kbase.h>
21 #include <mali_kbase_hwaccess_gpuprops.h>
22 #include <mali_kbase_config_defaults.h>
23 #include <mali_kbase_uku.h>
24 #include <mali_midg_regmap.h>
25 #include <mali_kbase_instr.h>
26 #include <mali_kbase_gator.h>
27 #include <backend/gpu/mali_kbase_js_affinity.h>
28 #include <mali_kbase_mem_linux.h>
29 #ifdef CONFIG_MALI_DEVFREQ
30 #include <backend/gpu/mali_kbase_devfreq.h>
31 #endif /* CONFIG_MALI_DEVFREQ */
32 #ifdef CONFIG_MALI_NO_MALI
33 #include "mali_kbase_model_linux.h"
34 #endif /* CONFIG_MALI_NO_MALI */
35 #include "mali_kbase_mem_profile_debugfs_buf_size.h"
36 #include "mali_kbase_debug_mem_view.h"
37 #include "mali_kbase_mem.h"
38 #include "mali_kbase_mem_pool_debugfs.h"
39 #if !MALI_CUSTOMER_RELEASE
40 #include "mali_kbase_regs_dump_debugfs.h"
41 #endif /* !MALI_CUSTOMER_RELEASE */
42 #include <mali_kbase_hwaccess_backend.h>
43 #include <mali_kbase_hwaccess_jm.h>
44 #include <backend/gpu/mali_kbase_device_internal.h>
47 #include <linux/kds.h>
48 #include <linux/anon_inodes.h>
49 #include <linux/syscalls.h>
50 #endif /* CONFIG_KDS */
52 #include <linux/pm_runtime.h>
53 #include <linux/module.h>
54 #include <linux/init.h>
55 #include <linux/poll.h>
56 #include <linux/kernel.h>
57 #include <linux/errno.h>
59 #include <linux/platform_device.h>
60 #include <linux/miscdevice.h>
61 #include <linux/list.h>
62 #include <linux/semaphore.h>
64 #include <linux/uaccess.h>
65 #include <linux/interrupt.h>
68 #include <linux/compat.h> /* is_compat_task */
69 #include <linux/mman.h>
70 #include <linux/version.h>
71 #include <linux/security.h>
72 #ifdef CONFIG_MALI_PLATFORM_DEVICETREE
73 #include <linux/pm_runtime.h>
74 #endif /* CONFIG_MALI_PLATFORM_DEVICETREE */
75 #include <mali_kbase_hw.h>
76 #include <platform/mali_kbase_platform_common.h>
77 #ifdef CONFIG_MALI_PLATFORM_FAKE
78 #include <platform/mali_kbase_platform_fake.h>
79 #endif /*CONFIG_MALI_PLATFORM_FAKE */
81 #include <mali_kbase_sync.h>
82 #endif /* CONFIG_SYNC */
83 #ifdef CONFIG_PM_DEVFREQ
84 #include <linux/devfreq.h>
85 #endif /* CONFIG_PM_DEVFREQ */
86 #include <linux/clk.h>
87 #include <linux/delay.h>
89 #include <mali_kbase_config.h>
91 #ifdef CONFIG_MACH_MANTA
92 #include <plat/devs.h>
95 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
96 #include <linux/pm_opp.h>
98 #include <linux/opp.h>
101 #if defined(CONFIG_MALI_MIPE_ENABLED)
102 #include <mali_kbase_tlstream.h>
106 #define JOB_IRQ_TAG 0
107 #define MMU_IRQ_TAG 1
108 #define GPU_IRQ_TAG 2
111 static struct kbase_exported_test_data shared_kernel_test_data;
112 EXPORT_SYMBOL(shared_kernel_test_data);
113 #endif /* MALI_UNIT_TEST */
115 #define KBASE_DRV_NAME "mali"
116 /** rk_ext: version of the rk extension in this mali kernel module, a.k.a. rk_ko_ver. */
117 #define ROCKCHIP_VERSION (13)
119 static const char kbase_drv_name[] = KBASE_DRV_NAME;
121 static int kbase_dev_nr;
123 static DEFINE_MUTEX(kbase_dev_list_lock);
124 static LIST_HEAD(kbase_dev_list);
126 #define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
127 static inline void __compile_time_asserts(void)
129 CSTD_COMPILE_TIME_ASSERT(sizeof(KERNEL_SIDE_DDK_VERSION_STRING) <= KBASE_GET_VERSION_BUFFER_SIZE);
134 struct kbasep_kds_resource_set_file_data {
135 struct kds_resource_set *lock;
138 static int kds_resource_release(struct inode *inode, struct file *file);
140 static const struct file_operations kds_resource_fops = {
141 .release = kds_resource_release
144 struct kbase_kds_resource_list_data {
145 struct kds_resource **kds_resources;
146 unsigned long *kds_access_bitmap;
150 static int kds_resource_release(struct inode *inode, struct file *file)
152 struct kbasep_kds_resource_set_file_data *data;
154 data = (struct kbasep_kds_resource_set_file_data *)file->private_data;
156 if (NULL != data->lock)
157 kds_resource_set_release(&data->lock);
164 static int kbasep_kds_allocate_resource_list_data(struct kbase_context *kctx, struct base_external_resource *ext_res, int num_elems, struct kbase_kds_resource_list_data *resources_list)
166 struct base_external_resource *res = ext_res;
169 /* assume we have to wait for all */
171 KBASE_DEBUG_ASSERT(0 != num_elems);
172 resources_list->kds_resources = kmalloc_array(num_elems,
173 sizeof(struct kds_resource *), GFP_KERNEL);
175 if (NULL == resources_list->kds_resources)
178 KBASE_DEBUG_ASSERT(0 != num_elems);
179 resources_list->kds_access_bitmap = kzalloc(
180 sizeof(unsigned long) *
181 ((num_elems + BITS_PER_LONG - 1) / BITS_PER_LONG),
184 if (NULL == resources_list->kds_access_bitmap) {
185 kfree(resources_list->kds_access_bitmap);
189 kbase_gpu_vm_lock(kctx);
190 for (res_id = 0; res_id < num_elems; res_id++, res++) {
192 struct kbase_va_region *reg;
193 struct kds_resource *kds_res = NULL;
195 exclusive = res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE;
196 reg = kbase_region_tracker_find_region_enclosing_address(kctx, res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
198 /* did we find a matching region object? */
199 if (NULL == reg || (reg->flags & KBASE_REG_FREE))
202 /* no need to check reg->alloc, as only regions with an alloc have
203 * a size, and kbase_region_tracker_find_region_enclosing_address
204 * only returns regions with size > 0 */
205 switch (reg->gpu_alloc->type) {
206 #if defined(CONFIG_UMP) && defined(CONFIG_KDS)
207 case KBASE_MEM_TYPE_IMPORTED_UMP:
208 kds_res = ump_dd_kds_resource_get(reg->gpu_alloc->imported.ump_handle);
210 #endif /* defined(CONFIG_UMP) && defined(CONFIG_KDS) */
215 /* no kds resource for the region ? */
219 resources_list->kds_resources[res_id] = kds_res;
222 set_bit(res_id, resources_list->kds_access_bitmap);
224 kbase_gpu_vm_unlock(kctx);
226 /* did the loop run to completion? */
227 if (res_id == num_elems)
230 /* Clean up as the resource list is not valid. */
231 kfree(resources_list->kds_resources);
232 kfree(resources_list->kds_access_bitmap);
237 static bool kbasep_validate_kbase_pointer(
238 struct kbase_context *kctx, union kbase_pointer *p)
240 if (kctx->is_compat) {
241 if (p->compat_value == 0)
244 if (NULL == p->value)
250 static int kbase_external_buffer_lock(struct kbase_context *kctx,
251 struct kbase_uk_ext_buff_kds_data *args, u32 args_size)
253 struct base_external_resource *ext_res_copy;
254 size_t ext_resource_size;
257 struct base_external_resource __user *ext_res_user;
258 int __user *file_desc_usr;
259 struct kbasep_kds_resource_set_file_data *fdata;
260 struct kbase_kds_resource_list_data resource_list_data;
262 if (args_size != sizeof(struct kbase_uk_ext_buff_kds_data))
265 /* Check user space has provided valid data */
266 if (!kbasep_validate_kbase_pointer(kctx, &args->external_resource) ||
267 !kbasep_validate_kbase_pointer(kctx, &args->file_descriptor) ||
268 (0 == args->num_res) ||
269 (args->num_res > KBASE_MAXIMUM_EXT_RESOURCES))
272 ext_resource_size = sizeof(struct base_external_resource) * args->num_res;
274 KBASE_DEBUG_ASSERT(0 != ext_resource_size);
275 ext_res_copy = kmalloc(ext_resource_size, GFP_KERNEL);
280 if (kctx->is_compat) {
281 ext_res_user = compat_ptr(args->external_resource.compat_value);
282 file_desc_usr = compat_ptr(args->file_descriptor.compat_value);
284 #endif /* CONFIG_COMPAT */
285 ext_res_user = args->external_resource.value;
286 file_desc_usr = args->file_descriptor.value;
289 #endif /* CONFIG_COMPAT */
291 /* Copy the external resources to lock from user space */
292 if (copy_from_user(ext_res_copy, ext_res_user, ext_resource_size))
295 /* Allocate data to be stored in the file */
296 fdata = kmalloc(sizeof(*fdata), GFP_KERNEL);
303 /* Parse given elements and create resource and access lists */
304 ret = kbasep_kds_allocate_resource_list_data(kctx,
305 ext_res_copy, args->num_res, &resource_list_data);
311 fd = anon_inode_getfd("kds_ext", &kds_resource_fops, fdata, 0);
313 err = copy_to_user(file_desc_usr, &fd, sizeof(fd));
315 /* If the file descriptor was valid and we successfully copied
316 * it to user space, then we can try and lock the requested
319 if ((fd >= 0) && (0 == err)) {
320 struct kds_resource_set *lock;
322 lock = kds_waitall(args->num_res,
323 resource_list_data.kds_access_bitmap,
324 resource_list_data.kds_resources,
329 } else if (IS_ERR(lock)) {
339 kfree(resource_list_data.kds_resources);
340 kfree(resource_list_data.kds_access_bitmap);
344 /* If the file was opened successfully then close it, which will
345 * clean up the file data; otherwise we clean up the file data
358 #endif /* CONFIG_KDS */
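/*
 * Illustrative userspace sketch (an assumption, not part of the original
 * driver): KBASE_FUNC_EXT_BUFFER_LOCK is driven by filling a
 * struct kbase_uk_ext_buff_kds_data and dispatching it through the UK ioctl
 * path; the returned fd pins the KDS resource set until it is closed, at
 * which point kds_resource_release() above drops the locks:
 *
 *   data.external_resource.value = resources;      // base_external_resource[]
 *   data.file_descriptor.value   = &kds_fd;        // receives the anon fd
 *   data.num_res                 = num_resources;
 *   // ... issue the UK call, then later ...
 *   close(kds_fd);                                  // releases the resource set
 */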
360 #ifdef CONFIG_MALI_MIPE_ENABLED
361 static void kbase_create_timeline_objects(struct kbase_context *kctx)
363 struct kbase_device *kbdev = kctx->kbdev;
366 struct kbasep_kctx_list_element *element;
368 /* Create LPU objects. */
369 for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
371 &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
372 kbase_tlstream_tl_summary_new_lpu(lpu, lpu_id, *lpu);
375 /* Create Address Space objects. */
376 for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
377 kbase_tlstream_tl_summary_new_as(&kbdev->as[as_nr], as_nr);
379 /* Create GPU object and make it retain all LPUs and address spaces. */
380 kbase_tlstream_tl_summary_new_gpu(
382 kbdev->gpu_props.props.raw_props.gpu_id,
383 kbdev->gpu_props.num_cores);
385 for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
387 &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
388 kbase_tlstream_tl_summary_lifelink_lpu_gpu(lpu, kbdev);
390 for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
391 kbase_tlstream_tl_summary_lifelink_as_gpu(
395 /* Create object for each known context. */
396 mutex_lock(&kbdev->kctx_list_lock);
397 list_for_each_entry(element, &kbdev->kctx_list, link) {
398 kbase_tlstream_tl_summary_new_ctx(
400 (u32)(element->kctx->id));
402 /* Before releasing the lock, reset the body stream buffers.
403 * This prevents context creation messages from being directed to both
404 * the summary and body streams. */
405 kbase_tlstream_reset_body_streams();
406 mutex_unlock(&kbdev->kctx_list_lock);
407 /* Static objects are placed into the summary packet, which needs to be
408 * transmitted first. Flush all streams to make it available to
410 kbase_tlstream_flush_streams();
414 static void kbase_api_handshake(struct uku_version_check_args *version)
416 switch (version->major) {
417 #ifdef BASE_LEGACY_UK6_SUPPORT
419 /* We are backwards compatible with version 6,
420 * so pretend to be the old version */
424 #endif /* BASE_LEGACY_UK6_SUPPORT */
425 #ifdef BASE_LEGACY_UK7_SUPPORT
427 /* We are backwards compatible with version 7,
428 * so pretend to be the old version */
432 #endif /* BASE_LEGACY_UK7_SUPPORT */
433 #ifdef BASE_LEGACY_UK8_SUPPORT
435 /* We are backwards compatible with version 8,
436 * so pretend to be the old version */
440 #endif /* BASE_LEGACY_UK8_SUPPORT */
441 #ifdef BASE_LEGACY_UK9_SUPPORT
443 /* We are backwards compatible with version 9,
444 * so pretend to be the old version */
448 #endif /* BASE_LEGACY_UK9_SUPPORT */
449 case BASE_UK_VERSION_MAJOR:
450 /* set minor to be the lowest common */
451 version->minor = min_t(int, BASE_UK_VERSION_MINOR,
452 (int)version->minor);
455 /* We return our actual version regardless of whether it
456 * matches the version requested by userspace -
457 * userspace can bail if it can't handle this
459 version->major = BASE_UK_VERSION_MAJOR;
460 version->minor = BASE_UK_VERSION_MINOR;
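/*
 * Example of the handshake above (illustrative): if userspace proposes major
 * version 6 and BASE_LEGACY_UK6_SUPPORT is built in, the kernel pretends to
 * be that legacy version; if the major matches BASE_UK_VERSION_MAJOR, the
 * minor is clamped to the lower of the two; otherwise the kernel reports its
 * own BASE_UK_VERSION_MAJOR/BASE_UK_VERSION_MINOR and userspace may bail out.
 */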
466 * enum mali_error - Mali error codes shared with userspace
468 * This is a subset of the common Mali errors that can be returned to userspace.
469 * Values of matching user and kernel space enumerators MUST be the same.
470 * MALI_ERROR_NONE is guaranteed to be 0.
474 MALI_ERROR_OUT_OF_GPU_MEMORY,
475 MALI_ERROR_OUT_OF_MEMORY,
476 MALI_ERROR_FUNCTION_FAILED,
479 #ifdef CONFIG_MALI_DEBUG
480 #define INACTIVE_WAIT_MS (5000)
482 void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive)
484 kbdev->driver_inactive = inactive;
485 wake_up(&kbdev->driver_inactive_wait);
487 /* Wait for any running IOCTLs to complete */
489 msleep(INACTIVE_WAIT_MS);
491 KBASE_EXPORT_TEST_API(kbase_set_driver_inactive);
492 #endif /* CONFIG_MALI_DEBUG */
494 static int kbase_dispatch(struct kbase_context *kctx, void * const args, u32 args_size)
496 struct kbase_device *kbdev;
497 union uk_header *ukh = args;
501 KBASE_DEBUG_ASSERT(ukh != NULL);
505 ukh->ret = MALI_ERROR_NONE; /* Be optimistic */
507 #ifdef CONFIG_MALI_DEBUG
508 wait_event(kbdev->driver_inactive_wait,
509 kbdev->driver_inactive == false);
510 #endif /* CONFIG_MALI_DEBUG */
512 if (UKP_FUNC_ID_CHECK_VERSION == id) {
513 struct uku_version_check_args *version_check;
515 if (args_size != sizeof(struct uku_version_check_args)) {
516 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
519 version_check = (struct uku_version_check_args *)args;
520 kbase_api_handshake(version_check);
521 /* save the proposed version number for later use */
522 kctx->api_version = KBASE_API_VERSION(version_check->major,
523 version_check->minor);
524 ukh->ret = MALI_ERROR_NONE;
528 /* block calls until version handshake */
529 if (kctx->api_version == 0)
532 if (!atomic_read(&kctx->setup_complete)) {
533 struct kbase_uk_set_flags *kbase_set_flags;
535 /* setup is pending, so try to signal that we'll do the setup;
536 * if setup was already in progress, fail this call
538 if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0)
541 /* if this is an unexpected call, we will stay stuck in setup mode
542 * (SET_FLAGS is the only call accepted at this point)
544 if (id != KBASE_FUNC_SET_FLAGS)
547 kbase_set_flags = (struct kbase_uk_set_flags *)args;
549 /* if not matching the expected call, stay in setup mode */
550 if (sizeof(*kbase_set_flags) != args_size)
553 /* if bad flags, will stay stuck in setup mode */
554 if (kbase_context_set_create_flags(kctx,
555 kbase_set_flags->create_flags) != 0)
556 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
558 atomic_set(&kctx->setup_complete, 1);
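/*
 * Summary of the setup flow above: a new context must first complete the
 * UKP_FUNC_ID_CHECK_VERSION handshake, then issue KBASE_FUNC_SET_FLAGS;
 * only once setup_complete is set are the remaining KBASE_FUNC_* calls
 * below dispatched.
 */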
562 /* setup complete, perform normal operation */
564 case KBASE_FUNC_MEM_ALLOC:
566 struct kbase_uk_mem_alloc *mem = args;
567 struct kbase_va_region *reg;
569 if (sizeof(*mem) != args_size)
572 reg = kbase_mem_alloc(kctx, mem->va_pages,
573 mem->commit_pages, mem->extent,
574 &mem->flags, &mem->gpu_va,
577 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
580 case KBASE_FUNC_MEM_IMPORT: {
581 struct kbase_uk_mem_import *mem_import = args;
582 void __user *phandle;
584 if (sizeof(*mem_import) != args_size)
588 phandle = compat_ptr(mem_import->phandle.compat_value);
591 phandle = mem_import->phandle.value;
593 if (mem_import->type == BASE_MEM_IMPORT_TYPE_INVALID) {
594 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
598 if (kbase_mem_import(kctx, mem_import->type, phandle,
600 &mem_import->va_pages,
601 &mem_import->flags)) {
602 mem_import->type = BASE_MEM_IMPORT_TYPE_INVALID;
603 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
607 case KBASE_FUNC_MEM_ALIAS: {
608 struct kbase_uk_mem_alias *alias = args;
609 struct base_mem_aliasing_info __user *user_ai;
610 struct base_mem_aliasing_info *ai;
612 if (sizeof(*alias) != args_size)
615 if (alias->nents > 2048) {
616 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
620 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
626 user_ai = compat_ptr(alias->ai.compat_value);
629 user_ai = alias->ai.value;
631 ai = vmalloc(sizeof(*ai) * alias->nents);
634 ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
638 if (copy_from_user(ai, user_ai,
639 sizeof(*ai) * alias->nents)) {
640 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
644 alias->gpu_va = kbase_mem_alias(kctx, &alias->flags,
648 if (!alias->gpu_va) {
649 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
657 case KBASE_FUNC_MEM_COMMIT:
659 struct kbase_uk_mem_commit *commit = args;
661 if (sizeof(*commit) != args_size)
664 if (commit->gpu_addr & ~PAGE_MASK) {
665 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_COMMIT: commit->gpu_addr: passed parameter is invalid");
666 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
670 if (kbase_mem_commit(kctx, commit->gpu_addr,
672 (base_backing_threshold_status *)
673 &commit->result_subcode) != 0)
674 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
679 case KBASE_FUNC_MEM_QUERY:
681 struct kbase_uk_mem_query *query = args;
683 if (sizeof(*query) != args_size)
686 if (query->gpu_addr & ~PAGE_MASK) {
687 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->gpu_addr: passed parameter is invalid");
688 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
691 if (query->query != KBASE_MEM_QUERY_COMMIT_SIZE &&
692 query->query != KBASE_MEM_QUERY_VA_SIZE &&
693 query->query != KBASE_MEM_QUERY_FLAGS) {
694 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->query = %lld unknown", (unsigned long long)query->query);
695 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
699 if (kbase_mem_query(kctx, query->gpu_addr,
700 query->query, &query->value) != 0)
701 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
703 ukh->ret = MALI_ERROR_NONE;
708 case KBASE_FUNC_MEM_FLAGS_CHANGE:
710 struct kbase_uk_mem_flags_change *fc = args;
712 if (sizeof(*fc) != args_size)
715 if ((fc->gpu_va & ~PAGE_MASK) && (fc->gpu_va >= PAGE_SIZE)) {
716 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FLAGS_CHANGE: mem->gpu_va: passed parameter is invalid");
717 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
721 if (kbase_mem_flags_change(kctx, fc->gpu_va,
722 fc->flags, fc->mask) != 0)
723 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
727 case KBASE_FUNC_MEM_FREE:
729 struct kbase_uk_mem_free *mem = args;
731 if (sizeof(*mem) != args_size)
734 if ((mem->gpu_addr & ~PAGE_MASK) && (mem->gpu_addr >= PAGE_SIZE)) {
735 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FREE: mem->gpu_addr: passed parameter is invalid");
736 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
740 if (kbase_mem_free(kctx, mem->gpu_addr) != 0)
741 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
745 case KBASE_FUNC_JOB_SUBMIT:
747 struct kbase_uk_job_submit *job = args;
749 if (sizeof(*job) != args_size)
752 #ifdef BASE_LEGACY_UK6_SUPPORT
753 if (kbase_jd_submit(kctx, job, 0) != 0)
755 if (kbase_jd_submit(kctx, job) != 0)
756 #endif /* BASE_LEGACY_UK6_SUPPORT */
757 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
761 #ifdef BASE_LEGACY_UK6_SUPPORT
762 case KBASE_FUNC_JOB_SUBMIT_UK6:
764 struct kbase_uk_job_submit *job = args;
766 if (sizeof(*job) != args_size)
769 if (kbase_jd_submit(kctx, job, 1) != 0)
770 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
775 case KBASE_FUNC_SYNC:
777 struct kbase_uk_sync_now *sn = args;
779 if (sizeof(*sn) != args_size)
782 if (sn->sset.basep_sset.mem_handle & ~PAGE_MASK) {
783 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_SYNC: sn->sset.basep_sset.mem_handle: passed parameter is invalid");
784 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
788 #ifndef CONFIG_MALI_COH_USER
789 if (kbase_sync_now(kctx, &sn->sset) != 0)
790 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
795 case KBASE_FUNC_DISJOINT_QUERY:
797 struct kbase_uk_disjoint_query *dquery = args;
799 if (sizeof(*dquery) != args_size)
802 /* Get the disjointness counter value. */
803 dquery->counter = kbase_disjoint_event_get(kctx->kbdev);
807 case KBASE_FUNC_POST_TERM:
809 kbase_event_close(kctx);
813 case KBASE_FUNC_HWCNT_SETUP:
815 struct kbase_uk_hwcnt_setup *setup = args;
817 if (sizeof(*setup) != args_size)
820 mutex_lock(&kctx->vinstr_cli_lock);
821 if (kbase_vinstr_legacy_hwc_setup(kbdev->vinstr_ctx,
822 &kctx->vinstr_cli, setup) != 0)
823 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
824 mutex_unlock(&kctx->vinstr_cli_lock);
828 case KBASE_FUNC_HWCNT_DUMP:
831 mutex_lock(&kctx->vinstr_cli_lock);
832 if (kbase_vinstr_hwc_dump(kctx->vinstr_cli,
833 BASE_HWCNT_READER_EVENT_MANUAL) != 0)
834 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
835 mutex_unlock(&kctx->vinstr_cli_lock);
839 case KBASE_FUNC_HWCNT_CLEAR:
842 mutex_lock(&kctx->vinstr_cli_lock);
843 if (kbase_vinstr_hwc_clear(kctx->vinstr_cli) != 0)
844 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
845 mutex_unlock(&kctx->vinstr_cli_lock);
849 case KBASE_FUNC_HWCNT_READER_SETUP:
851 struct kbase_uk_hwcnt_reader_setup *setup = args;
853 if (sizeof(*setup) != args_size)
856 mutex_lock(&kctx->vinstr_cli_lock);
857 if (kbase_vinstr_hwcnt_reader_setup(kbdev->vinstr_ctx,
859 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
860 mutex_unlock(&kctx->vinstr_cli_lock);
864 case KBASE_FUNC_GPU_PROPS_REG_DUMP:
866 struct kbase_uk_gpuprops *setup = args;
868 if (sizeof(*setup) != args_size)
871 if (kbase_gpuprops_uk_get_props(kctx, setup) != 0)
872 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
875 case KBASE_FUNC_FIND_CPU_OFFSET:
877 struct kbase_uk_find_cpu_offset *find = args;
879 if (sizeof(*find) != args_size)
882 if (find->gpu_addr & ~PAGE_MASK) {
883 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_FIND_CPU_OFFSET: find->gpu_addr: passed parameter is invalid");
887 if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX) {
888 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
892 err = kbasep_find_enclosing_cpu_mapping_offset(
895 (uintptr_t) find->cpu_addr,
900 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
904 case KBASE_FUNC_GET_VERSION:
906 struct kbase_uk_get_ddk_version *get_version = (struct kbase_uk_get_ddk_version *)args;
908 if (sizeof(*get_version) != args_size)
911 /* version buffer size check is made in a compile-time assert */
912 memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
913 get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
914 get_version->rk_version = ROCKCHIP_VERSION;
918 case KBASE_FUNC_STREAM_CREATE:
921 struct kbase_uk_stream_create *screate = (struct kbase_uk_stream_create *)args;
923 if (sizeof(*screate) != args_size)
926 if (strnlen(screate->name, sizeof(screate->name)) >= sizeof(screate->name)) {
927 /* not NULL terminated */
928 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
932 if (kbase_stream_create(screate->name, &screate->fd) != 0)
933 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
935 ukh->ret = MALI_ERROR_NONE;
936 #else /* CONFIG_SYNC */
937 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
938 #endif /* CONFIG_SYNC */
941 case KBASE_FUNC_FENCE_VALIDATE:
944 struct kbase_uk_fence_validate *fence_validate = (struct kbase_uk_fence_validate *)args;
946 if (sizeof(*fence_validate) != args_size)
949 if (kbase_fence_validate(fence_validate->fd) != 0)
950 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
952 ukh->ret = MALI_ERROR_NONE;
953 #endif /* CONFIG_SYNC */
957 case KBASE_FUNC_EXT_BUFFER_LOCK:
960 ret = kbase_external_buffer_lock(kctx,
961 (struct kbase_uk_ext_buff_kds_data *)args,
965 ukh->ret = MALI_ERROR_NONE;
968 ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
971 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
973 #endif /* CONFIG_KDS */
977 case KBASE_FUNC_SET_TEST_DATA:
980 struct kbase_uk_set_test_data *set_data = args;
982 shared_kernel_test_data = set_data->test_data;
983 shared_kernel_test_data.kctx.value = (void __user *)kctx;
984 shared_kernel_test_data.mm.value = (void __user *)current->mm;
985 ukh->ret = MALI_ERROR_NONE;
986 #endif /* MALI_UNIT_TEST */
990 case KBASE_FUNC_INJECT_ERROR:
992 #ifdef CONFIG_MALI_ERROR_INJECT
994 struct kbase_error_params params = ((struct kbase_uk_error_params *)args)->params;
997 spin_lock_irqsave(&kbdev->reg_op_lock, flags);
998 if (job_atom_inject_error(&params) != 0)
999 ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
1001 ukh->ret = MALI_ERROR_NONE;
1002 spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
1004 #endif /* CONFIG_MALI_ERROR_INJECT */
1008 case KBASE_FUNC_MODEL_CONTROL:
1010 #ifdef CONFIG_MALI_NO_MALI
1011 unsigned long flags;
1012 struct kbase_model_control_params params =
1013 ((struct kbase_uk_model_control_params *)args)->params;
1016 spin_lock_irqsave(&kbdev->reg_op_lock, flags);
1017 if (gpu_model_control(kbdev->model, &params) != 0)
1018 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
1020 ukh->ret = MALI_ERROR_NONE;
1021 spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
1023 #endif /* CONFIG_MALI_NO_MALI */
1027 #ifdef BASE_LEGACY_UK8_SUPPORT
1028 case KBASE_FUNC_KEEP_GPU_POWERED:
1030 dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_KEEP_GPU_POWERED: function is deprecated and disabled\n");
1031 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
1034 #endif /* BASE_LEGACY_UK8_SUPPORT */
1036 case KBASE_FUNC_GET_PROFILING_CONTROLS:
1038 struct kbase_uk_profiling_controls *controls =
1039 (struct kbase_uk_profiling_controls *)args;
1042 if (sizeof(*controls) != args_size)
1045 for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
1046 controls->profiling_controls[i] = kbase_get_profiling_control(kbdev, i);
1051 /* used only for testing purposes; these controls are to be set by gator through gator API */
1052 case KBASE_FUNC_SET_PROFILING_CONTROLS:
1054 struct kbase_uk_profiling_controls *controls =
1055 (struct kbase_uk_profiling_controls *)args;
1058 if (sizeof(*controls) != args_size)
1061 for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
1062 _mali_profiling_control(i, controls->profiling_controls[i]);
1067 case KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD:
1069 struct kbase_uk_debugfs_mem_profile_add *add_data =
1070 (struct kbase_uk_debugfs_mem_profile_add *)args;
1072 char __user *user_buf;
1074 if (sizeof(*add_data) != args_size)
1077 if (add_data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
1078 dev_err(kbdev->dev, "buffer too big");
1082 #ifdef CONFIG_COMPAT
1083 if (kctx->is_compat)
1084 user_buf = compat_ptr(add_data->buf.compat_value);
1087 user_buf = add_data->buf.value;
1089 buf = kmalloc(add_data->len, GFP_KERNEL);
1093 if (0 != copy_from_user(buf, user_buf, add_data->len)) {
1094 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
1099 if (kbasep_mem_profile_debugfs_insert(kctx, buf,
1101 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
1109 #ifdef CONFIG_MALI_NO_MALI
1110 case KBASE_FUNC_SET_PRFCNT_VALUES:
1113 struct kbase_uk_prfcnt_values *params =
1114 ((struct kbase_uk_prfcnt_values *)args);
1115 gpu_model_set_dummy_prfcnt_sample(params->data,
1120 #endif /* CONFIG_MALI_NO_MALI */
1122 #ifdef CONFIG_MALI_MIPE_ENABLED
1123 case KBASE_FUNC_TLSTREAM_ACQUIRE:
1125 struct kbase_uk_tlstream_acquire *tlstream_acquire =
1128 if (sizeof(*tlstream_acquire) != args_size)
1131 if (0 != kbase_tlstream_acquire(
1133 &tlstream_acquire->fd)) {
1134 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
1135 } else if (0 <= tlstream_acquire->fd) {
1136 /* Summary stream was cleared during acquire.
1137 * Create static timeline objects that will be
1138 * read by the client. */
1139 kbase_create_timeline_objects(kctx);
1143 case KBASE_FUNC_TLSTREAM_FLUSH:
1145 struct kbase_uk_tlstream_flush *tlstream_flush =
1148 if (sizeof(*tlstream_flush) != args_size)
1151 kbase_tlstream_flush_streams();
1155 case KBASE_FUNC_TLSTREAM_TEST:
1157 struct kbase_uk_tlstream_test *tlstream_test = args;
1159 if (sizeof(*tlstream_test) != args_size)
1162 kbase_tlstream_test(
1163 tlstream_test->tpw_count,
1164 tlstream_test->msg_delay,
1165 tlstream_test->msg_count,
1166 tlstream_test->aux_msg);
1169 case KBASE_FUNC_TLSTREAM_STATS:
1171 struct kbase_uk_tlstream_stats *tlstream_stats = args;
1173 if (sizeof(*tlstream_stats) != args_size)
1176 kbase_tlstream_stats(
1177 &tlstream_stats->bytes_collected,
1178 &tlstream_stats->bytes_generated);
1181 #endif /* MALI_UNIT_TEST */
1182 #endif /* CONFIG_MALI_MIPE_ENABLED */
1184 case KBASE_FUNC_GET_CONTEXT_ID:
1186 struct kbase_uk_context_id *info = args;
1188 info->id = kctx->id;
1193 dev_err(kbdev->dev, "unknown ioctl %u", id);
1200 dev_err(kbdev->dev, "Wrong syscall size (%d) for %08x\n", args_size, id);
1205 static struct kbase_device *to_kbase_device(struct device *dev)
1207 return dev_get_drvdata(dev);
1211 * API to acquire the device list mutex and
1212 * return a pointer to the device list head
1214 const struct list_head *kbase_dev_list_get(void)
1216 mutex_lock(&kbase_dev_list_lock);
1217 return &kbase_dev_list;
1219 KBASE_EXPORT_TEST_API(kbase_dev_list_get);
1221 /* API to release the device list mutex */
1222 void kbase_dev_list_put(const struct list_head *dev_list)
1224 mutex_unlock(&kbase_dev_list_lock);
1226 KBASE_EXPORT_TEST_API(kbase_dev_list_put);
1228 /* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */
1229 struct kbase_device *kbase_find_device(int minor)
1231 struct kbase_device *kbdev = NULL;
1232 struct list_head *entry;
1233 const struct list_head *dev_list = kbase_dev_list_get();
1235 list_for_each(entry, dev_list) {
1236 struct kbase_device *tmp;
1238 tmp = list_entry(entry, struct kbase_device, entry);
1239 if (tmp->mdev.minor == minor || minor == -1) {
1241 get_device(kbdev->dev);
1245 kbase_dev_list_put(dev_list);
1249 EXPORT_SYMBOL(kbase_find_device);
1251 void kbase_release_device(struct kbase_device *kbdev)
1253 put_device(kbdev->dev);
1255 EXPORT_SYMBOL(kbase_release_device);
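/*
 * Usage note (not in the original source): kbase_find_device() takes a
 * reference on the underlying struct device via get_device(), so each
 * successful lookup must be balanced with kbase_release_device(), e.g.:
 *
 *   struct kbase_device *kbdev = kbase_find_device(-1);  // first device
 *   if (kbdev) {
 *           // ... use kbdev ...
 *           kbase_release_device(kbdev);
 *   }
 */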
1257 static int kbase_open(struct inode *inode, struct file *filp)
1259 struct kbase_device *kbdev = NULL;
1260 struct kbase_context *kctx;
1262 #ifdef CONFIG_DEBUG_FS
1266 kbdev = kbase_find_device(iminor(inode));
1271 kctx = kbase_create_context(kbdev, is_compat_task());
1277 init_waitqueue_head(&kctx->event_queue);
1278 filp->private_data = kctx;
1281 kctx->infinite_cache_active = kbdev->infinite_cache_active_default;
1283 #ifdef CONFIG_DEBUG_FS
1284 snprintf(kctx_name, 64, "%d_%d", kctx->tgid, kctx->id);
1286 kctx->kctx_dentry = debugfs_create_dir(kctx_name,
1287 kbdev->debugfs_ctx_directory);
1289 if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
1294 #ifdef CONFIG_MALI_COH_USER
1295 /* if cache is completely coherent at hardware level, then remove the
1296 * infinite cache control support from debugfs.
1299 debugfs_create_bool("infinite_cache", 0644, kctx->kctx_dentry,
1300 (bool*)&(kctx->infinite_cache_active));
1301 #endif /* CONFIG_MALI_COH_USER */
1303 mutex_init(&kctx->mem_profile_lock);
1305 kbasep_jd_debugfs_ctx_add(kctx);
1306 kbase_debug_mem_view_init(filp);
1308 kbase_debug_job_fault_context_init(kctx);
1310 kbase_mem_pool_debugfs_add(kctx->kctx_dentry, &kctx->mem_pool);
1312 #endif /* CONFIG_DEBUG_FS */
1314 dev_dbg(kbdev->dev, "created base context\n");
1317 struct kbasep_kctx_list_element *element;
1319 element = kzalloc(sizeof(*element), GFP_KERNEL);
1321 mutex_lock(&kbdev->kctx_list_lock);
1322 element->kctx = kctx;
1323 list_add(&element->link, &kbdev->kctx_list);
1324 #ifdef CONFIG_MALI_MIPE_ENABLED
1325 kbase_tlstream_tl_new_ctx(
1327 (u32)(element->kctx->id));
1329 mutex_unlock(&kbdev->kctx_list_lock);
1331 /* we don't treat this as a fail - just warn about it */
1332 dev_warn(kbdev->dev, "couldn't add kctx to kctx_list\n");
1338 kbase_release_device(kbdev);
1342 static int kbase_release(struct inode *inode, struct file *filp)
1344 struct kbase_context *kctx = filp->private_data;
1345 struct kbase_device *kbdev = kctx->kbdev;
1346 struct kbasep_kctx_list_element *element, *tmp;
1347 bool found_element = false;
1349 #ifdef CONFIG_MALI_MIPE_ENABLED
1350 kbase_tlstream_tl_del_ctx(kctx);
1353 #ifdef CONFIG_DEBUG_FS
1354 debugfs_remove_recursive(kctx->kctx_dentry);
1355 kbasep_mem_profile_debugfs_remove(kctx);
1356 kbase_debug_job_fault_context_term(kctx);
1359 mutex_lock(&kbdev->kctx_list_lock);
1360 list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
1361 if (element->kctx == kctx) {
1362 list_del(&element->link);
1364 found_element = true;
1367 mutex_unlock(&kbdev->kctx_list_lock);
1369 dev_warn(kbdev->dev, "kctx not in kctx_list\n");
1371 filp->private_data = NULL;
1373 mutex_lock(&kctx->vinstr_cli_lock);
1374 /* If this client was performing hwcnt dumping and did not explicitly
1375 * detach itself, remove it from the vinstr core now */
1376 if (kctx->vinstr_cli) {
1377 struct kbase_uk_hwcnt_setup setup;
1379 setup.dump_buffer = 0llu;
1380 kbase_vinstr_legacy_hwc_setup(
1381 kbdev->vinstr_ctx, &kctx->vinstr_cli, &setup);
1383 mutex_unlock(&kctx->vinstr_cli_lock);
1385 kbase_destroy_context(kctx);
1387 dev_dbg(kbdev->dev, "deleted base context\n");
1388 kbase_release_device(kbdev);
1392 #define CALL_MAX_SIZE 536
1394 static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1396 u64 msg[(CALL_MAX_SIZE + 7) >> 3] = { 0xdeadbeefdeadbeefull }; /* alignment fixup */
1397 u32 size = _IOC_SIZE(cmd);
1398 struct kbase_context *kctx = filp->private_data;
1400 if (size > CALL_MAX_SIZE)
1403 if (0 != copy_from_user(&msg, (void __user *)arg, size)) {
1404 dev_err(kctx->kbdev->dev, "failed to copy ioctl argument into kernel space\n");
1408 if (kbase_dispatch(kctx, &msg, size) != 0)
1411 if (0 != copy_to_user((void __user *)arg, &msg, size)) {
1412 dev_err(kctx->kbdev->dev, "failed to copy results of UK call back to user space\n");
1418 static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
1420 struct kbase_context *kctx = filp->private_data;
1421 struct base_jd_event_v2 uevent;
1424 if (count < sizeof(uevent))
1428 while (kbase_event_dequeue(kctx, &uevent)) {
1432 if (filp->f_flags & O_NONBLOCK)
1435 if (wait_event_interruptible(kctx->event_queue,
1436 kbase_event_pending(kctx)) != 0)
1437 return -ERESTARTSYS;
1439 if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
1445 if (copy_to_user(buf, &uevent, sizeof(uevent)) != 0)
1448 buf += sizeof(uevent);
1450 count -= sizeof(uevent);
1451 } while (count >= sizeof(uevent));
1454 return out_count * sizeof(uevent);
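/*
 * Illustrative userspace pattern for the read path above (an assumption, not
 * part of the driver): events are consumed in whole multiples of
 * sizeof(struct base_jd_event_v2), typically after poll() reports POLLIN:
 *
 *   struct base_jd_event_v2 events[8];
 *   ssize_t n = read(mali_fd, events, sizeof(events));
 *   // n is a multiple of sizeof(events[0]);
 *   // BASE_JD_EVENT_DRV_TERMINATED signals the context is being torn down.
 */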
1457 static unsigned int kbase_poll(struct file *filp, poll_table *wait)
1459 struct kbase_context *kctx = filp->private_data;
1461 poll_wait(filp, &kctx->event_queue, wait);
1462 if (kbase_event_pending(kctx))
1463 return POLLIN | POLLRDNORM;
1468 void kbase_event_wakeup(struct kbase_context *kctx)
1470 KBASE_DEBUG_ASSERT(kctx);
1472 wake_up_interruptible(&kctx->event_queue);
1475 KBASE_EXPORT_TEST_API(kbase_event_wakeup);
1477 static int kbase_check_flags(int flags)
1479 /* Enforce that the driver keeps the O_CLOEXEC flag so that execve() always
1480 * closes the file descriptor in a child process.
1482 if (0 == (flags & O_CLOEXEC))
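/*
 * Note (assumption about the calling path): this is wired up as the
 * .check_flags file operation below, so it is consulted when userspace calls
 * fcntl(fd, F_SETFL, ...) on the device fd; any flag set that drops
 * O_CLOEXEC is refused with -EINVAL.
 */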
1489 /* The following function is taken from the kernel and just
1490 * renamed. As it's not exported to modules we must copy-paste it here.
1493 static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info
1496 struct mm_struct *mm = current->mm;
1497 struct vm_area_struct *vma;
1498 unsigned long length, low_limit, high_limit, gap_start, gap_end;
1500 /* Adjust search length to account for worst case alignment overhead */
1501 length = info->length + info->align_mask;
1502 if (length < info->length)
1506 * Adjust search limits by the desired length.
1507 * See implementation comment at top of unmapped_area().
1509 gap_end = info->high_limit;
1510 if (gap_end < length)
1512 high_limit = gap_end - length;
1514 if (info->low_limit > high_limit)
1516 low_limit = info->low_limit + length;
1518 /* Check highest gap, which does not precede any rbtree node */
1519 gap_start = mm->highest_vm_end;
1520 if (gap_start <= high_limit)
1523 /* Check if rbtree root looks promising */
1524 if (RB_EMPTY_ROOT(&mm->mm_rb))
1526 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1527 if (vma->rb_subtree_gap < length)
1531 /* Visit right subtree if it looks promising */
1532 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
1533 if (gap_start <= high_limit && vma->vm_rb.rb_right) {
1534 struct vm_area_struct *right =
1535 rb_entry(vma->vm_rb.rb_right,
1536 struct vm_area_struct, vm_rb);
1537 if (right->rb_subtree_gap >= length) {
1544 /* Check if current node has a suitable gap */
1545 gap_end = vma->vm_start;
1546 if (gap_end < low_limit)
1548 if (gap_start <= high_limit && gap_end - gap_start >= length)
1551 /* Visit left subtree if it looks promising */
1552 if (vma->vm_rb.rb_left) {
1553 struct vm_area_struct *left =
1554 rb_entry(vma->vm_rb.rb_left,
1555 struct vm_area_struct, vm_rb);
1556 if (left->rb_subtree_gap >= length) {
1562 /* Go back up the rbtree to find next candidate node */
1564 struct rb_node *prev = &vma->vm_rb;
1565 if (!rb_parent(prev))
1567 vma = rb_entry(rb_parent(prev),
1568 struct vm_area_struct, vm_rb);
1569 if (prev == vma->vm_rb.rb_right) {
1570 gap_start = vma->vm_prev ?
1571 vma->vm_prev->vm_end : 0;
1578 /* We found a suitable gap. Clip it with the original high_limit. */
1579 if (gap_end > info->high_limit)
1580 gap_end = info->high_limit;
1583 /* Compute highest gap address at the desired alignment */
1584 gap_end -= info->length;
1585 gap_end -= (gap_end - info->align_offset) & info->align_mask;
1587 VM_BUG_ON(gap_end < info->low_limit);
1588 VM_BUG_ON(gap_end < gap_start);
1593 static unsigned long kbase_get_unmapped_area(struct file *filp,
1594 const unsigned long addr, const unsigned long len,
1595 const unsigned long pgoff, const unsigned long flags)
1597 /* based on get_unmapped_area, but simplified slightly because some
1598 * values are known in advance */
1599 struct kbase_context *kctx = filp->private_data;
1600 struct mm_struct *mm = current->mm;
1601 struct vm_unmapped_area_info info;
1603 /* err on fixed address */
1604 if ((flags & MAP_FIXED) || addr)
1608 if (len > TASK_SIZE - SZ_2M)
1611 if (kctx->is_compat)
1612 return current->mm->get_unmapped_area(filp, addr, len, pgoff,
1615 if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA)) {
1616 info.high_limit = 1ul << 33;
1617 info.align_mask = 0;
1618 info.align_offset = 0;
1620 info.high_limit = mm->mmap_base;
1622 info.align_offset = SZ_2M;
1623 info.align_mask = SZ_2M - 1;
1625 info.align_mask = 0;
1626 info.align_offset = 0;
1632 info.low_limit = SZ_2M;
1633 return kbase_unmapped_area_topdown(&info);
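/*
 * Worked example for the limits above (illustrative, not from the original
 * source): with BASE_HW_FEATURE_33BIT_VA the search window is
 * [SZ_2M, 1ul << 33), i.e. mappings stay below 8 GiB with no extra
 * alignment; otherwise the window ends at mm->mmap_base and, when the 2 MiB
 * alignment branch applies, kbase_unmapped_area_topdown() rounds the chosen
 * address down with gap_end -= (gap_end - SZ_2M) & (SZ_2M - 1).
 */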
1637 static const struct file_operations kbase_fops = {
1638 .owner = THIS_MODULE,
1640 .release = kbase_release,
1643 .unlocked_ioctl = kbase_ioctl,
1644 .compat_ioctl = kbase_ioctl,
1646 .check_flags = kbase_check_flags,
1648 .get_unmapped_area = kbase_get_unmapped_area,
1652 #ifndef CONFIG_MALI_NO_MALI
1653 void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value)
1655 writel(value, kbdev->reg + offset);
1658 u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset)
1660 return readl(kbdev->reg + offset);
1662 #endif /* !CONFIG_MALI_NO_MALI */
1665 /** Show callback for the @c power_policy sysfs file.
1667 * This function is called to get the contents of the @c power_policy sysfs
1668 * file. This is a list of the available policies with the currently active one
1669 * surrounded by square brackets.
1671 * @param dev The device this sysfs file is for
1672 * @param attr The attributes of the sysfs file
1673 * @param buf The output buffer for the sysfs file contents
1675 * @return The number of bytes output to @c buf.
1677 static ssize_t show_policy(struct device *dev, struct device_attribute *attr, char *const buf)
1679 struct kbase_device *kbdev;
1680 const struct kbase_pm_policy *current_policy;
1681 const struct kbase_pm_policy *const *policy_list;
1686 kbdev = to_kbase_device(dev);
1691 current_policy = kbase_pm_get_policy(kbdev);
1693 policy_count = kbase_pm_list_policies(&policy_list);
1695 for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
1696 if (policy_list[i] == current_policy)
1697 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
1699 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
1702 if (ret < PAGE_SIZE - 1) {
1703 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
1705 buf[PAGE_SIZE - 2] = '\n';
1706 buf[PAGE_SIZE - 1] = '\0';
1707 ret = PAGE_SIZE - 1;
1713 /** Store callback for the @c power_policy sysfs file.
1715 * This function is called when the @c power_policy sysfs file is written to.
1716 * It matches the requested policy against the available policies and if a
1717 * matching policy is found calls @ref kbase_pm_set_policy to change the
1720 * @param dev The device this sysfs file is for
1721 * @param attr The attributes of the sysfs file
1722 * @param buf The value written to the sysfs file
1723 * @param count The number of bytes written to the sysfs file
1725 * @return @c count if the function succeeded. An error code on failure.
1727 static ssize_t set_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1729 struct kbase_device *kbdev;
1730 const struct kbase_pm_policy *new_policy = NULL;
1731 const struct kbase_pm_policy *const *policy_list;
1735 kbdev = to_kbase_device(dev);
1740 policy_count = kbase_pm_list_policies(&policy_list);
1742 for (i = 0; i < policy_count; i++) {
1743 if (sysfs_streq(policy_list[i]->name, buf)) {
1744 new_policy = policy_list[i];
1750 dev_err(dev, "power_policy: policy not found\n");
1754 kbase_pm_set_policy(kbdev, new_policy);
1759 /** The sysfs file @c power_policy.
1761 * This is used for obtaining information about the available policies,
1762 * determining which policy is currently active, and changing the active
1765 static DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
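/*
 * Example interaction with the power_policy attribute (illustrative; the
 * sysfs path and policy names depend on the platform and build):
 *
 *   # cat /sys/class/misc/mali0/device/power_policy
 *   [demand] coarse_demand always_on
 *   # echo always_on > /sys/class/misc/mali0/device/power_policy
 */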
1767 /** Show callback for the @c core_availability_policy sysfs file.
1769 * This function is called to get the contents of the @c core_availability_policy
1770 * sysfs file. This is a list of the available policies with the currently
1771 * active one surrounded by square brackets.
1773 * @param dev The device this sysfs file is for
1774 * @param attr The attributes of the sysfs file
1775 * @param buf The output buffer for the sysfs file contents
1777 * @return The number of bytes output to @c buf.
1779 static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char * const buf)
1781 struct kbase_device *kbdev;
1782 const struct kbase_pm_ca_policy *current_policy;
1783 const struct kbase_pm_ca_policy *const *policy_list;
1788 kbdev = to_kbase_device(dev);
1793 current_policy = kbase_pm_ca_get_policy(kbdev);
1795 policy_count = kbase_pm_ca_list_policies(&policy_list);
1797 for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
1798 if (policy_list[i] == current_policy)
1799 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
1801 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
1804 if (ret < PAGE_SIZE - 1) {
1805 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
1807 buf[PAGE_SIZE - 2] = '\n';
1808 buf[PAGE_SIZE - 1] = '\0';
1809 ret = PAGE_SIZE - 1;
1815 /** Store callback for the @c core_availability_policy sysfs file.
1817 * This function is called when the @c core_availability_policy sysfs file is
1818 * written to. It matches the requested policy against the available policies
1819 * and if a matching policy is found calls @ref kbase_pm_ca_set_policy to change
1822 * @param dev The device this sysfs file is for
1823 * @param attr The attributes of the sysfs file
1824 * @param buf The value written to the sysfs file
1825 * @param count The number of bytes written to the sysfs file
1827 * @return @c count if the function succeeded. An error code on failure.
1829 static ssize_t set_ca_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1831 struct kbase_device *kbdev;
1832 const struct kbase_pm_ca_policy *new_policy = NULL;
1833 const struct kbase_pm_ca_policy *const *policy_list;
1837 kbdev = to_kbase_device(dev);
1842 policy_count = kbase_pm_ca_list_policies(&policy_list);
1844 for (i = 0; i < policy_count; i++) {
1845 if (sysfs_streq(policy_list[i]->name, buf)) {
1846 new_policy = policy_list[i];
1852 dev_err(dev, "core_availability_policy: policy not found\n");
1856 kbase_pm_ca_set_policy(kbdev, new_policy);
1861 /** The sysfs file @c core_availability_policy
1863 * This is used for obtaining information about the available policies,
1864 * determining which policy is currently active, and changing the active
1867 static DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_policy);
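/*
 * Example (illustrative): reading core_availability_policy lists the
 * available core-availability policies with the active one in square
 * brackets, and writing one of the listed names switches to it, mirroring
 * the power_policy attribute above.
 */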
1869 /** Show callback for the @c core_mask sysfs file.
1871 * This function is called to get the contents of the @c core_mask sysfs
1874 * @param dev The device this sysfs file is for
1875 * @param attr The attributes of the sysfs file
1876 * @param buf The output buffer for the sysfs file contents
1878 * @return The number of bytes output to @c buf.
1880 static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char * const buf)
1882 struct kbase_device *kbdev;
1885 kbdev = to_kbase_device(dev);
1890 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1891 "Current core mask (JS0) : 0x%llX\n",
1892 kbdev->pm.debug_core_mask[0]);
1893 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1894 "Current core mask (JS1) : 0x%llX\n",
1895 kbdev->pm.debug_core_mask[1]);
1896 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1897 "Current core mask (JS2) : 0x%llX\n",
1898 kbdev->pm.debug_core_mask[2]);
1899 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1900 "Available core mask : 0x%llX\n",
1901 kbdev->gpu_props.props.raw_props.shader_present);
1906 /** Store callback for the @c core_mask sysfs file.
1908 * This function is called when the @c core_mask sysfs file is written to.
1910 * @param dev The device this sysfs file is for
1911 * @param attr The attributes of the sysfs file
1912 * @param buf The value written to the sysfs file
1913 * @param count The number of bytes written to the sysfs file
1915 * @return @c count if the function succeeded. An error code on failure.
1917 static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1919 struct kbase_device *kbdev;
1920 u64 new_core_mask[3];
1923 kbdev = to_kbase_device(dev);
1928 items = sscanf(buf, "%llx %llx %llx",
1929 &new_core_mask[0], &new_core_mask[1],
1933 new_core_mask[1] = new_core_mask[2] = new_core_mask[0];
1935 if (items == 1 || items == 3) {
1936 u64 shader_present =
1937 kbdev->gpu_props.props.raw_props.shader_present;
1938 u64 group0_core_mask =
1939 kbdev->gpu_props.props.coherency_info.group[0].
1942 if ((new_core_mask[0] & shader_present) != new_core_mask[0] ||
1943 !(new_core_mask[0] & group0_core_mask) ||
1944 (new_core_mask[1] & shader_present) !=
1946 !(new_core_mask[1] & group0_core_mask) ||
1947 (new_core_mask[2] & shader_present) !=
1949 !(new_core_mask[2] & group0_core_mask)) {
1950 dev_err(dev, "power_policy: invalid core specification\n");
1954 if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] ||
1955 kbdev->pm.debug_core_mask[1] !=
1957 kbdev->pm.debug_core_mask[2] !=
1959 unsigned long flags;
1961 spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
1963 kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0],
1964 new_core_mask[1], new_core_mask[2]);
1966 spin_unlock_irqrestore(&kbdev->pm.power_change_lock,
1973 dev_err(kbdev->dev, "Couldn't process set_core_mask write operation.\n"
1974 "Use format <core_mask>\n"
1975 "or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
1979 /** The sysfs file @c core_mask.
1981 * This is used to restrict shader core availability for debugging purposes.
1982 * Reading it will show the current core mask and the mask of cores available.
1983 * Writing to it will set the current core mask.
1985 static DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask);
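/*
 * Example writes accepted by set_core_mask() above (mask values are
 * illustrative):
 *
 *   echo 0xf > core_mask                 # same mask for JS0, JS1 and JS2
 *   echo 0xf 0x3 0xf > core_mask         # one mask per job slot
 *
 * Each mask must be a subset of shader_present and must contain at least
 * one core from coherency group 0.
 */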
1987 /** Store callback for the @c js_timeouts sysfs file.
1989 * This function is called when the @c js_timeouts sysfs file is written to.
1990 * The file contains eight values separated by whitespace, corresponding to the
1991 * JS_SOFT_STOP_TICKS, JS_SOFT_STOP_TICKS_CL, JS_HARD_STOP_TICKS_SS,
1992 * JS_HARD_STOP_TICKS_CL, JS_HARD_STOP_TICKS_DUMPING, JS_RESET_TICKS_SS,
1993 * JS_RESET_TICKS_CL and JS_RESET_TICKS_DUMPING configuration values (in that
1994 * order), with the difference that the js_timeouts values are expressed in MILLISECONDS.
1996 * The js_timeouts sysfs file allows the values currently in
1997 * use by the job scheduler to be overridden. Note that a value needs to
1998 * be non-zero for it to override the current job scheduler value.
2000 * @param dev The device this sysfs file is for
2001 * @param attr The attributes of the sysfs file
2002 * @param buf The value written to the sysfs file
2003 * @param count The number of bytes written to the sysfs file
2005 * @return @c count if the function succeeded. An error code on failure.
2007 static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2009 struct kbase_device *kbdev;
2011 long js_soft_stop_ms;
2012 long js_soft_stop_ms_cl;
2013 long js_hard_stop_ms_ss;
2014 long js_hard_stop_ms_cl;
2015 long js_hard_stop_ms_dumping;
2016 long js_reset_ms_ss;
2017 long js_reset_ms_cl;
2018 long js_reset_ms_dumping;
2020 kbdev = to_kbase_device(dev);
2024 items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld",
2025 &js_soft_stop_ms, &js_soft_stop_ms_cl,
2026 &js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
2027 &js_hard_stop_ms_dumping, &js_reset_ms_ss,
2028 &js_reset_ms_cl, &js_reset_ms_dumping);
2033 if (js_soft_stop_ms >= 0) {
2034 ticks = js_soft_stop_ms * 1000000ULL;
2035 do_div(ticks, kbdev->js_data.scheduling_period_ns);
2036 kbdev->js_soft_stop_ticks = ticks;
2038 kbdev->js_soft_stop_ticks = -1;
2041 if (js_soft_stop_ms_cl >= 0) {
2042 ticks = js_soft_stop_ms_cl * 1000000ULL;
2043 do_div(ticks, kbdev->js_data.scheduling_period_ns);
2044 kbdev->js_soft_stop_ticks_cl = ticks;
2046 kbdev->js_soft_stop_ticks_cl = -1;
2049 if (js_hard_stop_ms_ss >= 0) {
2050 ticks = js_hard_stop_ms_ss * 1000000ULL;
2051 do_div(ticks, kbdev->js_data.scheduling_period_ns);
2052 kbdev->js_hard_stop_ticks_ss = ticks;
2054 kbdev->js_hard_stop_ticks_ss = -1;
2057 if (js_hard_stop_ms_cl >= 0) {
2058 ticks = js_hard_stop_ms_cl * 1000000ULL;
2059 do_div(ticks, kbdev->js_data.scheduling_period_ns);
2060 kbdev->js_hard_stop_ticks_cl = ticks;
2062 kbdev->js_hard_stop_ticks_cl = -1;
2065 if (js_hard_stop_ms_dumping >= 0) {
2066 ticks = js_hard_stop_ms_dumping * 1000000ULL;
2067 do_div(ticks, kbdev->js_data.scheduling_period_ns);
2068 kbdev->js_hard_stop_ticks_dumping = ticks;
2070 kbdev->js_hard_stop_ticks_dumping = -1;
2073 if (js_reset_ms_ss >= 0) {
2074 ticks = js_reset_ms_ss * 1000000ULL;
2075 do_div(ticks, kbdev->js_data.scheduling_period_ns);
2076 kbdev->js_reset_ticks_ss = ticks;
2078 kbdev->js_reset_ticks_ss = -1;
2081 if (js_reset_ms_cl >= 0) {
2082 ticks = js_reset_ms_cl * 1000000ULL;
2083 do_div(ticks, kbdev->js_data.scheduling_period_ns);
2084 kbdev->js_reset_ticks_cl = ticks;
2086 kbdev->js_reset_ticks_cl = -1;
2089 if (js_reset_ms_dumping >= 0) {
2090 ticks = js_reset_ms_dumping * 1000000ULL;
2091 do_div(ticks, kbdev->js_data.scheduling_period_ns);
2092 kbdev->js_reset_ticks_dumping = ticks;
2094 kbdev->js_reset_ticks_dumping = -1;
2097 kbdev->js_timeouts_updated = true;
2099 dev_dbg(kbdev->dev, "Overriding JS_SOFT_STOP_TICKS with %lu ticks (%lu ms)\n",
2100 (unsigned long)kbdev->js_soft_stop_ticks,
2102 dev_dbg(kbdev->dev, "Overriding JS_SOFT_STOP_TICKS_CL with %lu ticks (%lu ms)\n",
2103 (unsigned long)kbdev->js_soft_stop_ticks_cl,
2104 js_soft_stop_ms_cl);
2105 dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_SS with %lu ticks (%lu ms)\n",
2106 (unsigned long)kbdev->js_hard_stop_ticks_ss,
2107 js_hard_stop_ms_ss);
2108 dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_CL with %lu ticks (%lu ms)\n",
2109 (unsigned long)kbdev->js_hard_stop_ticks_cl,
2110 js_hard_stop_ms_cl);
2111 dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_DUMPING with %lu ticks (%lu ms)\n",
2113 kbdev->js_hard_stop_ticks_dumping,
2114 js_hard_stop_ms_dumping);
2115 dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_SS with %lu ticks (%lu ms)\n",
2116 (unsigned long)kbdev->js_reset_ticks_ss,
2118 dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_CL with %lu ticks (%lu ms)\n",
2119 (unsigned long)kbdev->js_reset_ticks_cl,
2121 dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_DUMPING with %lu ticks (%lu ms)\n",
2122 (unsigned long)kbdev->js_reset_ticks_dumping,
2123 js_reset_ms_dumping);
2128 dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n"
2129 "Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> <hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> <reset_ms_dumping>\n"
2130 "Write 0 for no change, -1 to restore default timeout\n");
2134 /** Show callback for the @c js_timeouts sysfs file.
2136 * This function is called to get the contents of the @c js_timeouts sysfs
2137 * file. It returns the last values written to the js_timeouts sysfs file.
2138 * If the file hasn't been written to yet, the values will be the current settings in
2140 * @param dev The device this sysfs file is for
2141 * @param attr The attributes of the sysfs file
2142 * @param buf The output buffer for the sysfs file contents
2144 * @return The number of bytes output to @c buf.
2146 static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *attr, char * const buf)
2148 struct kbase_device *kbdev;
2151 unsigned long js_soft_stop_ms;
2152 unsigned long js_soft_stop_ms_cl;
2153 unsigned long js_hard_stop_ms_ss;
2154 unsigned long js_hard_stop_ms_cl;
2155 unsigned long js_hard_stop_ms_dumping;
2156 unsigned long js_reset_ms_ss;
2157 unsigned long js_reset_ms_cl;
2158 unsigned long js_reset_ms_dumping;
2159 unsigned long ticks;
2160 u32 scheduling_period_ns;
2162 kbdev = to_kbase_device(dev);
2166 /* If no contexts have been scheduled since js_timeouts was last written
2167 * to, the new timeouts might not have been latched yet. So check if an
2168 * update is pending and use the new values if necessary. */
2169 if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns > 0)
2170 scheduling_period_ns = kbdev->js_scheduling_period_ns;
2172 scheduling_period_ns = kbdev->js_data.scheduling_period_ns;
2174 if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks > 0)
2175 ticks = kbdev->js_soft_stop_ticks;
2177 ticks = kbdev->js_data.soft_stop_ticks;
2178 ms = (u64)ticks * scheduling_period_ns;
2179 do_div(ms, 1000000UL);
2180 js_soft_stop_ms = (unsigned long)ms;
2182 if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks_cl > 0)
2183 ticks = kbdev->js_soft_stop_ticks_cl;
2185 ticks = kbdev->js_data.soft_stop_ticks_cl;
2186 ms = (u64)ticks * scheduling_period_ns;
2187 do_div(ms, 1000000UL);
2188 js_soft_stop_ms_cl = (unsigned long)ms;
2190 if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_ss > 0)
2191 ticks = kbdev->js_hard_stop_ticks_ss;
2193 ticks = kbdev->js_data.hard_stop_ticks_ss;
2194 ms = (u64)ticks * scheduling_period_ns;
2195 do_div(ms, 1000000UL);
2196 js_hard_stop_ms_ss = (unsigned long)ms;
2198 if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_cl > 0)
2199 ticks = kbdev->js_hard_stop_ticks_cl;
2201 ticks = kbdev->js_data.hard_stop_ticks_cl;
2202 ms = (u64)ticks * scheduling_period_ns;
2203 do_div(ms, 1000000UL);
2204 js_hard_stop_ms_cl = (unsigned long)ms;
2206 if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_dumping > 0)
2207 ticks = kbdev->js_hard_stop_ticks_dumping;
2209 ticks = kbdev->js_data.hard_stop_ticks_dumping;
2210 ms = (u64)ticks * scheduling_period_ns;
2211 do_div(ms, 1000000UL);
2212 js_hard_stop_ms_dumping = (unsigned long)ms;
2214 if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_ss > 0)
2215 ticks = kbdev->js_reset_ticks_ss;
2217 ticks = kbdev->js_data.gpu_reset_ticks_ss;
2218 ms = (u64)ticks * scheduling_period_ns;
2219 do_div(ms, 1000000UL);
2220 js_reset_ms_ss = (unsigned long)ms;
2222 if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_cl > 0)
2223 ticks = kbdev->js_reset_ticks_cl;
2225 ticks = kbdev->js_data.gpu_reset_ticks_cl;
2226 ms = (u64)ticks * scheduling_period_ns;
2227 do_div(ms, 1000000UL);
2228 js_reset_ms_cl = (unsigned long)ms;
2230 if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_dumping > 0)
2231 ticks = kbdev->js_reset_ticks_dumping;
2233 ticks = kbdev->js_data.gpu_reset_ticks_dumping;
2234 ms = (u64)ticks * scheduling_period_ns;
2235 do_div(ms, 1000000UL);
2236 js_reset_ms_dumping = (unsigned long)ms;
2238 ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
2239 js_soft_stop_ms, js_soft_stop_ms_cl,
2240 js_hard_stop_ms_ss, js_hard_stop_ms_cl,
2241 js_hard_stop_ms_dumping, js_reset_ms_ss,
2242 js_reset_ms_cl, js_reset_ms_dumping);
2244 if (ret >= PAGE_SIZE) {
2245 buf[PAGE_SIZE - 2] = '\n';
2246 buf[PAGE_SIZE - 1] = '\0';
2247 ret = PAGE_SIZE - 1;
2253 /** The sysfs file @c js_timeouts.
2255 * This is used to override the current job scheduler values for
2256 * JS_SOFT_STOP_TICKS
2257 * JS_SOFT_STOP_TICKS_CL
2258 * JS_HARD_STOP_TICKS_SS
2259 * JS_HARD_STOP_TICKS_CL
2260 * JS_HARD_STOP_TICKS_DUMPING
2263 * JS_RESET_TICKS_DUMPING.
2265 static DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts);
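/*
 * Example write (values illustrative): the eight fields are milliseconds in
 * the order soft_stop, soft_stop_cl, hard_stop_ss, hard_stop_cl,
 * hard_stop_dumping, reset_ss, reset_cl, reset_dumping; 0 leaves a value
 * unchanged and -1 restores the default:
 *
 *   echo 500 500 5000 5000 15000 15000 15000 30000 > js_timeouts
 *
 * Internally each value is converted to scheduler ticks as
 * ticks = ms * 1000000 / scheduling_period_ns (see set_js_timeouts()).
 */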
2268 * set_js_scheduling_period - Store callback for the js_scheduling_period sysfs
2270 * @dev: The device the sysfs file is for
2271 * @attr: The attributes of the sysfs file
2272 * @buf: The value written to the sysfs file
2273 * @count: The number of bytes written to the sysfs file
2275 * This function is called when the js_scheduling_period sysfs file is written
2276 * to. It checks the data written and, if valid, updates the js_scheduling_period value.
2279 * Return: @c count if the function succeeded. An error code on failure.
2281 static ssize_t set_js_scheduling_period(struct device *dev,
2282 struct device_attribute *attr, const char *buf, size_t count)
2284 struct kbase_device *kbdev;
2286 unsigned int js_scheduling_period;
2287 u32 new_scheduling_period_ns;
2291 kbdev = to_kbase_device(dev);
2295 ret = kstrtouint(buf, 0, &js_scheduling_period);
2296 if (ret || !js_scheduling_period) {
2297 dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n"
2298 "Use format <js_scheduling_period_ms>\n");
2302 new_scheduling_period_ns = js_scheduling_period * 1000000;
2304 /* Update scheduling timeouts */
2305 mutex_lock(&kbdev->js_data.runpool_mutex);
2307 /* If no contexts have been scheduled since js_timeouts was last written
2308 * to, the new timeouts might not have been latched yet. So check if an
2309 * update is pending and use the new values if necessary. */
2311 /* Use previous 'new' scheduling period as a base if present. */
2312 if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns)
2313 old_period = kbdev->js_scheduling_period_ns;
2315 old_period = kbdev->js_data.scheduling_period_ns;
2317 if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks > 0)
2318 ticks = (u64)kbdev->js_soft_stop_ticks * old_period;
2320 ticks = (u64)kbdev->js_data.soft_stop_ticks *
2321 kbdev->js_data.scheduling_period_ns;
2322 do_div(ticks, new_scheduling_period_ns);
2323 kbdev->js_soft_stop_ticks = ticks ? ticks : 1;
2325 if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks_cl > 0)
2326 ticks = (u64)kbdev->js_soft_stop_ticks_cl * old_period;
2328 ticks = (u64)kbdev->js_data.soft_stop_ticks_cl *
2329 kbdev->js_data.scheduling_period_ns;
2330 do_div(ticks, new_scheduling_period_ns);
2331 kbdev->js_soft_stop_ticks_cl = ticks ? ticks : 1;
2333 if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_ss > 0)
2334 ticks = (u64)kbdev->js_hard_stop_ticks_ss * old_period;
2336 ticks = (u64)kbdev->js_data.hard_stop_ticks_ss *
2337 kbdev->js_data.scheduling_period_ns;
2338 do_div(ticks, new_scheduling_period_ns);
2339 kbdev->js_hard_stop_ticks_ss = ticks ? ticks : 1;
2341 if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_cl > 0)
2342 ticks = (u64)kbdev->js_hard_stop_ticks_cl * old_period;
2344 ticks = (u64)kbdev->js_data.hard_stop_ticks_cl *
2345 kbdev->js_data.scheduling_period_ns;
2346 do_div(ticks, new_scheduling_period_ns);
2347 kbdev->js_hard_stop_ticks_cl = ticks ? ticks : 1;
2349 if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_dumping > 0)
2350 ticks = (u64)kbdev->js_hard_stop_ticks_dumping * old_period;
2352 ticks = (u64)kbdev->js_data.hard_stop_ticks_dumping *
2353 kbdev->js_data.scheduling_period_ns;
2354 do_div(ticks, new_scheduling_period_ns);
2355 kbdev->js_hard_stop_ticks_dumping = ticks ? ticks : 1;
2357 if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_ss > 0)
2358 ticks = (u64)kbdev->js_reset_ticks_ss * old_period;
2360 ticks = (u64)kbdev->js_data.gpu_reset_ticks_ss *
2361 kbdev->js_data.scheduling_period_ns;
2362 do_div(ticks, new_scheduling_period_ns);
2363 kbdev->js_reset_ticks_ss = ticks ? ticks : 1;
2365 if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_cl > 0)
2366 ticks = (u64)kbdev->js_reset_ticks_cl * old_period;
2368 ticks = (u64)kbdev->js_data.gpu_reset_ticks_cl *
2369 kbdev->js_data.scheduling_period_ns;
2370 do_div(ticks, new_scheduling_period_ns);
2371 kbdev->js_reset_ticks_cl = ticks ? ticks : 1;
2373 if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_dumping > 0)
2374 ticks = (u64)kbdev->js_reset_ticks_dumping * old_period;
2376 ticks = (u64)kbdev->js_data.gpu_reset_ticks_dumping *
2377 kbdev->js_data.scheduling_period_ns;
2378 do_div(ticks, new_scheduling_period_ns);
2379 kbdev->js_reset_ticks_dumping = ticks ? ticks : 1;
2381 kbdev->js_scheduling_period_ns = new_scheduling_period_ns;
2382 kbdev->js_timeouts_updated = true;
2384 mutex_unlock(&kbdev->js_data.runpool_mutex);
2386 dev_dbg(kbdev->dev, "JS scheduling period: %dms\n",
2387 js_scheduling_period);
2393 * show_js_scheduling_period - Show callback for the js_scheduling_period sysfs entry.
2395 * @dev: The device this sysfs file is for.
2396 * @attr: The attributes of the sysfs file.
2397 * @buf: The output buffer for the current scheduling period.
2399 * This function is called to get the current period used for JS scheduling.
2402 * Return: The number of bytes output to buf.
2404 static ssize_t show_js_scheduling_period(struct device *dev,
2405 struct device_attribute *attr, char * const buf)
2407 struct kbase_device *kbdev;
2411 kbdev = to_kbase_device(dev);
2415 if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns > 0)
2416 period = kbdev->js_scheduling_period_ns;
2418 period = kbdev->js_data.scheduling_period_ns;
2420 ret = scnprintf(buf, PAGE_SIZE, "%d\n",
2426 static DEVICE_ATTR(js_scheduling_period, S_IRUGO | S_IWUSR,
2427 show_js_scheduling_period, set_js_scheduling_period);
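/*
 * Worked example of the rescaling performed by set_js_scheduling_period()
 * above (illustrative numbers only): with a 100 ms scheduling period and a
 * soft-stop timeout of 3 ticks, the effective timeout is about 300 ms.
 * Writing "50" to js_scheduling_period rescales the ticks to
 * 3 * 100000000 / 50000000 = 6, so the wall-clock timeout stays at about
 * 300 ms; a result of 0 ticks is rounded up to 1.
 */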
2429 #if !MALI_CUSTOMER_RELEASE
2430 /** Store callback for the @c force_replay sysfs file.
2432 * @param dev The device this sysfs file is for
2433 * @param attr The attributes of the sysfs file
2434 * @param buf The value written to the sysfs file
2435 * @param count The number of bytes written to the sysfs file
2437 * @return @c count if the function succeeded. An error code on failure.
2439 static ssize_t set_force_replay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2441 struct kbase_device *kbdev;
2443 kbdev = to_kbase_device(dev);
2447 if (!strncmp("limit=", buf, MIN(6, count))) {
2448 int force_replay_limit;
2449 int items = sscanf(buf, "limit=%u", &force_replay_limit);
2452 kbdev->force_replay_random = false;
2453 kbdev->force_replay_limit = force_replay_limit;
2454 kbdev->force_replay_count = 0;
2458 } else if (!strncmp("random_limit", buf, MIN(12, count))) {
2459 kbdev->force_replay_random = true;
2460 kbdev->force_replay_count = 0;
2463 } else if (!strncmp("norandom_limit", buf, MIN(14, count))) {
2464 kbdev->force_replay_random = false;
2465 kbdev->force_replay_limit = KBASEP_FORCE_REPLAY_DISABLED;
2466 kbdev->force_replay_count = 0;
2469 } else if (!strncmp("core_req=", buf, MIN(9, count))) {
2470 unsigned int core_req;
2471 int items = sscanf(buf, "core_req=%x", &core_req);
2474 kbdev->force_replay_core_req = (base_jd_core_req)core_req;
2479 dev_err(kbdev->dev, "Couldn't process force_replay write operation.\nPossible settings: limit=<limit>, random_limit, norandom_limit, core_req=<core_req>\n");
2483 /** Show callback for the @c force_replay sysfs file.
2485 * This function is called to get the contents of the @c force_replay sysfs
2486 * file. It returns the last value written to the force_replay sysfs file.
2487 * If the file has not been written to yet, the values will be 0.
2489 * @param dev The device this sysfs file is for
2490 * @param attr The attributes of the sysfs file
2491 * @param buf The output buffer for the sysfs file contents
2493 * @return The number of bytes output to @c buf.
2495 static ssize_t show_force_replay(struct device *dev,
2496 struct device_attribute *attr, char * const buf)
2498 struct kbase_device *kbdev;
2501 kbdev = to_kbase_device(dev);
2505 if (kbdev->force_replay_random)
2506 ret = scnprintf(buf, PAGE_SIZE,
2507 "limit=0\nrandom_limit\ncore_req=%x\n",
2508 kbdev->force_replay_core_req);
2510 ret = scnprintf(buf, PAGE_SIZE,
2511 "limit=%u\nnorandom_limit\ncore_req=%x\n",
2512 kbdev->force_replay_limit,
2513 kbdev->force_replay_core_req);
2515 if (ret >= PAGE_SIZE) {
2516 buf[PAGE_SIZE - 2] = '\n';
2517 buf[PAGE_SIZE - 1] = '\0';
2518 ret = PAGE_SIZE - 1;
2524 /** The sysfs file @c force_replay.
2527 static DEVICE_ATTR(force_replay, S_IRUGO | S_IWUSR, show_force_replay,
2529 #endif /* !MALI_CUSTOMER_RELEASE */
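/*
 * For reference, set_force_replay() in the !MALI_CUSTOMER_RELEASE block above
 * accepts exactly the four formats listed in its error message; illustrative
 * writes (the numeric values are placeholders):
 *
 *   echo "limit=3"        > force_replay
 *   echo "random_limit"   > force_replay
 *   echo "norandom_limit" > force_replay
 *   echo "core_req=10"    > force_replay
 *
 * Reading the file back reports the currently configured settings.
 */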
2531 #ifdef CONFIG_MALI_DEBUG
2532 static ssize_t set_js_softstop_always(struct device *dev,
2533 struct device_attribute *attr, const char *buf, size_t count)
2535 struct kbase_device *kbdev;
2537 int softstop_always;
2539 kbdev = to_kbase_device(dev);
2543 ret = kstrtoint(buf, 0, &softstop_always);
2544 if (ret || ((softstop_always != 0) && (softstop_always != 1))) {
2545 dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n"
2546 "Use format <soft_stop_always>\n");
2550 kbdev->js_data.softstop_always = (bool) softstop_always;
2551 dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n",
2552 (kbdev->js_data.softstop_always) ?
2553 "Enabled" : "Disabled");
2557 static ssize_t show_js_softstop_always(struct device *dev,
2558 struct device_attribute *attr, char * const buf)
2560 struct kbase_device *kbdev;
2563 kbdev = to_kbase_device(dev);
2567 ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);
2569 if (ret >= PAGE_SIZE) {
2570 buf[PAGE_SIZE - 2] = '\n';
2571 buf[PAGE_SIZE - 1] = '\0';
2572 ret = PAGE_SIZE - 1;
2579 * By default, soft-stops are disabled when only a single context is present. The ability to
2580 * enable soft-stops in that case is intended for debug and unit-testing purposes
2581 * (see the CL t6xx_stress_1 unit test for an example where this feature is used).
2583 static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
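/*
 * Illustrative usage (debug builds only; the sysfs path depends on the
 * platform): writing "1" to js_softstop_always enables soft-stops even when
 * only a single context is present, writing "0" restores the default, and
 * reading the file reports the current setting.
 */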
2584 #endif /* CONFIG_MALI_DEBUG */
2586 #ifdef CONFIG_MALI_DEBUG
2587 typedef void (kbasep_debug_command_func) (struct kbase_device *);
2589 enum kbasep_debug_command_code {
2590 KBASEP_DEBUG_COMMAND_DUMPTRACE,
2592 /* This must be the last enum */
2593 KBASEP_DEBUG_COMMAND_COUNT
2596 struct kbasep_debug_command {
2598 kbasep_debug_command_func *func;
2601 /** Debug commands supported by the driver */
2602 static const struct kbasep_debug_command debug_commands[] = {
2605 .func = &kbasep_trace_dump,
2609 /** Show callback for the @c debug_command sysfs file.
2611 * This function is called to get the contents of the @c debug_command sysfs
2612 * file. This is a list of the available debug commands, separated by newlines.
2614 * @param dev The device this sysfs file is for
2615 * @param attr The attributes of the sysfs file
2616 * @param buf The output buffer for the sysfs file contents
2618 * @return The number of bytes output to @c buf.
2620 static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char * const buf)
2622 struct kbase_device *kbdev;
2626 kbdev = to_kbase_device(dev);
2631 for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
2632 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);
2634 if (ret >= PAGE_SIZE) {
2635 buf[PAGE_SIZE - 2] = '\n';
2636 buf[PAGE_SIZE - 1] = '\0';
2637 ret = PAGE_SIZE - 1;
2643 /** Store callback for the @c debug_command sysfs file.
2645 * This function is called when the @c debug_command sysfs file is written to.
2646 * It matches the requested command against the available commands, and if
2647 * a matching command is found calls the associated function from
2648 * @ref debug_commands to issue the command.
2650 * @param dev The device this sysfs file is for
2651 * @param attr The attributes of the sysfs file
2652 * @param buf The value written to the sysfs file
2653 * @param count The number of bytes written to the sysfs file
2655 * @return @c count if the function succeeded. An error code on failure.
2657 static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2659 struct kbase_device *kbdev;
2662 kbdev = to_kbase_device(dev);
2667 for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
2668 if (sysfs_streq(debug_commands[i].str, buf)) {
2669 debug_commands[i].func(kbdev);
2674 /* Debug Command not found */
2675 dev_err(dev, "debug_command: command not known\n");
2679 /** The sysfs file @c debug_command.
2681 * This is used to issue general debug commands to the device driver.
2682 * Reading it will produce a list of debug commands, separated by newlines.
2683 * Writing to it with one of those commands will issue said command.
2685 static DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug);
2686 #endif /* CONFIG_MALI_DEBUG */
2689 * kbase_show_gpuinfo - Show callback for the gpuinfo sysfs entry.
2690 * @dev: The device this sysfs file is for.
2691 * @attr: The attributes of the sysfs file.
2692 * @buf: The output buffer to receive the GPU information.
2694 * This function is called to get a description of the present Mali
2695 * GPU via the gpuinfo sysfs entry. This includes the GPU family, the
2696 * number of cores, the hardware version and the raw product id. For
2699 * Mali-T60x MP4 r0p0 0x6956
2701 * Return: The number of bytes output to buf.
2703 static ssize_t kbase_show_gpuinfo(struct device *dev,
2704 struct device_attribute *attr, char *buf)
2706 static const struct gpu_product_id_name {
2709 } gpu_product_id_names[] = {
2710 { .id = GPU_ID_PI_T60X, .name = "Mali-T60x" },
2711 { .id = GPU_ID_PI_T62X, .name = "Mali-T62x" },
2712 { .id = GPU_ID_PI_T72X, .name = "Mali-T72x" },
2713 { .id = GPU_ID_PI_T76X, .name = "Mali-T76x" },
2714 { .id = GPU_ID_PI_T82X, .name = "Mali-T82x" },
2715 { .id = GPU_ID_PI_T83X, .name = "Mali-T83x" },
2716 { .id = GPU_ID_PI_T86X, .name = "Mali-T86x" },
2717 { .id = GPU_ID_PI_TFRX, .name = "Mali-T88x" },
2719 const char *product_name = "(Unknown Mali GPU)";
2720 struct kbase_device *kbdev;
2722 unsigned product_id, product_id_mask;
2726 kbdev = to_kbase_device(dev);
2730 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
2731 product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
2732 is_new_format = GPU_ID_IS_NEW_FORMAT(product_id);
2735 GPU_ID2_PRODUCT_MODEL :
2736 GPU_ID_VERSION_PRODUCT_ID) >>
2737 GPU_ID_VERSION_PRODUCT_ID_SHIFT;
2739 for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
2740 const struct gpu_product_id_name *p = &gpu_product_id_names[i];
2742 if ((GPU_ID_IS_NEW_FORMAT(p->id) == is_new_format) &&
2743 (p->id & product_id_mask) ==
2744 (product_id & product_id_mask)) {
2745 product_name = p->name;
2750 return scnprintf(buf, PAGE_SIZE, "%s MP%d r%dp%d 0x%04X\n",
2751 product_name, kbdev->gpu_props.num_cores,
2752 (gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT,
2753 (gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT,
2756 static DEVICE_ATTR(gpuinfo, S_IRUGO, kbase_show_gpuinfo, NULL);
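/*
 * Reading gpuinfo produces a single line such as the example in the comment
 * above: in "Mali-T60x MP4 r0p0 0x6956" the fields are the product family
 * (Mali-T60x), the shader core count (MP4 = 4 cores), the version register
 * fields (r<major>p<minor>) and the raw product id (0x6956).
 */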
2759 * set_dvfs_period - Store callback for the dvfs_period sysfs file.
2760 * @dev: The device this sysfs file is for
2761 * @attr: The attributes of the sysfs file
2762 * @buf: The value written to the sysfs file
2763 * @count: The number of bytes written to the sysfs file
2765 * This function is called when the dvfs_period sysfs file is written to. It
2766 * checks the data written and, if valid, updates the DVFS period variable.
2768 * Return: @c count if the function succeeded. An error code on failure.
2770 static ssize_t set_dvfs_period(struct device *dev,
2771 struct device_attribute *attr, const char *buf, size_t count)
2773 struct kbase_device *kbdev;
2777 kbdev = to_kbase_device(dev);
2781 ret = kstrtoint(buf, 0, &dvfs_period);
2782 if (ret || dvfs_period <= 0) {
2783 dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n"
2784 "Use format <dvfs_period_ms>\n");
2788 kbdev->pm.dvfs_period = dvfs_period;
2789 dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period);
2795 * show_dvfs_period - Show callback for the dvfs_period sysfs entry.
2796 * @dev: The device this sysfs file is for.
2797 * @attr: The attributes of the sysfs file.
2798 * @buf: The output buffer for the current DVFS period.
2800 * This function is called to get the current period used for the DVFS sample timer.
2803 * Return: The number of bytes output to buf.
2805 static ssize_t show_dvfs_period(struct device *dev,
2806 struct device_attribute *attr, char * const buf)
2808 struct kbase_device *kbdev;
2811 kbdev = to_kbase_device(dev);
2815 ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period);
2820 static DEVICE_ATTR(dvfs_period, S_IRUGO | S_IWUSR, show_dvfs_period,
2824 * set_pm_poweroff - Store callback for the pm_poweroff sysfs file.
2825 * @dev: The device this sysfs file is for
2826 * @attr: The attributes of the sysfs file
2827 * @buf: The value written to the sysfs file
2828 * @count: The number of bytes written to the sysfs file
2830 * This function is called when the pm_poweroff sysfs file is written to.
2832 * This file contains three values separated by whitespace. The values
2833 * are gpu_poweroff_time (the period of the poweroff timer, in ns),
2834 * poweroff_shader_ticks (the number of poweroff timer ticks before an idle
2835 * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer
2836 * ticks before the GPU is powered off), in that order.
2838 * Return: @c count if the function succeeded. An error code on failure.
2840 static ssize_t set_pm_poweroff(struct device *dev,
2841 struct device_attribute *attr, const char *buf, size_t count)
2843 struct kbase_device *kbdev;
2845 s64 gpu_poweroff_time;
2846 int poweroff_shader_ticks, poweroff_gpu_ticks;
2848 kbdev = to_kbase_device(dev);
2852 items = sscanf(buf, "%lld %d %d", &gpu_poweroff_time,
2853 &poweroff_shader_ticks,
2854 &poweroff_gpu_ticks);
2856 dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n"
2857 "Use format <gpu_poweroff_time_ns> <poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
2861 kbdev->pm.gpu_poweroff_time = HR_TIMER_DELAY_NSEC(gpu_poweroff_time);
2862 kbdev->pm.poweroff_shader_ticks = poweroff_shader_ticks;
2863 kbdev->pm.poweroff_gpu_ticks = poweroff_gpu_ticks;
2869 * show_pm_poweroff - Show callback for the pm_poweroff sysfs entry.
2870 * @dev: The device this sysfs file is for.
2871 * @attr: The attributes of the sysfs file.
2872 * @buf: The output buffer for the current poweroff parameters.
2874 * This function is called to get the current GPU poweroff timer settings.
2877 * Return: The number of bytes output to buf.
2879 static ssize_t show_pm_poweroff(struct device *dev,
2880 struct device_attribute *attr, char * const buf)
2882 struct kbase_device *kbdev;
2885 kbdev = to_kbase_device(dev);
2889 ret = scnprintf(buf, PAGE_SIZE, "%llu %u %u\n",
2890 ktime_to_ns(kbdev->pm.gpu_poweroff_time),
2891 kbdev->pm.poweroff_shader_ticks,
2892 kbdev->pm.poweroff_gpu_ticks);
2897 static DEVICE_ATTR(pm_poweroff, S_IRUGO | S_IWUSR, show_pm_poweroff,
2901 * set_reset_timeout - Store callback for the reset_timeout sysfs file.
2902 * @dev: The device this sysfs file is for
2903 * @attr: The attributes of the sysfs file
2904 * @buf: The value written to the sysfs file
2905 * @count: The number of bytes written to the sysfs file
2907 * This function is called when the reset_timeout sysfs file is written to. It
2908 * checks the data written, and if valid updates the reset timeout.
2910 * Return: @c count if the function succeeded. An error code on failure.
2912 static ssize_t set_reset_timeout(struct device *dev,
2913 struct device_attribute *attr, const char *buf, size_t count)
2915 struct kbase_device *kbdev;
2919 kbdev = to_kbase_device(dev);
2923 ret = kstrtoint(buf, 0, &reset_timeout);
2924 if (ret || reset_timeout <= 0) {
2925 dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n"
2926 "Use format <reset_timeout_ms>\n");
2930 kbdev->reset_timeout_ms = reset_timeout;
2931 dev_dbg(kbdev->dev, "Reset timeout: %dms\n", reset_timeout);
2937 * show_reset_timeout - Show callback for the reset_timeout sysfs entry.
2938 * @dev: The device this sysfs file is for.
2939 * @attr: The attributes of the sysfs file.
2940 * @buf: The output buffer for the current reset timeout.
2942 * This function is called to get the current reset timeout.
2944 * Return: The number of bytes output to buf.
2946 static ssize_t show_reset_timeout(struct device *dev,
2947 struct device_attribute *attr, char * const buf)
2949 struct kbase_device *kbdev;
2952 kbdev = to_kbase_device(dev);
2956 ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms);
2961 static DEVICE_ATTR(reset_timeout, S_IRUGO | S_IWUSR, show_reset_timeout,
2966 static ssize_t show_mem_pool_size(struct device *dev,
2967 struct device_attribute *attr, char * const buf)
2969 struct kbase_device *kbdev;
2972 kbdev = to_kbase_device(dev);
2976 ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
2977 kbase_mem_pool_size(&kbdev->mem_pool));
2982 static ssize_t set_mem_pool_size(struct device *dev,
2983 struct device_attribute *attr, const char *buf, size_t count)
2985 struct kbase_device *kbdev;
2989 kbdev = to_kbase_device(dev);
2993 err = kstrtoul(buf, 0, (unsigned long *)&new_size);
2997 kbase_mem_pool_trim(&kbdev->mem_pool, new_size);
3002 static DEVICE_ATTR(mem_pool_size, S_IRUGO | S_IWUSR, show_mem_pool_size,
3005 static ssize_t show_mem_pool_max_size(struct device *dev,
3006 struct device_attribute *attr, char * const buf)
3008 struct kbase_device *kbdev;
3011 kbdev = to_kbase_device(dev);
3015 ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
3016 kbase_mem_pool_max_size(&kbdev->mem_pool));
3021 static ssize_t set_mem_pool_max_size(struct device *dev,
3022 struct device_attribute *attr, const char *buf, size_t count)
3024 struct kbase_device *kbdev;
3025 size_t new_max_size;
3028 kbdev = to_kbase_device(dev);
3032 err = kstrtoul(buf, 0, (unsigned long *)&new_max_size);
3036 kbase_mem_pool_set_max_size(&kbdev->mem_pool, new_max_size);
3041 static DEVICE_ATTR(mem_pool_max_size, S_IRUGO | S_IWUSR, show_mem_pool_max_size,
3042 set_mem_pool_max_size);
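/*
 * Note on the two memory pool files above: mem_pool_size reports the number
 * of pages currently held in the device memory pool and, when written, trims
 * the pool to the requested size via kbase_mem_pool_trim(); mem_pool_max_size
 * reports and sets the upper bound enforced through
 * kbase_mem_pool_set_max_size(). Both values are in pages; for example,
 * writing "0" to mem_pool_size releases all pooled pages.
 */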
3046 static int kbasep_secure_mode_init(struct kbase_device *kbdev)
3049 #ifdef SECURE_CALLBACKS
3050 kbdev->secure_ops = SECURE_CALLBACKS;
3051 kbdev->secure_mode_support = false;
3053 if (kbdev->secure_ops) {
3056 /* Make sure secure mode is disabled on startup */
3057 err = kbdev->secure_ops->secure_mode_disable(kbdev);
3059 /* secure_mode_disable() returns -EINVAL if not supported */
3060 kbdev->secure_mode_support = (err != -EINVAL);
3067 #ifdef CONFIG_MALI_NO_MALI
3068 static int kbase_common_reg_map(struct kbase_device *kbdev)
3072 static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
3075 #else /* CONFIG_MALI_NO_MALI */
3076 static int kbase_common_reg_map(struct kbase_device *kbdev)
3080 if (!request_mem_region(kbdev->reg_start, kbdev->reg_size, dev_name(kbdev->dev))) {
3081 dev_err(kbdev->dev, "Register window unavailable\n");
3086 kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size);
3088 dev_err(kbdev->dev, "Can't remap register window\n");
3096 release_mem_region(kbdev->reg_start, kbdev->reg_size);
3101 static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
3103 iounmap(kbdev->reg);
3104 release_mem_region(kbdev->reg_start, kbdev->reg_size);
3106 #endif /* CONFIG_MALI_NO_MALI */
3109 #ifdef CONFIG_DEBUG_FS
3111 #if KBASE_GPU_RESET_EN
3112 #include <mali_kbase_hwaccess_jm.h>
3114 static void trigger_quirks_reload(struct kbase_device *kbdev)
3116 kbase_pm_context_active(kbdev);
3117 if (kbase_prepare_to_reset_gpu(kbdev))
3118 kbase_reset_gpu(kbdev);
3119 kbase_pm_context_idle(kbdev);
3122 #define MAKE_QUIRK_ACCESSORS(type) \
3123 static int type##_quirks_set(void *data, u64 val) \
3125 struct kbase_device *kbdev; \
3126 kbdev = (struct kbase_device *)data; \
3127 kbdev->hw_quirks_##type = (u32)val; \
3128 trigger_quirks_reload(kbdev); \
3132 static int type##_quirks_get(void *data, u64 *val) \
3134 struct kbase_device *kbdev;\
3135 kbdev = (struct kbase_device *)data;\
3136 *val = kbdev->hw_quirks_##type;\
3139 DEFINE_SIMPLE_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get,\
3140 type##_quirks_set, "%llu\n")
3142 MAKE_QUIRK_ACCESSORS(sc);
3143 MAKE_QUIRK_ACCESSORS(tiler);
3144 MAKE_QUIRK_ACCESSORS(mmu);
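/*
 * For reference, MAKE_QUIRK_ACCESSORS(sc) above expands roughly to:
 *
 *   static int sc_quirks_set(void *data, u64 val)
 *   {
 *           struct kbase_device *kbdev = (struct kbase_device *)data;
 *
 *           kbdev->hw_quirks_sc = (u32)val;
 *           trigger_quirks_reload(kbdev);
 *           return 0;
 *   }
 *   static int sc_quirks_get(void *data, u64 *val) { ... }
 *   DEFINE_SIMPLE_ATTRIBUTE(fops_sc_quirks, sc_quirks_get, sc_quirks_set,
 *                           "%llu\n");
 *
 * i.e. one fops_<type>_quirks per invocation, used by the quirks_<type>
 * debugfs files created in kbase_device_debugfs_init() below.
 */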
3146 #endif /* KBASE_GPU_RESET_EN */
3148 static int kbasep_secure_mode_seq_show(struct seq_file *m, void *p)
3150 struct kbase_device *kbdev = m->private;
3152 if (!kbdev->secure_mode_support)
3153 seq_puts(m, "unsupported\n");
3155 seq_printf(m, "%s\n", kbdev->secure_mode ? "Y" : "N");
3160 static int kbasep_secure_mode_debugfs_open(struct inode *in, struct file *file)
3162 return single_open(file, kbasep_secure_mode_seq_show, in->i_private);
3165 static const struct file_operations kbasep_secure_mode_debugfs_fops = {
3166 .open = kbasep_secure_mode_debugfs_open,
3168 .llseek = seq_lseek,
3169 .release = single_release,
3172 static int kbase_device_debugfs_init(struct kbase_device *kbdev)
3174 struct dentry *debugfs_ctx_defaults_directory;
3177 kbdev->mali_debugfs_directory = debugfs_create_dir(kbdev->devname,
3179 if (!kbdev->mali_debugfs_directory) {
3180 dev_err(kbdev->dev, "Couldn't create mali debugfs directory\n");
3185 kbdev->debugfs_ctx_directory = debugfs_create_dir("ctx",
3186 kbdev->mali_debugfs_directory);
3187 if (!kbdev->debugfs_ctx_directory) {
3188 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx directory\n");
3193 debugfs_ctx_defaults_directory = debugfs_create_dir("defaults",
3194 kbdev->debugfs_ctx_directory);
3195 if (!debugfs_ctx_defaults_directory) {
3196 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx defaults directory\n");
3201 #if !MALI_CUSTOMER_RELEASE
3202 kbasep_regs_dump_debugfs_add(kbdev);
3203 #endif /* !MALI_CUSTOMER_RELEASE */
3205 kbase_debug_job_fault_debugfs_init(kbdev);
3206 kbasep_gpu_memory_debugfs_init(kbdev);
3207 #if KBASE_GPU_RESET_EN
3208 debugfs_create_file("quirks_sc", 0644,
3209 kbdev->mali_debugfs_directory, kbdev,
3211 debugfs_create_file("quirks_tiler", 0644,
3212 kbdev->mali_debugfs_directory, kbdev,
3213 &fops_tiler_quirks);
3214 debugfs_create_file("quirks_mmu", 0644,
3215 kbdev->mali_debugfs_directory, kbdev,
3217 #endif /* KBASE_GPU_RESET_EN */
3219 #ifndef CONFIG_MALI_COH_USER
3220 debugfs_create_bool("infinite_cache", 0644,
3221 debugfs_ctx_defaults_directory,
3222 (bool*)&(kbdev->infinite_cache_active_default));
3223 #endif /* CONFIG_MALI_COH_USER */
3225 debugfs_create_size_t("mem_pool_max_size", 0644,
3226 debugfs_ctx_defaults_directory,
3227 &kbdev->mem_pool_max_size_default);
3229 #if KBASE_TRACE_ENABLE
3230 kbasep_trace_debugfs_init(kbdev);
3231 #endif /* KBASE_TRACE_ENABLE */
3233 #ifdef CONFIG_MALI_TRACE_TIMELINE
3234 kbasep_trace_timeline_debugfs_init(kbdev);
3235 #endif /* CONFIG_MALI_TRACE_TIMELINE */
3237 debugfs_create_file("secure_mode", S_IRUGO,
3238 kbdev->mali_debugfs_directory, kbdev,
3239 &kbasep_secure_mode_debugfs_fops);
3244 debugfs_remove_recursive(kbdev->mali_debugfs_directory);
3248 static void kbase_device_debugfs_term(struct kbase_device *kbdev)
3250 debugfs_remove_recursive(kbdev->mali_debugfs_directory);
3253 #else /* CONFIG_DEBUG_FS */
3254 static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
3259 static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }
3260 #endif /* CONFIG_DEBUG_FS */
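/*
 * Sketch of the debugfs layout created by kbase_device_debugfs_init() above
 * (assuming debugfs is mounted at /sys/kernel/debug, the device directory is
 * created at the debugfs root and the device name is mali0; the exact set of
 * entries depends on the config options compiled in):
 *
 *   /sys/kernel/debug/mali0/
 *       quirks_sc, quirks_tiler, quirks_mmu     (KBASE_GPU_RESET_EN)
 *       secure_mode                             (read-only)
 *       ctx/
 *           defaults/
 *               infinite_cache                  (!CONFIG_MALI_COH_USER)
 *               mem_pool_max_size
 *
 * plus the entries added by the job fault, GPU memory and trace helpers
 * called above.
 */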
3262 static void kbase_device_coherency_init(struct kbase_device *kbdev, u32 gpu_id)
3265 u32 supported_coherency_bitmap =
3266 kbdev->gpu_props.props.raw_props.coherency_mode;
3267 const void *coherency_override_dts;
3268 u32 override_coherency;
3269 #endif /* CONFIG_OF */
3271 kbdev->system_coherency = COHERENCY_NONE;
3273 /* device tree may override the coherency */
3275 coherency_override_dts = of_get_property(kbdev->dev->of_node,
3278 if (coherency_override_dts) {
3280 override_coherency = be32_to_cpup(coherency_override_dts);
3282 if ((override_coherency <= COHERENCY_NONE) &&
3283 (supported_coherency_bitmap &
3284 COHERENCY_FEATURE_BIT(override_coherency))) {
3286 kbdev->system_coherency = override_coherency;
3288 dev_info(kbdev->dev,
3289 "Using coherency mode %u set from dtb",
3290 override_coherency);
3292 dev_warn(kbdev->dev,
3293 "Ignoring unsupported coherency mode %u set from dtb",
3294 override_coherency);
3297 #endif /* CONFIG_OF */
3299 kbdev->gpu_props.props.raw_props.coherency_mode =
3300 kbdev->system_coherency;
3303 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3305 /* Callback used by the kbase bus logger client, to initiate a GPU reset
3306 * when the bus log is restarted. The GPU reset is used as a reference point
3307 * in HW bus log analyses.
3309 static void kbase_logging_started_cb(void *data)
3311 struct kbase_device *kbdev = (struct kbase_device *)data;
3313 if (kbase_prepare_to_reset_gpu(kbdev))
3314 kbase_reset_gpu(kbdev);
3315 dev_info(kbdev->dev, "KBASE - Bus logger restarted\n");
3320 static int kbase_common_device_init(struct kbase_device *kbdev)
3323 struct mali_base_gpu_core_props *core_props;
3325 inited_mem = (1u << 0),
3326 inited_js = (1u << 1),
3327 inited_pm_runtime_init = (1u << 6),
3328 #ifdef CONFIG_MALI_DEVFREQ
3329 inited_devfreq = (1u << 9),
3330 #endif /* CONFIG_MALI_DEVFREQ */
3331 #ifdef CONFIG_MALI_MIPE_ENABLED
3332 inited_tlstream = (1u << 10),
3333 #endif /* CONFIG_MALI_MIPE_ENABLED */
3334 inited_backend_early = (1u << 11),
3335 inited_backend_late = (1u << 12),
3336 inited_device = (1u << 13),
3337 inited_vinstr = (1u << 19),
3338 inited_ipa = (1u << 20),
3339 inited_job_fault = (1u << 21)
3344 #if defined(CONFIG_MALI_PLATFORM_VEXPRESS)
3345 u32 ve_logic_tile = 0;
3346 #endif /* CONFIG_MALI_PLATFORM_VEXPRESS */
3348 dev_set_drvdata(kbdev->dev, kbdev);
3350 err = kbase_backend_early_init(kbdev);
3353 inited |= inited_backend_early;
3355 scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name,
3358 kbase_disjoint_init(kbdev);
3360 /* obtain min/max configured gpu frequencies */
3361 core_props = &(kbdev->gpu_props.props.core_props);
3363 /* For Versatile Express platforms, the min and max values of GPU frequency
3364 * depend on the type of the logic tile; these values may not be known
3365 * at build time, so in some cases a platform config file with wrong
3366 * GPU frequency values may be included; to ensure the correct min and
3367 * max GPU frequencies are obtained, the type of the logic tile is
3368 * read from the corresponding register on the platform and the frequency
3369 * values are assigned accordingly. */
3370 #if defined(CONFIG_MALI_PLATFORM_VEXPRESS)
3371 ve_logic_tile = kbase_get_platform_logic_tile_type();
3373 switch (ve_logic_tile) {
3375 /* Virtex 6, HBI0217 */
3376 core_props->gpu_freq_khz_min = VE_VIRTEX6_GPU_FREQ_MIN;
3377 core_props->gpu_freq_khz_max = VE_VIRTEX6_GPU_FREQ_MAX;
3380 /* Virtex 7, HBI0247 */
3381 core_props->gpu_freq_khz_min = VE_VIRTEX7_GPU_FREQ_MIN;
3382 core_props->gpu_freq_khz_max = VE_VIRTEX7_GPU_FREQ_MAX;
3385 /* all other logic tiles, e.g. Virtex 5 HBI0192,
3386 * or an unsuccessful read from the platform -
3387 * fall back to the config_platform default */
3388 core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN;
3389 core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
3393 core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN;
3394 core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
3395 #endif /* CONFIG_MALI_PLATFORM_VEXPRESS */
3397 kbdev->gpu_props.irq_throttle_time_us = DEFAULT_IRQ_THROTTLE_TIME_US;
3399 err = kbase_device_init(kbdev);
3401 dev_err(kbdev->dev, "Can't initialize device (%d)\n", err);
3405 inited |= inited_device;
3407 if (kbdev->pm.callback_power_runtime_init) {
3408 err = kbdev->pm.callback_power_runtime_init(kbdev);
3412 inited |= inited_pm_runtime_init;
3415 err = kbase_mem_init(kbdev);
3419 inited |= inited_mem;
3421 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
3422 gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
3423 gpu_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
3425 kbase_device_coherency_init(kbdev, gpu_id);
3427 err = kbasep_secure_mode_init(kbdev);
3431 err = kbasep_js_devdata_init(kbdev);
3435 inited |= inited_js;
3437 #ifdef CONFIG_MALI_MIPE_ENABLED
3438 err = kbase_tlstream_init();
3440 dev_err(kbdev->dev, "Couldn't initialize timeline stream\n");
3443 inited |= inited_tlstream;
3444 #endif /* CONFIG_MALI_MIPE_ENABLED */
3446 err = kbase_backend_late_init(kbdev);
3449 inited |= inited_backend_late;
3451 #ifdef CONFIG_MALI_DEVFREQ
3452 err = kbase_devfreq_init(kbdev);
3454 dev_err(kbdev->dev, "Couldn't initialize devfreq\n");
3457 inited |= inited_devfreq;
3458 #endif /* CONFIG_MALI_DEVFREQ */
3460 kbdev->vinstr_ctx = kbase_vinstr_init(kbdev);
3461 if (!kbdev->vinstr_ctx) {
3462 dev_err(kbdev->dev, "Can't initialize virtual instrumentation core\n");
3466 inited |= inited_vinstr;
3468 kbdev->ipa_ctx = kbase_ipa_init(kbdev);
3469 if (!kbdev->ipa_ctx) {
3470 dev_err(kbdev->dev, "Can't initialize IPA\n");
3474 inited |= inited_ipa;
3476 err = kbase_debug_job_fault_dev_init(kbdev);
3480 inited |= inited_job_fault;
3482 err = kbase_device_debugfs_init(kbdev);
3486 /* initialise the kctx list */
3487 mutex_init(&kbdev->kctx_list_lock);
3488 INIT_LIST_HEAD(&kbdev->kctx_list);
3490 kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
3491 kbdev->mdev.name = kbdev->devname;
3492 kbdev->mdev.fops = &kbase_fops;
3493 kbdev->mdev.parent = get_device(kbdev->dev);
3495 err = misc_register(&kbdev->mdev);
3497 dev_err(kbdev->dev, "Couldn't register misc dev %s\n", kbdev->devname);
3502 const struct list_head *dev_list = kbase_dev_list_get();
3504 list_add(&kbdev->entry, &kbase_dev_list);
3505 kbase_dev_list_put(dev_list);
3508 dev_info(kbdev->dev, "Probed as %s\n", dev_name(kbdev->mdev.this_device));
3515 put_device(kbdev->dev);
3516 kbase_device_debugfs_term(kbdev);
3518 if (inited & inited_job_fault)
3519 kbase_debug_job_fault_dev_term(kbdev);
3520 if (inited & inited_ipa)
3521 kbase_ipa_term(kbdev->ipa_ctx);
3522 if (inited & inited_vinstr)
3523 kbase_vinstr_term(kbdev->vinstr_ctx);
3524 #ifdef CONFIG_MALI_DEVFREQ
3525 if (inited & inited_devfreq)
3526 kbase_devfreq_term(kbdev);
3527 #endif /* CONFIG_MALI_DEVFREQ */
3528 if (inited & inited_backend_late)
3529 kbase_backend_late_term(kbdev);
3530 #ifdef CONFIG_MALI_MIPE_ENABLED
3531 if (inited & inited_tlstream)
3532 kbase_tlstream_term();
3533 #endif /* CONFIG_MALI_MIPE_ENABLED */
3535 if (inited & inited_js)
3536 kbasep_js_devdata_halt(kbdev);
3538 if (inited & inited_mem)
3539 kbase_mem_halt(kbdev);
3541 if (inited & inited_js)
3542 kbasep_js_devdata_term(kbdev);
3544 if (inited & inited_mem)
3545 kbase_mem_term(kbdev);
3547 if (inited & inited_pm_runtime_init) {
3548 if (kbdev->pm.callback_power_runtime_term)
3549 kbdev->pm.callback_power_runtime_term(kbdev);
3552 if (inited & inited_device)
3553 kbase_device_term(kbdev);
3555 if (inited & inited_backend_early)
3556 kbase_backend_early_term(kbdev);
3562 static struct attribute *kbase_attrs[] = {
3563 #ifdef CONFIG_MALI_DEBUG
3564 &dev_attr_debug_command.attr,
3565 &dev_attr_js_softstop_always.attr,
3567 #if !MALI_CUSTOMER_RELEASE
3568 &dev_attr_force_replay.attr,
3570 &dev_attr_js_timeouts.attr,
3571 &dev_attr_gpuinfo.attr,
3572 &dev_attr_dvfs_period.attr,
3573 &dev_attr_pm_poweroff.attr,
3574 &dev_attr_reset_timeout.attr,
3575 &dev_attr_js_scheduling_period.attr,
3576 &dev_attr_power_policy.attr,
3577 &dev_attr_core_availability_policy.attr,
3578 &dev_attr_core_mask.attr,
3579 &dev_attr_mem_pool_size.attr,
3580 &dev_attr_mem_pool_max_size.attr,
3584 static const struct attribute_group kbase_attr_group = {
3585 .attrs = kbase_attrs,
3588 static int kbase_common_device_remove(struct kbase_device *kbdev);
3590 static int kbase_platform_device_probe(struct platform_device *pdev)
3592 struct kbase_device *kbdev;
3593 struct resource *reg_res;
3598 err = kbase_platform_early_init();
3600 dev_err(&pdev->dev, "Early platform initialization failed\n");
3605 kbdev = kbase_device_alloc();
3607 dev_err(&pdev->dev, "Can't allocate device\n");
3611 #ifdef CONFIG_MALI_NO_MALI
3612 err = gpu_device_create(kbdev);
3614 dev_err(&pdev->dev, "Can't initialize dummy model\n");
3617 #endif /* CONFIG_MALI_NO_MALI */
3619 kbdev->dev = &pdev->dev;
3620 /* 3 IRQ resources */
3621 for (i = 0; i < 3; i++) {
3622 struct resource *irq_res;
3625 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
3627 dev_err(kbdev->dev, "No IRQ resource at index %d\n", i);
3629 goto out_platform_irq;
3633 if (!strcmp(irq_res->name, "JOB")) {
3634 irqtag = JOB_IRQ_TAG;
3635 } else if (!strcmp(irq_res->name, "MMU")) {
3636 irqtag = MMU_IRQ_TAG;
3637 } else if (!strcmp(irq_res->name, "GPU")) {
3638 irqtag = GPU_IRQ_TAG;
3640 dev_err(&pdev->dev, "Invalid irq res name: '%s'\n",
3647 #endif /* CONFIG_OF */
3648 kbdev->irqs[irqtag].irq = irq_res->start;
3649 kbdev->irqs[irqtag].flags = (irq_res->flags & IRQF_TRIGGER_MASK);
3651 /* the first memory resource is the physical address of the GPU registers */
3653 reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3655 dev_err(kbdev->dev, "Invalid register resource\n");
3657 goto out_platform_mem;
3660 kbdev->reg_start = reg_res->start;
3661 kbdev->reg_size = resource_size(reg_res);
3663 err = kbase_common_reg_map(kbdev);
3667 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
3668 && defined(CONFIG_REGULATOR)
3669 kbdev->regulator = regulator_get_optional(kbdev->dev, "mali");
3670 if (IS_ERR_OR_NULL(kbdev->regulator)) {
3671 err = PTR_ERR(kbdev->regulator);
3673 kbdev->regulator = NULL;
3674 if (err == -EPROBE_DEFER)
3676 dev_info(kbdev->dev, "Continuing without Mali regulator control\n");
3677 /* Allow probe to continue without regulator */
3679 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
3681 #ifdef CONFIG_MALI_PLATFORM_DEVICETREE
3682 pm_runtime_enable(kbdev->dev);
3685 kbdev->clock = clk_get(kbdev->dev, "clk_mali");
3686 if (IS_ERR_OR_NULL(kbdev->clock)) {
3687 err = PTR_ERR(kbdev->clock);
3689 kbdev->clock = NULL;
3690 if (err == -EPROBE_DEFER)
3691 goto out_clock_prepare;
3692 dev_info(kbdev->dev, "Continuing without Mali clock control\n");
3693 /* Allow probe to continue without clock. */
3695 err = clk_prepare_enable(kbdev->clock);
3698 "Failed to prepare and enable clock (%d)\n", err);
3699 goto out_clock_prepare;
3703 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) && defined(CONFIG_OF) \
3704 && defined(CONFIG_PM_OPP)
3705 /* Register the OPPs if they are available in device tree */
3706 if (dev_pm_opp_of_add_table(kbdev->dev) < 0)
3707 dev_dbg(kbdev->dev, "OPP table not found\n");
3711 err = kbase_common_device_init(kbdev);
3713 dev_err(kbdev->dev, "Failed kbase_common_device_init\n");
3714 goto out_common_init;
3717 err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
3719 dev_err(&pdev->dev, "Failed to create sysfs entries\n");
3723 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3724 err = bl_core_client_register(kbdev->devname,
3725 kbase_logging_started_cb,
3726 kbdev, &kbdev->buslogger,
3729 dev_err(kbdev->dev, "Couldn't register bus log client\n");
3730 goto out_bl_core_register;
3733 bl_core_set_threshold(kbdev->buslogger, 1024*1024*1024);
3737 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3738 out_bl_core_register:
3739 sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
3743 kbase_common_device_remove(kbdev);
3745 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
3746 dev_pm_opp_of_remove_table(kbdev->dev);
3748 clk_disable_unprepare(kbdev->clock);
3750 clk_put(kbdev->clock);
3751 #ifdef CONFIG_MALI_PLATFORM_DEVICETREE
3752 pm_runtime_disable(kbdev->dev);
3754 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
3755 && defined(CONFIG_REGULATOR)
3757 regulator_put(kbdev->regulator);
3758 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
3759 kbase_common_reg_unmap(kbdev);
3766 #ifdef CONFIG_MALI_NO_MALI
3767 gpu_device_destroy(kbdev);
3769 #endif /* CONFIG_MALI_NO_MALI */
3770 kbase_device_free(kbdev);
3775 static int kbase_common_device_remove(struct kbase_device *kbdev)
3777 kbase_debug_job_fault_dev_term(kbdev);
3778 kbase_ipa_term(kbdev->ipa_ctx);
3779 kbase_vinstr_term(kbdev->vinstr_ctx);
3780 sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
3782 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3783 if (kbdev->buslogger)
3784 bl_core_client_unregister(kbdev->buslogger);
3787 #ifdef CONFIG_DEBUG_FS
3788 debugfs_remove_recursive(kbdev->mali_debugfs_directory);
3790 #ifdef CONFIG_MALI_DEVFREQ
3791 kbase_devfreq_term(kbdev);
3794 kbase_backend_late_term(kbdev);
3796 if (kbdev->pm.callback_power_runtime_term)
3797 kbdev->pm.callback_power_runtime_term(kbdev);
3798 #ifdef CONFIG_MALI_PLATFORM_DEVICETREE
3799 pm_runtime_disable(kbdev->dev);
3802 #ifdef CONFIG_MALI_MIPE_ENABLED
3803 kbase_tlstream_term();
3804 #endif /* CONFIG_MALI_MIPE_ENABLED */
3806 kbasep_js_devdata_halt(kbdev);
3807 kbase_mem_halt(kbdev);
3809 kbasep_js_devdata_term(kbdev);
3810 kbase_mem_term(kbdev);
3811 kbase_backend_early_term(kbdev);
3814 const struct list_head *dev_list = kbase_dev_list_get();
3816 list_del(&kbdev->entry);
3817 kbase_dev_list_put(dev_list);
3819 misc_deregister(&kbdev->mdev);
3820 put_device(kbdev->dev);
3821 kbase_common_reg_unmap(kbdev);
3822 kbase_device_term(kbdev);
3824 clk_disable_unprepare(kbdev->clock);
3825 clk_put(kbdev->clock);
3826 kbdev->clock = NULL;
3828 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
3829 && defined(CONFIG_REGULATOR)
3830 regulator_put(kbdev->regulator);
3831 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
3832 #ifdef CONFIG_MALI_NO_MALI
3833 gpu_device_destroy(kbdev);
3834 #endif /* CONFIG_MALI_NO_MALI */
3835 kbase_device_free(kbdev);
3840 static int kbase_platform_device_remove(struct platform_device *pdev)
3842 struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
3847 return kbase_common_device_remove(kbdev);
3850 /** Suspend callback from the OS.
3852 * This is called by Linux when the device should suspend.
3854 * @param dev The device to suspend
3856 * @return A standard Linux error code
3858 static int kbase_device_suspend(struct device *dev)
3860 struct kbase_device *kbdev = to_kbase_device(dev);
3865 #if defined(CONFIG_PM_DEVFREQ) && \
3866 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3867 devfreq_suspend_device(kbdev->devfreq);
3870 kbase_pm_suspend(kbdev);
3874 /** Resume callback from the OS.
3876 * This is called by Linux when the device should resume from suspension.
3878 * @param dev The device to resume
3880 * @return A standard Linux error code
3882 static int kbase_device_resume(struct device *dev)
3884 struct kbase_device *kbdev = to_kbase_device(dev);
3889 kbase_pm_resume(kbdev);
3891 #if defined(CONFIG_PM_DEVFREQ) && \
3892 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3893 devfreq_resume_device(kbdev->devfreq);
3898 /** Runtime suspend callback from the OS.
3900 * This is called by Linux when the device should prepare for a condition in which it will
3901 * not be able to communicate with the CPU(s) and RAM due to power management.
3903 * @param dev The device to suspend
3905 * @return A standard Linux error code
3907 #ifdef KBASE_PM_RUNTIME
3908 static int kbase_device_runtime_suspend(struct device *dev)
3910 struct kbase_device *kbdev = to_kbase_device(dev);
3915 #if defined(CONFIG_PM_DEVFREQ) && \
3916 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3917 devfreq_suspend_device(kbdev->devfreq);
3920 if (kbdev->pm.backend.callback_power_runtime_off) {
3921 kbdev->pm.backend.callback_power_runtime_off(kbdev);
3922 dev_dbg(dev, "runtime suspend\n");
3926 #endif /* KBASE_PM_RUNTIME */
3928 /** Runtime resume callback from the OS.
3930 * This is called by Linux when the device should go into a fully active state.
3932 * @param dev The device to resume
3934 * @return A standard Linux error code
3937 #ifdef KBASE_PM_RUNTIME
3938 static int kbase_device_runtime_resume(struct device *dev)
3941 struct kbase_device *kbdev = to_kbase_device(dev);
3946 if (kbdev->pm.backend.callback_power_runtime_on) {
3947 ret = kbdev->pm.backend.callback_power_runtime_on(kbdev);
3948 dev_dbg(dev, "runtime resume\n");
3951 #if defined(CONFIG_PM_DEVFREQ) && \
3952 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3953 devfreq_resume_device(kbdev->devfreq);
3958 #endif /* KBASE_PM_RUNTIME */
3961 #ifdef KBASE_PM_RUNTIME
3963 * kbase_device_runtime_idle - Runtime idle callback from the OS.
3964 * @dev: The device to check
3966 * This is called by Linux when the device appears to be inactive and it might
3967 * be placed into a low power state.
3969 * Return: 0 if device can be suspended, non-zero to avoid runtime autosuspend,
3970 * otherwise a standard Linux error code
3972 static int kbase_device_runtime_idle(struct device *dev)
3974 struct kbase_device *kbdev = to_kbase_device(dev);
3979 /* Use platform specific implementation if it exists. */
3980 if (kbdev->pm.backend.callback_power_runtime_idle)
3981 return kbdev->pm.backend.callback_power_runtime_idle(kbdev);
3985 #endif /* KBASE_PM_RUNTIME */
3987 /** The power management operations for the platform driver.
3989 static const struct dev_pm_ops kbase_pm_ops = {
3990 .suspend = kbase_device_suspend,
3991 .resume = kbase_device_resume,
3992 #ifdef KBASE_PM_RUNTIME
3993 .runtime_suspend = kbase_device_runtime_suspend,
3994 .runtime_resume = kbase_device_runtime_resume,
3995 .runtime_idle = kbase_device_runtime_idle,
3996 #endif /* KBASE_PM_RUNTIME */
4000 static const struct of_device_id kbase_dt_ids[] = {
4001 { .compatible = "arm,malit7xx" },
4002 { .compatible = "arm,mali-midgard" },
4005 MODULE_DEVICE_TABLE(of, kbase_dt_ids);
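/*
 * Minimal illustrative device tree node matching the resources this driver
 * probes for (a sketch only: the unit address, interrupt specifiers, clock
 * and regulator phandles are placeholders, and the authoritative binding is
 * the Mali Midgard devicetree documentation):
 *
 *   gpu@ffa30000 {
 *           compatible = "arm,mali-midgard";
 *           reg = <0xffa30000 0x10000>;
 *           interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
 *                        <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
 *                        <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
 *           interrupt-names = "JOB", "MMU", "GPU";
 *           clocks = <&cru ACLK_GPU>;
 *           clock-names = "clk_mali";
 *           mali-supply = <&vdd_gpu>;
 *   };
 */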
4008 static struct platform_driver kbase_platform_driver = {
4009 .probe = kbase_platform_device_probe,
4010 .remove = kbase_platform_device_remove,
4012 .name = kbase_drv_name,
4013 .owner = THIS_MODULE,
4014 .pm = &kbase_pm_ops,
4015 .of_match_table = of_match_ptr(kbase_dt_ids),
4020 * When using Device Tree, the driver no longer provides a shortcut to
4021 * create the Mali platform device.
4024 module_platform_driver(kbase_platform_driver);
4027 static int __init rockchip_gpu_init_driver(void)
4029 return platform_driver_register(&kbase_platform_driver);
4031 late_initcall(rockchip_gpu_init_driver);
4033 static int __init kbase_driver_init(void)
4037 ret = kbase_platform_early_init();
4041 #ifndef CONFIG_MACH_MANTA
4042 #ifdef CONFIG_MALI_PLATFORM_FAKE
4043 ret = kbase_platform_fake_register();
4048 ret = platform_driver_register(&kbase_platform_driver);
4049 #ifndef CONFIG_MACH_MANTA
4050 #ifdef CONFIG_MALI_PLATFORM_FAKE
4052 kbase_platform_fake_unregister();
4058 static void __exit kbase_driver_exit(void)
4060 platform_driver_unregister(&kbase_platform_driver);
4061 #ifndef CONFIG_MACH_MANTA
4062 #ifdef CONFIG_MALI_PLATFORM_FAKE
4063 kbase_platform_fake_unregister();
4068 module_init(kbase_driver_init);
4069 module_exit(kbase_driver_exit);
4071 #endif /* CONFIG_OF */
4073 MODULE_LICENSE("GPL");
4074 MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \
4075 __stringify(BASE_UK_VERSION_MAJOR) "." \
4076 __stringify(BASE_UK_VERSION_MINOR) ")");
4078 #if defined(CONFIG_MALI_GATOR_SUPPORT) || defined(CONFIG_MALI_SYSTEM_TRACE)
4079 #define CREATE_TRACE_POINTS
4082 #ifdef CONFIG_MALI_GATOR_SUPPORT
4083 /* Create the trace points (otherwise we just get code to call a tracepoint) */
4084 #include "mali_linux_trace.h"
4086 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_job_slots_event);
4087 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_status);
4088 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_on);
4089 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_off);
4090 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_page_fault_insert_pages);
4091 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_in_use);
4092 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_released);
4093 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_total_alloc_pages_change);
4094 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_sw_counter);
4096 void kbase_trace_mali_pm_status(u32 event, u64 value)
4098 trace_mali_pm_status(event, value);
4101 void kbase_trace_mali_pm_power_off(u32 event, u64 value)
4103 trace_mali_pm_power_off(event, value);
4106 void kbase_trace_mali_pm_power_on(u32 event, u64 value)
4108 trace_mali_pm_power_on(event, value);
4111 void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id)
4113 trace_mali_job_slots_event(event, (kctx != NULL ? kctx->tgid : 0), (kctx != NULL ? kctx->pid : 0), atom_id);
4116 void kbase_trace_mali_page_fault_insert_pages(int event, u32 value)
4118 trace_mali_page_fault_insert_pages(event, value);
4121 void kbase_trace_mali_mmu_as_in_use(int event)
4123 trace_mali_mmu_as_in_use(event);
4126 void kbase_trace_mali_mmu_as_released(int event)
4128 trace_mali_mmu_as_released(event);
4131 void kbase_trace_mali_total_alloc_pages_change(long long int event)
4133 trace_mali_total_alloc_pages_change(event);
4135 #endif /* CONFIG_MALI_GATOR_SUPPORT */
4136 #ifdef CONFIG_MALI_SYSTEM_TRACE
4137 #include "mali_linux_kbase_trace.h"