4 * (C) COPYRIGHT ARM Limited. All rights reserved.
6 * This program is free software and is provided to you under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation, and any use by you of this program is subject to the terms
11 * A copy of the licence is included with the program, and can also be obtained
12 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
13 * Boston, MA 02110-1301, USA.
22 * @file mali_kbase_core_linux.c
23 * Base kernel driver init.
26 #include <kbase/src/common/mali_kbase.h>
27 #include <kbase/src/common/mali_kbase_uku.h>
28 #include <kbase/src/common/mali_midg_regmap.h>
29 #include <kbase/src/common/mali_kbase_gator.h>
30 #include <kbase/src/linux/mali_kbase_mem_linux.h>
31 #ifdef CONFIG_MALI_NO_MALI
32 #include "mali_kbase_model_linux.h"
33 #endif /* CONFIG_MALI_NO_MALI */
36 #include <linux/kds.h>
37 #include <linux/anon_inodes.h>
38 #include <linux/syscalls.h>
39 #endif /* CONFIG_KDS */
41 #include <linux/module.h>
42 #include <linux/init.h>
43 #include <linux/poll.h>
44 #include <linux/kernel.h>
45 #include <linux/errno.h>
47 #include <linux/platform_device.h>
48 #include <linux/miscdevice.h>
49 #include <linux/list.h>
50 #include <linux/semaphore.h>
52 #include <linux/uaccess.h>
53 #include <linux/interrupt.h>
55 #include <linux/compat.h> /* is_compat_task */
56 #include <kbase/src/common/mali_kbase_hw.h>
57 #include <kbase/src/platform/mali_kbase_platform_common.h>
59 #include <kbase/src/linux/mali_kbase_sync.h>
60 #endif /* CONFIG_SYNC */
63 * This file is included since when we support device tree we don't
64 * use the platform fake code for registering the kbase config attributes.
67 #include <kbase/mali_kbase_config.h>
70 //chenli: add for integration tests
71 #ifdef CONFIG_MALI_DEBUG
72 #include <kbase/src/integration_kit/MaliFns.h>
/* Per-IRQ bookkeeping: maps one of the driver's interrupt lines to its
 * registered handler.
 * NOTE(review): this capture is truncated -- the struct's other fields
 * (e.g. the irq number) and the surrounding #ifdef are not visible. */
78 struct kbase_irq_table {
80 irq_handler_t handler;
/* Data block shared with the user-side unit-test harness; exported so a
 * test module can read/write it directly. Built only for MALI_UNIT_TEST. */
83 kbase_exported_test_data shared_kernel_test_data;
84 EXPORT_SYMBOL(shared_kernel_test_data);
85 #endif /* MALI_UNIT_TEST */
/* Device node / driver name ("/dev/mali<N>") and per-module state. */
87 #define KBASE_DRV_NAME "mali"
89 static const char kbase_drv_name[] = KBASE_DRV_NAME;
/* Next minor/device number to assign; incremented per probed device. */
91 static int kbase_dev_nr;
/* Global list of probed kbase devices, guarded by kbase_dev_list_lock
 * (see kbase_dev_list_get()/kbase_dev_list_put()/kbase_find_device()). */
93 static DEFINE_SEMAPHORE(kbase_dev_list_lock);
94 static LIST_HEAD(kbase_dev_list);
96 KBASE_EXPORT_TEST_API(kbase_dev_list_lock)
97 KBASE_EXPORT_TEST_API(kbase_dev_list)
/* Version string returned to user space by KBASE_FUNC_GET_VERSION. */
98 #define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
/* Never called at runtime: exists only to host compile-time checks.
 * Verifies the DDK version string fits the user/kernel version buffer,
 * so the unchecked memcpy in KBASE_FUNC_GET_VERSION is safe. */
99 static INLINE void __compile_time_asserts(void)
101 CSTD_COMPILE_TIME_ASSERT(sizeof(KERNEL_SIDE_DDK_VERSION_STRING) <= KBASE_GET_VERSION_BUFFER_SIZE);
/* Private data attached to the anonymous "kds_ext" fd handed to user
 * space by kbase_external_buffer_lock(); holds the KDS lock to release
 * when the fd is closed. */
106 typedef struct kbasep_kds_resource_set_file_data {
107 struct kds_resource_set *lock;
108 } kbasep_kds_resource_set_file_data;
110 static int kds_resource_release(struct inode *inode, struct file *file);
/* fops for the "kds_ext" fd: only release is needed, to drop the lock. */
112 static const struct file_operations kds_resource_fops = {
113 .release = kds_resource_release
/* Resource array + exclusive-access bitmap built by
 * kbasep_kds_allocate_resource_list_data() and consumed by kds_waitall(). */
116 typedef struct kbase_kds_resource_list_data {
117 struct kds_resource **kds_resources;
118 unsigned long *kds_access_bitmap;
120 } kbase_kds_resource_list_data;
/* fops->release for the "kds_ext" fd: releases the KDS resource-set
 * lock (if still held) stored in the fd's private data.
 * NOTE(review): the tail of this function (presumably freeing 'data'
 * and returning 0) is not visible in this capture -- confirm upstream. */
122 static int kds_resource_release(struct inode *inode, struct file *file)
124 struct kbasep_kds_resource_set_file_data *data;
126 data = (struct kbasep_kds_resource_set_file_data *)file->private_data;
128 if (NULL != data->lock)
129 kds_resource_set_release(&data->lock);
136 mali_error kbasep_kds_allocate_resource_list_data(kbase_context *kctx, base_external_resource *ext_res, int num_elems, kbase_kds_resource_list_data *resources_list)
138 base_external_resource *res = ext_res;
141 /* assume we have to wait for all */
143 KBASE_DEBUG_ASSERT(0 != num_elems);
144 resources_list->kds_resources = kmalloc(sizeof(struct kds_resource *) * num_elems, GFP_KERNEL);
146 if (NULL == resources_list->kds_resources)
147 return MALI_ERROR_OUT_OF_MEMORY;
149 KBASE_DEBUG_ASSERT(0 != num_elems);
150 resources_list->kds_access_bitmap = kzalloc(sizeof(unsigned long) * ((num_elems + BITS_PER_LONG - 1) / BITS_PER_LONG), GFP_KERNEL);
152 if (NULL == resources_list->kds_access_bitmap) {
153 kfree(resources_list->kds_access_bitmap);
154 return MALI_ERROR_OUT_OF_MEMORY;
157 for (res_id = 0; res_id < num_elems; res_id++, res++) {
159 kbase_va_region *reg;
160 struct kds_resource *kds_res = NULL;
162 exclusive = res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE;
163 reg = kbase_region_tracker_find_region_enclosing_address(kctx, res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
165 /* did we find a matching region object? */
169 /* no need to check reg->alloc as only regions with an alloc has
170 * a size, and kbase_region_tracker_find_region_enclosing_address
171 * only returns regions with size > 0 */
172 switch (reg->alloc->type) {
173 #if defined(CONFIG_UMP) && defined(CONFIG_KDS)
174 case KBASE_MEM_TYPE_IMPORTED_UMP:
175 kds_res = ump_dd_kds_resource_get(reg->alloc->imported.ump_handle);
177 #endif /* defined(CONFIG_UMP) && defined(CONFIG_KDS) */
182 /* no kds resource for the region ? */
186 resources_list->kds_resources[res_id] = kds_res;
189 set_bit(res_id, resources_list->kds_access_bitmap);
192 /* did the loop run to completion? */
193 if (res_id == num_elems)
194 return MALI_ERROR_NONE;
196 /* Clean up as the resource list is not valid. */
197 kfree(resources_list->kds_resources);
198 kfree(resources_list->kds_access_bitmap);
200 return MALI_ERROR_FUNCTION_FAILED;
/* Validate a user/kernel pointer union coming from user space: rejects a
 * NULL payload, checking the compat (32-bit) member for compat tasks and
 * the native member otherwise.
 * NOTE(review): the return statements and the #ifdef CONFIG_COMPAT
 * opening are missing from this capture; presumably returns MALI_FALSE
 * for NULL and MALI_TRUE otherwise -- confirm upstream. */
203 mali_bool kbasep_validate_kbase_pointer(kbase_pointer *p)
206 if (is_compat_task()) {
207 if (p->compat_value == 0)
210 #endif /* CONFIG_COMPAT */
211 if (NULL == p->value)
215 #endif /* CONFIG_COMPAT */
/* KBASE_FUNC_EXT_BUFFER_LOCK backend: copy an array of external resource
 * descriptors from user space, resolve them to KDS resources, create an
 * anonymous "kds_ext" fd (whose release drops the lock), copy the fd back
 * to user space, then block in kds_waitall() to acquire all resources.
 * Returns MALI_ERROR_NONE on success; OUT_OF_MEMORY / FUNCTION_FAILED
 * otherwise. NOTE(review): several cleanup/closing lines are missing
 * from this capture (fdata NULL-check branch, sys_close/kfree on the
 * error path, final kfree(ext_res_copy)). */
219 mali_error kbase_external_buffer_lock(kbase_context *kctx, kbase_uk_ext_buff_kds_data *args, u32 args_size)
221 base_external_resource *ext_res_copy;
222 size_t ext_resource_size;
223 mali_error return_error = MALI_ERROR_FUNCTION_FAILED;
/* Reject mismatched call size and invalid user pointers / counts before
 * sizing any allocation from user-controlled num_res. */
226 if (args_size != sizeof(kbase_uk_ext_buff_kds_data))
227 return MALI_ERROR_FUNCTION_FAILED;
229 /* Check user space has provided valid data */
230 if (!kbasep_validate_kbase_pointer(&args->external_resource) || !kbasep_validate_kbase_pointer(&args->file_descriptor) || (0 == args->num_res) || (args->num_res > KBASE_MAXIMUM_EXT_RESOURCES))
231 return MALI_ERROR_FUNCTION_FAILED;
233 ext_resource_size = sizeof(base_external_resource) * args->num_res;
235 KBASE_DEBUG_ASSERT(0 != ext_resource_size);
236 ext_res_copy = kmalloc(ext_resource_size, GFP_KERNEL);
238 if (NULL != ext_res_copy) {
239 base_external_resource *__user ext_res_user;
240 int *__user file_descriptor_user;
/* Pick the 32-bit (compat) or native form of the user pointers. */
242 if (is_compat_task()) {
243 ext_res_user = compat_ptr(args->external_resource.compat_value);
244 file_descriptor_user = compat_ptr(args->file_descriptor.compat_value);
246 #endif /* CONFIG_COMPAT */
247 ext_res_user = args->external_resource.value;
248 file_descriptor_user = args->file_descriptor.value;
251 #endif /* CONFIG_COMPAT */
253 /* Copy the external resources to lock from user space */
254 if (0 == copy_from_user(ext_res_copy, ext_res_user, ext_resource_size)) {
255 kbasep_kds_resource_set_file_data *fdata;
257 /* Allocate data to be stored in the file */
258 fdata = kmalloc(sizeof(kbasep_kds_resource_set_file_data), GFP_KERNEL);
261 kbase_kds_resource_list_data resource_list_data;
262 /* Parse given elements and create resource and access lists */
263 return_error = kbasep_kds_allocate_resource_list_data(kctx, ext_res_copy, args->num_res, &resource_list_data);
264 if (MALI_ERROR_NONE == return_error) {
/* Publish the fd to user space BEFORE blocking on the resources. */
269 fd = anon_inode_getfd("kds_ext", &kds_resource_fops, fdata, 0);
271 err = copy_to_user(file_descriptor_user, &fd, sizeof(fd));
273 /* If the file descriptor was valid and we successfully copied it to user space, then we
274 * can try and lock the requested kds resources.
276 if ((fd >= 0) && (0 == err)) {
277 struct kds_resource_set *lock;
279 lock = kds_waitall(args->num_res, resource_list_data.kds_access_bitmap, resource_list_data.kds_resources, KDS_WAIT_BLOCKING);
281 if (IS_ERR_OR_NULL(lock)) {
282 return_error = MALI_ERROR_FUNCTION_FAILED;
284 return_error = MALI_ERROR_NONE;
288 return_error = MALI_ERROR_FUNCTION_FAILED;
/* The per-element lists are only needed to acquire the lock. */
291 kfree(resource_list_data.kds_resources);
292 kfree(resource_list_data.kds_access_bitmap);
295 if (MALI_ERROR_NONE != return_error) {
296 /* If the file was opened successfully then close it which will clean up
297 * the file data, otherwise we clean up the file data ourself. */
304 return_error = MALI_ERROR_OUT_OF_MEMORY;
311 #endif /* CONFIG_KDS */
/* Central user/kernel (UK) call dispatcher, invoked from kbase_ioctl()
 * with the argument struct already copied into kernel memory.
 * Protocol: per-call status goes in ukh->ret; the function's own return
 * value only signals transport-level failure (wrong args_size, unknown
 * id). Before a context is fully set up, only version-check and
 * KBASE_FUNC_SET_FLAGS are accepted.
 * NOTE(review): many structural lines (switch(id), braces, 'goto
 * bad_size', local declarations) are missing from this capture; the
 * visible lines are annotated as-is. */
313 static mali_error kbase_dispatch(kbase_context *kctx, void * const args, u32 args_size)
315 struct kbase_device *kbdev;
316 uk_header *ukh = args;
319 KBASE_DEBUG_ASSERT(ukh != NULL);
323 ukh->ret = MALI_ERROR_NONE; /* Be optimistic */
/* Version handshake is answered even before context setup. */
325 if (UKP_FUNC_ID_CHECK_VERSION == id) {
326 if (args_size == sizeof(uku_version_check_args)) {
327 uku_version_check_args *version_check = (uku_version_check_args *)args;
329 version_check->major = BASE_UK_VERSION_MAJOR;
330 version_check->minor = BASE_UK_VERSION_MINOR;
332 ukh->ret = MALI_ERROR_NONE;
334 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
336 return MALI_ERROR_NONE;
/* Setup gate: exactly one caller may perform setup, and the only call
 * accepted while setup is pending is KBASE_FUNC_SET_FLAGS. */
340 if (!atomic_read(&kctx->setup_complete)) {
341 /* setup pending, try to signal that we'll do the setup */
342 if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1)) {
343 /* setup was already in progress, err this call */
344 return MALI_ERROR_FUNCTION_FAILED;
347 /* we're the one doing setup */
349 /* is it the only call we accept? */
350 if (id == KBASE_FUNC_SET_FLAGS) {
351 kbase_uk_set_flags *kbase_set_flags = (kbase_uk_set_flags *) args;
353 if (sizeof(*kbase_set_flags) != args_size) {
354 /* not matching the expected call, stay stuck in setup mode */
358 if (MALI_ERROR_NONE != kbase_context_set_create_flags(kctx, kbase_set_flags->create_flags)) {
359 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
360 /* bad flags, will stay stuck in setup mode */
361 return MALI_ERROR_NONE;
363 /* we've done the setup, all OK */
364 atomic_set(&kctx->setup_complete, 1);
365 return MALI_ERROR_NONE;
368 /* unexpected call, will stay stuck in setup mode */
369 return MALI_ERROR_FUNCTION_FAILED;
373 /* setup complete, perform normal operation */
/* --- memory management calls --- */
375 case KBASE_FUNC_MEM_ALLOC:
377 kbase_uk_mem_alloc *mem = args;
378 struct kbase_va_region *reg;
380 if (sizeof(*mem) != args_size)
383 reg = kbase_mem_alloc(kctx, mem->va_pages, mem->commit_pages, mem->extent, &mem->flags, &mem->gpu_va, &mem->va_alignment);
385 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
388 case KBASE_FUNC_MEM_IMPORT:
390 kbase_uk_mem_import *mem_import = args;
394 if (sizeof(*mem_import) != args_size)
/* Import handle itself lives in user memory; fetch via get_user below. */
397 if (is_compat_task())
398 phandle = compat_ptr(mem_import->phandle.compat_value);
401 phandle = mem_import->phandle.value;
403 switch (mem_import->type) {
404 case BASE_MEM_IMPORT_TYPE_UMP:
405 get_user(handle, phandle);
407 case BASE_MEM_IMPORT_TYPE_UMM:
408 get_user(handle, phandle);
415 if (kbase_mem_import(kctx, mem_import->type, handle, &mem_import->gpu_va, &mem_import->va_pages, &mem_import->flags)) {
417 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
421 case KBASE_FUNC_MEM_COMMIT:
423 kbase_uk_mem_commit *commit = args;
425 if (sizeof(*commit) != args_size)
/* GPU addresses must be page aligned for all memory ops below. */
428 if (commit->gpu_addr & ~PAGE_MASK) {
429 KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "kbase_dispatch case KBASE_FUNC_MEM_COMMIT: commit->gpu_addr: passed parameter is invalid");
430 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
434 if (kbase_mem_commit(kctx, commit->gpu_addr, commit->pages, (base_backing_threshold_status*)&commit->result_subcode))
435 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
439 case KBASE_FUNC_MEM_QUERY:
441 kbase_uk_mem_query *query = args;
442 if (sizeof(*query) != args_size)
445 if (query->gpu_addr & ~PAGE_MASK) {
446 KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->gpu_addr: passed parameter is invalid");
447 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
450 if (query->query != KBASE_MEM_QUERY_COMMIT_SIZE &&
451 query->query != KBASE_MEM_QUERY_VA_SIZE &&
452 query->query != KBASE_MEM_QUERY_FLAGS) {
453 KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->query = %lld unknown", (unsigned long long)query->query);
454 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
458 ukh->ret = kbase_mem_query(kctx, query->gpu_addr, query->query, &query->value);
463 case KBASE_FUNC_MEM_FLAGS_CHANGE:
465 kbase_uk_mem_flags_change * fc = args;
466 if (sizeof(*fc) != args_size)
469 if ((fc->gpu_va & ~PAGE_MASK) && (fc->gpu_va >= PAGE_SIZE)) {
470 KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "kbase_dispatch case KBASE_FUNC_MEM_FLAGS_CHANGE: mem->gpu_va: passed parameter is invalid");
471 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
475 if (kbase_mem_flags_change(kctx, fc->gpu_va, fc->flags, fc->mask))
476 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
480 case KBASE_FUNC_MEM_FREE:
482 kbase_uk_mem_free *mem = args;
484 if (sizeof(*mem) != args_size)
487 if ((mem->gpu_addr & ~PAGE_MASK) && (mem->gpu_addr >= PAGE_SIZE)) {
488 KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "kbase_dispatch case KBASE_FUNC_MEM_FREE: mem->gpu_addr: passed parameter is invalid");
489 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
493 if (kbase_mem_free(kctx, mem->gpu_addr))
494 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
/* --- job submission / sync --- */
498 case KBASE_FUNC_JOB_SUBMIT:
500 kbase_uk_job_submit *job = args;
502 if (sizeof(*job) != args_size)
505 if (MALI_ERROR_NONE != kbase_jd_submit(kctx, job))
506 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
510 case KBASE_FUNC_SYNC:
512 kbase_uk_sync_now *sn = args;
514 if (sizeof(*sn) != args_size)
517 if (sn->sset.basep_sset.mem_handle & ~PAGE_MASK) {
518 KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "kbase_dispatch case KBASE_FUNC_SYNC: sn->sset.basep_sset.mem_handle: passed parameter is invalid");
519 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
523 if (MALI_ERROR_NONE != kbase_sync_now(kctx, &sn->sset))
524 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
528 case KBASE_FUNC_POST_TERM:
530 kbase_event_close(kctx);
/* --- hardware counter instrumentation --- */
534 case KBASE_FUNC_HWCNT_SETUP:
536 kbase_uk_hwcnt_setup *setup = args;
538 if (sizeof(*setup) != args_size)
541 if (MALI_ERROR_NONE != kbase_instr_hwcnt_setup(kctx, setup))
542 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
546 case KBASE_FUNC_HWCNT_DUMP:
549 if (MALI_ERROR_NONE != kbase_instr_hwcnt_dump(kctx))
550 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
554 case KBASE_FUNC_HWCNT_CLEAR:
557 if (MALI_ERROR_NONE != kbase_instr_hwcnt_clear(kctx))
558 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
/* --- property dumps --- */
562 case KBASE_FUNC_CPU_PROPS_REG_DUMP:
564 kbase_uk_cpuprops *setup = args;
566 if (sizeof(*setup) != args_size)
569 if (MALI_ERROR_NONE != kbase_cpuprops_uk_get_props(kctx, setup))
570 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
574 case KBASE_FUNC_GPU_PROPS_REG_DUMP:
576 kbase_uk_gpuprops *setup = args;
578 if (sizeof(*setup) != args_size)
581 if (MALI_ERROR_NONE != kbase_gpuprops_uk_get_props(kctx, setup))
582 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
585 case KBASE_FUNC_FIND_CPU_MAPPING:
587 kbase_uk_find_cpu_mapping *find = args;
588 struct kbase_cpu_mapping *map;
590 if (sizeof(*find) != args_size)
593 if (find->gpu_addr & ~PAGE_MASK) {
594 KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "kbase_dispatch case KBASE_FUNC_FIND_CPU_MAPPING: find->gpu_addr: passed parameter is invalid");
598 KBASE_DEBUG_ASSERT(find != NULL);
/* Guard narrowing casts below on 32-bit kernels. */
599 if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX)
602 map = kbasep_find_enclosing_cpu_mapping(kctx, find->gpu_addr, (uintptr_t) find->cpu_addr, (size_t) find->size);
605 find->uaddr = PTR_TO_U64(map->vma->vm_start);
606 find->page_off = map->page_off;
608 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
612 case KBASE_FUNC_GET_VERSION:
614 kbase_uk_get_ddk_version *get_version = (kbase_uk_get_ddk_version *) args;
616 if (sizeof(*get_version) != args_size)
619 /* version buffer size check is made in compile time assert */
620 memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
621 get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
/* --- sync/fence support (CONFIG_SYNC) --- */
625 case KBASE_FUNC_STREAM_CREATE:
628 kbase_uk_stream_create *screate = (kbase_uk_stream_create *) args;
630 if (sizeof(*screate) != args_size)
633 if (strnlen(screate->name, sizeof(screate->name)) >= sizeof(screate->name)) {
634 /* not NULL terminated */
635 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
639 ukh->ret = kbase_stream_create(screate->name, &screate->fd);
640 #else /* CONFIG_SYNC */
641 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
642 #endif /* CONFIG_SYNC */
645 case KBASE_FUNC_FENCE_VALIDATE:
648 kbase_uk_fence_validate *fence_validate = (kbase_uk_fence_validate *) args;
649 if (sizeof(*fence_validate) != args_size)
652 ukh->ret = kbase_fence_validate(fence_validate->fd);
653 #endif /* CONFIG_SYNC */
657 case KBASE_FUNC_EXT_BUFFER_LOCK:
660 ukh->ret = kbase_external_buffer_lock(kctx, (kbase_uk_ext_buff_kds_data *) args, args_size);
661 #endif /* CONFIG_KDS */
/* --- test / debug-only calls --- */
665 case KBASE_FUNC_SET_TEST_DATA:
668 kbase_uk_set_test_data *set_data = args;
670 shared_kernel_test_data = set_data->test_data;
671 shared_kernel_test_data.kctx.value = kctx;
672 shared_kernel_test_data.mm.value = (void *)current->mm;
673 ukh->ret = MALI_ERROR_NONE;
674 #endif /* MALI_UNIT_TEST */
678 case KBASE_FUNC_INJECT_ERROR:
680 #ifdef CONFIG_MALI_ERROR_INJECT
/* NOTE(review): "¶ms" below is mojibake for "&params" (HTML
 * entity damage in this capture). */
682 kbase_error_params params = ((kbase_uk_error_params *) args)->params;
684 spin_lock_irqsave(&kbdev->osdev.reg_op_lock, flags);
685 ukh->ret = job_atom_inject_error(¶ms);
686 spin_unlock_irqrestore(&kbdev->osdev.reg_op_lock, flags);
688 #endif /* CONFIG_MALI_ERROR_INJECT */
692 case KBASE_FUNC_MODEL_CONTROL:
694 #ifdef CONFIG_MALI_NO_MALI
696 kbase_model_control_params params = ((kbase_uk_model_control_params *) args)->params;
698 spin_lock_irqsave(&kbdev->osdev.reg_op_lock, flags);
699 ukh->ret = midg_model_control(kbdev->osdev.model, ¶ms);
700 spin_unlock_irqrestore(&kbdev->osdev.reg_op_lock, flags);
702 #endif /* CONFIG_MALI_NO_MALI */
706 case KBASE_FUNC_KEEP_GPU_POWERED:
708 kbase_uk_keep_gpu_powered *kgp = (kbase_uk_keep_gpu_powered *) args;
709 /* A suspend won't happen here, because we're in a syscall from a
712 * Nevertheless, we'd get the wrong pm_context_active/idle counting
713 * here if a suspend did happen, so let's assert it won't: */
714 KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
/* Toggle the per-context keep-powered reference exactly once. */
716 if (kgp->enabled && !kctx->keep_gpu_powered) {
717 kbase_pm_context_active(kbdev);
718 atomic_inc(&kbdev->keep_gpu_powered_count);
719 kctx->keep_gpu_powered = MALI_TRUE;
720 } else if (!kgp->enabled && kctx->keep_gpu_powered) {
721 atomic_dec(&kbdev->keep_gpu_powered_count);
722 kbase_pm_context_idle(kbdev);
723 kctx->keep_gpu_powered = MALI_FALSE;
729 case KBASE_FUNC_GET_PROFILING_CONTROLS :
731 struct kbase_uk_profiling_controls *controls = \
732 (struct kbase_uk_profiling_controls *)args;
735 if (sizeof(*controls) != args_size)
738 for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++) {
739 controls->profiling_controls[i] = kbase_get_profiling_control(kbdev, i);
745 /* used only for testing purposes; these controls are to be set by gator through gator API */
746 case KBASE_FUNC_SET_PROFILING_CONTROLS :
748 struct kbase_uk_profiling_controls *controls = \
749 (struct kbase_uk_profiling_controls *)args;
752 if (sizeof(*controls) != args_size)
755 for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
757 _mali_profiling_control(i, controls->profiling_controls[i]);
/* Unknown id / bad size epilogue. */
764 dev_err(kbdev->osdev.dev, "unknown ioctl %u", id);
768 return MALI_ERROR_NONE;
771 dev_err(kbdev->osdev.dev, "Wrong syscall size (%d) for %08x\n", args_size, id);
773 return MALI_ERROR_FUNCTION_FAILED;
/* Recover the kbase device from a struct device: the probe path stores
 * it as the device's driver data. */
static struct kbase_device *to_kbase_device(struct device *dev)
{
	return dev_get_drvdata(dev);
}
782 * API to acquire device list semaphore and
783 * return pointer to the device list head
785 const struct list_head *kbase_dev_list_get(void)
787 down(&kbase_dev_list_lock);
788 return &kbase_dev_list;
791 /* API to release the device list semaphore */
792 void kbase_dev_list_put(const struct list_head *dev_list)
794 up(&kbase_dev_list_lock);
797 /* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */
/* On a match, takes a reference on the device (get_device) which the
 * caller must drop with kbase_release_device().
 * NOTE(review): the line assigning kbdev (presumably "kbdev = tmp;"
 * before get_device), the loop break, and the final return are missing
 * from this capture -- as shown, kbdev would still be NULL at
 * get_device(); confirm against the pristine source. */
798 struct kbase_device *kbase_find_device(int minor)
800 struct kbase_device *kbdev = NULL;
801 struct list_head *entry;
803 down(&kbase_dev_list_lock);
804 list_for_each(entry, &kbase_dev_list) {
805 struct kbase_device *tmp;
807 tmp = list_entry(entry, struct kbase_device, osdev.entry);
808 if (tmp->osdev.mdev.minor == minor || minor == -1) {
810 get_device(kbdev->osdev.dev);
814 up(&kbase_dev_list_lock);
818 EXPORT_SYMBOL(kbase_find_device);
820 void kbase_release_device(struct kbase_device *kbdev)
822 put_device(kbdev->osdev.dev);
824 EXPORT_SYMBOL(kbase_release_device);
/* fops->open: look up the device by minor number, create a fresh kbase
 * context, attach it to the file, and record it on the device's context
 * list (best effort -- failure to record is only warned about).
 * NOTE(review): error-path lines (find/create failure returns, the
 * element NULL-check, the success return) are missing from this
 * capture. */
826 static int kbase_open(struct inode *inode, struct file *filp)
828 struct kbase_device *kbdev = NULL;
832 kbdev = kbase_find_device(iminor(inode));
837 kctx = kbase_create_context(kbdev);
843 init_waitqueue_head(&kctx->osctx.event_queue);
844 filp->private_data = kctx;
846 dev_dbg(kbdev->osdev.dev, "created base context\n");
849 kbasep_kctx_list_element *element;
851 element = kzalloc(sizeof(kbasep_kctx_list_element), GFP_KERNEL);
853 mutex_lock(&kbdev->kctx_list_lock);
854 element->kctx = kctx;
855 list_add(&element->link, &kbdev->kctx_list);
856 mutex_unlock(&kbdev->kctx_list_lock);
858 /* we don't treat this as a fail - just warn about it */
859 printk(KERN_WARNING KBASE_DRV_NAME "couldn't add kctx to kctx_list\n");
/* Error path: drop the device reference taken by kbase_find_device(). */
865 kbase_release_device(kbdev);
/* fops->release: unhook the context from the device's context list,
 * destroy it, and drop the device reference taken at open time.
 * NOTE(review): the kfree(element) after list_del, the found_element
 * check's if-line, and the final "return 0" are missing from this
 * capture. */
869 static int kbase_release(struct inode *inode, struct file *filp)
871 kbase_context *kctx = filp->private_data;
872 struct kbase_device *kbdev = kctx->kbdev;
873 kbasep_kctx_list_element *element, *tmp;
874 mali_bool found_element = MALI_FALSE;
876 mutex_lock(&kbdev->kctx_list_lock);
877 list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
878 if (element->kctx == kctx) {
879 list_del(&element->link);
881 found_element = MALI_TRUE;
884 mutex_unlock(&kbdev->kctx_list_lock);
886 printk(KERN_WARNING KBASE_DRV_NAME "kctx not in kctx_list\n");
888 filp->private_data = NULL;
889 kbase_destroy_context(kctx);
891 dev_dbg(kbdev->osdev.dev, "deleted base context\n");
892 kbase_release_device(kbdev);
/* Upper bound on any UK call payload; sized so the argument buffer can
 * live on the kernel stack. */
896 #define CALL_MAX_SIZE 536
/* fops->unlocked_ioctl/compat_ioctl: bounce the user payload into a
 * stack buffer, dispatch it, and copy results back.
 * NOTE(review): the -EFAULT/-EINVAL returns and the final "return 0"
 * are missing from this capture. */
898 static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
900 u64 msg[(CALL_MAX_SIZE + 7) >> 3] = { 0xdeadbeefdeadbeefull }; /* alignment fixup */
901 u32 size = _IOC_SIZE(cmd);
902 kbase_context *kctx = filp->private_data;
/* Reject payloads larger than the stack buffer before copying. */
904 if (size > CALL_MAX_SIZE)
907 if (0 != copy_from_user(&msg, (void *)arg, size)) {
908 pr_err("failed to copy ioctl argument into kernel space\n");
912 if (MALI_ERROR_NONE != kbase_dispatch(kctx, &msg, size))
915 if (0 != copy_to_user((void *)arg, &msg, size)) {
916 pr_err("failed to copy results of UK call back to user space\n");
/* fops->read: stream completed job events (base_jd_event_v2 records) to
 * user space; blocks unless O_NONBLOCK. Returns bytes written
 * (out_count * sizeof(uevent)).
 * NOTE(review): the out_count declaration/initialisation, -EAGAIN /
 * -ERESTARTSYS returns, the do/while opening, and the copy-failure
 * return are missing from this capture. */
922 static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
924 kbase_context *kctx = filp->private_data;
925 base_jd_event_v2 uevent;
/* A partial record is never returned. */
928 if (count < sizeof(uevent))
932 while (kbase_event_dequeue(kctx, &uevent)) {
936 if (filp->f_flags & O_NONBLOCK)
939 if (wait_event_interruptible(kctx->osctx.event_queue, kbase_event_pending(kctx)))
/* Driver teardown sentinel: stop the stream once seen. */
942 if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
948 if (copy_to_user(buf, &uevent, sizeof(uevent)))
951 buf += sizeof(uevent);
953 count -= sizeof(uevent);
954 } while (count >= sizeof(uevent));
957 return out_count * sizeof(uevent);
/* fops->poll: readable when a job event is queued on this context.
 * NOTE(review): the final "return 0" for the no-event case is missing
 * from this capture. */
960 static unsigned int kbase_poll(struct file *filp, poll_table *wait)
962 kbase_context *kctx = filp->private_data;
964 poll_wait(filp, &kctx->osctx.event_queue, wait);
965 if (kbase_event_pending(kctx))
966 return POLLIN | POLLRDNORM;
971 void kbase_event_wakeup(kbase_context *kctx)
973 KBASE_DEBUG_ASSERT(kctx);
975 wake_up_interruptible(&kctx->osctx.event_queue);
978 KBASE_EXPORT_TEST_API(kbase_event_wakeup)
/* fops->check_flags hook: refuse to let user space clear O_CLOEXEC on
 * the device fd.
 * NOTE(review): the return statements are missing from this capture;
 * presumably -EINVAL when O_CLOEXEC is absent and 0 otherwise --
 * confirm upstream. */
980 int kbase_check_flags(int flags)
982 /* Enforce that the driver keeps the O_CLOEXEC flag so that execve() always
983 * closes the file descriptor in a child process.
985 if (0 == (flags & O_CLOEXEC))
/* File operations for the /dev/mali<N> misc device.
 * NOTE(review): .open, .read, .poll and .mmap entries are not visible
 * in this capture. */
991 static const struct file_operations kbase_fops = {
992 .owner = THIS_MODULE,
994 .release = kbase_release,
/* Same handler serves native and 32-bit-compat ioctls. */
997 .unlocked_ioctl = kbase_ioctl,
998 .compat_ioctl = kbase_ioctl,
1000 .check_flags = kbase_check_flags,
1003 #ifndef CONFIG_MALI_NO_MALI
1004 void kbase_os_reg_write(kbase_device *kbdev, u16 offset, u32 value)
1006 writel(value, kbdev->osdev.reg + offset);
1009 u32 kbase_os_reg_read(kbase_device *kbdev, u16 offset)
1011 return readl(kbdev->osdev.reg + offset);
1014 static void *kbase_tag(void *ptr, u32 tag)
1016 return (void *)(((uintptr_t) ptr) | tag);
/* Strip the 2-bit IRQ tag added by kbase_tag(), recovering the original
 * (aligned) pointer. */
static void *kbase_untag(void *ptr)
{
	uintptr_t raw = (uintptr_t) ptr;

	return (void *)(raw & ~(uintptr_t) 3);
}
/* Job-control IRQ handler: read JOB_IRQ_STATUS under the power lock
 * (bailing out if the GPU is off, since the IRQ line may be shared) and
 * hand the status bits to kbase_job_done().
 * NOTE(review): the IRQ_NONE / IRQ_HANDLED returns and the val==0 check
 * are missing from this capture. */
1024 static irqreturn_t kbase_job_irq_handler(int irq, void *data)
1026 unsigned long flags;
1027 struct kbase_device *kbdev = kbase_untag(data);
1030 spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
1032 if (!kbdev->pm.gpu_powered) {
1033 /* GPU is turned off - IRQ is not for us */
1034 spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
1038 val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL);
1040 #ifdef CONFIG_MALI_DEBUG
1041 if (!kbdev->pm.driver_ready_for_irqs)
1042 dev_dbg(kbdev->osdev.dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
1043 __func__, irq, val );
1044 #endif /* CONFIG_MALI_DEBUG */
1045 spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
1050 dev_dbg(kbdev->osdev.dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
1052 kbase_job_done(kbdev, val);
1057 KBASE_EXPORT_TEST_API(kbase_job_irq_handler);
/* MMU IRQ handler: same power-gating pattern as the job handler, but
 * reads MMU_IRQ_STATUS and forwards to kbase_mmu_interrupt().
 * NOTE(review): the IRQ_NONE / IRQ_HANDLED returns and the val==0 check
 * are missing from this capture. */
1059 static irqreturn_t kbase_mmu_irq_handler(int irq, void *data)
1061 unsigned long flags;
1062 struct kbase_device *kbdev = kbase_untag(data);
1065 spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
1067 if (!kbdev->pm.gpu_powered) {
1068 /* GPU is turned off - IRQ is not for us */
1069 spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
1073 val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL);
1075 #ifdef CONFIG_MALI_DEBUG
1076 if (!kbdev->pm.driver_ready_for_irqs)
1077 dev_dbg(kbdev->osdev.dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
1078 __func__, irq, val );
1079 #endif /* CONFIG_MALI_DEBUG */
1080 spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
1085 dev_dbg(kbdev->osdev.dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
1087 kbase_mmu_interrupt(kbdev, val);
/* GPU-control IRQ handler: same power-gating pattern, reading
 * GPU_IRQ_STATUS and forwarding to kbase_gpu_interrupt().
 * NOTE(review): the IRQ_NONE / IRQ_HANDLED returns and the val==0 check
 * are missing from this capture. */
1092 static irqreturn_t kbase_gpu_irq_handler(int irq, void *data)
1094 unsigned long flags;
1095 struct kbase_device *kbdev = kbase_untag(data);
1098 spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
1100 if (!kbdev->pm.gpu_powered) {
1101 /* GPU is turned off - IRQ is not for us */
1102 spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
1106 val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS), NULL);
1108 #ifdef CONFIG_MALI_DEBUG
1109 if (!kbdev->pm.driver_ready_for_irqs)
1110 dev_dbg(kbdev->osdev.dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
1111 __func__, irq, val );
1112 #endif /* CONFIG_MALI_DEBUG */
1113 spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
1118 dev_dbg(kbdev->osdev.dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
1120 kbase_gpu_interrupt(kbdev, val);
/* Default IRQ handlers, indexed by IRQ tag; used both at probe time and
 * when restoring handlers after the interrupt self-test. */
1125 static irq_handler_t kbase_handler_table[] = {
1126 [JOB_IRQ_TAG] = kbase_job_irq_handler,
1127 [MMU_IRQ_TAG] = kbase_mmu_irq_handler,
1128 [GPU_IRQ_TAG] = kbase_gpu_irq_handler,
1131 #ifdef CONFIG_MALI_DEBUG
/* Debug builds expose the tags under *_IRQ_HANDLER names for the
 * custom-handler API below. */
1132 #define JOB_IRQ_HANDLER JOB_IRQ_TAG
1133 #define MMU_IRQ_HANDLER MMU_IRQ_TAG
1134 #define GPU_IRQ_HANDLER GPU_IRQ_TAG
1137 * @brief Registers given interrupt handler for requested interrupt type
1138 * Case irq handler is not specified default handler shall be registered
1140 * @param[in] kbdev - Device for which the handler is to be registered
1141 * @param[in] custom_handler - Handler to be registered
1142 * @param[in] irq_type - Interrupt type
1143 * @return MALI_ERROR_NONE case success, MALI_ERROR_FUNCTION_FAILED otherwise
/* Debug/test hook: swap one of the three IRQ lines over to a custom
 * handler (or back to the default from kbase_handler_table when NULL).
 * Frees any currently-installed handler first, then re-requests the IRQ
 * as shared with the tagged device cookie. */
1145 static mali_error kbase_set_custom_irq_handler(kbase_device *kbdev, irq_handler_t custom_handler, int irq_type)
1147 struct kbase_os_device *osdev = &kbdev->osdev;
1148 mali_error result = MALI_ERROR_NONE;
1149 irq_handler_t requested_irq_handler = NULL;
1150 KBASE_DEBUG_ASSERT((JOB_IRQ_HANDLER <= irq_type) && (GPU_IRQ_HANDLER >= irq_type));
1152 /* Release previous handler */
1153 if (osdev->irqs[irq_type].irq)
1154 free_irq(osdev->irqs[irq_type].irq, kbase_tag(kbdev, irq_type));
1156 requested_irq_handler = (NULL != custom_handler) ? custom_handler : kbase_handler_table[irq_type];
1158 if (0 != request_irq(osdev->irqs[irq_type].irq, requested_irq_handler, osdev->irqs[irq_type].flags | IRQF_SHARED, dev_name(osdev->dev), kbase_tag(kbdev, irq_type))) {
1159 result = MALI_ERROR_FUNCTION_FAILED;
1160 dev_err(osdev->dev, "Can't request interrupt %d (index %d)\n", osdev->irqs[irq_type].irq, irq_type);
1161 #ifdef CONFIG_SPARSE_IRQ
1162 dev_err(osdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
1163 #endif /* CONFIG_SPARSE_IRQ */
/* NOTE(review): the closing brace and "return result;" are missing
 * from this capture. */
1169 KBASE_EXPORT_TEST_API(kbase_set_custom_irq_handler)
1171 /* test correct interrupt assigment and reception by cpu */
/* State for the one-shot IRQ self-test: a timeout timer plus a wait
 * queue the test handler / timer callback wake.
 * NOTE(review): the 'triggered' and 'timeout' flag fields used below
 * are not visible in this truncated struct capture. */
1172 typedef struct kbasep_irq_test {
1173 struct hrtimer timer;
1174 wait_queue_head_t wait;
1179 static kbasep_irq_test kbasep_irq_test_data;
/* How long (ms) to wait for the self-raised IRQ before declaring the
 * line dead. */
1181 #define IRQ_TEST_TIMEOUT 500
/* Job-IRQ self-test handler: records that the interrupt reached the CPU
 * (triggered flag + wakeup), then acknowledges it via JOB_IRQ_CLEAR.
 * NOTE(review): the IRQ_NONE / IRQ_HANDLED returns are missing from
 * this capture. */
1183 static irqreturn_t kbase_job_irq_test_handler(int irq, void *data)
1185 unsigned long flags;
1186 struct kbase_device *kbdev = kbase_untag(data);
1189 spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
1191 if (!kbdev->pm.gpu_powered) {
1192 /* GPU is turned off - IRQ is not for us */
1193 spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
1197 val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL);
1199 spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
1204 dev_dbg(kbdev->osdev.dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
1206 kbasep_irq_test_data.triggered = 1;
1207 wake_up(&kbasep_irq_test_data.wait);
1209 kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), val, NULL);
/* MMU-IRQ self-test handler: mirrors the job test handler but uses the
 * MMU status/clear registers.
 * NOTE(review): the IRQ_NONE / IRQ_HANDLED returns are missing from
 * this capture. */
1214 static irqreturn_t kbase_mmu_irq_test_handler(int irq, void *data)
1216 unsigned long flags;
1217 struct kbase_device *kbdev = kbase_untag(data);
1220 spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
1222 if (!kbdev->pm.gpu_powered) {
1223 /* GPU is turned off - IRQ is not for us */
1224 spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
1228 val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL);
1230 spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
1235 dev_dbg(kbdev->osdev.dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
1237 kbasep_irq_test_data.triggered = 1;
1238 wake_up(&kbasep_irq_test_data.wait);
1240 kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), val, NULL);
1245 static enum hrtimer_restart kbasep_test_interrupt_timeout(struct hrtimer *timer)
1247 kbasep_irq_test *test_data = container_of(timer, kbasep_irq_test, timer);
1249 test_data->timeout = 1;
1250 test_data->triggered = 1;
1251 wake_up(&test_data->wait);
1252 return HRTIMER_NORESTART;
/* Self-test one IRQ line (job or MMU): install a test handler, raise the
 * interrupt via the RAWSTAT register, and wait (bounded by an hrtimer)
 * for the test handler to observe it; then restore the original handler
 * and mask. Returns MALI_ERROR_NONE if the interrupt reached the CPU.
 * NOTE(review): the switch(tag) skeleton, the rawstat/mask/old_mask
 * local declarations, and several braces are missing from this
 * capture. */
1255 static mali_error kbasep_common_test_interrupt(kbase_device * const kbdev, u32 tag)
1257 struct kbase_os_device *osdev = &kbdev->osdev;
1258 mali_error err = MALI_ERROR_NONE;
1259 irq_handler_t test_handler;
/* Select per-tag handler and register offsets. */
1267 test_handler = kbase_job_irq_test_handler;
1268 rawstat_offset = JOB_CONTROL_REG(JOB_IRQ_RAWSTAT);
1269 mask_offset = JOB_CONTROL_REG(JOB_IRQ_MASK);
1272 test_handler = kbase_mmu_irq_test_handler;
1273 rawstat_offset = MMU_REG(MMU_IRQ_RAWSTAT);
1274 mask_offset = MMU_REG(MMU_IRQ_MASK);
1277 /* already tested by pm_driver - bail out */
1279 return MALI_ERROR_NONE;
1282 /* store old mask */
1283 old_mask_val = kbase_reg_read(kbdev, mask_offset, NULL);
1284 /* mask interrupts */
1285 kbase_reg_write(kbdev, mask_offset, 0x0, NULL);
1287 if (osdev->irqs[tag].irq) {
1288 /* release original handler and install test handler */
1289 if (MALI_ERROR_NONE != kbase_set_custom_irq_handler(kbdev, test_handler, tag)) {
1290 err = MALI_ERROR_FUNCTION_FAILED;
1292 kbasep_irq_test_data.timeout = 0;
1293 hrtimer_init(&kbasep_irq_test_data.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1294 kbasep_irq_test_data.timer.function = kbasep_test_interrupt_timeout;
1296 /* trigger interrupt */
1297 kbase_reg_write(kbdev, mask_offset, 0x1, NULL);
1298 kbase_reg_write(kbdev, rawstat_offset, 0x1, NULL);
1300 hrtimer_start(&kbasep_irq_test_data.timer, HR_TIMER_DELAY_MSEC(IRQ_TEST_TIMEOUT), HRTIMER_MODE_REL);
/* Wait for either the test handler or the timeout callback. */
1302 wait_event(kbasep_irq_test_data.wait, kbasep_irq_test_data.triggered != 0);
1304 if (kbasep_irq_test_data.timeout != 0) {
1305 dev_err(osdev->dev, "Interrupt %d (index %d) didn't reach CPU.\n", osdev->irqs[tag].irq, tag);
1306 err = MALI_ERROR_FUNCTION_FAILED;
1308 dev_dbg(osdev->dev, "Interrupt %d (index %d) reached CPU.\n", osdev->irqs[tag].irq, tag);
1311 hrtimer_cancel(&kbasep_irq_test_data.timer);
1312 kbasep_irq_test_data.triggered = 0;
1314 /* mask interrupts */
1315 kbase_reg_write(kbdev, mask_offset, 0x0, NULL);
1317 /* release test handler */
1318 free_irq(osdev->irqs[tag].irq, kbase_tag(kbdev, tag));
1321 /* restore original interrupt */
1322 if (request_irq(osdev->irqs[tag].irq, kbase_handler_table[tag], osdev->irqs[tag].flags | IRQF_SHARED, dev_name(osdev->dev), kbase_tag(kbdev, tag))) {
1323 dev_err(osdev->dev, "Can't restore original interrupt %d (index %d)\n", osdev->irqs[tag].irq, tag);
1324 err = MALI_ERROR_FUNCTION_FAILED;
1327 /* restore old mask */
1328 kbase_reg_write(kbdev, mask_offset, old_mask_val, NULL);
1333 static mali_error kbasep_common_test_interrupt_handlers(kbase_device * const kbdev)
1335 struct kbase_os_device *osdev = &kbdev->osdev;
1338 init_waitqueue_head(&kbasep_irq_test_data.wait);
1339 kbasep_irq_test_data.triggered = 0;
1341 /* A suspend won't happen during startup/insmod */
1342 kbase_pm_context_active(kbdev);
1344 err = kbasep_common_test_interrupt(kbdev, JOB_IRQ_TAG);
1345 if (MALI_ERROR_NONE != err) {
1346 dev_err(osdev->dev, "Interrupt JOB_IRQ didn't reach CPU. Check interrupt assignments.\n");
1350 err = kbasep_common_test_interrupt(kbdev, MMU_IRQ_TAG);
1351 if (MALI_ERROR_NONE != err) {
1352 dev_err(osdev->dev, "Interrupt MMU_IRQ didn't reach CPU. Check interrupt assignments.\n");
1356 dev_err(osdev->dev, "Interrupts are correctly assigned.\n");
1359 kbase_pm_context_idle(kbdev);
1364 #endif /* CONFIG_MALI_DEBUG */
1366 static int kbase_install_interrupts(kbase_device *kbdev)
1368 struct kbase_os_device *osdev = &kbdev->osdev;
1369 u32 nr = ARRAY_SIZE(kbase_handler_table);
1373 for (i = 0; i < nr; i++) {
1374 err = request_irq(osdev->irqs[i].irq, kbase_handler_table[i], osdev->irqs[i].flags | IRQF_SHARED, dev_name(osdev->dev), kbase_tag(kbdev, i));
1376 dev_err(osdev->dev, "Can't request interrupt %d (index %d)\n", osdev->irqs[i].irq, i);
1377 #ifdef CONFIG_SPARSE_IRQ
1378 dev_err(osdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
1379 #endif /* CONFIG_SPARSE_IRQ */
1382 printk("%s,request irq %d ok\n",__func__,osdev->irqs[i].irq + 32 );
1389 free_irq(osdev->irqs[i].irq, kbase_tag(kbdev, i));
1394 static void kbase_release_interrupts(kbase_device *kbdev)
1396 struct kbase_os_device *osdev = &kbdev->osdev;
1397 u32 nr = ARRAY_SIZE(kbase_handler_table);
1400 for (i = 0; i < nr; i++) {
1401 if (osdev->irqs[i].irq)
1402 free_irq(osdev->irqs[i].irq, kbase_tag(kbdev, i));
1406 void kbase_synchronize_irqs(kbase_device *kbdev)
1408 struct kbase_os_device *osdev = &kbdev->osdev;
1409 u32 nr = ARRAY_SIZE(kbase_handler_table);
1412 for (i = 0; i < nr; i++) {
1413 if (osdev->irqs[i].irq)
1414 synchronize_irq(osdev->irqs[i].irq);
1418 #endif /* CONFIG_MALI_NO_MALI */
1421 /** Show callback for the @c power_policy sysfs file.
1423 * This function is called to get the contents of the @c power_policy sysfs
1424 * file. This is a list of the available policies with the currently active one
1425 * surrounded by square brackets.
1427 * @param dev The device this sysfs file is for
1428 * @param attr The attributes of the sysfs file
1429 * @param buf The output buffer for the sysfs file contents
1431 * @return The number of bytes output to @c buf.
1433 static ssize_t show_policy(struct device *dev, struct device_attribute *attr, char *const buf)
1435 struct kbase_device *kbdev;
1436 const struct kbase_pm_policy *current_policy;
1437 const struct kbase_pm_policy *const *policy_list;
1442 kbdev = to_kbase_device(dev);
1447 current_policy = kbase_pm_get_policy(kbdev);
1449 policy_count = kbase_pm_list_policies(&policy_list);
1451 for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
1452 if (policy_list[i] == current_policy)
1453 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
1455 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
1458 if (ret < PAGE_SIZE - 1) {
1459 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
1461 buf[PAGE_SIZE - 2] = '\n';
1462 buf[PAGE_SIZE - 1] = '\0';
1463 ret = PAGE_SIZE - 1;
1469 /** Store callback for the @c power_policy sysfs file.
1471 * This function is called when the @c power_policy sysfs file is written to.
1472 * It matches the requested policy against the available policies and if a
1473 * matching policy is found calls @ref kbase_pm_set_policy to change the
1476 * @param dev The device with sysfs file is for
1477 * @param attr The attributes of the sysfs file
1478 * @param buf The value written to the sysfs file
1479 * @param count The number of bytes written to the sysfs file
1481 * @return @c count if the function succeeded. An error code on failure.
1483 static ssize_t set_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1485 struct kbase_device *kbdev;
1486 const struct kbase_pm_policy *new_policy = NULL;
1487 const struct kbase_pm_policy *const *policy_list;
1491 kbdev = to_kbase_device(dev);
1496 policy_count = kbase_pm_list_policies(&policy_list);
1498 for (i = 0; i < policy_count; i++) {
1499 if (sysfs_streq(policy_list[i]->name, buf)) {
1500 new_policy = policy_list[i];
1506 dev_err(dev, "power_policy: policy not found\n");
1510 kbase_pm_set_policy(kbdev, new_policy);
1515 /** The sysfs file @c power_policy.
1517 * This is used for obtaining information about the available policies,
1518 * determining which policy is currently active, and changing the active
1521 DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
1523 /** Show callback for the @c core_availability_policy sysfs file.
1525 * This function is called to get the contents of the @c core_availability_policy
1526 * sysfs file. This is a list of the available policies with the currently
1527 * active one surrounded by square brackets.
1529 * @param dev The device this sysfs file is for
1530 * @param attr The attributes of the sysfs file
1531 * @param buf The output buffer for the sysfs file contents
1533 * @return The number of bytes output to @c buf.
1535 static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char *const buf)
1537 struct kbase_device *kbdev;
1538 const struct kbase_pm_ca_policy *current_policy;
1539 const struct kbase_pm_ca_policy *const *policy_list;
1544 kbdev = to_kbase_device(dev);
1549 current_policy = kbase_pm_ca_get_policy(kbdev);
1551 policy_count = kbase_pm_ca_list_policies(&policy_list);
1553 for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
1554 if (policy_list[i] == current_policy)
1555 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
1557 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
1560 if (ret < PAGE_SIZE - 1) {
1561 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
1563 buf[PAGE_SIZE - 2] = '\n';
1564 buf[PAGE_SIZE - 1] = '\0';
1565 ret = PAGE_SIZE - 1;
1571 /** Store callback for the @c core_availability_policy sysfs file.
1573 * This function is called when the @c core_availability_policy sysfs file is
1574 * written to. It matches the requested policy against the available policies
1575 * and if a matching policy is found calls @ref kbase_pm_ca_set_policy to change
1578 * @param dev The device with sysfs file is for
1579 * @param attr The attributes of the sysfs file
1580 * @param buf The value written to the sysfs file
1581 * @param count The number of bytes written to the sysfs file
1583 * @return @c count if the function succeeded. An error code on failure.
1585 static ssize_t set_ca_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1587 struct kbase_device *kbdev;
1588 const struct kbase_pm_ca_policy *new_policy = NULL;
1589 const struct kbase_pm_ca_policy *const *policy_list;
1593 kbdev = to_kbase_device(dev);
1598 policy_count = kbase_pm_ca_list_policies(&policy_list);
1600 for (i = 0; i < policy_count; i++) {
1601 if (sysfs_streq(policy_list[i]->name, buf)) {
1602 new_policy = policy_list[i];
1608 dev_err(dev, "core_availability_policy: policy not found\n");
1612 kbase_pm_ca_set_policy(kbdev, new_policy);
1617 /** The sysfs file @c core_availability_policy
1619 * This is used for obtaining information about the available policies,
1620 * determining which policy is currently active, and changing the active
1623 DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_policy);
1625 /** Show callback for the @c core_mask sysfs file.
1627 * This function is called to get the contents of the @c core_mask sysfs
1630 * @param dev The device this sysfs file is for
1631 * @param attr The attributes of the sysfs file
1632 * @param buf The output buffer for the sysfs file contents
1634 * @return The number of bytes output to @c buf.
1636 static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char *const buf)
1638 struct kbase_device *kbdev;
1641 kbdev = to_kbase_device(dev);
1646 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "Current core mask : 0x%llX\n", kbdev->pm.debug_core_mask);
1647 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "Available core mask : 0x%llX\n", kbdev->shader_present_bitmap);
1652 /** Store callback for the @c core_mask sysfs file.
1654 * This function is called when the @c core_mask sysfs file is written to.
1656 * @param dev The device with sysfs file is for
1657 * @param attr The attributes of the sysfs file
1658 * @param buf The value written to the sysfs file
1659 * @param count The number of bytes written to the sysfs file
1661 * @return @c count if the function succeeded. An error code on failure.
1663 static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1665 struct kbase_device *kbdev;
1668 kbdev = to_kbase_device(dev);
1673 new_core_mask = simple_strtoull(buf, NULL, 16);
1675 if ((new_core_mask & kbdev->shader_present_bitmap) != new_core_mask ||
1676 !(new_core_mask & kbdev->gpu_props.props.coherency_info.group[0].core_mask)) {
1677 dev_err(dev, "power_policy: invalid core specification\n");
1681 if (kbdev->pm.debug_core_mask != new_core_mask) {
1682 unsigned long flags;
1684 spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
1686 kbdev->pm.debug_core_mask = new_core_mask;
1687 kbase_pm_update_cores_state_nolock(kbdev);
1689 spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
1695 /** The sysfs file @c core_mask.
1697 * This is used to restrict shader core availability for debugging purposes.
1698 * Reading it will show the current core mask and the mask of cores available.
1699 * Writing to it will set the current core mask.
1701 DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask);
1704 #ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
1705 /* Import the external affinity mask variables */
1706 extern u64 mali_js0_affinity_mask;
1707 extern u64 mali_js1_affinity_mask;
1708 extern u64 mali_js2_affinity_mask;
1711 * Structure containing a single shader affinity split configuration.
1715 char const * human_readable;
1722 * Array of available shader affinity split configurations.
/* NOTE(review): this listing elides the struct's remaining fields and
 * parts of the initializer entries below (tags/masks pair up per entry);
 * consult the full file before editing this table. */
1724 static sc_split_config const sc_split_configs[] =
1726 /* All must be the first config (default). */
1729 0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL
1732 "mp1", "MP1 shader core",
1736 "mp2", "MP2 shader core",
1740 "mp4", "MP4 shader core",
1744 "mp1_vf", "MP1 vertex + MP1 fragment shader core",
1745 0x2, 0x1, 0xFFFFFFFFFFFFFFFFULL
1748 "mp2_vf", "MP2 vertex + MP2 fragment shader core",
1749 0xA, 0x5, 0xFFFFFFFFFFFFFFFFULL
1751 /* This must be the last config. */
1758 /* Pointer to the currently active shader split configuration. */
1759 static sc_split_config const * current_sc_split_config = &sc_split_configs[0];
1761 /** Show callback for the @c sc_split sysfs file
1763 * Returns the current shader core affinity policy.
1765 static ssize_t show_split(struct device *dev, struct device_attribute *attr, char * const buf)
1768 /* We know we are given a buffer which is PAGE_SIZE long. Our strings are all guaranteed
1769 * to be shorter than that at this time so no length check needed. */
1770 ret = scnprintf(buf, PAGE_SIZE, "Current sc_split: '%s'\n", current_sc_split_config->tag );
1774 /** Store callback for the @c sc_split sysfs file.
1776 * This function is called when the @c sc_split sysfs file is written to
1777 * It modifies the system shader core affinity configuration to allow
1778 * system profiling with different hardware configurations.
1780 * @param dev The device with sysfs file is for
1781 * @param attr The attributes of the sysfs file
1782 * @param buf The value written to the sysfs file
1783 * @param count The number of bytes written to the sysfs file
1785 * @return @c count if the function succeeded. An error code on failure.
/*
 * sysfs store handler for sc_split: match the written string against the
 * tags in sc_split_configs[] and, on a match, install that entry's three
 * job-slot affinity masks into the global mali_js*_affinity_mask
 * variables and remember it as the current configuration.
 *
 * NOTE(review): the loop-advance and return statements are elided from
 * this listing — consult the full file before editing.
 */
1787 static ssize_t set_split(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1789 sc_split_config const * config = &sc_split_configs[0];
1791 /* Try to match: loop until we hit the last "NULL" entry */
1792 while( config->tag )
1794 if (sysfs_streq(config->tag, buf))
1796 current_sc_split_config = config;
1797 mali_js0_affinity_mask = config->js0_mask;
1798 mali_js1_affinity_mask = config->js1_mask;
1799 mali_js2_affinity_mask = config->js2_mask;
1800 dev_info(dev, "Setting sc_split: '%s'\n", config->tag);
1806 /* No match found in config list */
1807 dev_err(dev, "sc_split: invalid value\n");
1808 dev_err(dev, " Possible settings: mp[1|2|4], mp[1|2]_vf\n");
1812 /** The sysfs file @c sc_split
1814 * This is used for configuring/querying the current shader core work affinity
1817 DEVICE_ATTR(sc_split, S_IRUGO|S_IWUSR, show_split, set_split);
1818 #endif /* CONFIG_MALI_DEBUG_SHADER_SPLIT_FS */
1821 #if MALI_CUSTOMER_RELEASE == 0
1822 /** Store callback for the @c js_timeouts sysfs file.
1824 * This function is called when the @c js_timeouts sysfs file is written
1825 * to. The file contains five values separated by whitespace. The values
1826 * are basically the same as KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS,
1827 * KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS, KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS,
1828 * KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS, KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS
1829 * configuration values (in that order), with the difference that the js_timeout
1830 * values are expressed in MILLISECONDS.
1832 * The js_timeouts sysfile file allows the current values in
1833 * use by the job scheduler to be overridden. Note that a value needs to
1834 * be other than 0 for it to override the current job scheduler value.
1836 * @param dev The device with sysfs file is for
1837 * @param attr The attributes of the sysfs file
1838 * @param buf The value written to the sysfs file
1839 * @param count The number of bytes written to the sysfs file
1841 * @return @c count if the function succeeded. An error code on failure.
/*
 * sysfs store handler for js_timeouts: parse five millisecond values
 * (soft-stop, hard-stop SS, hard-stop NSS, reset SS, reset NSS), convert
 * each to scheduler ticks (ms * 1e6 ns / scheduling_tick_ns) and install
 * them as overrides on the job scheduler.
 *
 * NOTE(review): the check on the sscanf() result ("items") and the
 * declarations of items/ticks are elided from this listing — consult
 * the full file before editing.
 */
1843 static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1845 struct kbase_device *kbdev;
1847 unsigned long js_soft_stop_ms;
1848 unsigned long js_hard_stop_ms_ss;
1849 unsigned long js_hard_stop_ms_nss;
1850 unsigned long js_reset_ms_ss;
1851 unsigned long js_reset_ms_nss;
1853 kbdev = to_kbase_device(dev);
/* Expect exactly five whitespace-separated decimal values. */
1857 items = sscanf(buf, "%lu %lu %lu %lu %lu", &js_soft_stop_ms, &js_hard_stop_ms_ss, &js_hard_stop_ms_nss, &js_reset_ms_ss, &js_reset_ms_nss);
/* Convert each value: ms -> ns, then divide by the tick period. */
1861 ticks = js_soft_stop_ms * 1000000ULL;
1862 do_div(ticks, kbdev->js_data.scheduling_tick_ns);
1863 kbdev->js_soft_stop_ticks = ticks;
1865 ticks = js_hard_stop_ms_ss * 1000000ULL;
1866 do_div(ticks, kbdev->js_data.scheduling_tick_ns);
1867 kbdev->js_hard_stop_ticks_ss = ticks;
1869 ticks = js_hard_stop_ms_nss * 1000000ULL;
1870 do_div(ticks, kbdev->js_data.scheduling_tick_ns);
1871 kbdev->js_hard_stop_ticks_nss = ticks;
1873 ticks = js_reset_ms_ss * 1000000ULL;
1874 do_div(ticks, kbdev->js_data.scheduling_tick_ns);
1875 kbdev->js_reset_ticks_ss = ticks;
1877 ticks = js_reset_ms_nss * 1000000ULL;
1878 do_div(ticks, kbdev->js_data.scheduling_tick_ns);
1879 kbdev->js_reset_ticks_nss = ticks;
/* Log each override so the effective tick values are visible in dmesg. */
1881 dev_info(kbdev->osdev.dev, "Overriding KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_soft_stop_ticks, js_soft_stop_ms);
1882 dev_info(kbdev->osdev.dev, "Overriding KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_hard_stop_ticks_ss, js_hard_stop_ms_ss);
1883 dev_info(kbdev->osdev.dev, "Overriding KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_hard_stop_ticks_nss, js_hard_stop_ms_nss);
1884 dev_info(kbdev->osdev.dev, "Overriding KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_reset_ticks_ss, js_reset_ms_ss);
1885 dev_info(kbdev->osdev.dev, "Overriding KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_reset_ticks_nss, js_reset_ms_nss);
/* Parse failed: tell the user the expected format. */
1889 dev_err(kbdev->osdev.dev, "Couldn't process js_timeouts write operation.\nUse format " "<soft_stop_ms> <hard_stop_ms_ss> <hard_stop_ms_nss> <reset_ms_ss> <reset_ms_nss>\n");
1894 /** Show callback for the @c js_timeouts sysfs file.
1896 * This function is called to get the contents of the @c js_timeouts sysfs
1897 * file. It returns the last set values written to the js_timeouts sysfs file.
1898 * If the file didn't get written yet, the values will be 0.
1900 * @param dev The device this sysfs file is for
1901 * @param attr The attributes of the sysfs file
1902 * @param buf The output buffer for the sysfs file contents
1904 * @return The number of bytes output to @c buf.
/*
 * sysfs show handler for js_timeouts: convert the five current job-
 * scheduler tick counts back to milliseconds (ticks * scheduling_tick_ns
 * / 1e6) and print them space-separated in the same order accepted by
 * set_js_timeouts().
 *
 * NOTE(review): the declarations of ms/ret are elided from this
 * listing — consult the full file before editing.
 */
1906 static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *attr, char * const buf)
1908 struct kbase_device *kbdev;
1911 unsigned long js_soft_stop_ms;
1912 unsigned long js_hard_stop_ms_ss;
1913 unsigned long js_hard_stop_ms_nss;
1914 unsigned long js_reset_ms_ss;
1915 unsigned long js_reset_ms_nss;
1917 kbdev = to_kbase_device(dev);
/* Convert each tick count: ticks -> ns, then divide down to ms. */
1921 ms = (u64) kbdev->js_soft_stop_ticks * kbdev->js_data.scheduling_tick_ns;
1922 do_div(ms, 1000000UL);
1923 js_soft_stop_ms = (unsigned long)ms;
1925 ms = (u64) kbdev->js_hard_stop_ticks_ss * kbdev->js_data.scheduling_tick_ns;
1926 do_div(ms, 1000000UL);
1927 js_hard_stop_ms_ss = (unsigned long)ms;
1929 ms = (u64) kbdev->js_hard_stop_ticks_nss * kbdev->js_data.scheduling_tick_ns;
1930 do_div(ms, 1000000UL);
1931 js_hard_stop_ms_nss = (unsigned long)ms;
1933 ms = (u64) kbdev->js_reset_ticks_ss * kbdev->js_data.scheduling_tick_ns;
1934 do_div(ms, 1000000UL);
1935 js_reset_ms_ss = (unsigned long)ms;
1937 ms = (u64) kbdev->js_reset_ticks_nss * kbdev->js_data.scheduling_tick_ns;
1938 do_div(ms, 1000000UL);
1939 js_reset_ms_nss = (unsigned long)ms;
1941 ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu\n", js_soft_stop_ms, js_hard_stop_ms_ss, js_hard_stop_ms_nss, js_reset_ms_ss, js_reset_ms_nss);
/* Output filled the page: force a newline-terminated, NUL-ended buffer. */
1943 if (ret >= PAGE_SIZE) {
1944 buf[PAGE_SIZE - 2] = '\n';
1945 buf[PAGE_SIZE - 1] = '\0';
1946 ret = PAGE_SIZE - 1;
1952 /** The sysfs file @c js_timeouts.
1954 * This is used to override the current job scheduler values for
1955 * KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS
1956 * KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS
1957 * KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS
1958 * KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS
1959 * KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS.
1961 DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts);
1962 #endif /* MALI_CUSTOMER_RELEASE == 0 */
1964 #ifdef CONFIG_MALI_DEBUG
/*
 * sysfs store handler for js_softstop_always: accept "0" or "1" and set
 * the scheduler's softstop_always flag accordingly (enables soft-stop
 * even when only a single context is present — debug/unit-test aid).
 *
 * NOTE(review): the declaration of "items" and the return statements
 * are elided from this listing — consult the full file before editing.
 */
1965 static ssize_t set_js_softstop_always(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1967 struct kbase_device *kbdev;
1969 int softstop_always;
1971 kbdev = to_kbase_device(dev);
/* Only accept exactly one integer that is 0 or 1. */
1975 items = sscanf(buf, "%d", &softstop_always);
1976 if ((items == 1) && ((softstop_always == 0) || (softstop_always == 1))) {
1977 kbdev->js_data.softstop_always = (mali_bool) softstop_always;
1979 dev_info(kbdev->osdev.dev, "Support for softstop on a single context: %s\n", (kbdev->js_data.softstop_always == MALI_FALSE) ? "Disabled" : "Enabled");
/* Anything else is a malformed write. */
1982 dev_err(kbdev->osdev.dev, "Couldn't process js_softstop_always write operation.\nUse format " "<soft_stop_always>\n");
1987 static ssize_t show_js_softstop_always(struct device *dev, struct device_attribute *attr, char * const buf)
1989 struct kbase_device *kbdev;
1992 kbdev = to_kbase_device(dev);
1996 ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);
1998 if (ret >= PAGE_SIZE) {
1999 buf[PAGE_SIZE - 2] = '\n';
2000 buf[PAGE_SIZE - 1] = '\0';
2001 ret = PAGE_SIZE - 1;
2008 * By default, soft-stops are disabled when only a single context is present. The ability to
2009 * enable soft-stop when only a single context is present can be used for debug and unit-testing purposes.
2010 * (see CL t6xx_stress_1 unit-test as an example whereby this feature is used.)
2012 DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
2013 #endif /* CONFIG_MALI_DEBUG */
2015 #ifdef CONFIG_MALI_DEBUG
/* Function type invoked when a debug command is issued via sysfs. */
2016 typedef void (kbasep_debug_command_func) (kbase_device *);
/* Command indices; entries in debug_commands[] below follow this order. */
2019 KBASEP_DEBUG_COMMAND_DUMPTRACE,
2020 KBASEP_DEBUG_COMMAND_SIM1,
2021 KBASEP_DEBUG_COMMAND_SIM2,
2022 KBASEP_DEBUG_COMMAND_SIM3,
2023 KBASEP_DEBUG_COMMAND_SIM4,
2024 /* This must be the last enum */
2025 KBASEP_DEBUG_COMMAND_COUNT
2026 } kbasep_debug_command_code;
/* A single command: its sysfs string and the handler to run. */
2028 typedef struct kbasep_debug_command {
2030 kbasep_debug_command_func *func;
2031 } kbasep_debug_command;
2033 /** Debug commands supported by the driver */
/* NOTE(review): the .str initializers of each entry are elided from
 * this listing; the RunMaliTest_* handlers come from the vendor
 * integration kit (MaliFns.h) — confirm their prototypes there. */
2034 static const kbasep_debug_command debug_commands[] = {
2037 .func = &kbasep_trace_dump,
2041 .func = &RunMaliTest_sim1_t760,
2045 .func = &RunMaliTest_sim2_t760,
2049 .func = &RunMaliTest_sim3_t760,
2053 .func = &RunMaliTest_sim4_t760,
2057 /** Show callback for the @c debug_command sysfs file.
2059 * This function is called to get the contents of the @c debug_command sysfs
2060 * file. This is a list of the available debug commands, separated by newlines.
2062 * @param dev The device this sysfs file is for
2063 * @param attr The attributes of the sysfs file
2064 * @param buf The output buffer for the sysfs file contents
2066 * @return The number of bytes output to @c buf.
2068 static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char *const buf)
2070 struct kbase_device *kbdev;
2074 kbdev = to_kbase_device(dev);
2079 for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
2080 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);
2082 if (ret >= PAGE_SIZE) {
2083 buf[PAGE_SIZE - 2] = '\n';
2084 buf[PAGE_SIZE - 1] = '\0';
2085 ret = PAGE_SIZE - 1;
2091 /** Store callback for the @c debug_command sysfs file.
2093 * This function is called when the @c debug_command sysfs file is written to.
2094 * It matches the requested command against the available commands, and if
2095 * a matching command is found calls the associated function from
2096 * @ref debug_commands to issue the command.
2098 * @param dev The device with sysfs file is for
2099 * @param attr The attributes of the sysfs file
2100 * @param buf The value written to the sysfs file
2101 * @param count The number of bytes written to the sysfs file
2103 * @return @c count if the function succeeded. An error code on failure.
/*
 * sysfs store handler for debug_command: match the written string
 * against debug_commands[] and invoke the matching handler.
 *
 * NOTE(review): index 0 (kbasep_trace_dump) receives the kbase device,
 * but the vendor integration-kit entries are handed the ioremap()ed
 * register base cast to (int *), which does not match the
 * kbasep_debug_command_func prototype — presumably the RunMaliTest_*
 * functions actually take a register base; confirm against MaliFns.h
 * before touching this dispatch.
 */
2105 static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2107 struct kbase_device *kbdev;
2110 kbdev = to_kbase_device(dev);
2115 for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
2116 if (sysfs_streq(debug_commands[i].str, buf)) {
2117 //chenli: modify for integration kits
2118 if(i==0)//kbasep_trace_dump
2119 debug_commands[i].func(kbdev);
2120 else //integration kits
2122 struct kbase_os_device *osdev = &kbdev->osdev;
2123 debug_commands[i].func((int *)osdev->reg);
/* Fell through the loop: no command matched the written string. */
2129 /* Debug Command not found */
2130 dev_err(dev, "debug_command: command not known\n");
2134 /** The sysfs file @c debug_command.
2136 * This is used to issue general debug commands to the device driver.
2137 * Reading it will produce a list of debug commands, separated by newlines.
2138 * Writing to it with one of those commands will issue said command.
2140 DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug);
2141 #endif /* CONFIG_MALI_DEBUG */
2143 #ifdef CONFIG_MALI_NO_MALI
2144 static int kbase_common_reg_map(kbase_device *kbdev)
2148 static void kbase_common_reg_unmap(kbase_device * const kbdev)
2152 #else /* CONFIG_MALI_NO_MALI */
2153 static int kbase_common_reg_map(kbase_device *kbdev)
2155 struct kbase_os_device *osdev = &kbdev->osdev;
2158 osdev->reg_res = request_mem_region(osdev->reg_start, osdev->reg_size, dev_name(osdev->dev));
2159 if (!osdev->reg_res) {
2160 dev_err(osdev->dev, "Register window unavailable\n");
2164 printk("%s,request_mem_region ok\n",__func__);
2165 osdev->reg = ioremap(osdev->reg_start, osdev->reg_size);
2167 dev_err(osdev->dev, "Can't remap register window\n");
2172 printk("%s,ioremap ok\n",__func__);
2176 release_resource(osdev->reg_res);
2177 kfree(osdev->reg_res);
2182 static void kbase_common_reg_unmap(kbase_device * const kbdev)
2184 struct kbase_os_device *osdev = &kbdev->osdev;
2186 iounmap(osdev->reg);
2187 release_resource(osdev->reg_res);
2188 kfree(osdev->reg_res);
2190 #endif /* CONFIG_MALI_NO_MALI */
/*
 * Common (platform-independent) device initialisation: register the
 * misc device, create the sysfs/debugfs entries, bring up the PM,
 * memory, job-slot and job-scheduler subsystems, install the IRQ
 * handlers and finally power the GPU up.
 *
 * The "inited" bit-mask records which stages completed so the error
 * path at the bottom can unwind exactly those stages, in reverse order.
 *
 * NOTE(review): this listing elides many lines (braces, goto targets,
 * local declarations, the final return) — consult the full file before
 * editing.
 */
2193 static int kbase_common_device_init(kbase_device *kbdev)
2195 struct kbase_os_device *osdev = &kbdev->osdev;
2197 mali_error mali_err;
/* One bit per initialisation stage, used for selective unwinding. */
2199 inited_mem = (1u << 0),
2200 inited_job_slot = (1u << 1),
2201 inited_pm = (1u << 2),
2202 inited_js = (1u << 3),
2203 inited_irqs = (1u << 4),
2204 inited_debug = (1u << 5),
2205 inited_js_softstop = (1u << 6),
2206 #if MALI_CUSTOMER_RELEASE == 0
2207 inited_js_timeouts = (1u << 7),
2208 #endif /* MALI_CUSTOMER_RELEASE == 0 */
2209 inited_pm_runtime_init = (1u << 8),
2210 #ifdef CONFIG_DEBUG_FS
2211 inited_gpu_memory = (1u << 9),
2212 inited_debugfs = (1u << 10),
2213 #endif /* CONFIG_DEBUG_FS */
2214 #ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
2215 inited_sc_split = (1u << 11),
2216 #endif /* CONFIG_MALI_DEBUG_SHADER_SPLIT_FS */
2217 #ifdef CONFIG_MALI_TRACE_TIMELINE
2218 inited_timeline = (1u << 12),
2219 #endif /* CONFIG_MALI_TRACE_LINE */
/* Register the character device (/dev/mali<N>) via the misc framework. */
2224 dev_set_drvdata(osdev->dev, kbdev);
2226 osdev->mdev.minor = MISC_DYNAMIC_MINOR;
2227 osdev->mdev.name = osdev->devname;
2228 osdev->mdev.fops = &kbase_fops;
2229 osdev->mdev.parent = get_device(osdev->dev);
2231 scnprintf(osdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name, kbase_dev_nr++);
2233 #ifdef CONFIG_DEBUG_FS
2234 kbdev->mali_debugfs_directory = debugfs_create_dir("mali", NULL);
2235 if (NULL == kbdev->mali_debugfs_directory) {
2236 dev_err(osdev->dev, "Couldn't create mali debugfs directory\n");
2239 inited |= inited_debugfs;
2240 #endif /* CONFIG_DEBUG_FS */
2243 if (misc_register(&osdev->mdev)) {
2244 dev_err(osdev->dev, "Couldn't register misc dev %s\n", osdev->devname);
/* Create the sysfs control files (power_policy, CA policy, core_mask). */
2249 if (device_create_file(osdev->dev, &dev_attr_power_policy)) {
2250 dev_err(osdev->dev, "Couldn't create power_policy sysfs file\n");
2254 if (device_create_file(osdev->dev, &dev_attr_core_availability_policy)) {
2255 dev_err(osdev->dev, "Couldn't create core_availability_policy sysfs file\n");
2256 goto out_file_core_availability_policy;
2259 if (device_create_file(osdev->dev, &dev_attr_core_mask)) {
2260 dev_err(osdev->dev, "Couldn't create core_mask sysfs file\n");
2261 goto out_file_core_mask;
/* Publish the device on the global kbase device list. */
2264 down(&kbase_dev_list_lock);
2265 list_add(&osdev->entry, &kbase_dev_list);
2266 up(&kbase_dev_list_lock);
2267 dev_info(osdev->dev, "Probed as %s\n", dev_name(osdev->mdev.this_device));
/* Bring up the core subsystems; record each success in "inited". */
2269 mali_err = kbase_pm_init(kbdev);
2270 if (MALI_ERROR_NONE != mali_err)
2273 inited |= inited_pm;
2275 if (kbdev->pm.callback_power_runtime_init) {
2276 mali_err = kbdev->pm.callback_power_runtime_init(kbdev);
2277 if (MALI_ERROR_NONE != mali_err)
2280 inited |= inited_pm_runtime_init;
2283 mali_err = kbase_mem_init(kbdev);
2284 if (MALI_ERROR_NONE != mali_err)
2287 inited |= inited_mem;
2289 mali_err = kbase_job_slot_init(kbdev);
2290 if (MALI_ERROR_NONE != mali_err)
2293 inited |= inited_job_slot;
2295 mali_err = kbasep_js_devdata_init(kbdev);
2296 if (MALI_ERROR_NONE != mali_err)
2299 inited |= inited_js;
2301 err = kbase_install_interrupts(kbdev);
2305 inited |= inited_irqs;
/* Optional debug-only sysfs/debugfs entries follow. */
2307 #ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
2308 if (device_create_file(osdev->dev, &dev_attr_sc_split))
2310 dev_err(osdev->dev, "Couldn't create sc_split sysfs file\n");
2314 inited |= inited_sc_split;
2315 #endif /* CONFIG_MALI_DEBUG_SHADER_SPLIT_FS */
2317 #ifdef CONFIG_DEBUG_FS
2318 if (kbasep_gpu_memory_debugfs_init(kbdev)) {
2319 dev_err(osdev->dev, "Couldn't create gpu_memory debugfs file\n");
2322 inited |= inited_gpu_memory;
2323 #endif /* CONFIG_DEBUG_FS */
2325 #ifdef CONFIG_MALI_DEBUG
2327 if (device_create_file(osdev->dev, &dev_attr_debug_command)) {
2328 dev_err(osdev->dev, "Couldn't create debug_command sysfs file\n");
2331 inited |= inited_debug;
2333 if (device_create_file(osdev->dev, &dev_attr_js_softstop_always)) {
2334 dev_err(osdev->dev, "Couldn't create js_softstop_always sysfs file\n");
2337 inited |= inited_js_softstop;
2338 #endif /* CONFIG_MALI_DEBUG */
2340 #if MALI_CUSTOMER_RELEASE == 0
2341 if (device_create_file(osdev->dev, &dev_attr_js_timeouts)) {
2342 dev_err(osdev->dev, "Couldn't create js_timeouts sysfs file\n");
2345 inited |= inited_js_timeouts;
2346 #endif /* MALI_CUSTOMER_RELEASE */
2348 #ifdef CONFIG_MALI_TRACE_TIMELINE
2349 if (kbasep_trace_timeline_debugfs_init(kbdev)) {
2350 dev_err(osdev->dev, "Couldn't create mali_timeline_defs debugfs file\n");
2353 inited |= inited_timeline;
2354 #endif /* CONFIG_MALI_TRACE_TIMELINE */
/* Power the GPU and, in debug builds with real hardware, verify that
 * the interrupt lines are correctly wired before declaring success. */
2356 mali_err = kbase_pm_powerup(kbdev);
2357 if (MALI_ERROR_NONE == mali_err) {
2358 #ifdef CONFIG_MALI_DEBUG
2359 #ifndef CONFIG_MALI_NO_MALI
2360 if (MALI_ERROR_NONE != kbasep_common_test_interrupt_handlers(kbdev)) {
2361 dev_err(osdev->dev, "Interrupt assigment check failed.\n");
2365 #endif /* CONFIG_MALI_NO_MALI */
2366 #endif /* CONFIG_MALI_DEBUG */
2368 /* initialise the kctx list */
2369 mutex_init(&kbdev->kctx_list_lock);
2370 INIT_LIST_HEAD(&kbdev->kctx_list);
/* Error unwind: tear down only the stages flagged in "inited", in
 * reverse order of initialisation, then drop the sysfs files, the
 * device-list entry, the misc device and the device reference. */
2375 #ifdef CONFIG_MALI_TRACE_TIMELINE
2376 if (inited & inited_timeline)
2377 kbasep_trace_timeline_debugfs_term(kbdev);
2378 #endif /* CONFIG_MALI_TRACE_TIMELINE */
2379 #if MALI_CUSTOMER_RELEASE == 0
2380 if (inited & inited_js_timeouts)
2381 device_remove_file(kbdev->osdev.dev, &dev_attr_js_timeouts);
2382 #endif /* MALI_CUSTOMER_RELEASE */
2383 #ifdef CONFIG_MALI_DEBUG
2384 if (inited & inited_js_softstop)
2385 device_remove_file(kbdev->osdev.dev, &dev_attr_js_softstop_always);
2387 if (inited & inited_debug)
2388 device_remove_file(kbdev->osdev.dev, &dev_attr_debug_command);
2390 #endif /* CONFIG_MALI_DEBUG */
2392 #ifdef CONFIG_DEBUG_FS
2393 if (inited & inited_gpu_memory)
2394 kbasep_gpu_memory_debugfs_term(kbdev);
2395 if (inited & inited_debugfs)
2396 debugfs_remove(kbdev->mali_debugfs_directory);
2397 #endif /* CONFIG_DEBUG_FS */
2399 #ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
2400 if (inited & inited_sc_split)
2402 device_remove_file(kbdev->osdev.dev, &dev_attr_sc_split);
2404 #endif /* CONFIG_MALI_DEBUG_SHADER_SPLIT_FS */
/* Halt subsystems first (stop new activity), then terminate them. */
2406 if (inited & inited_js)
2407 kbasep_js_devdata_halt(kbdev);
2409 if (inited & inited_job_slot)
2410 kbase_job_slot_halt(kbdev);
2412 if (inited & inited_mem)
2413 kbase_mem_halt(kbdev);
2415 if (inited & inited_pm)
2416 kbase_pm_halt(kbdev);
2418 if (inited & inited_irqs)
2419 kbase_release_interrupts(kbdev);
2421 if (inited & inited_js)
2422 kbasep_js_devdata_term(kbdev);
2424 if (inited & inited_job_slot)
2425 kbase_job_slot_term(kbdev);
2427 if (inited & inited_mem)
2428 kbase_mem_term(kbdev);
2430 if (inited & inited_pm_runtime_init) {
2431 if (kbdev->pm.callback_power_runtime_term)
2432 kbdev->pm.callback_power_runtime_term(kbdev);
2435 if (inited & inited_pm)
2436 kbase_pm_term(kbdev);
2438 down(&kbase_dev_list_lock);
2439 list_del(&osdev->entry);
2440 up(&kbase_dev_list_lock);
2442 device_remove_file(kbdev->osdev.dev, &dev_attr_core_mask);
2444 device_remove_file(kbdev->osdev.dev, &dev_attr_core_availability_policy);
2445 out_file_core_availability_policy:
2446 device_remove_file(kbdev->osdev.dev, &dev_attr_power_policy);
2448 misc_deregister(&kbdev->osdev.mdev);
2450 put_device(osdev->dev);
2454 static int kbase_platform_device_probe(struct platform_device *pdev)
/*
 * Platform-driver probe: allocate and initialise one kbase GPU device.
 *
 * Visible steps: read the optional "dbgname" DT property, (for the
 * platform-fake path) attach the kbase config attributes as platform
 * data, allocate the kbase_device, validate the configuration
 * attributes, map the three named IRQ resources (JOB/MMU/GPU) and the
 * register memory resource, run kbase_device_init(), pull the
 * configured memory/frequency/IRQ-throttle values, and finally call
 * kbase_common_device_init().
 *
 * NOTE(review): the embedded line numbers in this extract are
 * non-contiguous, so braces, `return` statements, `goto` error labels
 * and some declarations (e.g. `err`, `i`, `irqtag`) are not visible
 * here.  Comments below describe only what the visible code shows.
 */
2456 struct kbase_device *kbdev;
2457 struct kbase_os_device *osdev;
2458 struct resource *reg_res;
2459 kbase_attribute *platform_data;
2462 struct mali_base_gpu_core_props *core_props;
2463 #ifdef CONFIG_MALI_NO_MALI
/* Only needed for the dummy-model path below. */
2464 mali_error mali_err;
2465 #endif /* CONFIG_MALI_NO_MALI */
2467 const char *dbgname = NULL;
/* Optional device-tree property; used only for the diagnostic printk. */
2469 if(pdev->dev.of_node)
2471 of_property_read_string(pdev->dev.of_node,"dbgname",&dbgname);
2472 printk("%p,dbgname = %s\r\n",pdev->dev.of_node,dbgname);
2476 printk("pdev->dev.of_node null\r\n");
2480 kbase_platform_config *config;
2481 int attribute_count;
/* NOTE(review): `#if 1` force-enables the platform-fake attribute
 * copy regardless of CONFIG_MALI_PLATFORM_FAKE — presumably a local
 * integration hack; confirm before upstreaming. */
2483 //#ifdef CONFIG_MALI_PLATFORM_FAKE
2484 #if 1//defined(CONFIG_MALI_PLATFORM_FAKE) || defined(CONFIG_MALI_PLATFORM_FAKE_MODULE)
2485 config = kbase_get_platform_config();
2486 attribute_count = kbasep_get_config_attribute_count(config->attributes);
/* Copy the attribute array into pdev's platform data so the generic
 * code below can read it back via dev->platform_data. */
2488 err = platform_device_add_data(pdev, config->attributes,
2489 attribute_count * sizeof(config->attributes[0]));
2492 #endif /* CONFIG_MALI_PLATFORM_FAKE */
2493 #endif /* CONFIG_OF */
2495 kbdev = kbase_device_alloc();
2497 dev_err(&pdev->dev, "Can't allocate device\n");
2501 #ifdef CONFIG_MALI_NO_MALI
/* No real GPU: instantiate the software model instead. */
2502 mali_err = midg_device_create(kbdev);
2503 if (MALI_ERROR_NONE != mali_err) {
2504 dev_err(&pdev->dev, "Can't initialize dummy model\n");
2508 #endif /* CONFIG_MALI_NO_MALI */
2510 osdev = &kbdev->osdev;
2511 osdev->dev = &pdev->dev;
2512 platform_data = (kbase_attribute *) osdev->dev->platform_data;
2514 if (NULL == platform_data) {
2515 dev_err(osdev->dev, "Platform data not specified\n");
2520 if (MALI_TRUE != kbasep_validate_configuration_attributes(kbdev, platform_data)) {
2521 dev_err(osdev->dev, "Configuration attributes failed to validate\n");
2525 kbdev->config_attributes = platform_data;
2527 /* 3 IRQ resources */
2528 for (i = 0; i < 3; i++) {
2529 struct resource *irq_res;
2532 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
2534 dev_err(osdev->dev, "No IRQ resource at index %d\n", i);
/* Map the resource's name onto the driver's fixed IRQ slots. */
2539 if (!strcmp(irq_res->name, "JOB"))
2540 irqtag = JOB_IRQ_TAG;
2541 else if (!strcmp(irq_res->name, "MMU"))
2542 irqtag = MMU_IRQ_TAG;
2543 else if (!strcmp(irq_res->name, "GPU"))
2544 irqtag = GPU_IRQ_TAG;
2546 dev_err(&pdev->dev, "Invalid irq res name: '%s'\n",
2553 #endif /* CONFIG_OF */
2554 printk("irq_res->start = 0x%x,irq_res->name = %s\r\n",irq_res->start,irq_res->name);
/* Record the IRQ number and its trigger flags for request_irq later. */
2556 osdev->irqs[irqtag].irq = irq_res->start;
2557 osdev->irqs[irqtag].flags = (irq_res->flags & IRQF_TRIGGER_MASK);
2560 /* the first memory resource is the physical address of the GPU registers */
2561 reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2563 dev_err(&pdev->dev, "Invalid register resource\n");
2567 printk("reg_res->start = 0x%0x,size = 0x%0x\r\n",reg_res->start,resource_size(reg_res));
2568 osdev->reg_start = reg_res->start;
2569 osdev->reg_size = resource_size(reg_res);
2571 err = kbase_common_reg_map(kbdev);
2575 if (MALI_ERROR_NONE != kbase_device_init(kbdev)) {
2576 dev_err(&pdev->dev, "Can't initialize device\n");
/* Pull configured values out of the validated attribute list. */
2581 kbdev->memdev.ump_device_id = kbasep_get_config_value(kbdev, platform_data, KBASE_CONFIG_ATTR_UMP_DEVICE);
2582 #endif /* CONFIG_UMP */
2584 kbdev->memdev.per_process_memory_limit = kbasep_get_config_value(kbdev, platform_data, KBASE_CONFIG_ATTR_MEMORY_PER_PROCESS_LIMIT);
2586 /* obtain min/max configured gpu frequencies */
2587 core_props = &(kbdev->gpu_props.props.core_props);
2588 core_props->gpu_freq_khz_min = kbasep_get_config_value(kbdev, platform_data, KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN);
2589 core_props->gpu_freq_khz_max = kbasep_get_config_value(kbdev, platform_data, KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX);
2590 kbdev->gpu_props.irq_throttle_time_us = kbasep_get_config_value(kbdev, platform_data, KBASE_CONFIG_ATTR_GPU_IRQ_THROTTLE_TIME_US);
2592 err = kbase_common_device_init(kbdev);
2594 dev_err(osdev->dev, "Failed kbase_common_device_init\n");
/* Error unwind (labels elided in this extract): undo init steps in
 * reverse order of acquisition. */
2600 kbase_device_term(kbdev);
2602 kbase_common_reg_unmap(kbdev);
2604 #ifdef CONFIG_MALI_NO_MALI
2605 midg_device_destroy(kbdev);
2607 #endif /* CONFIG_MALI_NO_MALI */
2608 kbase_device_free(kbdev);
2613 static int kbase_common_device_remove(struct kbase_device *kbdev)
/*
 * Full teardown of a kbase device: undoes what common-device init set
 * up, in roughly reverse order of initialisation.
 *
 * NOTE(review): the embedded line numbers in this extract are
 * non-contiguous, so the braces and the return statement are not
 * visible here.
 */
/* Give the platform a chance to tear down its runtime-PM state first. */
2615 if (kbdev->pm.callback_power_runtime_term)
2616 kbdev->pm.callback_power_runtime_term(kbdev);
2618 /* Remove the sys power policy file */
2619 device_remove_file(kbdev->osdev.dev, &dev_attr_power_policy);
2620 device_remove_file(kbdev->osdev.dev, &dev_attr_core_availability_policy);
2621 device_remove_file(kbdev->osdev.dev, &dev_attr_core_mask);
2623 #ifdef CONFIG_MALI_TRACE_TIMELINE
2624 kbasep_trace_timeline_debugfs_term(kbdev);
2625 #endif /* CONFIG_MALI_TRACE_TIMELINE */
2627 #ifdef CONFIG_MALI_DEBUG
2628 device_remove_file(kbdev->osdev.dev, &dev_attr_js_softstop_always);
2629 device_remove_file(kbdev->osdev.dev, &dev_attr_debug_command);
2630 #endif /* CONFIG_MALI_DEBUG */
2631 #if MALI_CUSTOMER_RELEASE == 0
2632 device_remove_file(kbdev->osdev.dev, &dev_attr_js_timeouts);
2633 #endif /* MALI_CUSTOMER_RELEASE */
2634 #ifdef CONFIG_DEBUG_FS
2635 kbasep_gpu_memory_debugfs_term(kbdev);
2636 debugfs_remove(kbdev->mali_debugfs_directory);
2637 #endif /* CONFIG_DEBUG_FS */
2639 #ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
2640 device_remove_file(kbdev->osdev.dev, &dev_attr_sc_split);
2641 #endif /* CONFIG_MALI_DEBUG_SHADER_SPLIT_FS */
/* Halt all subsystems (stop new work) before releasing the IRQs... */
2643 kbasep_js_devdata_halt(kbdev);
2644 kbase_job_slot_halt(kbdev);
2645 kbase_mem_halt(kbdev);
2646 kbase_pm_halt(kbdev);
2648 kbase_release_interrupts(kbdev);
/* ...then terminate them for good. */
2650 kbasep_js_devdata_term(kbdev);
2651 kbase_job_slot_term(kbdev);
2652 kbase_mem_term(kbdev);
2653 kbase_pm_term(kbdev);
/* Unlink this device from the global device list (semaphore-protected). */
2655 down(&kbase_dev_list_lock);
2656 list_del(&kbdev->osdev.entry);
2657 up(&kbase_dev_list_lock);
2659 misc_deregister(&kbdev->osdev.mdev);
2660 put_device(kbdev->osdev.dev);
2661 kbase_common_reg_unmap(kbdev);
2662 kbase_device_term(kbdev);
2663 #ifdef CONFIG_MALI_NO_MALI
2664 midg_device_destroy(kbdev);
2665 #endif /* CONFIG_MALI_NO_MALI */
2666 kbase_device_free(kbdev);
/*
 * Platform-driver remove callback: look up the kbase device attached to
 * this platform device and run the common teardown.
 * NOTE(review): braces and a likely NULL-check on kbdev are elided in
 * this extract (embedded line numbers are non-contiguous).
 */
2671 static int kbase_platform_device_remove(struct platform_device *pdev)
2673 struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
2678 return kbase_common_device_remove(kbdev);
2681 /** Suspend callback from the OS.
2683 * This is called by Linux when the device should suspend.
2685 * @param dev The device to suspend
2687 * @return A standard Linux error code
2689 static int kbase_device_suspend(struct device *dev)
2691 struct kbase_device *kbdev = to_kbase_device(dev);
/* Delegate to the PM core; NOTE(review): the NULL-check on kbdev and
 * the return statement are elided in this extract. */
2696 kbase_pm_suspend(kbdev);
2700 /** Resume callback from the OS.
2702 * This is called by Linux when the device should resume from suspension.
2704 * @param dev The device to resume
2706 * @return A standard Linux error code
2708 static int kbase_device_resume(struct device *dev)
2710 struct kbase_device *kbdev = to_kbase_device(dev);
/* Delegate to the PM core; NOTE(review): the NULL-check on kbdev and
 * the return statement are elided in this extract. */
2715 kbase_pm_resume(kbdev);
2719 /** Runtime suspend callback from the OS.
2721 * This is called by Linux when the device should prepare for a condition in which it will
2722 * not be able to communicate with the CPU(s) and RAM due to power management.
2724 * @param dev The device to suspend
2726 * @return A standard Linux error code
2728 #ifdef CONFIG_PM_RUNTIME
2729 static int kbase_device_runtime_suspend(struct device *dev)
2731 struct kbase_device *kbdev = to_kbase_device(dev);
/* Hand off to the platform's runtime-off hook, if one was registered.
 * NOTE(review): braces and the return value are elided in this extract. */
2736 if (kbdev->pm.callback_power_runtime_off) {
2737 kbdev->pm.callback_power_runtime_off(kbdev);
2738 KBASE_DEBUG_PRINT_INFO(KBASE_PM, "runtime suspend\n");
2742 #endif /* CONFIG_PM_RUNTIME */
2744 /** Runtime resume callback from the OS.
2746 * This is called by Linux when the device should go into a fully active state.
2748 * @param dev The device to suspend
2750 * @return A standard Linux error code
2753 #ifdef CONFIG_PM_RUNTIME
/* NOTE(review): unlike kbase_device_runtime_suspend, this function is
 * not declared static — confirm whether external linkage is intended. */
2754 int kbase_device_runtime_resume(struct device *dev)
2757 struct kbase_device *kbdev = to_kbase_device(dev);
/* Hand off to the platform's runtime-on hook, propagating its result.
 * NOTE(review): the declaration of `ret`, braces and the return
 * statement are elided in this extract. */
2762 if (kbdev->pm.callback_power_runtime_on) {
2763 ret = kbdev->pm.callback_power_runtime_on(kbdev);
2764 KBASE_DEBUG_PRINT_INFO(KBASE_PM, "runtime resume\n");
2768 #endif /* CONFIG_PM_RUNTIME */
2770 /** Runtime idle callback from the OS.
2772 * This is called by Linux when the device appears to be inactive and it might be
2773 * placed into a low power state
2775 * @param dev The device to suspend
2777 * @return A standard Linux error code
2780 #ifdef CONFIG_PM_RUNTIME
2781 static int kbase_device_runtime_idle(struct device *dev)
/* Returning non-zero from runtime_idle prevents the PM core from
 * calling runtime_suspend automatically.  NOTE(review): the actual
 * return statement is elided in this extract. */
2783 /* Avoid pm_runtime_suspend being called */
2786 #endif /* CONFIG_PM_RUNTIME */
2788 /** The power management operations for the platform driver.
/* System sleep callbacks are always present; runtime-PM callbacks are
 * compiled in only when CONFIG_PM_RUNTIME is enabled. */
2790 static const struct dev_pm_ops kbase_pm_ops = {
2791 .suspend = kbase_device_suspend,
2792 .resume = kbase_device_resume,
2793 #ifdef CONFIG_PM_RUNTIME
2794 .runtime_suspend = kbase_device_runtime_suspend,
2795 .runtime_resume = kbase_device_runtime_resume,
2796 .runtime_idle = kbase_device_runtime_idle,
2797 #endif /* CONFIG_PM_RUNTIME */
/* Device-tree match table: binds this driver to both the legacy
 * "arm,malit7xx" and the generic "arm,mali-midgard" compatibles. */
2801 static const struct of_device_id kbase_dt_ids[] = {
2802 { .compatible = "arm,malit7xx" },
2803 { .compatible = "arm,mali-midgard" },
2806 MODULE_DEVICE_TABLE(of, kbase_dt_ids);
/* Platform driver definition; of_match_ptr() compiles the DT table out
 * when CONFIG_OF is disabled. */
2809 static struct platform_driver kbase_platform_driver = {
2810 .probe = kbase_platform_device_probe,
2811 .remove = kbase_platform_device_remove,
2813 .name = kbase_drv_name,
2814 .owner = THIS_MODULE,
2815 .pm = &kbase_pm_ops,
2816 .of_match_table = of_match_ptr(kbase_dt_ids),
2821 * The driver will not provide a shortcut to create the Mali platform device
2822 * anymore when using Device Tree.
/* NOTE(review): both module_platform_driver() and the late_initcall
 * below register the same driver, which would double-register if both
 * were compiled in.  The preprocessor conditionals separating them are
 * elided in this extract (embedded line numbers are non-contiguous) —
 * verify against the full file that only one path is active. */
2826 module_platform_driver(kbase_platform_driver);
/* Rockchip variant: defer driver registration to late_initcall so that
 * dependencies (clocks/regulators) are available first — TODO confirm. */
2828 static int __init rockchip_gpu_init_driver(void)
2830 return platform_driver_register(&kbase_platform_driver);
2833 late_initcall(rockchip_gpu_init_driver);
2837 #ifdef CONFIG_MALI_PLATFORM_FAKE
2838 #ifndef MALI_PLATFORM_FAKE_MODULE
2839 extern int kbase_platform_fake_register(void);
2840 extern void kbase_platform_fake_unregister(void);
/*
 * Non-DT module init path: run early platform init, optionally register
 * the fake platform device, then register the platform driver,
 * unregistering the fake device again if driver registration fails.
 * NOTE(review): braces, error checks on `ret` and return statements are
 * elided in this extract (embedded line numbers are non-contiguous).
 */
2844 static int __init kbase_driver_init(void)
2848 ret = kbase_platform_early_init();
2852 #ifdef CONFIG_MALI_PLATFORM_FAKE
2853 #ifndef MALI_PLATFORM_FAKE_MODULE
2854 ret = kbase_platform_fake_register();
2859 ret = platform_driver_register(&kbase_platform_driver);
2860 #ifdef CONFIG_MALI_PLATFORM_FAKE
2861 #ifndef MALI_PLATFORM_FAKE_MODULE
/* Unwind the fake device on driver-registration failure. */
2863 kbase_platform_fake_unregister();
/*
 * Non-DT module exit path: unregister the platform driver and, when the
 * fake platform device was registered in kbase_driver_init(), remove it
 * too.  NOTE(review): closing braces/#endifs are elided in this extract.
 */
2870 static void __exit kbase_driver_exit(void)
2872 platform_driver_unregister(&kbase_platform_driver);
2873 #ifdef CONFIG_MALI_PLATFORM_FAKE
2874 #ifndef MALI_PLATFORM_FAKE_MODULE
2875 kbase_platform_fake_unregister();
2880 module_init(kbase_driver_init);
2881 module_exit(kbase_driver_exit);
2883 #endif /* CONFIG_OF */
2885 MODULE_LICENSE("GPL");
2886 MODULE_VERSION(MALI_RELEASE_NAME);
2888 #ifdef CONFIG_MALI_GATOR_SUPPORT
2889 /* Create the trace points (otherwise we just get code to call a tracepoint) */
2890 #define CREATE_TRACE_POINTS
2891 #include "mali_linux_trace.h"
/*
 * Thin out-of-line wrappers around the gator/ftrace tracepoints declared
 * in mali_linux_trace.h, so other translation units can emit trace
 * events without including the tracepoint header themselves.
 * NOTE(review): the function braces are elided in this extract
 * (embedded line numbers are non-contiguous); each wrapper's body is the
 * single trace_* call shown.
 */
2893 void kbase_trace_mali_pm_status(u32 event, u64 value)
2895 trace_mali_pm_status(event, value);
2898 void kbase_trace_mali_pm_power_off(u32 event, u64 value)
2900 trace_mali_pm_power_off(event, value);
2903 void kbase_trace_mali_pm_power_on(u32 event, u64 value)
2905 trace_mali_pm_power_on(event, value);
/* Reports tgid/pid of the owning context, or 0/0 when kctx is NULL. */
2908 void kbase_trace_mali_job_slots_event(u32 event, const kbase_context *kctx, u8 atom_id)
2910 trace_mali_job_slots_event(event, (kctx != NULL ? kctx->osctx.tgid : 0), (kctx != NULL ? kctx->osctx.pid : 0), atom_id);
2913 void kbase_trace_mali_page_fault_insert_pages(int event, u32 value)
2915 trace_mali_page_fault_insert_pages(event, value);
2918 void kbase_trace_mali_mmu_as_in_use(int event)
2920 trace_mali_mmu_as_in_use(event);
2923 void kbase_trace_mali_mmu_as_released(int event)
2925 trace_mali_mmu_as_released(event);
2928 void kbase_trace_mali_total_alloc_pages_change(long long int event)
2930 trace_mali_total_alloc_pages_change(event);
2932 #endif /* CONFIG_MALI_GATOR_SUPPORT */