1. Support HEVC and VPU IOMMU. 2. Delete useless GPU log output during init. 3. Fix GPU compiling error.
[firefly-linux-kernel-4.4.55.git] / drivers / gpu / arm / t6xx / kbase / src / linux / mali_kbase_core_linux.c
1
2 /*
3  *
4  * (C) COPYRIGHT ARM Limited. All rights reserved.
5  *
6  * This program is free software and is provided to you under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation, and any use by you of this program is subject to the terms
9  * of such GNU licence.
10  *
11  * A copy of the licence is included with the program, and can also be obtained
12  * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
13  * Boston, MA  02110-1301, USA.
14  *
15  */
16
17
18
19
20
21 /**
22  * @file mali_kbase_core_linux.c
23  * Base kernel driver init.
24  */
25
26 #include <kbase/src/common/mali_kbase.h>
27 #include <kbase/src/common/mali_kbase_uku.h>
28 #include <kbase/src/common/mali_midg_regmap.h>
29 #include <kbase/src/common/mali_kbase_gator.h>
30 #include <kbase/src/linux/mali_kbase_mem_linux.h>
31 #ifdef CONFIG_MALI_NO_MALI
32 #include "mali_kbase_model_linux.h"
33 #endif /* CONFIG_MALI_NO_MALI */
34
35 #ifdef CONFIG_KDS
36 #include <linux/kds.h>
37 #include <linux/anon_inodes.h>
38 #include <linux/syscalls.h>
39 #endif /* CONFIG_KDS */
40
41 #include <linux/module.h>
42 #include <linux/init.h>
43 #include <linux/poll.h>
44 #include <linux/kernel.h>
45 #include <linux/errno.h>
46 #include <linux/of.h>
47 #include <linux/platform_device.h>
48 #include <linux/miscdevice.h>
49 #include <linux/list.h>
50 #include <linux/semaphore.h>
51 #include <linux/fs.h>
52 #include <linux/uaccess.h>
53 #include <linux/interrupt.h>
54 #include <linux/io.h>
55 #include <linux/compat.h>       /* is_compat_task */
56 #include <kbase/src/common/mali_kbase_hw.h>
57 #include <kbase/src/platform/mali_kbase_platform_common.h>
58 #ifdef CONFIG_SYNC
59 #include <kbase/src/linux/mali_kbase_sync.h>
60 #endif /* CONFIG_SYNC */
61
62 /*
63  * This file is included since when we support device tree we don't
64  * use the platform fake code for registering the kbase config attributes.
65  */
66 #ifdef CONFIG_OF
67 #include <kbase/mali_kbase_config.h>
68 #endif
69
70 //chenli: add for integration tests
71 #ifdef CONFIG_MALI_DEBUG
72 #include <kbase/src/integration_kit/MaliFns.h>
73 #endif
74 #define JOB_IRQ_TAG     0
75 #define MMU_IRQ_TAG     1
76 #define GPU_IRQ_TAG     2
77
78 struct kbase_irq_table {
79         u32 tag;
80         irq_handler_t handler;
81 };
82 #if MALI_UNIT_TEST
83 kbase_exported_test_data shared_kernel_test_data;
84 EXPORT_SYMBOL(shared_kernel_test_data);
85 #endif /* MALI_UNIT_TEST */
86
87 #define KBASE_DRV_NAME "mali"
88
89 static const char kbase_drv_name[] = KBASE_DRV_NAME;
90
91 static int kbase_dev_nr;
92
93 static DEFINE_SEMAPHORE(kbase_dev_list_lock);
94 static LIST_HEAD(kbase_dev_list);
95
96 KBASE_EXPORT_TEST_API(kbase_dev_list_lock)
97 KBASE_EXPORT_TEST_API(kbase_dev_list)
98 #define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
99 static INLINE void __compile_time_asserts(void)
100 {
101         CSTD_COMPILE_TIME_ASSERT(sizeof(KERNEL_SIDE_DDK_VERSION_STRING) <= KBASE_GET_VERSION_BUFFER_SIZE);
102 }
103
104 #ifdef CONFIG_KDS
105
106 typedef struct kbasep_kds_resource_set_file_data {
107         struct kds_resource_set *lock;
108 } kbasep_kds_resource_set_file_data;
109
110 static int kds_resource_release(struct inode *inode, struct file *file);
111
112 static const struct file_operations kds_resource_fops = {
113         .release = kds_resource_release
114 };
115
116 typedef struct kbase_kds_resource_list_data {
117         struct kds_resource **kds_resources;
118         unsigned long *kds_access_bitmap;
119         int num_elems;
120 } kbase_kds_resource_list_data;
121
122 static int kds_resource_release(struct inode *inode, struct file *file)
123 {
124         struct kbasep_kds_resource_set_file_data *data;
125
126         data = (struct kbasep_kds_resource_set_file_data *)file->private_data;
127         if (NULL != data) {
128                 if (NULL != data->lock)
129                         kds_resource_set_release(&data->lock);
130
131                 kfree(data);
132         }
133         return 0;
134 }
135
136 mali_error kbasep_kds_allocate_resource_list_data(kbase_context *kctx, base_external_resource *ext_res, int num_elems, kbase_kds_resource_list_data *resources_list)
137 {
138         base_external_resource *res = ext_res;
139         int res_id;
140
141         /* assume we have to wait for all */
142
143         KBASE_DEBUG_ASSERT(0 != num_elems);
144         resources_list->kds_resources = kmalloc(sizeof(struct kds_resource *) * num_elems, GFP_KERNEL);
145
146         if (NULL == resources_list->kds_resources)
147                 return MALI_ERROR_OUT_OF_MEMORY;
148
149         KBASE_DEBUG_ASSERT(0 != num_elems);
150         resources_list->kds_access_bitmap = kzalloc(sizeof(unsigned long) * ((num_elems + BITS_PER_LONG - 1) / BITS_PER_LONG), GFP_KERNEL);
151
152         if (NULL == resources_list->kds_access_bitmap) {
153                 kfree(resources_list->kds_access_bitmap);
154                 return MALI_ERROR_OUT_OF_MEMORY;
155         }
156
157         for (res_id = 0; res_id < num_elems; res_id++, res++) {
158                 int exclusive;
159                 kbase_va_region *reg;
160                 struct kds_resource *kds_res = NULL;
161
162                 exclusive = res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE;
163                 reg = kbase_region_tracker_find_region_enclosing_address(kctx, res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
164
165                 /* did we find a matching region object? */
166                 if (NULL == reg)
167                         break;
168
169                 /* no need to check reg->alloc as only regions with an alloc has
170                  * a size, and kbase_region_tracker_find_region_enclosing_address
171                  * only returns regions with size > 0 */
172                 switch (reg->alloc->type) {
173 #if defined(CONFIG_UMP) && defined(CONFIG_KDS)
174                 case KBASE_MEM_TYPE_IMPORTED_UMP:
175                         kds_res = ump_dd_kds_resource_get(reg->alloc->imported.ump_handle);
176                         break;
177 #endif /* defined(CONFIG_UMP) && defined(CONFIG_KDS) */
178                 default:
179                         break;
180                 }
181
182                 /* no kds resource for the region ? */
183                 if (!kds_res)
184                         break;
185
186                 resources_list->kds_resources[res_id] = kds_res;
187
188                 if (exclusive)
189                         set_bit(res_id, resources_list->kds_access_bitmap);
190         }
191
192         /* did the loop run to completion? */
193         if (res_id == num_elems)
194                 return MALI_ERROR_NONE;
195
196         /* Clean up as the resource list is not valid. */
197         kfree(resources_list->kds_resources);
198         kfree(resources_list->kds_access_bitmap);
199
200         return MALI_ERROR_FUNCTION_FAILED;
201 }
202
203 mali_bool kbasep_validate_kbase_pointer(kbase_pointer *p)
204 {
205 #ifdef CONFIG_COMPAT
206         if (is_compat_task()) {
207                 if (p->compat_value == 0)
208                         return MALI_FALSE;
209         } else {
210 #endif /* CONFIG_COMPAT */
211                 if (NULL == p->value)
212                         return MALI_FALSE;
213 #ifdef CONFIG_COMPAT
214         }
215 #endif /* CONFIG_COMPAT */
216         return MALI_TRUE;
217 }
218
219 mali_error kbase_external_buffer_lock(kbase_context *kctx, kbase_uk_ext_buff_kds_data *args, u32 args_size)
220 {
221         base_external_resource *ext_res_copy;
222         size_t ext_resource_size;
223         mali_error return_error = MALI_ERROR_FUNCTION_FAILED;
224         int fd;
225
226         if (args_size != sizeof(kbase_uk_ext_buff_kds_data))
227                 return MALI_ERROR_FUNCTION_FAILED;
228
229         /* Check user space has provided valid data */
230         if (!kbasep_validate_kbase_pointer(&args->external_resource) || !kbasep_validate_kbase_pointer(&args->file_descriptor) || (0 == args->num_res) || (args->num_res > KBASE_MAXIMUM_EXT_RESOURCES))
231                 return MALI_ERROR_FUNCTION_FAILED;
232
233         ext_resource_size = sizeof(base_external_resource) * args->num_res;
234
235         KBASE_DEBUG_ASSERT(0 != ext_resource_size);
236         ext_res_copy = kmalloc(ext_resource_size, GFP_KERNEL);
237
238         if (NULL != ext_res_copy) {
239                 base_external_resource *__user ext_res_user;
240                 int *__user file_descriptor_user;
241 #ifdef CONFIG_COMPAT
242                 if (is_compat_task()) {
243                         ext_res_user = compat_ptr(args->external_resource.compat_value);
244                         file_descriptor_user = compat_ptr(args->file_descriptor.compat_value);
245                 } else {
246 #endif /* CONFIG_COMPAT */
247                         ext_res_user = args->external_resource.value;
248                         file_descriptor_user = args->file_descriptor.value;
249 #ifdef CONFIG_COMPAT
250                 }
251 #endif /* CONFIG_COMPAT */
252
253                 /* Copy the external resources to lock from user space */
254                 if (0 == copy_from_user(ext_res_copy, ext_res_user, ext_resource_size)) {
255                         kbasep_kds_resource_set_file_data *fdata;
256
257                         /* Allocate data to be stored in the file */
258                         fdata = kmalloc(sizeof(kbasep_kds_resource_set_file_data), GFP_KERNEL);
259
260                         if (NULL != fdata) {
261                                 kbase_kds_resource_list_data resource_list_data;
262                                 /* Parse given elements and create resource and access lists */
263                                 return_error = kbasep_kds_allocate_resource_list_data(kctx, ext_res_copy, args->num_res, &resource_list_data);
264                                 if (MALI_ERROR_NONE == return_error) {
265                                         long err;
266
267                                         fdata->lock = NULL;
268
269                                         fd = anon_inode_getfd("kds_ext", &kds_resource_fops, fdata, 0);
270
271                                         err = copy_to_user(file_descriptor_user, &fd, sizeof(fd));
272
273                                         /* If the file descriptor was valid and we successfully copied it to user space, then we
274                                          * can try and lock the requested kds resources.
275                                          */
276                                         if ((fd >= 0) && (0 == err)) {
277                                                 struct kds_resource_set *lock;
278
279                                                 lock = kds_waitall(args->num_res, resource_list_data.kds_access_bitmap, resource_list_data.kds_resources, KDS_WAIT_BLOCKING);
280
281                                                 if (IS_ERR_OR_NULL(lock)) {
282                                                         return_error = MALI_ERROR_FUNCTION_FAILED;
283                                                 } else {
284                                                         return_error = MALI_ERROR_NONE;
285                                                         fdata->lock = lock;
286                                                 }
287                                         } else {
288                                                 return_error = MALI_ERROR_FUNCTION_FAILED;
289                                         }
290
291                                         kfree(resource_list_data.kds_resources);
292                                         kfree(resource_list_data.kds_access_bitmap);
293                                 }
294
295                                 if (MALI_ERROR_NONE != return_error) {
296                                         /* If the file was opened successfully then close it which will clean up
297                                          * the file data, otherwise we clean up the file data ourself. */
298                                         if (fd >= 0)
299                                                 sys_close(fd);
300                                         else
301                                                 kfree(fdata);
302                                 }
303                         } else {
304                                 return_error = MALI_ERROR_OUT_OF_MEMORY;
305                         }
306                 }
307                 kfree(ext_res_copy);
308         }
309         return return_error;
310 }
311 #endif /* CONFIG_KDS */
312
313 static mali_error kbase_dispatch(kbase_context *kctx, void * const args, u32 args_size)
314 {
315         struct kbase_device *kbdev;
316         uk_header *ukh = args;
317         u32 id;
318
319         KBASE_DEBUG_ASSERT(ukh != NULL);
320
321         kbdev = kctx->kbdev;
322         id = ukh->id;
323         ukh->ret = MALI_ERROR_NONE;     /* Be optimistic */
324
325         if (UKP_FUNC_ID_CHECK_VERSION == id) {
326                 if (args_size == sizeof(uku_version_check_args)) {
327                         uku_version_check_args *version_check = (uku_version_check_args *)args;
328
329                         version_check->major = BASE_UK_VERSION_MAJOR;
330                         version_check->minor = BASE_UK_VERSION_MINOR;
331
332                         ukh->ret = MALI_ERROR_NONE;
333                 } else {
334                         ukh->ret = MALI_ERROR_FUNCTION_FAILED;
335                 }
336                 return MALI_ERROR_NONE;
337         }
338
339
340         if (!atomic_read(&kctx->setup_complete)) {
341                 /* setup pending, try to signal that we'll do the setup */
342                 if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1)) {
343                         /* setup was already in progress, err this call */
344                         return MALI_ERROR_FUNCTION_FAILED;
345                 }
346
347                 /* we're the one doing setup */
348
349                 /* is it the only call we accept? */
350                 if (id == KBASE_FUNC_SET_FLAGS) {
351                         kbase_uk_set_flags *kbase_set_flags = (kbase_uk_set_flags *) args;
352
353                         if (sizeof(*kbase_set_flags) != args_size) {
354                                 /* not matching the expected call, stay stuck in setup mode */
355                                 goto bad_size;
356                         }
357
358                         if (MALI_ERROR_NONE != kbase_context_set_create_flags(kctx, kbase_set_flags->create_flags)) {
359                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
360                                 /* bad flags, will stay stuck in setup mode */
361                                 return MALI_ERROR_NONE;
362                         } else {
363                                 /* we've done the setup, all OK */
364                                 atomic_set(&kctx->setup_complete, 1);
365                                 return MALI_ERROR_NONE;
366                         }
367                 } else {
368                         /* unexpected call, will stay stuck in setup mode */
369                         return MALI_ERROR_FUNCTION_FAILED;
370                 }
371         }
372
373         /* setup complete, perform normal operation */
374         switch (id) {
375         case KBASE_FUNC_MEM_ALLOC:
376                 {
377                         kbase_uk_mem_alloc *mem = args;
378                         struct kbase_va_region *reg;
379
380                         if (sizeof(*mem) != args_size)
381                                 goto bad_size;
382
383                         reg = kbase_mem_alloc(kctx, mem->va_pages, mem->commit_pages, mem->extent, &mem->flags, &mem->gpu_va, &mem->va_alignment);
384                         if (!reg)
385                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
386                         break;
387                 }
388         case KBASE_FUNC_MEM_IMPORT:
389                 {
390                         kbase_uk_mem_import *mem_import = args;
391                         int *__user phandle;
392                         int handle;
393
394                         if (sizeof(*mem_import) != args_size)
395                                 goto bad_size;
396 #ifdef CONFIG_64BIT
397                         if (is_compat_task())
398                                 phandle = compat_ptr(mem_import->phandle.compat_value);
399                         else
400 #endif
401                                 phandle = mem_import->phandle.value;
402
403                         switch (mem_import->type) {
404                         case BASE_MEM_IMPORT_TYPE_UMP:
405                                 get_user(handle, phandle);
406                                 break;
407                         case BASE_MEM_IMPORT_TYPE_UMM:
408                                 get_user(handle, phandle);
409                                 break;
410                         default:
411                                 goto bad_type;
412                                 break;
413                         }
414
415                         if (kbase_mem_import(kctx, mem_import->type, handle, &mem_import->gpu_va, &mem_import->va_pages, &mem_import->flags)) {
416 bad_type:
417                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
418                         }
419                         break;
420                 }
421         case KBASE_FUNC_MEM_COMMIT:
422                 {
423                         kbase_uk_mem_commit *commit = args;
424
425                         if (sizeof(*commit) != args_size)
426                                 goto bad_size;
427
428                         if (commit->gpu_addr & ~PAGE_MASK) {
429                                 KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "kbase_dispatch case KBASE_FUNC_MEM_COMMIT: commit->gpu_addr: passed parameter is invalid");
430                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
431                                 break;
432                         }
433
434                         if (kbase_mem_commit(kctx, commit->gpu_addr, commit->pages, (base_backing_threshold_status*)&commit->result_subcode))
435                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
436                         break;
437                 }
438
439         case KBASE_FUNC_MEM_QUERY:
440                 {
441                         kbase_uk_mem_query *query = args;
442                         if (sizeof(*query) != args_size)
443                                 goto bad_size;
444
445                         if (query->gpu_addr & ~PAGE_MASK) {
446                                 KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->gpu_addr: passed parameter is invalid");
447                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
448                                 break;
449                         }
450                         if (query->query != KBASE_MEM_QUERY_COMMIT_SIZE &&
451                             query->query != KBASE_MEM_QUERY_VA_SIZE &&
452                                 query->query != KBASE_MEM_QUERY_FLAGS) {
453                                 KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->query = %lld unknown", (unsigned long long)query->query);
454                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
455                                 break;
456                         }
457
458                         ukh->ret = kbase_mem_query(kctx, query->gpu_addr, query->query, &query->value);
459                         break;
460                 }
461                 break;
462
463         case KBASE_FUNC_MEM_FLAGS_CHANGE:
464                 {
465                         kbase_uk_mem_flags_change * fc = args;
466                         if (sizeof(*fc) != args_size)
467                                 goto bad_size;
468
469                         if ((fc->gpu_va & ~PAGE_MASK) && (fc->gpu_va >= PAGE_SIZE)) {
470                                 KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "kbase_dispatch case KBASE_FUNC_MEM_FLAGS_CHANGE: mem->gpu_va: passed parameter is invalid");
471                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
472                                 break;
473                         }
474
475                         if (kbase_mem_flags_change(kctx, fc->gpu_va, fc->flags, fc->mask))
476                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
477
478                         break;
479                 }
480         case KBASE_FUNC_MEM_FREE:
481                 {
482                         kbase_uk_mem_free *mem = args;
483
484                         if (sizeof(*mem) != args_size)
485                                 goto bad_size;
486
487                         if ((mem->gpu_addr & ~PAGE_MASK) && (mem->gpu_addr >= PAGE_SIZE)) {
488                                 KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "kbase_dispatch case KBASE_FUNC_MEM_FREE: mem->gpu_addr: passed parameter is invalid");
489                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
490                                 break;
491                         }
492
493                         if (kbase_mem_free(kctx, mem->gpu_addr))
494                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
495                         break;
496                 }
497
498         case KBASE_FUNC_JOB_SUBMIT:
499                 {
500                         kbase_uk_job_submit *job = args;
501
502                         if (sizeof(*job) != args_size)
503                                 goto bad_size;
504
505                         if (MALI_ERROR_NONE != kbase_jd_submit(kctx, job))
506                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
507                         break;
508                 }
509
510         case KBASE_FUNC_SYNC:
511                 {
512                         kbase_uk_sync_now *sn = args;
513
514                         if (sizeof(*sn) != args_size)
515                                 goto bad_size;
516
517                         if (sn->sset.basep_sset.mem_handle & ~PAGE_MASK) {
518                                 KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "kbase_dispatch case KBASE_FUNC_SYNC: sn->sset.basep_sset.mem_handle: passed parameter is invalid");
519                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
520                                 break;
521                         }
522
523                         if (MALI_ERROR_NONE != kbase_sync_now(kctx, &sn->sset))
524                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
525                         break;
526                 }
527
528         case KBASE_FUNC_POST_TERM:
529                 {
530                         kbase_event_close(kctx);
531                         break;
532                 }
533
534         case KBASE_FUNC_HWCNT_SETUP:
535                 {
536                         kbase_uk_hwcnt_setup *setup = args;
537
538                         if (sizeof(*setup) != args_size)
539                                 goto bad_size;
540
541                         if (MALI_ERROR_NONE != kbase_instr_hwcnt_setup(kctx, setup))
542                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
543                         break;
544                 }
545
546         case KBASE_FUNC_HWCNT_DUMP:
547                 {
548                         /* args ignored */
549                         if (MALI_ERROR_NONE != kbase_instr_hwcnt_dump(kctx))
550                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
551                         break;
552                 }
553
554         case KBASE_FUNC_HWCNT_CLEAR:
555                 {
556                         /* args ignored */
557                         if (MALI_ERROR_NONE != kbase_instr_hwcnt_clear(kctx))
558                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
559                         break;
560                 }
561
562         case KBASE_FUNC_CPU_PROPS_REG_DUMP:
563                 {
564                         kbase_uk_cpuprops *setup = args;
565
566                         if (sizeof(*setup) != args_size)
567                                 goto bad_size;
568
569                         if (MALI_ERROR_NONE != kbase_cpuprops_uk_get_props(kctx, setup))
570                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
571                         break;
572                 }
573
574         case KBASE_FUNC_GPU_PROPS_REG_DUMP:
575                 {
576                         kbase_uk_gpuprops *setup = args;
577
578                         if (sizeof(*setup) != args_size)
579                                 goto bad_size;
580
581                         if (MALI_ERROR_NONE != kbase_gpuprops_uk_get_props(kctx, setup))
582                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
583                         break;
584                 }
585         case KBASE_FUNC_FIND_CPU_MAPPING:
586                 {
587                         kbase_uk_find_cpu_mapping *find = args;
588                         struct kbase_cpu_mapping *map;
589
590                         if (sizeof(*find) != args_size)
591                                 goto bad_size;
592
593                         if (find->gpu_addr & ~PAGE_MASK) {
594                                 KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "kbase_dispatch case KBASE_FUNC_FIND_CPU_MAPPING: find->gpu_addr: passed parameter is invalid");
595                                 goto out_bad;
596                         }
597
598                         KBASE_DEBUG_ASSERT(find != NULL);
599                         if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX)
600                                 map = NULL;
601                         else
602                                 map = kbasep_find_enclosing_cpu_mapping(kctx, find->gpu_addr, (uintptr_t) find->cpu_addr, (size_t) find->size);
603
604                         if (NULL != map) {
605                                 find->uaddr = PTR_TO_U64(map->vma->vm_start);
606                                 find->page_off = map->page_off;
607                         } else {
608                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
609                         }
610                         break;
611                 }
612         case KBASE_FUNC_GET_VERSION:
613                 {
614                         kbase_uk_get_ddk_version *get_version = (kbase_uk_get_ddk_version *) args;
615
616                         if (sizeof(*get_version) != args_size)
617                                 goto bad_size;
618
619                         /* version buffer size check is made in compile time assert */
620                         memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
621                         get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
622                         break;
623                 }
624
625         case KBASE_FUNC_STREAM_CREATE:
626                 {
627 #ifdef CONFIG_SYNC
628                         kbase_uk_stream_create *screate = (kbase_uk_stream_create *) args;
629
630                         if (sizeof(*screate) != args_size)
631                                 goto bad_size;
632
633                         if (strnlen(screate->name, sizeof(screate->name)) >= sizeof(screate->name)) {
634                                 /* not NULL terminated */
635                                 ukh->ret = MALI_ERROR_FUNCTION_FAILED;
636                                 break;
637                         }
638
639                         ukh->ret = kbase_stream_create(screate->name, &screate->fd);
640 #else /* CONFIG_SYNC */
641                         ukh->ret = MALI_ERROR_FUNCTION_FAILED;
642 #endif /* CONFIG_SYNC */
643                         break;
644                 }
645         case KBASE_FUNC_FENCE_VALIDATE:
646                 {
647 #ifdef CONFIG_SYNC
648                         kbase_uk_fence_validate *fence_validate = (kbase_uk_fence_validate *) args;
649                         if (sizeof(*fence_validate) != args_size)
650                                 goto bad_size;
651
652                         ukh->ret = kbase_fence_validate(fence_validate->fd);
653 #endif /* CONFIG_SYNC */
654                         break;
655                 }
656
657         case KBASE_FUNC_EXT_BUFFER_LOCK:
658                 {
659 #ifdef CONFIG_KDS
660                         ukh->ret = kbase_external_buffer_lock(kctx, (kbase_uk_ext_buff_kds_data *) args, args_size);
661 #endif /* CONFIG_KDS */
662                         break;
663                 }
664
665         case KBASE_FUNC_SET_TEST_DATA:
666                 {
667 #if MALI_UNIT_TEST
668                         kbase_uk_set_test_data *set_data = args;
669
670                         shared_kernel_test_data = set_data->test_data;
671                         shared_kernel_test_data.kctx.value = kctx;
672                         shared_kernel_test_data.mm.value = (void *)current->mm;
673                         ukh->ret = MALI_ERROR_NONE;
674 #endif /* MALI_UNIT_TEST */
675                         break;
676                 }
677
678         case KBASE_FUNC_INJECT_ERROR:
679                 {
680 #ifdef CONFIG_MALI_ERROR_INJECT
681                         unsigned long flags;
682                         kbase_error_params params = ((kbase_uk_error_params *) args)->params;
683                         /*mutex lock */
684                         spin_lock_irqsave(&kbdev->osdev.reg_op_lock, flags);
685                         ukh->ret = job_atom_inject_error(&params);
686                         spin_unlock_irqrestore(&kbdev->osdev.reg_op_lock, flags);
687                         /*mutex unlock */
688 #endif /* CONFIG_MALI_ERROR_INJECT */
689                         break;
690                 }
691
692         case KBASE_FUNC_MODEL_CONTROL:
693                 {
694 #ifdef CONFIG_MALI_NO_MALI
695                         unsigned long flags;
696                         kbase_model_control_params params = ((kbase_uk_model_control_params *) args)->params;
697                         /*mutex lock */
698                         spin_lock_irqsave(&kbdev->osdev.reg_op_lock, flags);
699                         ukh->ret = midg_model_control(kbdev->osdev.model, &params);
700                         spin_unlock_irqrestore(&kbdev->osdev.reg_op_lock, flags);
701                         /*mutex unlock */
702 #endif /* CONFIG_MALI_NO_MALI */
703                         break;
704                 }
705
706         case KBASE_FUNC_KEEP_GPU_POWERED:
707                 {
708                         kbase_uk_keep_gpu_powered *kgp = (kbase_uk_keep_gpu_powered *) args;
709                         /* A suspend won't happen here, because we're in a syscall from a
710                          * userspace thread.
711                          *
712                          * Nevertheless, we'd get the wrong pm_context_active/idle counting
713                          * here if a suspend did happen, so let's assert it won't: */
714                         KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
715
716                         if (kgp->enabled && !kctx->keep_gpu_powered) {
717                                 kbase_pm_context_active(kbdev);
718                                 atomic_inc(&kbdev->keep_gpu_powered_count);
719                                 kctx->keep_gpu_powered = MALI_TRUE;
720                         } else if (!kgp->enabled && kctx->keep_gpu_powered) {
721                                 atomic_dec(&kbdev->keep_gpu_powered_count);
722                                 kbase_pm_context_idle(kbdev);
723                                 kctx->keep_gpu_powered = MALI_FALSE;
724                         }
725
726                         break;
727                 }
728
729         case KBASE_FUNC_GET_PROFILING_CONTROLS :
730                 {
731                         struct kbase_uk_profiling_controls *controls = \
732                                         (struct kbase_uk_profiling_controls *)args;
733                         u32 i;
734
735                         if (sizeof(*controls) != args_size)
736                                 goto bad_size;
737
738                         for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++) {
739                                 controls->profiling_controls[i] = kbase_get_profiling_control(kbdev, i);
740                         }
741
742                         break;
743                 }
744
745         /* used only for testing purposes; these controls are to be set by gator through gator API */
746         case KBASE_FUNC_SET_PROFILING_CONTROLS :
747                 {
748                         struct kbase_uk_profiling_controls *controls = \
749                                         (struct kbase_uk_profiling_controls *)args;
750                         u32 i;
751
752                         if (sizeof(*controls) != args_size)
753                                 goto bad_size;
754
755                         for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
756                         {
757                                 _mali_profiling_control(i, controls->profiling_controls[i]);
758                         }
759
760                         break;
761                 }
762
763         default:
764                 dev_err(kbdev->osdev.dev, "unknown ioctl %u", id);
765                 goto out_bad;
766         }
767
768         return MALI_ERROR_NONE;
769
770  bad_size:
771         dev_err(kbdev->osdev.dev, "Wrong syscall size (%d) for %08x\n", args_size, id);
772  out_bad:
773         return MALI_ERROR_FUNCTION_FAILED;
774 }
775
/* Recover the kbase_device stored in the struct device's drvdata. */
static struct kbase_device *to_kbase_device(struct device *dev)
{
	struct kbase_device *kbdev = dev_get_drvdata(dev);

	return kbdev;
}
780
781 /*
782  * API to acquire device list semaphore and
783  * return pointer to the device list head
784  */
785 const struct list_head *kbase_dev_list_get(void)
786 {
787         down(&kbase_dev_list_lock);
788         return &kbase_dev_list;
789 }
790
/* API to release the device list semaphore */
void kbase_dev_list_put(const struct list_head *dev_list)
{
	/* dev_list is unused: it only proves the caller went through
	 * kbase_dev_list_get() to obtain the lock being released here. */
	up(&kbase_dev_list_lock);
}
796
797 /* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */
798 struct kbase_device *kbase_find_device(int minor)
799 {
800         struct kbase_device *kbdev = NULL;
801         struct list_head *entry;
802
803         down(&kbase_dev_list_lock);
804         list_for_each(entry, &kbase_dev_list) {
805                 struct kbase_device *tmp;
806
807                 tmp = list_entry(entry, struct kbase_device, osdev.entry);
808                 if (tmp->osdev.mdev.minor == minor || minor == -1) {
809                         kbdev = tmp;
810                         get_device(kbdev->osdev.dev);
811                         break;
812                 }
813         }
814         up(&kbase_dev_list_lock);
815
816         return kbdev;
817 }
818 EXPORT_SYMBOL(kbase_find_device);
819
/* Drop the struct device reference taken by kbase_find_device(). */
void kbase_release_device(struct kbase_device *kbdev)
{
	put_device(kbdev->osdev.dev);
}
824 EXPORT_SYMBOL(kbase_release_device);
825
826 static int kbase_open(struct inode *inode, struct file *filp)
827 {
828         struct kbase_device *kbdev = NULL;
829         kbase_context *kctx;
830         int ret = 0;
831
832         kbdev = kbase_find_device(iminor(inode));
833
834         if (!kbdev)
835                 return -ENODEV;
836
837         kctx = kbase_create_context(kbdev);
838         if (!kctx) {
839                 ret = -ENOMEM;
840                 goto out;
841         }
842
843         init_waitqueue_head(&kctx->osctx.event_queue);
844         filp->private_data = kctx;
845
846         dev_dbg(kbdev->osdev.dev, "created base context\n");
847
848         {
849                 kbasep_kctx_list_element *element;
850
851                 element = kzalloc(sizeof(kbasep_kctx_list_element), GFP_KERNEL);
852                 if (element) {
853                         mutex_lock(&kbdev->kctx_list_lock);
854                         element->kctx = kctx;
855                         list_add(&element->link, &kbdev->kctx_list);
856                         mutex_unlock(&kbdev->kctx_list_lock);
857                 } else {
858                         /* we don't treat this as a fail - just warn about it */
859                         printk(KERN_WARNING KBASE_DRV_NAME "couldn't add kctx to kctx_list\n");
860                 }
861         }
862         return 0;
863
864  out:
865         kbase_release_device(kbdev);
866         return ret;
867 }
868
869 static int kbase_release(struct inode *inode, struct file *filp)
870 {
871         kbase_context *kctx = filp->private_data;
872         struct kbase_device *kbdev = kctx->kbdev;
873         kbasep_kctx_list_element *element, *tmp;
874         mali_bool found_element = MALI_FALSE;
875
876         mutex_lock(&kbdev->kctx_list_lock);
877         list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
878                 if (element->kctx == kctx) {
879                         list_del(&element->link);
880                         kfree(element);
881                         found_element = MALI_TRUE;
882                 }
883         }
884         mutex_unlock(&kbdev->kctx_list_lock);
885         if (!found_element)
886                 printk(KERN_WARNING KBASE_DRV_NAME "kctx not in kctx_list\n");
887
888         filp->private_data = NULL;
889         kbase_destroy_context(kctx);
890
891         dev_dbg(kbdev->osdev.dev, "deleted base context\n");
892         kbase_release_device(kbdev);
893         return 0;
894 }
895
896 #define CALL_MAX_SIZE 536
897
898 static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
899 {
900         u64 msg[(CALL_MAX_SIZE + 7) >> 3] = { 0xdeadbeefdeadbeefull };  /* alignment fixup */
901         u32 size = _IOC_SIZE(cmd);
902         kbase_context *kctx = filp->private_data;
903
904         if (size > CALL_MAX_SIZE)
905                 return -ENOTTY;
906
907         if (0 != copy_from_user(&msg, (void *)arg, size)) {
908                 pr_err("failed to copy ioctl argument into kernel space\n");
909                 return -EFAULT;
910         }
911
912         if (MALI_ERROR_NONE != kbase_dispatch(kctx, &msg, size))
913                 return -EFAULT;
914
915         if (0 != copy_to_user((void *)arg, &msg, size)) {
916                 pr_err("failed to copy results of UK call back to user space\n");
917                 return -EFAULT;
918         }
919         return 0;
920 }
921
/* Char-device read: drain completed job events to userspace.
 *
 * Each read returns one or more base_jd_event_v2 records. Blocks until at
 * least one event is available unless O_NONBLOCK is set. Returns the number
 * of bytes written, -ENOBUFS if the buffer cannot hold even one event,
 * -EAGAIN / -ERESTARTSYS for the usual non-blocking / signal cases, and
 * -EPIPE once the driver signals termination with nothing left to deliver.
 */
static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	kbase_context *kctx = filp->private_data;
	base_jd_event_v2 uevent;
	int out_count = 0;

	if (count < sizeof(uevent))
		return -ENOBUFS;

	do {
		/* Queue empty: if we already copied something, return that
		 * partial batch instead of blocking for more. */
		while (kbase_event_dequeue(kctx, &uevent)) {
			if (out_count > 0)
				goto out;

			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			if (wait_event_interruptible(kctx->osctx.event_queue, kbase_event_pending(kctx)))
				return -ERESTARTSYS;
		}
		/* Termination marker: deliver what we have, or report EPIPE
		 * if this read produced nothing. */
		if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
			if (out_count == 0)
				return -EPIPE;
			goto out;
		}

		if (copy_to_user(buf, &uevent, sizeof(uevent)))
			return -EFAULT;

		buf += sizeof(uevent);
		out_count++;
		count -= sizeof(uevent);
	} while (count >= sizeof(uevent));

 out:
	return out_count * sizeof(uevent);
}
959
960 static unsigned int kbase_poll(struct file *filp, poll_table *wait)
961 {
962         kbase_context *kctx = filp->private_data;
963
964         poll_wait(filp, &kctx->osctx.event_queue, wait);
965         if (kbase_event_pending(kctx))
966                 return POLLIN | POLLRDNORM;
967
968         return 0;
969 }
970
/* Wake any reader blocked in kbase_read()/kbase_poll() on this context's
 * event queue. */
void kbase_event_wakeup(kbase_context *kctx)
{
	KBASE_DEBUG_ASSERT(kctx);

	wake_up_interruptible(&kctx->osctx.event_queue);
}
977
978 KBASE_EXPORT_TEST_API(kbase_event_wakeup)
979
980 int kbase_check_flags(int flags)
981 {
982         /* Enforce that the driver keeps the O_CLOEXEC flag so that execve() always
983          * closes the file descriptor in a child process.
984          */
985         if (0 == (flags & O_CLOEXEC))
986                 return -EINVAL;
987
988         return 0;
989 }
990
/* File operations for the /dev/maliN character device. */
static const struct file_operations kbase_fops = {
	.owner = THIS_MODULE,
	.open = kbase_open,
	.release = kbase_release,
	.read = kbase_read,
	.poll = kbase_poll,
	.unlocked_ioctl = kbase_ioctl,
	/* same handler serves 32-bit userspace on a 64-bit kernel */
	.compat_ioctl = kbase_ioctl,
	.mmap = kbase_mmap,
	.check_flags = kbase_check_flags,
};
1002
1003 #ifndef CONFIG_MALI_NO_MALI
/* Write a 32-bit value to a GPU register at the given byte offset. */
void kbase_os_reg_write(kbase_device *kbdev, u16 offset, u32 value)
{
	writel(value, kbdev->osdev.reg + offset);
}
1008
/* Read a 32-bit value from a GPU register at the given byte offset. */
u32 kbase_os_reg_read(kbase_device *kbdev, u16 offset)
{
	return readl(kbdev->osdev.reg + offset);
}
1013
/* Encode a small tag (the IRQ index) into the low bits of a pointer,
 * producing the per-IRQ cookie passed to request_irq(). */
static void *kbase_tag(void *ptr, u32 tag)
{
	uintptr_t addr = (uintptr_t)ptr;

	return (void *)(addr | tag);
}
1018
/* Strip the low two tag bits from a cookie produced by kbase_tag(),
 * recovering the original (>= 4-byte aligned) pointer. */
static void *kbase_untag(void *ptr)
{
	uintptr_t addr = (uintptr_t)ptr;

	return (void *)(addr & ~(uintptr_t)3);
}
1023
/* Top-half handler for the JOB IRQ line.
 *
 * Claims the interrupt only when the GPU is powered and JOB_IRQ_STATUS is
 * non-zero; otherwise returns IRQ_NONE so other devices sharing the line
 * can handle it.
 */
static irqreturn_t kbase_job_irq_handler(int irq, void *data)
{
	unsigned long flags;
	struct kbase_device *kbdev = kbase_untag(data);	/* strip tag bits from cookie */
	u32 val;

	spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);

	if (!kbdev->pm.gpu_powered) {
		/* GPU is turned off - IRQ is not for us */
		spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
		return IRQ_NONE;
	}

	/* Read under gpu_powered_lock so the power state cannot change
	 * during the register access. */
	val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL);

#ifdef CONFIG_MALI_DEBUG
	if (!kbdev->pm.driver_ready_for_irqs)
		dev_dbg(kbdev->osdev.dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
				__func__, irq, val );
#endif /* CONFIG_MALI_DEBUG */
	spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);

	if (!val)
		return IRQ_NONE;

	dev_dbg(kbdev->osdev.dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);

	kbase_job_done(kbdev, val);

	return IRQ_HANDLED;
}
1056
1057 KBASE_EXPORT_TEST_API(kbase_job_irq_handler);
1058
/* Top-half handler for the MMU IRQ line; mirrors kbase_job_irq_handler()
 * but reads MMU_IRQ_STATUS and forwards to kbase_mmu_interrupt(). */
static irqreturn_t kbase_mmu_irq_handler(int irq, void *data)
{
	unsigned long flags;
	struct kbase_device *kbdev = kbase_untag(data);	/* strip tag bits from cookie */
	u32 val;

	spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);

	if (!kbdev->pm.gpu_powered) {
		/* GPU is turned off - IRQ is not for us */
		spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
		return IRQ_NONE;
	}

	/* Read under gpu_powered_lock so the power state cannot change
	 * during the register access. */
	val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL);

#ifdef CONFIG_MALI_DEBUG
	if (!kbdev->pm.driver_ready_for_irqs)
		dev_dbg(kbdev->osdev.dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
				__func__, irq, val );
#endif /* CONFIG_MALI_DEBUG */
	spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);

	if (!val)
		return IRQ_NONE;

	dev_dbg(kbdev->osdev.dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);

	kbase_mmu_interrupt(kbdev, val);

	return IRQ_HANDLED;
}
1091
/* Top-half handler for the GPU IRQ line; mirrors kbase_job_irq_handler()
 * but reads GPU_IRQ_STATUS and forwards to kbase_gpu_interrupt(). */
static irqreturn_t kbase_gpu_irq_handler(int irq, void *data)
{
	unsigned long flags;
	struct kbase_device *kbdev = kbase_untag(data);	/* strip tag bits from cookie */
	u32 val;

	spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);

	if (!kbdev->pm.gpu_powered) {
		/* GPU is turned off - IRQ is not for us */
		spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
		return IRQ_NONE;
	}

	/* Read under gpu_powered_lock so the power state cannot change
	 * during the register access. */
	val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS), NULL);

#ifdef CONFIG_MALI_DEBUG
	if (!kbdev->pm.driver_ready_for_irqs)
		dev_dbg(kbdev->osdev.dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
				__func__, irq, val );
#endif /* CONFIG_MALI_DEBUG */
	spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);

	if (!val)
		return IRQ_NONE;

	dev_dbg(kbdev->osdev.dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);

	kbase_gpu_interrupt(kbdev, val);

	return IRQ_HANDLED;
}
1124
/* Default IRQ handlers, indexed by the *_IRQ_TAG used as the pointer tag
 * in the request_irq() cookie. */
static irq_handler_t kbase_handler_table[] = {
	[JOB_IRQ_TAG] = kbase_job_irq_handler,
	[MMU_IRQ_TAG] = kbase_mmu_irq_handler,
	[GPU_IRQ_TAG] = kbase_gpu_irq_handler,
};
1130
1131 #ifdef CONFIG_MALI_DEBUG
1132 #define  JOB_IRQ_HANDLER JOB_IRQ_TAG
1133 #define  MMU_IRQ_HANDLER MMU_IRQ_TAG
1134 #define  GPU_IRQ_HANDLER GPU_IRQ_TAG
1135
1136 /**
1137  * @brief Registers given interrupt handler for requested interrupt type
1138  *        Case irq handler is not specified default handler shall be registered
1139  *
1140  * @param[in] kbdev           - Device for which the handler is to be registered
1141  * @param[in] custom_handler  - Handler to be registered
1142  * @param[in] irq_type        - Interrupt type
1143  * @return      MALI_ERROR_NONE case success, MALI_ERROR_FUNCTION_FAILED otherwise
1144  */
static mali_error kbase_set_custom_irq_handler(kbase_device *kbdev, irq_handler_t custom_handler, int irq_type)
{
	struct kbase_os_device *osdev = &kbdev->osdev;
	mali_error result = MALI_ERROR_NONE;
	irq_handler_t requested_irq_handler = NULL;
	KBASE_DEBUG_ASSERT((JOB_IRQ_HANDLER <= irq_type) && (GPU_IRQ_HANDLER >= irq_type));

	/* Release previous handler */
	if (osdev->irqs[irq_type].irq)
		free_irq(osdev->irqs[irq_type].irq, kbase_tag(kbdev, irq_type));

	/* NULL custom handler means "reinstall the default from the table" */
	requested_irq_handler = (NULL != custom_handler) ? custom_handler : kbase_handler_table[irq_type];

	/* IRQF_SHARED matches the flags used at initial registration; the
	 * kbase_tag() cookie identifies which line fired in the handler. */
	if (0 != request_irq(osdev->irqs[irq_type].irq, requested_irq_handler, osdev->irqs[irq_type].flags | IRQF_SHARED, dev_name(osdev->dev), kbase_tag(kbdev, irq_type))) {
		result = MALI_ERROR_FUNCTION_FAILED;
		dev_err(osdev->dev, "Can't request interrupt %d (index %d)\n", osdev->irqs[irq_type].irq, irq_type);
#ifdef CONFIG_SPARSE_IRQ
		dev_err(osdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
#endif /* CONFIG_SPARSE_IRQ */
	}

	return result;
}
1168
1169 KBASE_EXPORT_TEST_API(kbase_set_custom_irq_handler)
1170
/* State for testing correct interrupt assignment and reception by the CPU. */
typedef struct kbasep_irq_test {
	struct hrtimer timer;	/* watchdog that fires if no IRQ arrives */
	wait_queue_head_t wait;	/* test thread sleeps here */
	int triggered;		/* set by IRQ handler or timeout callback */
	u32 timeout;		/* set only by the timeout callback */
} kbasep_irq_test;
1178
1179 static kbasep_irq_test kbasep_irq_test_data;
1180
1181 #define IRQ_TEST_TIMEOUT    500
1182
/* Temporary JOB IRQ handler installed during the interrupt self-test:
 * records that the interrupt reached the CPU, wakes the test thread,
 * and clears the raised bits via JOB_IRQ_CLEAR. */
static irqreturn_t kbase_job_irq_test_handler(int irq, void *data)
{
	unsigned long flags;
	struct kbase_device *kbdev = kbase_untag(data);	/* strip tag bits from cookie */
	u32 val;

	spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);

	if (!kbdev->pm.gpu_powered) {
		/* GPU is turned off - IRQ is not for us */
		spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
		return IRQ_NONE;
	}

	val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL);

	spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);

	if (!val)
		return IRQ_NONE;

	dev_dbg(kbdev->osdev.dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);

	/* Signal kbasep_common_test_interrupt() that the IRQ arrived */
	kbasep_irq_test_data.triggered = 1;
	wake_up(&kbasep_irq_test_data.wait);

	kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), val, NULL);

	return IRQ_HANDLED;
}
1213
/* Temporary MMU IRQ handler for the interrupt self-test; mirrors
 * kbase_job_irq_test_handler() using the MMU status/clear registers. */
static irqreturn_t kbase_mmu_irq_test_handler(int irq, void *data)
{
	unsigned long flags;
	struct kbase_device *kbdev = kbase_untag(data);	/* strip tag bits from cookie */
	u32 val;

	spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);

	if (!kbdev->pm.gpu_powered) {
		/* GPU is turned off - IRQ is not for us */
		spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
		return IRQ_NONE;
	}

	val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL);

	spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);

	if (!val)
		return IRQ_NONE;

	dev_dbg(kbdev->osdev.dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);

	/* Signal kbasep_common_test_interrupt() that the IRQ arrived */
	kbasep_irq_test_data.triggered = 1;
	wake_up(&kbasep_irq_test_data.wait);

	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), val, NULL);

	return IRQ_HANDLED;
}
1244
1245 static enum hrtimer_restart kbasep_test_interrupt_timeout(struct hrtimer *timer)
1246 {
1247         kbasep_irq_test *test_data = container_of(timer, kbasep_irq_test, timer);
1248
1249         test_data->timeout = 1;
1250         test_data->triggered = 1;
1251         wake_up(&test_data->wait);
1252         return HRTIMER_NORESTART;
1253 }
1254
/* Verify that the IRQ line for the given tag actually reaches the CPU.
 *
 * Temporarily swaps in a test handler, raises the interrupt by writing the
 * RAWSTAT register, and waits (with an IRQ_TEST_TIMEOUT ms hrtimer watchdog)
 * for the handler to fire. The original handler and interrupt mask are
 * restored before returning. GPU_IRQ_TAG is skipped (already exercised by
 * the PM driver). Returns MALI_ERROR_NONE on success.
 */
static mali_error kbasep_common_test_interrupt(kbase_device * const kbdev, u32 tag)
{
	struct kbase_os_device *osdev = &kbdev->osdev;
	mali_error err = MALI_ERROR_NONE;
	irq_handler_t test_handler;

	u32 old_mask_val;
	u16 mask_offset;
	u16 rawstat_offset;

	/* Select the handler and register pair for the requested IRQ line */
	switch (tag) {
	case JOB_IRQ_TAG:
		test_handler = kbase_job_irq_test_handler;
		rawstat_offset = JOB_CONTROL_REG(JOB_IRQ_RAWSTAT);
		mask_offset = JOB_CONTROL_REG(JOB_IRQ_MASK);
		break;
	case MMU_IRQ_TAG:
		test_handler = kbase_mmu_irq_test_handler;
		rawstat_offset = MMU_REG(MMU_IRQ_RAWSTAT);
		mask_offset = MMU_REG(MMU_IRQ_MASK);
		break;
	case GPU_IRQ_TAG:
		/* already tested by pm_driver - bail out */
	default:
		return MALI_ERROR_NONE;
	}

	/* store old mask */
	old_mask_val = kbase_reg_read(kbdev, mask_offset, NULL);
	/* mask interrupts */
	kbase_reg_write(kbdev, mask_offset, 0x0, NULL);

	if (osdev->irqs[tag].irq) {
		/* release original handler and install test handler */
		if (MALI_ERROR_NONE != kbase_set_custom_irq_handler(kbdev, test_handler, tag)) {
			err = MALI_ERROR_FUNCTION_FAILED;
		} else {
			kbasep_irq_test_data.timeout = 0;
			hrtimer_init(&kbasep_irq_test_data.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
			kbasep_irq_test_data.timer.function = kbasep_test_interrupt_timeout;

			/* trigger interrupt: unmask bit 0 then raise it in RAWSTAT */
			kbase_reg_write(kbdev, mask_offset, 0x1, NULL);
			kbase_reg_write(kbdev, rawstat_offset, 0x1, NULL);

			hrtimer_start(&kbasep_irq_test_data.timer, HR_TIMER_DELAY_MSEC(IRQ_TEST_TIMEOUT), HRTIMER_MODE_REL);

			/* triggered is set either by the test handler (success)
			 * or by the watchdog timeout callback (failure) */
			wait_event(kbasep_irq_test_data.wait, kbasep_irq_test_data.triggered != 0);

			if (kbasep_irq_test_data.timeout != 0) {
				dev_err(osdev->dev, "Interrupt %d (index %d) didn't reach CPU.\n", osdev->irqs[tag].irq, tag);
				err = MALI_ERROR_FUNCTION_FAILED;
			} else {
				dev_dbg(osdev->dev, "Interrupt %d (index %d) reached CPU.\n", osdev->irqs[tag].irq, tag);
			}

			hrtimer_cancel(&kbasep_irq_test_data.timer);
			kbasep_irq_test_data.triggered = 0;

			/* mask interrupts */
			kbase_reg_write(kbdev, mask_offset, 0x0, NULL);

			/* release test handler */
			free_irq(osdev->irqs[tag].irq, kbase_tag(kbdev, tag));
		}

		/* restore original interrupt */
		if (request_irq(osdev->irqs[tag].irq, kbase_handler_table[tag], osdev->irqs[tag].flags | IRQF_SHARED, dev_name(osdev->dev), kbase_tag(kbdev, tag))) {
			dev_err(osdev->dev, "Can't restore original interrupt %d (index %d)\n", osdev->irqs[tag].irq, tag);
			err = MALI_ERROR_FUNCTION_FAILED;
		}
	}
	/* restore old mask */
	kbase_reg_write(kbdev, mask_offset, old_mask_val, NULL);

	return err;
}
1332
1333 static mali_error kbasep_common_test_interrupt_handlers(kbase_device * const kbdev)
1334 {
1335         struct kbase_os_device *osdev = &kbdev->osdev;
1336         mali_error err;
1337
1338         init_waitqueue_head(&kbasep_irq_test_data.wait);
1339         kbasep_irq_test_data.triggered = 0;
1340
1341         /* A suspend won't happen during startup/insmod */
1342         kbase_pm_context_active(kbdev);
1343
1344         err = kbasep_common_test_interrupt(kbdev, JOB_IRQ_TAG);
1345         if (MALI_ERROR_NONE != err) {
1346                 dev_err(osdev->dev, "Interrupt JOB_IRQ didn't reach CPU. Check interrupt assignments.\n");
1347                 goto out;
1348         }
1349
1350         err = kbasep_common_test_interrupt(kbdev, MMU_IRQ_TAG);
1351         if (MALI_ERROR_NONE != err) {
1352                 dev_err(osdev->dev, "Interrupt MMU_IRQ didn't reach CPU. Check interrupt assignments.\n");
1353                 goto out;
1354         }
1355
1356         dev_err(osdev->dev, "Interrupts are correctly assigned.\n");
1357
1358  out:
1359         kbase_pm_context_idle(kbdev);
1360
1361         return err;
1362
1363 }
1364 #endif /* CONFIG_MALI_DEBUG */
1365
1366 static int kbase_install_interrupts(kbase_device *kbdev)
1367 {
1368         struct kbase_os_device *osdev = &kbdev->osdev;
1369         u32 nr = ARRAY_SIZE(kbase_handler_table);
1370         int err;
1371         u32 i;
1372
1373         for (i = 0; i < nr; i++) {
1374                 err = request_irq(osdev->irqs[i].irq, kbase_handler_table[i], osdev->irqs[i].flags | IRQF_SHARED, dev_name(osdev->dev), kbase_tag(kbdev, i));
1375                 if (err) {
1376                         dev_err(osdev->dev, "Can't request interrupt %d (index %d)\n", osdev->irqs[i].irq, i);
1377 #ifdef CONFIG_SPARSE_IRQ
1378                         dev_err(osdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
1379 #endif /* CONFIG_SPARSE_IRQ */
1380                         goto release;
1381                 }
1382                 printk("%s,request irq %d ok\n",__func__,osdev->irqs[i].irq + 32 );
1383         }
1384
1385         return 0;
1386
1387  release:
1388         while (i-- > 0)
1389                 free_irq(osdev->irqs[i].irq, kbase_tag(kbdev, i));
1390
1391         return err;
1392 }
1393
1394 static void kbase_release_interrupts(kbase_device *kbdev)
1395 {
1396         struct kbase_os_device *osdev = &kbdev->osdev;
1397         u32 nr = ARRAY_SIZE(kbase_handler_table);
1398         u32 i;
1399
1400         for (i = 0; i < nr; i++) {
1401                 if (osdev->irqs[i].irq)
1402                         free_irq(osdev->irqs[i].irq, kbase_tag(kbdev, i));
1403         }
1404 }
1405
1406 void kbase_synchronize_irqs(kbase_device *kbdev)
1407 {
1408         struct kbase_os_device *osdev = &kbdev->osdev;
1409         u32 nr = ARRAY_SIZE(kbase_handler_table);
1410         u32 i;
1411
1412         for (i = 0; i < nr; i++) {
1413                 if (osdev->irqs[i].irq)
1414                         synchronize_irq(osdev->irqs[i].irq);
1415         }
1416 }
1417
1418 #endif /* CONFIG_MALI_NO_MALI */
1419
1420
1421 /** Show callback for the @c power_policy sysfs file.
1422  *
1423  * This function is called to get the contents of the @c power_policy sysfs
1424  * file. This is a list of the available policies with the currently active one
1425  * surrounded by square brackets.
1426  *
1427  * @param dev   The device this sysfs file is for
1428  * @param attr  The attributes of the sysfs file
1429  * @param buf   The output buffer for the sysfs file contents
1430  *
1431  * @return The number of bytes output to @c buf.
1432  */
static ssize_t show_policy(struct device *dev, struct device_attribute *attr, char *const buf)
{
	struct kbase_device *kbdev;
	const struct kbase_pm_policy *current_policy;
	const struct kbase_pm_policy *const *policy_list;
	int policy_count;
	int i;
	ssize_t ret = 0;

	kbdev = to_kbase_device(dev);

	if (!kbdev)
		return -ENODEV;

	current_policy = kbase_pm_get_policy(kbdev);

	policy_count = kbase_pm_list_policies(&policy_list);

	/* List each policy name, bracketing the currently active one */
	for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
		if (policy_list[i] == current_policy)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
		else
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
	}

	if (ret < PAGE_SIZE - 1) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
	} else {
		/* Output filled the sysfs page: force a trailing newline and
		 * NUL terminator within the PAGE_SIZE buffer */
		buf[PAGE_SIZE - 2] = '\n';
		buf[PAGE_SIZE - 1] = '\0';
		ret = PAGE_SIZE - 1;
	}

	return ret;
}
1468
1469 /** Store callback for the @c power_policy sysfs file.
1470  *
1471  * This function is called when the @c power_policy sysfs file is written to.
1472  * It matches the requested policy against the available policies and if a
1473  * matching policy is found calls @ref kbase_pm_set_policy to change the
1474  * policy.
1475  *
1476  * @param dev   The device with sysfs file is for
1477  * @param attr  The attributes of the sysfs file
1478  * @param buf   The value written to the sysfs file
1479  * @param count The number of bytes written to the sysfs file
1480  *
1481  * @return @c count if the function succeeded. An error code on failure.
1482  */
1483 static ssize_t set_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1484 {
1485         struct kbase_device *kbdev;
1486         const struct kbase_pm_policy *new_policy = NULL;
1487         const struct kbase_pm_policy *const *policy_list;
1488         int policy_count;
1489         int i;
1490
1491         kbdev = to_kbase_device(dev);
1492
1493         if (!kbdev)
1494                 return -ENODEV;
1495
1496         policy_count = kbase_pm_list_policies(&policy_list);
1497
1498         for (i = 0; i < policy_count; i++) {
1499                 if (sysfs_streq(policy_list[i]->name, buf)) {
1500                         new_policy = policy_list[i];
1501                         break;
1502                 }
1503         }
1504
1505         if (!new_policy) {
1506                 dev_err(dev, "power_policy: policy not found\n");
1507                 return -EINVAL;
1508         }
1509
1510         kbase_pm_set_policy(kbdev, new_policy);
1511
1512         return count;
1513 }
1514
1515 /** The sysfs file @c power_policy.
1516  *
1517  * This is used for obtaining information about the available policies,
1518  * determining which policy is currently active, and changing the active
1519  * policy.
1520  */
1521 DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
1522
1523 /** Show callback for the @c core_availability_policy sysfs file.
1524  *
1525  * This function is called to get the contents of the @c core_availability_policy
1526  * sysfs file. This is a list of the available policies with the currently
1527  * active one surrounded by square brackets.
1528  *
1529  * @param dev   The device this sysfs file is for
1530  * @param attr  The attributes of the sysfs file
1531  * @param buf   The output buffer for the sysfs file contents
1532  *
1533  * @return The number of bytes output to @c buf.
1534  */
static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char *const buf)
{
	struct kbase_device *kbdev;
	const struct kbase_pm_ca_policy *current_policy;
	const struct kbase_pm_ca_policy *const *policy_list;
	int policy_count;
	int i;
	ssize_t ret = 0;

	kbdev = to_kbase_device(dev);

	if (!kbdev)
		return -ENODEV;

	current_policy = kbase_pm_ca_get_policy(kbdev);

	policy_count = kbase_pm_ca_list_policies(&policy_list);

	/* List each policy name, bracketing the currently active one */
	for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
		if (policy_list[i] == current_policy)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
		else
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
	}

	if (ret < PAGE_SIZE - 1) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
	} else {
		/* Output filled the sysfs page: force a trailing newline and
		 * NUL terminator within the PAGE_SIZE buffer */
		buf[PAGE_SIZE - 2] = '\n';
		buf[PAGE_SIZE - 1] = '\0';
		ret = PAGE_SIZE - 1;
	}

	return ret;
}
1570
1571 /** Store callback for the @c core_availability_policy sysfs file.
1572  *
1573  * This function is called when the @c core_availability_policy sysfs file is
1574  * written to. It matches the requested policy against the available policies
1575  * and if a matching policy is found calls @ref kbase_pm_set_policy to change
1576  * the policy.
1577  *
1578  * @param dev   The device with sysfs file is for
1579  * @param attr  The attributes of the sysfs file
1580  * @param buf   The value written to the sysfs file
1581  * @param count The number of bytes written to the sysfs file
1582  *
1583  * @return @c count if the function succeeded. An error code on failure.
1584  */
1585 static ssize_t set_ca_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1586 {
1587         struct kbase_device *kbdev;
1588         const struct kbase_pm_ca_policy *new_policy = NULL;
1589         const struct kbase_pm_ca_policy *const *policy_list;
1590         int policy_count;
1591         int i;
1592
1593         kbdev = to_kbase_device(dev);
1594
1595         if (!kbdev)
1596                 return -ENODEV;
1597
1598         policy_count = kbase_pm_ca_list_policies(&policy_list);
1599
1600         for (i = 0; i < policy_count; i++) {
1601                 if (sysfs_streq(policy_list[i]->name, buf)) {
1602                         new_policy = policy_list[i];
1603                         break;
1604                 }
1605         }
1606
1607         if (!new_policy) {
1608                 dev_err(dev, "core_availability_policy: policy not found\n");
1609                 return -EINVAL;
1610         }
1611
1612         kbase_pm_ca_set_policy(kbdev, new_policy);
1613
1614         return count;
1615 }
1616
/** The sysfs file @c core_availability_policy
 *
 * This is used for obtaining information about the available policies,
 * determining which policy is currently active, and changing the active
 * policy. Reads use show_ca_policy(); writes use set_ca_policy().
 */
DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_policy);
1624
1625 /** Show callback for the @c core_mask sysfs file.
1626  *
1627  * This function is called to get the contents of the @c core_mask sysfs
1628  * file.
1629  *
1630  * @param dev   The device this sysfs file is for
1631  * @param attr  The attributes of the sysfs file
1632  * @param buf   The output buffer for the sysfs file contents
1633  *
1634  * @return The number of bytes output to @c buf.
1635  */
1636 static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char *const buf)
1637 {
1638         struct kbase_device *kbdev;
1639         ssize_t ret = 0;
1640
1641         kbdev = to_kbase_device(dev);
1642
1643         if (!kbdev)
1644                 return -ENODEV;
1645
1646         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "Current core mask : 0x%llX\n", kbdev->pm.debug_core_mask);
1647         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "Available core mask : 0x%llX\n", kbdev->shader_present_bitmap);
1648
1649         return ret;
1650 }
1651
1652 /** Store callback for the @c core_mask sysfs file.
1653  *
1654  * This function is called when the @c core_mask sysfs file is written to.
1655  *
1656  * @param dev   The device with sysfs file is for
1657  * @param attr  The attributes of the sysfs file
1658  * @param buf   The value written to the sysfs file
1659  * @param count The number of bytes written to the sysfs file
1660  *
1661  * @return @c count if the function succeeded. An error code on failure.
1662  */
1663 static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1664 {
1665         struct kbase_device *kbdev;
1666         u64 new_core_mask;
1667
1668         kbdev = to_kbase_device(dev);
1669
1670         if (!kbdev)
1671                 return -ENODEV;
1672
1673         new_core_mask = simple_strtoull(buf, NULL, 16);
1674
1675         if ((new_core_mask & kbdev->shader_present_bitmap) != new_core_mask ||
1676             !(new_core_mask & kbdev->gpu_props.props.coherency_info.group[0].core_mask)) {
1677                 dev_err(dev, "power_policy: invalid core specification\n");
1678                 return -EINVAL;
1679         }
1680
1681         if (kbdev->pm.debug_core_mask != new_core_mask) {
1682                 unsigned long flags;
1683
1684                 spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
1685
1686                 kbdev->pm.debug_core_mask = new_core_mask;
1687                 kbase_pm_update_cores_state_nolock(kbdev);
1688
1689                 spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
1690         }
1691
1692         return count;
1693 }
1694
/** The sysfs file @c core_mask.
 *
 * This is used to restrict shader core availability for debugging purposes.
 * Reading it will show the current core mask and the mask of cores available.
 * Writing a hexadecimal value to it will set the current core mask.
 */
DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask);
1702
1703
1704 #ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
/* Import the external affinity mask variables (defined in the job slot
 * affinity code; consumed there when scheduling jobs). */
extern u64 mali_js0_affinity_mask;
extern u64 mali_js1_affinity_mask;
extern u64 mali_js2_affinity_mask;

/**
 * Structure containing a single shader affinity split configuration.
 */
typedef struct {
        char const * tag;            /* short token matched against sysfs writes */
        char const * human_readable; /* descriptive name (informational only) */
        u64          js0_mask;       /* affinity mask applied to job slot 0 */
        u64          js1_mask;       /* affinity mask applied to job slot 1 */
        u64          js2_mask;       /* affinity mask applied to job slot 2 */
} sc_split_config;

/**
 * Array of available shader affinity split configurations.
 * Terminated by an entry whose tag is NULL.
 */
static sc_split_config const sc_split_configs[] =
{
        /* All must be the first config (default). */
        {
                "all", "All cores",
                0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL
        },
        {
                "mp1", "MP1 shader core",
                0x1, 0x1, 0x1
        },
        {
                "mp2", "MP2 shader core",
                0x3, 0x3, 0x3
        },
        {
                "mp4", "MP4 shader core",
                0xF, 0xF, 0xF
        },
        {
                "mp1_vf", "MP1 vertex + MP1 fragment shader core",
                0x2, 0x1, 0xFFFFFFFFFFFFFFFFULL
        },
        {
                "mp2_vf", "MP2 vertex + MP2 fragment shader core",
                0xA, 0x5, 0xFFFFFFFFFFFFFFFFULL
        },
        /* This must be the last config. */
        {
                NULL, NULL,
                0x0, 0x0, 0x0
        },
};

/* Pointer to the currently active shader split configuration. */
static sc_split_config const * current_sc_split_config = &sc_split_configs[0];
1760
1761 /** Show callback for the @c sc_split sysfs file
1762  *
1763  * Returns the current shader core affinity policy.
1764  */
1765 static ssize_t show_split(struct device *dev, struct device_attribute *attr, char * const buf)
1766 {
1767         ssize_t ret;
1768         /* We know we are given a buffer which is PAGE_SIZE long. Our strings are all guaranteed
1769          * to be shorter than that at this time so no length check needed. */
1770         ret = scnprintf(buf, PAGE_SIZE, "Current sc_split: '%s'\n", current_sc_split_config->tag );
1771         return ret;
1772 }
1773
1774 /** Store callback for the @c sc_split sysfs file.
1775  *
1776  * This function is called when the @c sc_split sysfs file is written to
1777  * It modifies the system shader core affinity configuration to allow
1778  * system profiling with different hardware configurations.
1779  *
1780  * @param dev   The device with sysfs file is for
1781  * @param attr  The attributes of the sysfs file
1782  * @param buf   The value written to the sysfs file
1783  * @param count The number of bytes written to the sysfs file
1784  *
1785  * @return @c count if the function succeeded. An error code on failure.
1786  */
1787 static ssize_t set_split(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1788 {
1789         sc_split_config const * config = &sc_split_configs[0];
1790
1791         /* Try to match: loop until we hit the last "NULL" entry */
1792         while( config->tag )
1793         {
1794                 if (sysfs_streq(config->tag, buf))
1795                 {
1796                         current_sc_split_config = config;
1797                         mali_js0_affinity_mask  = config->js0_mask;
1798                         mali_js1_affinity_mask  = config->js1_mask;
1799                         mali_js2_affinity_mask  = config->js2_mask;
1800                         dev_info(dev, "Setting sc_split: '%s'\n", config->tag);
1801                         return count;
1802                 }
1803                 config++;
1804         }
1805
1806         /* No match found in config list */
1807         dev_err(dev, "sc_split: invalid value\n");
1808         dev_err(dev, "  Possible settings: mp[1|2|4], mp[1|2]_vf\n");
1809         return -ENOENT;
1810 }
1811
/** The sysfs file @c sc_split
 *
 * This is used for configuring/querying the current shader core work affinity
 * configuration (debug builds only, gated by CONFIG_MALI_DEBUG_SHADER_SPLIT_FS).
 */
DEVICE_ATTR(sc_split, S_IRUGO|S_IWUSR, show_split, set_split);
1818 #endif /* CONFIG_MALI_DEBUG_SHADER_SPLIT_FS */
1819
1820
1821 #if MALI_CUSTOMER_RELEASE == 0
1822 /** Store callback for the @c js_timeouts sysfs file.
1823  *
1824  * This function is called to get the contents of the @c js_timeouts sysfs
1825  * file. This file contains five values separated by whitespace. The values
1826  * are basically the same as KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS,
1827  * KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS, KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS,
1828  * KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS, BASE_CONFIG_ATTR_JS_RESET_TICKS_NSS
1829  * configuration values (in that order), with the difference that the js_timeout
1830  * valus are expressed in MILLISECONDS.
1831  *
1832  * The js_timeouts sysfile file allows the current values in
1833  * use by the job scheduler to get override. Note that a value needs to
1834  * be other than 0 for it to override the current job scheduler value.
1835  *
1836  * @param dev   The device with sysfs file is for
1837  * @param attr  The attributes of the sysfs file
1838  * @param buf   The value written to the sysfs file
1839  * @param count The number of bytes written to the sysfs file
1840  *
1841  * @return @c count if the function succeeded. An error code on failure.
1842  */
1843 static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1844 {
1845         struct kbase_device *kbdev;
1846         int items;
1847         unsigned long js_soft_stop_ms;
1848         unsigned long js_hard_stop_ms_ss;
1849         unsigned long js_hard_stop_ms_nss;
1850         unsigned long js_reset_ms_ss;
1851         unsigned long js_reset_ms_nss;
1852
1853         kbdev = to_kbase_device(dev);
1854         if (!kbdev)
1855                 return -ENODEV;
1856
1857         items = sscanf(buf, "%lu %lu %lu %lu %lu", &js_soft_stop_ms, &js_hard_stop_ms_ss, &js_hard_stop_ms_nss, &js_reset_ms_ss, &js_reset_ms_nss);
1858         if (items == 5) {
1859                 u64 ticks;
1860
1861                 ticks = js_soft_stop_ms * 1000000ULL;
1862                 do_div(ticks, kbdev->js_data.scheduling_tick_ns);
1863                 kbdev->js_soft_stop_ticks = ticks;
1864
1865                 ticks = js_hard_stop_ms_ss * 1000000ULL;
1866                 do_div(ticks, kbdev->js_data.scheduling_tick_ns);
1867                 kbdev->js_hard_stop_ticks_ss = ticks;
1868
1869                 ticks = js_hard_stop_ms_nss * 1000000ULL;
1870                 do_div(ticks, kbdev->js_data.scheduling_tick_ns);
1871                 kbdev->js_hard_stop_ticks_nss = ticks;
1872
1873                 ticks = js_reset_ms_ss * 1000000ULL;
1874                 do_div(ticks, kbdev->js_data.scheduling_tick_ns);
1875                 kbdev->js_reset_ticks_ss = ticks;
1876
1877                 ticks = js_reset_ms_nss * 1000000ULL;
1878                 do_div(ticks, kbdev->js_data.scheduling_tick_ns);
1879                 kbdev->js_reset_ticks_nss = ticks;
1880
1881                 dev_info(kbdev->osdev.dev, "Overriding KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_soft_stop_ticks, js_soft_stop_ms);
1882                 dev_info(kbdev->osdev.dev, "Overriding KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_hard_stop_ticks_ss, js_hard_stop_ms_ss);
1883                 dev_info(kbdev->osdev.dev, "Overriding KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_hard_stop_ticks_nss, js_hard_stop_ms_nss);
1884                 dev_info(kbdev->osdev.dev, "Overriding KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_reset_ticks_ss, js_reset_ms_ss);
1885                 dev_info(kbdev->osdev.dev, "Overriding KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_reset_ticks_nss, js_reset_ms_nss);
1886
1887                 return count;
1888         } else {
1889                 dev_err(kbdev->osdev.dev, "Couldn't process js_timeouts write operation.\nUse format " "<soft_stop_ms> <hard_stop_ms_ss> <hard_stop_ms_nss> <reset_ms_ss> <reset_ms_nss>\n");
1890                 return -EINVAL;
1891         }
1892 }
1893
1894 /** Show callback for the @c js_timeouts sysfs file.
1895  *
1896  * This function is called to get the contents of the @c js_timeouts sysfs
1897  * file. It returns the last set values written to the js_timeouts sysfs file.
1898  * If the file didn't get written yet, the values will be 0.
1899  *
1900  * @param dev   The device this sysfs file is for
1901  * @param attr  The attributes of the sysfs file
1902  * @param buf   The output buffer for the sysfs file contents
1903  *
1904  * @return The number of bytes output to @c buf.
1905  */
1906 static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *attr, char * const buf)
1907 {
1908         struct kbase_device *kbdev;
1909         ssize_t ret;
1910         u64 ms;
1911         unsigned long js_soft_stop_ms;
1912         unsigned long js_hard_stop_ms_ss;
1913         unsigned long js_hard_stop_ms_nss;
1914         unsigned long js_reset_ms_ss;
1915         unsigned long js_reset_ms_nss;
1916
1917         kbdev = to_kbase_device(dev);
1918         if (!kbdev)
1919                 return -ENODEV;
1920
1921         ms = (u64) kbdev->js_soft_stop_ticks * kbdev->js_data.scheduling_tick_ns;
1922         do_div(ms, 1000000UL);
1923         js_soft_stop_ms = (unsigned long)ms;
1924
1925         ms = (u64) kbdev->js_hard_stop_ticks_ss * kbdev->js_data.scheduling_tick_ns;
1926         do_div(ms, 1000000UL);
1927         js_hard_stop_ms_ss = (unsigned long)ms;
1928
1929         ms = (u64) kbdev->js_hard_stop_ticks_nss * kbdev->js_data.scheduling_tick_ns;
1930         do_div(ms, 1000000UL);
1931         js_hard_stop_ms_nss = (unsigned long)ms;
1932
1933         ms = (u64) kbdev->js_reset_ticks_ss * kbdev->js_data.scheduling_tick_ns;
1934         do_div(ms, 1000000UL);
1935         js_reset_ms_ss = (unsigned long)ms;
1936
1937         ms = (u64) kbdev->js_reset_ticks_nss * kbdev->js_data.scheduling_tick_ns;
1938         do_div(ms, 1000000UL);
1939         js_reset_ms_nss = (unsigned long)ms;
1940
1941         ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu\n", js_soft_stop_ms, js_hard_stop_ms_ss, js_hard_stop_ms_nss, js_reset_ms_ss, js_reset_ms_nss);
1942
1943         if (ret >= PAGE_SIZE) {
1944                 buf[PAGE_SIZE - 2] = '\n';
1945                 buf[PAGE_SIZE - 1] = '\0';
1946                 ret = PAGE_SIZE - 1;
1947         }
1948
1949         return ret;
1950 }
1951
/** The sysfs file @c js_timeouts.
 *
 * This is used to override the current job scheduler values for
 * KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS
 * KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS
 * KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS
 * KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS
 * KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS.
 */
DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts);
1962 #endif /* MALI_CUSTOMER_RELEASE == 0 */
1963
1964 #ifdef CONFIG_MALI_DEBUG
1965 static ssize_t set_js_softstop_always(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1966 {
1967         struct kbase_device *kbdev;
1968         int items;
1969         int softstop_always;
1970
1971         kbdev = to_kbase_device(dev);
1972         if (!kbdev)
1973                 return -ENODEV;
1974
1975         items = sscanf(buf, "%d", &softstop_always);
1976         if ((items == 1) && ((softstop_always == 0) || (softstop_always == 1))) {
1977                 kbdev->js_data.softstop_always = (mali_bool) softstop_always;
1978
1979                 dev_info(kbdev->osdev.dev, "Support for softstop on a single context: %s\n", (kbdev->js_data.softstop_always == MALI_FALSE) ? "Disabled" : "Enabled");
1980                 return count;
1981         } else {
1982                 dev_err(kbdev->osdev.dev, "Couldn't process js_softstop_always write operation.\nUse format " "<soft_stop_always>\n");
1983                 return -EINVAL;
1984         }
1985 }
1986
1987 static ssize_t show_js_softstop_always(struct device *dev, struct device_attribute *attr, char * const buf)
1988 {
1989         struct kbase_device *kbdev;
1990         ssize_t ret;
1991
1992         kbdev = to_kbase_device(dev);
1993         if (!kbdev)
1994                 return -ENODEV;
1995
1996         ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);
1997
1998         if (ret >= PAGE_SIZE) {
1999                 buf[PAGE_SIZE - 2] = '\n';
2000                 buf[PAGE_SIZE - 1] = '\0';
2001                 ret = PAGE_SIZE - 1;
2002         }
2003
2004         return ret;
2005 }
2006
2007 /**
2008  * By default, soft-stops are disabled when only a single context is present. The ability to
2009  * enable soft-stop when only a single context is present can be used for debug and unit-testing purposes.
2010  * (see CL t6xx_stress_1 unit-test as an example whereby this feature is used.)
2011  */
2012 DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
2013 #endif /* CONFIG_MALI_DEBUG */
2014
2015 #ifdef CONFIG_MALI_DEBUG
/* Signature shared by all debug command handlers.
 * NOTE(review): the sim* entries are invoked with the mapped register base
 * cast through this type (see issue_debug()) — confirm their real
 * prototypes. */
typedef void (kbasep_debug_command_func) (kbase_device *);

/* Indices into debug_commands[]; COUNT doubles as the table length. */
typedef enum {
        KBASEP_DEBUG_COMMAND_DUMPTRACE,
        KBASEP_DEBUG_COMMAND_SIM1,
        KBASEP_DEBUG_COMMAND_SIM2,
        KBASEP_DEBUG_COMMAND_SIM3,
        KBASEP_DEBUG_COMMAND_SIM4,
        /* This must be the last enum */
        KBASEP_DEBUG_COMMAND_COUNT
} kbasep_debug_command_code;

/* Maps a sysfs command string to the function implementing it. */
typedef struct kbasep_debug_command {
        char *str;
        kbasep_debug_command_func *func;
} kbasep_debug_command;
2032
2033 /** Debug commands supported by the driver */
2034 static const kbasep_debug_command debug_commands[] = {
2035         {
2036          .str = "dumptrace",
2037          .func = &kbasep_trace_dump,
2038          },
2039         {
2040     .str = "sim1",
2041     .func = &RunMaliTest_sim1_t760,
2042     },
2043     {
2044     .str = "sim2",
2045     .func = &RunMaliTest_sim2_t760,
2046     },
2047     {
2048     .str = "sim3",
2049     .func = &RunMaliTest_sim3_t760,
2050     },
2051     {
2052     .str = "sim4",
2053     .func = &RunMaliTest_sim4_t760,
2054     }
2055 };
2056
2057 /** Show callback for the @c debug_command sysfs file.
2058  *
2059  * This function is called to get the contents of the @c debug_command sysfs
2060  * file. This is a list of the available debug commands, separated by newlines.
2061  *
2062  * @param dev   The device this sysfs file is for
2063  * @param attr  The attributes of the sysfs file
2064  * @param buf   The output buffer for the sysfs file contents
2065  *
2066  * @return The number of bytes output to @c buf.
2067  */
2068 static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char *const buf)
2069 {
2070         struct kbase_device *kbdev;
2071         int i;
2072         ssize_t ret = 0;
2073
2074         kbdev = to_kbase_device(dev);
2075
2076         if (!kbdev)
2077                 return -ENODEV;
2078
2079         for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
2080                 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);
2081
2082         if (ret >= PAGE_SIZE) {
2083                 buf[PAGE_SIZE - 2] = '\n';
2084                 buf[PAGE_SIZE - 1] = '\0';
2085                 ret = PAGE_SIZE - 1;
2086         }
2087
2088         return ret;
2089 }
2090
/** Store callback for the @c debug_command sysfs file.
 *
 * This function is called when the @c debug_command sysfs file is written to.
 * It matches the requested command against the available commands, and if
 * a matching command is found calls the associated function from
 * @ref debug_commands to issue the command.
 *
 * @param dev   The device with sysfs file is for
 * @param attr  The attributes of the sysfs file
 * @param buf   The value written to the sysfs file
 * @param count The number of bytes written to the sysfs file
 *
 * @return @c count if the function succeeded. An error code on failure.
 */
static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
        struct kbase_device *kbdev;
        int i;

        kbdev = to_kbase_device(dev);

        if (!kbdev)
                return -ENODEV;

        for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
                if (sysfs_streq(debug_commands[i].str, buf)) {
                        /* chenli: modified for the integration kits. Entry 0
                         * ("dumptrace") takes the kbase_device; the sim*
                         * entries are instead handed the mapped register base.
                         * NOTE(review): the (int *) cast calls the sim
                         * handlers through a mismatched function-pointer
                         * type — confirm their real prototypes. */
                        if(i==0)/* kbasep_trace_dump */
                                debug_commands[i].func(kbdev);
                        else    /* integration kits */
                        {
                                struct kbase_os_device *osdev = &kbdev->osdev;
                                debug_commands[i].func((int *)osdev->reg);
                        }
                        return count;
                }
        }

        /* Debug Command not found */
        dev_err(dev, "debug_command: command not known\n");
        return -EINVAL;
}
2133
/** The sysfs file @c debug_command.
 *
 * This is used to issue general debug commands to the device driver.
 * Reading it will produce a list of debug commands, separated by newlines.
 * Writing to it with one of those commands will issue said command.
 */
DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug);
2141 #endif /* CONFIG_MALI_DEBUG */
2142
2143 #ifdef CONFIG_MALI_NO_MALI
/* Stub: in CONFIG_MALI_NO_MALI builds there is no register window to map. */
static int kbase_common_reg_map(kbase_device *kbdev)
{
        return 0;
}
/* Stub counterpart of kbase_common_reg_map() for CONFIG_MALI_NO_MALI. */
static void kbase_common_reg_unmap(kbase_device * const kbdev)
{
        return;
}
2152 #else /* CONFIG_MALI_NO_MALI */
2153 static int kbase_common_reg_map(kbase_device *kbdev)
2154 {
2155         struct kbase_os_device *osdev = &kbdev->osdev;
2156         int err = -ENOMEM;
2157
2158         osdev->reg_res = request_mem_region(osdev->reg_start, osdev->reg_size, dev_name(osdev->dev));
2159         if (!osdev->reg_res) {
2160                 dev_err(osdev->dev, "Register window unavailable\n");
2161                 err = -EIO;
2162                 goto out_region;
2163         }
2164         printk("%s,request_mem_region ok\n",__func__);
2165         osdev->reg = ioremap(osdev->reg_start, osdev->reg_size);
2166         if (!osdev->reg) {
2167                 dev_err(osdev->dev, "Can't remap register window\n");
2168                 err = -EINVAL;
2169                 goto out_ioremap;
2170         }
2171
2172         printk("%s,ioremap ok\n",__func__);
2173         return 0;
2174
2175  out_ioremap:
2176         release_resource(osdev->reg_res);
2177         kfree(osdev->reg_res);
2178  out_region:
2179         return err;
2180 }
2181
/* Undo kbase_common_reg_map(): unmap the register window and release the
 * memory region that was reserved for it. */
static void kbase_common_reg_unmap(kbase_device * const kbdev)
{
        struct kbase_os_device *osdev = &kbdev->osdev;

        iounmap(osdev->reg);
        release_resource(osdev->reg_res);
        kfree(osdev->reg_res);
}
2190 #endif /* CONFIG_MALI_NO_MALI */
2191
2192
2193 static int kbase_common_device_init(kbase_device *kbdev)
2194 {
2195         struct kbase_os_device *osdev = &kbdev->osdev;
2196         int err = -ENOMEM;
2197         mali_error mali_err;
2198         enum {
2199                 inited_mem = (1u << 0),
2200                 inited_job_slot = (1u << 1),
2201                 inited_pm = (1u << 2),
2202                 inited_js = (1u << 3),
2203                 inited_irqs = (1u << 4),
2204                 inited_debug = (1u << 5),
2205                 inited_js_softstop = (1u << 6),
2206 #if MALI_CUSTOMER_RELEASE == 0
2207                 inited_js_timeouts = (1u << 7),
2208 #endif /* MALI_CUSTOMER_RELEASE == 0 */
2209                 inited_pm_runtime_init = (1u << 8),
2210 #ifdef CONFIG_DEBUG_FS
2211                 inited_gpu_memory = (1u << 9),
2212                 inited_debugfs = (1u << 10),
2213 #endif /* CONFIG_DEBUG_FS */
2214 #ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
2215                 inited_sc_split = (1u << 11),
2216 #endif /* CONFIG_MALI_DEBUG_SHADER_SPLIT_FS */
2217 #ifdef CONFIG_MALI_TRACE_TIMELINE
2218                 inited_timeline = (1u << 12),
2219 #endif /* CONFIG_MALI_TRACE_LINE */
2220         };
2221
2222         int inited = 0;
2223
2224         dev_set_drvdata(osdev->dev, kbdev);
2225
2226         osdev->mdev.minor = MISC_DYNAMIC_MINOR;
2227         osdev->mdev.name = osdev->devname;
2228         osdev->mdev.fops = &kbase_fops;
2229         osdev->mdev.parent = get_device(osdev->dev);
2230
2231         scnprintf(osdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name, kbase_dev_nr++);
2232
2233 #ifdef CONFIG_DEBUG_FS
2234         kbdev->mali_debugfs_directory = debugfs_create_dir("mali", NULL);
2235         if (NULL == kbdev->mali_debugfs_directory) {
2236                 dev_err(osdev->dev, "Couldn't create mali debugfs directory\n");
2237                 goto out_partial;
2238         }
2239         inited |= inited_debugfs;
2240 #endif /* CONFIG_DEBUG_FS */
2241
2242
2243         if (misc_register(&osdev->mdev)) {
2244                 dev_err(osdev->dev, "Couldn't register misc dev %s\n", osdev->devname);
2245                 err = -EINVAL;
2246                 goto out_misc;
2247         }
2248
2249         if (device_create_file(osdev->dev, &dev_attr_power_policy)) {
2250                 dev_err(osdev->dev, "Couldn't create power_policy sysfs file\n");
2251                 goto out_file;
2252         }
2253
2254         if (device_create_file(osdev->dev, &dev_attr_core_availability_policy)) {
2255                 dev_err(osdev->dev, "Couldn't create core_availability_policy sysfs file\n");
2256                 goto out_file_core_availability_policy;
2257         }
2258
2259         if (device_create_file(osdev->dev, &dev_attr_core_mask)) {
2260                 dev_err(osdev->dev, "Couldn't create core_mask sysfs file\n");
2261                 goto out_file_core_mask;
2262         }
2263
2264         down(&kbase_dev_list_lock);
2265         list_add(&osdev->entry, &kbase_dev_list);
2266         up(&kbase_dev_list_lock);
2267         dev_info(osdev->dev, "Probed as %s\n", dev_name(osdev->mdev.this_device));
2268
2269         mali_err = kbase_pm_init(kbdev);
2270         if (MALI_ERROR_NONE != mali_err)
2271                 goto out_partial;
2272
2273         inited |= inited_pm;
2274
2275         if (kbdev->pm.callback_power_runtime_init) {
2276                 mali_err = kbdev->pm.callback_power_runtime_init(kbdev);
2277                 if (MALI_ERROR_NONE != mali_err)
2278                         goto out_partial;
2279
2280                 inited |= inited_pm_runtime_init;
2281         }
2282
2283         mali_err = kbase_mem_init(kbdev);
2284         if (MALI_ERROR_NONE != mali_err)
2285                 goto out_partial;
2286
2287         inited |= inited_mem;
2288
2289         mali_err = kbase_job_slot_init(kbdev);
2290         if (MALI_ERROR_NONE != mali_err)
2291                 goto out_partial;
2292
2293         inited |= inited_job_slot;
2294
2295         mali_err = kbasep_js_devdata_init(kbdev);
2296         if (MALI_ERROR_NONE != mali_err)
2297                 goto out_partial;
2298
2299         inited |= inited_js;
2300
2301         err = kbase_install_interrupts(kbdev);
2302         if (err)
2303                 goto out_partial;
2304
2305         inited |= inited_irqs;
2306
2307 #ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
2308         if (device_create_file(osdev->dev, &dev_attr_sc_split))
2309         {
2310                 dev_err(osdev->dev, "Couldn't create sc_split sysfs file\n");
2311                 goto out_partial;
2312         }
2313
2314         inited |= inited_sc_split;
2315 #endif /* CONFIG_MALI_DEBUG_SHADER_SPLIT_FS */
2316
2317 #ifdef CONFIG_DEBUG_FS
2318         if (kbasep_gpu_memory_debugfs_init(kbdev)) {
2319                 dev_err(osdev->dev, "Couldn't create gpu_memory debugfs file\n");
2320                 goto out_partial;
2321         }
2322         inited |= inited_gpu_memory;
2323 #endif /* CONFIG_DEBUG_FS */
2324
2325 #ifdef CONFIG_MALI_DEBUG
2326
2327         if (device_create_file(osdev->dev, &dev_attr_debug_command)) {
2328                 dev_err(osdev->dev, "Couldn't create debug_command sysfs file\n");
2329                 goto out_partial;
2330         }
2331         inited |= inited_debug;
2332
2333         if (device_create_file(osdev->dev, &dev_attr_js_softstop_always)) {
2334                 dev_err(osdev->dev, "Couldn't create js_softstop_always sysfs file\n");
2335                 goto out_partial;
2336         }
2337         inited |= inited_js_softstop;
2338 #endif /* CONFIG_MALI_DEBUG */
2339
2340 #if MALI_CUSTOMER_RELEASE == 0
2341         if (device_create_file(osdev->dev, &dev_attr_js_timeouts)) {
2342                 dev_err(osdev->dev, "Couldn't create js_timeouts sysfs file\n");
2343                 goto out_partial;
2344         }
2345         inited |= inited_js_timeouts;
2346 #endif /* MALI_CUSTOMER_RELEASE */
2347
2348 #ifdef CONFIG_MALI_TRACE_TIMELINE
2349         if (kbasep_trace_timeline_debugfs_init(kbdev)) {
2350                 dev_err(osdev->dev, "Couldn't create mali_timeline_defs debugfs file\n");
2351                 goto out_partial;
2352         }
2353         inited |= inited_timeline;
2354 #endif /* CONFIG_MALI_TRACE_TIMELINE */
2355
2356         mali_err = kbase_pm_powerup(kbdev);
2357         if (MALI_ERROR_NONE == mali_err) {
2358 #ifdef CONFIG_MALI_DEBUG
2359 #ifndef CONFIG_MALI_NO_MALI
2360                 if (MALI_ERROR_NONE != kbasep_common_test_interrupt_handlers(kbdev)) {
2361                         dev_err(osdev->dev, "Interrupt assigment check failed.\n");
2362                         err = -EINVAL;
2363                         goto out_partial;
2364                 }
2365 #endif /* CONFIG_MALI_NO_MALI */
2366 #endif /* CONFIG_MALI_DEBUG */
2367
2368                 /* intialise the kctx list */
2369                 mutex_init(&kbdev->kctx_list_lock);
2370                 INIT_LIST_HEAD(&kbdev->kctx_list);
2371                 return 0;
2372         }
2373
2374  out_partial:
2375 #ifdef CONFIG_MALI_TRACE_TIMELINE
2376         if (inited & inited_timeline)
2377                 kbasep_trace_timeline_debugfs_term(kbdev);
2378 #endif /* CONFIG_MALI_TRACE_TIMELINE */
2379 #if MALI_CUSTOMER_RELEASE == 0
2380         if (inited & inited_js_timeouts)
2381                 device_remove_file(kbdev->osdev.dev, &dev_attr_js_timeouts);
2382 #endif /* MALI_CUSTOMER_RELEASE */
2383 #ifdef CONFIG_MALI_DEBUG
2384         if (inited & inited_js_softstop)
2385                 device_remove_file(kbdev->osdev.dev, &dev_attr_js_softstop_always);
2386
2387         if (inited & inited_debug)
2388                 device_remove_file(kbdev->osdev.dev, &dev_attr_debug_command);
2389
2390 #endif /* CONFIG_MALI_DEBUG */
2391
2392 #ifdef CONFIG_DEBUG_FS
2393         if (inited & inited_gpu_memory)
2394                 kbasep_gpu_memory_debugfs_term(kbdev);
2395         if (inited & inited_debugfs)
2396                 debugfs_remove(kbdev->mali_debugfs_directory);
2397 #endif /* CONFIG_DEBUG_FS */
2398
2399 #ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
2400         if (inited & inited_sc_split)
2401         {
2402                 device_remove_file(kbdev->osdev.dev, &dev_attr_sc_split);
2403         }
2404 #endif /* CONFIG_MALI_DEBUG_SHADER_SPLIT_FS */
2405
2406         if (inited & inited_js)
2407                 kbasep_js_devdata_halt(kbdev);
2408
2409         if (inited & inited_job_slot)
2410                 kbase_job_slot_halt(kbdev);
2411
2412         if (inited & inited_mem)
2413                 kbase_mem_halt(kbdev);
2414
2415         if (inited & inited_pm)
2416                 kbase_pm_halt(kbdev);
2417
2418         if (inited & inited_irqs)
2419                 kbase_release_interrupts(kbdev);
2420
2421         if (inited & inited_js)
2422                 kbasep_js_devdata_term(kbdev);
2423
2424         if (inited & inited_job_slot)
2425                 kbase_job_slot_term(kbdev);
2426
2427         if (inited & inited_mem)
2428                 kbase_mem_term(kbdev);
2429
2430         if (inited & inited_pm_runtime_init) {
2431                 if (kbdev->pm.callback_power_runtime_term)
2432                         kbdev->pm.callback_power_runtime_term(kbdev);
2433         }
2434
2435         if (inited & inited_pm)
2436                 kbase_pm_term(kbdev);
2437
2438         down(&kbase_dev_list_lock);
2439         list_del(&osdev->entry);
2440         up(&kbase_dev_list_lock);
2441
2442         device_remove_file(kbdev->osdev.dev, &dev_attr_core_mask);
2443  out_file_core_mask:
2444         device_remove_file(kbdev->osdev.dev, &dev_attr_core_availability_policy);
2445  out_file_core_availability_policy:
2446         device_remove_file(kbdev->osdev.dev, &dev_attr_power_policy);
2447  out_file:
2448         misc_deregister(&kbdev->osdev.mdev);
2449  out_misc:
2450         put_device(osdev->dev);
2451         return err;
2452 }
2453
2454 static int kbase_platform_device_probe(struct platform_device *pdev)
2455 {
2456         struct kbase_device *kbdev;
2457         struct kbase_os_device *osdev;
2458         struct resource *reg_res;
2459         kbase_attribute *platform_data;
2460         int err;
2461         int i;
2462         struct mali_base_gpu_core_props *core_props;
2463 #ifdef CONFIG_MALI_NO_MALI
2464         mali_error mali_err;
2465 #endif /* CONFIG_MALI_NO_MALI */
2466
2467 const char *dbgname = NULL;
2468 #if 0
2469 if(pdev->dev.of_node)
2470 {
2471         of_property_read_string(pdev->dev.of_node,"dbgname",&dbgname);
2472         printk("%p,dbgname = %s\r\n",pdev->dev.of_node,dbgname);
2473 }
2474 else
2475 {
2476         printk("pdev->dev.of_node null\r\n");
2477 }
2478 #endif
2479 #ifdef CONFIG_OF
2480         kbase_platform_config *config;
2481         int attribute_count;
2482
2483 //#ifdef CONFIG_MALI_PLATFORM_FAKE
2484 #if 1//defined(CONFIG_MALI_PLATFORM_FAKE) || defined(CONFIG_MALI_PLATFORM_FAKE_MODULE)
2485         config = kbase_get_platform_config();
2486         attribute_count = kbasep_get_config_attribute_count(config->attributes);
2487
2488         err = platform_device_add_data(pdev, config->attributes,
2489                         attribute_count * sizeof(config->attributes[0]));
2490         if (err)
2491                 return err;
2492 #endif /* CONFIG_MALI_PLATFORM_FAKE */
2493 #endif /* CONFIG_OF */
2494
2495         kbdev = kbase_device_alloc();
2496         if (!kbdev) {
2497                 dev_err(&pdev->dev, "Can't allocate device\n");
2498                 err = -ENOMEM;
2499                 goto out;
2500         }
2501 #ifdef CONFIG_MALI_NO_MALI
2502         mali_err = midg_device_create(kbdev);
2503         if (MALI_ERROR_NONE != mali_err) {
2504                 dev_err(&pdev->dev, "Can't initialize dummy model\n");
2505                 err = -ENOMEM;
2506                 goto out_midg;
2507         }
2508 #endif /* CONFIG_MALI_NO_MALI */
2509
2510         osdev = &kbdev->osdev;
2511         osdev->dev = &pdev->dev;
2512         platform_data = (kbase_attribute *) osdev->dev->platform_data;
2513
2514         if (NULL == platform_data) {
2515                 dev_err(osdev->dev, "Platform data not specified\n");
2516                 err = -ENOENT;
2517                 goto out_free_dev;
2518         }
2519
2520         if (MALI_TRUE != kbasep_validate_configuration_attributes(kbdev, platform_data)) {
2521                 dev_err(osdev->dev, "Configuration attributes failed to validate\n");
2522                 err = -EINVAL;
2523                 goto out_free_dev;
2524         }
2525         kbdev->config_attributes = platform_data;
2526
2527         /* 3 IRQ resources */
2528         for (i = 0; i < 3; i++) {
2529                 struct resource *irq_res;
2530                 int irqtag;
2531
2532                 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
2533                 if (!irq_res) {
2534                         dev_err(osdev->dev, "No IRQ resource at index %d\n", i);
2535                         err = -ENOENT;
2536                         goto out_free_dev;
2537                 }
2538 #ifdef CONFIG_OF
2539                 if (!strcmp(irq_res->name, "JOB"))
2540                         irqtag = JOB_IRQ_TAG;
2541                 else if (!strcmp(irq_res->name, "MMU"))
2542                         irqtag = MMU_IRQ_TAG;
2543                 else if (!strcmp(irq_res->name, "GPU"))
2544                         irqtag = GPU_IRQ_TAG;
2545                 else {
2546                         dev_err(&pdev->dev, "Invalid irq res name: '%s'\n",
2547                                 irq_res->name);
2548                         err = -EINVAL;
2549                         goto out_free_dev;
2550                 }
2551 #else
2552                 irqtag = i;
2553 #endif /* CONFIG_OF */
2554                 printk("irq_res->start = 0x%x,irq_res->name = %s\r\n",irq_res->start,irq_res->name);
2555         
2556                 osdev->irqs[irqtag].irq = irq_res->start;
2557                 osdev->irqs[irqtag].flags = (irq_res->flags & IRQF_TRIGGER_MASK);
2558         }
2559
2560         /* the first memory resource is the physical address of the GPU registers */
2561         reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2562         if (!reg_res) {
2563                 dev_err(&pdev->dev, "Invalid register resource\n");
2564                 err = -ENOENT;
2565                 goto out_free_dev;
2566         }
2567         printk("reg_res->start = 0x%0x,size = 0x%0x\r\n",reg_res->start,resource_size(reg_res));
2568         osdev->reg_start = reg_res->start;
2569         osdev->reg_size = resource_size(reg_res);
2570
2571         err = kbase_common_reg_map(kbdev);
2572         if (err)
2573                 goto out_free_dev;
2574
2575         if (MALI_ERROR_NONE != kbase_device_init(kbdev)) {
2576                 dev_err(&pdev->dev, "Can't initialize device\n");
2577                 err = -ENOMEM;
2578                 goto out_reg_unmap;
2579         }
2580 #ifdef CONFIG_UMP
2581         kbdev->memdev.ump_device_id = kbasep_get_config_value(kbdev, platform_data, KBASE_CONFIG_ATTR_UMP_DEVICE);
2582 #endif /* CONFIG_UMP */
2583
2584         kbdev->memdev.per_process_memory_limit = kbasep_get_config_value(kbdev, platform_data, KBASE_CONFIG_ATTR_MEMORY_PER_PROCESS_LIMIT);
2585
2586         /* obtain min/max configured gpu frequencies */
2587         core_props = &(kbdev->gpu_props.props.core_props);
2588         core_props->gpu_freq_khz_min = kbasep_get_config_value(kbdev, platform_data, KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN);
2589         core_props->gpu_freq_khz_max = kbasep_get_config_value(kbdev, platform_data, KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX);
2590         kbdev->gpu_props.irq_throttle_time_us = kbasep_get_config_value(kbdev, platform_data, KBASE_CONFIG_ATTR_GPU_IRQ_THROTTLE_TIME_US);
2591
2592         err = kbase_common_device_init(kbdev);
2593         if (err) {
2594                 dev_err(osdev->dev, "Failed kbase_common_device_init\n");
2595                 goto out_term_dev;
2596         }
2597         return 0;
2598
2599  out_term_dev:
2600         kbase_device_term(kbdev);
2601  out_reg_unmap:
2602         kbase_common_reg_unmap(kbdev);
2603  out_free_dev:
2604 #ifdef CONFIG_MALI_NO_MALI
2605         midg_device_destroy(kbdev);
2606  out_midg:
2607 #endif /* CONFIG_MALI_NO_MALI */
2608         kbase_device_free(kbdev);
2609  out:
2610         return err;
2611 }
2612
/**
 * Tear down a fully-initialised kbase device.
 *
 * Undoes, in reverse order, the work done by kbase_common_device_init():
 * sysfs/debugfs entries are removed first, then each subsystem is halted
 * before being terminated, and finally the device is unregistered and
 * freed. The halt/term split is deliberate: all subsystems stop accepting
 * new work before any of them releases its resources.
 *
 * @param kbdev  Device to remove; must have completed common init
 *
 * @return 0 (this teardown path cannot fail)
 */
static int kbase_common_device_remove(struct kbase_device *kbdev)
{
	/* Give the platform a chance to release runtime-PM resources first */
	if (kbdev->pm.callback_power_runtime_term)
		kbdev->pm.callback_power_runtime_term(kbdev);

	/* Remove the sys power policy file */
	device_remove_file(kbdev->osdev.dev, &dev_attr_power_policy);
	device_remove_file(kbdev->osdev.dev, &dev_attr_core_availability_policy);
	device_remove_file(kbdev->osdev.dev, &dev_attr_core_mask);

#ifdef CONFIG_MALI_TRACE_TIMELINE
	kbasep_trace_timeline_debugfs_term(kbdev);
#endif /* CONFIG_MALI_TRACE_TIMELINE */

#ifdef CONFIG_MALI_DEBUG
	device_remove_file(kbdev->osdev.dev, &dev_attr_js_softstop_always);
	device_remove_file(kbdev->osdev.dev, &dev_attr_debug_command);
#endif /* CONFIG_MALI_DEBUG */
#if MALI_CUSTOMER_RELEASE == 0
	device_remove_file(kbdev->osdev.dev, &dev_attr_js_timeouts);
#endif /* MALI_CUSTOMER_RELEASE */
#ifdef CONFIG_DEBUG_FS
	kbasep_gpu_memory_debugfs_term(kbdev);
	debugfs_remove(kbdev->mali_debugfs_directory);
#endif /* CONFIG_DEBUG_FS */

#ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
	device_remove_file(kbdev->osdev.dev, &dev_attr_sc_split);
#endif /* CONFIG_MALI_DEBUG_SHADER_SPLIT_FS */

	/* Halt everything (stop new work) before terminating anything */
	kbasep_js_devdata_halt(kbdev);
	kbase_job_slot_halt(kbdev);
	kbase_mem_halt(kbdev);
	kbase_pm_halt(kbdev);

	kbase_release_interrupts(kbdev);

	kbasep_js_devdata_term(kbdev);
	kbase_job_slot_term(kbdev);
	kbase_mem_term(kbdev);
	kbase_pm_term(kbdev);

	/* Drop this device from the global kbase device list */
	down(&kbase_dev_list_lock);
	list_del(&kbdev->osdev.entry);
	up(&kbase_dev_list_lock);

	misc_deregister(&kbdev->osdev.mdev);
	put_device(kbdev->osdev.dev);
	kbase_common_reg_unmap(kbdev);
	kbase_device_term(kbdev);
#ifdef CONFIG_MALI_NO_MALI
	midg_device_destroy(kbdev);
#endif /* CONFIG_MALI_NO_MALI */
	kbase_device_free(kbdev);

	return 0;
}
2670
2671 static int kbase_platform_device_remove(struct platform_device *pdev)
2672 {
2673         struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
2674
2675         if (!kbdev)
2676                 return -ENODEV;
2677
2678         return kbase_common_device_remove(kbdev);
2679 }
2680
2681 /** Suspend callback from the OS.
2682  *
2683  * This is called by Linux when the device should suspend.
2684  *
2685  * @param dev  The device to suspend
2686  *
2687  * @return A standard Linux error code
2688  */
2689 static int kbase_device_suspend(struct device *dev)
2690 {
2691         struct kbase_device *kbdev = to_kbase_device(dev);
2692
2693         if (!kbdev)
2694                 return -ENODEV;
2695
2696         kbase_pm_suspend(kbdev);
2697         return 0;
2698 }
2699
2700 /** Resume callback from the OS.
2701  *
2702  * This is called by Linux when the device should resume from suspension.
2703  *
2704  * @param dev  The device to resume
2705  *
2706  * @return A standard Linux error code
2707  */
2708 static int kbase_device_resume(struct device *dev)
2709 {
2710         struct kbase_device *kbdev = to_kbase_device(dev);
2711
2712         if (!kbdev)
2713                 return -ENODEV;
2714
2715         kbase_pm_resume(kbdev);
2716         return 0;
2717 }
2718
2719 /** Runtime suspend callback from the OS.
2720  *
2721  * This is called by Linux when the device should prepare for a condition in which it will
2722  * not be able to communicate with the CPU(s) and RAM due to power management.
2723  *
2724  * @param dev  The device to suspend
2725  *
2726  * @return A standard Linux error code
2727  */
2728 #ifdef CONFIG_PM_RUNTIME
2729 static int kbase_device_runtime_suspend(struct device *dev)
2730 {
2731         struct kbase_device *kbdev = to_kbase_device(dev);
2732
2733         if (!kbdev)
2734                 return -ENODEV;
2735
2736         if (kbdev->pm.callback_power_runtime_off) {
2737                 kbdev->pm.callback_power_runtime_off(kbdev);
2738                 KBASE_DEBUG_PRINT_INFO(KBASE_PM, "runtime suspend\n");
2739         }
2740         return 0;
2741 }
2742 #endif /* CONFIG_PM_RUNTIME */
2743
2744 /** Runtime resume callback from the OS.
2745  *
2746  * This is called by Linux when the device should go into a fully active state.
2747  *
 * @param dev  The device to resume
2749  *
2750  * @return A standard Linux error code
2751  */
2752
2753 #ifdef CONFIG_PM_RUNTIME
2754 int kbase_device_runtime_resume(struct device *dev)
2755 {
2756         int ret = 0;
2757         struct kbase_device *kbdev = to_kbase_device(dev);
2758
2759         if (!kbdev)
2760                 return -ENODEV;
2761
2762         if (kbdev->pm.callback_power_runtime_on) {
2763                 ret = kbdev->pm.callback_power_runtime_on(kbdev);
2764                 KBASE_DEBUG_PRINT_INFO(KBASE_PM, "runtime resume\n");
2765         }
2766         return ret;
2767 }
2768 #endif /* CONFIG_PM_RUNTIME */
2769
2770 /** Runtime idle callback from the OS.
2771  *
2772  * This is called by Linux when the device appears to be inactive and it might be
2773  * placed into a low power state
2774  *
 * @param dev  The device being checked for idleness
2776  *
2777  * @return A standard Linux error code
2778  */
2779
2780 #ifdef CONFIG_PM_RUNTIME
static int kbase_device_runtime_idle(struct device *dev)
{
	/* Return non-zero so the PM core never calls pm_runtime_suspend()
	 * from the idle path; runtime power is driven explicitly through
	 * the suspend/resume callbacks above. */
	return 1;
}
2786 #endif /* CONFIG_PM_RUNTIME */
2787
2788 /** The power management operations for the platform driver.
2789  */
/** The power management operations for the platform driver.
 *
 * System sleep is always supported; the runtime-PM callbacks are only
 * wired up when CONFIG_PM_RUNTIME is enabled in the kernel config.
 */
static const struct dev_pm_ops kbase_pm_ops = {
	.suspend = kbase_device_suspend,
	.resume = kbase_device_resume,
#ifdef CONFIG_PM_RUNTIME
	.runtime_suspend = kbase_device_runtime_suspend,
	.runtime_resume = kbase_device_runtime_resume,
	.runtime_idle = kbase_device_runtime_idle,
#endif /* CONFIG_PM_RUNTIME */
};
2799
2800 #ifdef CONFIG_OF
/* Device Tree compatible strings this driver binds against; the table is
 * exported for module autoloading via MODULE_DEVICE_TABLE. */
static const struct of_device_id kbase_dt_ids[] = {
	{ .compatible = "arm,malit7xx" },
	{ .compatible = "arm,mali-midgard" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, kbase_dt_ids);
2807 #endif
2808
/* Platform driver glue: probe/remove hooks, the PM ops defined above, and
 * (when built with OF support) the Device Tree match table. */
static struct platform_driver kbase_platform_driver = {
	.probe = kbase_platform_device_probe,
	.remove = kbase_platform_device_remove,
	.driver = {
		   .name = kbase_drv_name,
		   .owner = THIS_MODULE,
		   .pm = &kbase_pm_ops,
		   .of_match_table = of_match_ptr(kbase_dt_ids),
	},
};
2819
2820 /*
2821  * The driver will not provide a shortcut to create the Mali platform device
2822  * anymore when using Device Tree.
2823  */
2824 #ifdef CONFIG_OF
2825 #if 0
2826 module_platform_driver(kbase_platform_driver);
2827 #else
2828 static int __init rockchip_gpu_init_driver(void)
2829 {
2830         return platform_driver_register(&kbase_platform_driver);
2831 }
2832
2833 late_initcall(rockchip_gpu_init_driver);
2834 #endif
#else /* CONFIG_OF */
2836
2837 #ifdef CONFIG_MALI_PLATFORM_FAKE
2838 #ifndef MALI_PLATFORM_FAKE_MODULE
2839 extern int kbase_platform_fake_register(void);
2840 extern void kbase_platform_fake_unregister(void);
2841 #endif
2842 #endif
2843
/**
 * Module entry point for the non-Device-Tree build.
 *
 * Runs the platform early init, optionally registers the fake Mali
 * platform device (when the driver must create the device itself), and
 * then registers the platform driver.
 *
 * @return 0 on success, or a negative errno on failure
 */
static int __init kbase_driver_init(void)
{
	int ret;

	ret = kbase_platform_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_MALI_PLATFORM_FAKE
#ifndef MALI_PLATFORM_FAKE_MODULE
	/* No DT and no board file: create the Mali platform device here */
	ret = kbase_platform_fake_register();
	if (ret)
		return ret;
#endif
#endif
	ret = platform_driver_register(&kbase_platform_driver);
#ifdef CONFIG_MALI_PLATFORM_FAKE
#ifndef MALI_PLATFORM_FAKE_MODULE
	/* Don't leak the fake device if driver registration failed */
	if (ret)
		kbase_platform_fake_unregister();
#endif
#endif

	return ret;
}
2869
/**
 * Module exit point for the non-Device-Tree build: unregisters the
 * platform driver and, when it was created in kbase_driver_init(), the
 * fake Mali platform device.
 */
static void __exit kbase_driver_exit(void)
{
	platform_driver_unregister(&kbase_platform_driver);
#ifdef CONFIG_MALI_PLATFORM_FAKE
#ifndef MALI_PLATFORM_FAKE_MODULE
	kbase_platform_fake_unregister();
#endif
#endif
}
2879
2880 module_init(kbase_driver_init);
2881 module_exit(kbase_driver_exit);
2882
2883 #endif /* CONFIG_OF */
2884
2885 MODULE_LICENSE("GPL");
2886 MODULE_VERSION(MALI_RELEASE_NAME);
2887
2888 #ifdef CONFIG_MALI_GATOR_SUPPORT
2889 /* Create the trace points (otherwise we just get code to call a tracepoint) */
2890 #define CREATE_TRACE_POINTS
2891 #include "mali_linux_trace.h"
2892
/** Forward a PM status event to the gator tracepoint. */
void kbase_trace_mali_pm_status(u32 event, u64 value)
{
	trace_mali_pm_status(event, value);
}
2897
/** Forward a PM power-off event to the gator tracepoint. */
void kbase_trace_mali_pm_power_off(u32 event, u64 value)
{
	trace_mali_pm_power_off(event, value);
}
2902
/** Forward a PM power-on event to the gator tracepoint. */
void kbase_trace_mali_pm_power_on(u32 event, u64 value)
{
	trace_mali_pm_power_on(event, value);
}
2907
/** Forward a job-slot event to the gator tracepoint.
 *
 * @param kctx  May be NULL, in which case tgid/pid are reported as 0.
 */
void kbase_trace_mali_job_slots_event(u32 event, const kbase_context *kctx, u8 atom_id)
{
	trace_mali_job_slots_event(event, (kctx != NULL ? kctx->osctx.tgid : 0), (kctx != NULL ? kctx->osctx.pid : 0), atom_id);
}
2912
/** Forward a page-fault page-insertion event to the gator tracepoint. */
void kbase_trace_mali_page_fault_insert_pages(int event, u32 value)
{
	trace_mali_page_fault_insert_pages(event, value);
}
2917
/** Forward an MMU address-space-in-use event to the gator tracepoint. */
void kbase_trace_mali_mmu_as_in_use(int event)
{
	trace_mali_mmu_as_in_use(event);
}
2922
/** Forward an MMU address-space-released event to the gator tracepoint. */
void kbase_trace_mali_mmu_as_released(int event)
{
	trace_mali_mmu_as_released(event);
}
2927
/** Forward a change in total allocated pages to the gator tracepoint. */
void kbase_trace_mali_total_alloc_pages_change(long long int event)
{
	trace_mali_total_alloc_pages_change(event);
}
2932 #endif /* CONFIG_MALI_GATOR_SUPPORT */