
/*
 *
 * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */



#include <mali_kbase.h>
#include <mali_kbase_hwaccess_gpuprops.h>
#include <mali_kbase_config_defaults.h>
#include <mali_kbase_uku.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_instr.h>
#include <mali_kbase_gator.h>
#include <backend/gpu/mali_kbase_js_affinity.h>
#include <mali_kbase_mem_linux.h>
#ifdef CONFIG_MALI_DEVFREQ
#include <backend/gpu/mali_kbase_devfreq.h>
#endif /* CONFIG_MALI_DEVFREQ */
#ifdef CONFIG_MALI_NO_MALI
#include "mali_kbase_model_linux.h"
#endif /* CONFIG_MALI_NO_MALI */
#include "mali_kbase_mem_profile_debugfs_buf_size.h"
#include "mali_kbase_debug_mem_view.h"
#include "mali_kbase_mem.h"
#include "mali_kbase_mem_pool_debugfs.h"
#if !MALI_CUSTOMER_RELEASE
#include "mali_kbase_regs_dump_debugfs.h"
#endif /* !MALI_CUSTOMER_RELEASE */
#include <mali_kbase_hwaccess_backend.h>
#include <mali_kbase_hwaccess_jm.h>
#include <backend/gpu/mali_kbase_device_internal.h>

#ifdef CONFIG_KDS
#include <linux/kds.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#endif /* CONFIG_KDS */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/list.h>
#include <linux/semaphore.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/compat.h>       /* is_compat_task */
#include <linux/version.h>
#ifdef CONFIG_MALI_PLATFORM_DEVICETREE
#include <linux/pm_runtime.h>
#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */
#include <mali_kbase_hw.h>
#include <platform/mali_kbase_platform_common.h>
#ifdef CONFIG_MALI_PLATFORM_FAKE
#include <platform/mali_kbase_platform_fake.h>
#endif /* CONFIG_MALI_PLATFORM_FAKE */
#ifdef CONFIG_SYNC
#include <mali_kbase_sync.h>
#endif /* CONFIG_SYNC */
#ifdef CONFIG_PM_DEVFREQ
#include <linux/devfreq.h>
#endif /* CONFIG_PM_DEVFREQ */
#include <linux/clk.h>
#include <linux/delay.h>

#include <mali_kbase_config.h>

#ifdef CONFIG_MACH_MANTA
#include <plat/devs.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
#include <linux/pm_opp.h>
#else
#include <linux/opp.h>
#endif

#if defined(CONFIG_MALI_MIPE_ENABLED)
#include <mali_kbase_tlstream.h>
#endif

/* GPU IRQ Tags */
#define JOB_IRQ_TAG     0
#define MMU_IRQ_TAG     1
#define GPU_IRQ_TAG     2

#if MALI_UNIT_TEST
static struct kbase_exported_test_data shared_kernel_test_data;
EXPORT_SYMBOL(shared_kernel_test_data);
#endif /* MALI_UNIT_TEST */

#define KBASE_DRV_NAME "mali"
/** rk_ext: version of the rk extension in this mali_ko, a.k.a. rk_ko_ver. */
#define ROCKCHIP_VERSION    (13)

static const char kbase_drv_name[] = KBASE_DRV_NAME;

static int kbase_dev_nr;

static DEFINE_MUTEX(kbase_dev_list_lock);
static LIST_HEAD(kbase_dev_list);

#define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
static inline void __compile_time_asserts(void)
{
        CSTD_COMPILE_TIME_ASSERT(sizeof(KERNEL_SIDE_DDK_VERSION_STRING) <= KBASE_GET_VERSION_BUFFER_SIZE);
}

#ifdef CONFIG_KDS

struct kbasep_kds_resource_set_file_data {
        struct kds_resource_set *lock;
};

static int kds_resource_release(struct inode *inode, struct file *file);

static const struct file_operations kds_resource_fops = {
        .release = kds_resource_release
};

struct kbase_kds_resource_list_data {
        struct kds_resource **kds_resources;
        unsigned long *kds_access_bitmap;
        int num_elems;
};

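/*
 * kds_resource_release - release() handler for the anonymous KDS fd
 *
 * Releases the KDS resource set (if one was ever locked) and frees the
 * per-file data allocated by kbase_external_buffer_lock().
 */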
static int kds_resource_release(struct inode *inode, struct file *file)
{
        struct kbasep_kds_resource_set_file_data *data;

        data = (struct kbasep_kds_resource_set_file_data *)file->private_data;
        if (NULL != data) {
                if (NULL != data->lock)
                        kds_resource_set_release(&data->lock);

                kfree(data);
        }
        return 0;
}

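/*
 * kbasep_kds_allocate_resource_list_data - build KDS resource/access lists
 *
 * Walks @num_elems external resources, resolves each GPU address to its
 * kbase region and collects the matching KDS resources; the access bitmap
 * marks which entries were requested with exclusive access.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EINVAL if any
 * resource could not be resolved (both lists are freed in that case).
 */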
static int kbasep_kds_allocate_resource_list_data(struct kbase_context *kctx, struct base_external_resource *ext_res, int num_elems, struct kbase_kds_resource_list_data *resources_list)
{
        struct base_external_resource *res = ext_res;
        int res_id;

        /* assume we have to wait for all */

        KBASE_DEBUG_ASSERT(0 != num_elems);
        resources_list->kds_resources = kmalloc_array(num_elems,
                        sizeof(struct kds_resource *), GFP_KERNEL);

        if (NULL == resources_list->kds_resources)
                return -ENOMEM;

        KBASE_DEBUG_ASSERT(0 != num_elems);
        resources_list->kds_access_bitmap = kzalloc(
                        sizeof(unsigned long) *
                        ((num_elems + BITS_PER_LONG - 1) / BITS_PER_LONG),
                        GFP_KERNEL);

        if (NULL == resources_list->kds_access_bitmap) {
                kfree(resources_list->kds_resources);
                return -ENOMEM;
        }

        kbase_gpu_vm_lock(kctx);
        for (res_id = 0; res_id < num_elems; res_id++, res++) {
                int exclusive;
                struct kbase_va_region *reg;
                struct kds_resource *kds_res = NULL;

                exclusive = res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE;
                reg = kbase_region_tracker_find_region_enclosing_address(kctx, res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);

                /* did we find a matching region object? */
                if (NULL == reg || (reg->flags & KBASE_REG_FREE))
                        break;

                /* no need to check reg->alloc as only regions with an alloc
                 * have a size, and kbase_region_tracker_find_region_enclosing_address
                 * only returns regions with size > 0 */
                switch (reg->gpu_alloc->type) {
#if defined(CONFIG_UMP) && defined(CONFIG_KDS)
                case KBASE_MEM_TYPE_IMPORTED_UMP:
                        kds_res = ump_dd_kds_resource_get(reg->gpu_alloc->imported.ump_handle);
                        break;
#endif /* defined(CONFIG_UMP) && defined(CONFIG_KDS) */
                default:
                        break;
                }

                /* no kds resource for the region? */
                if (!kds_res)
                        break;

                resources_list->kds_resources[res_id] = kds_res;

                if (exclusive)
                        set_bit(res_id, resources_list->kds_access_bitmap);
        }
        kbase_gpu_vm_unlock(kctx);

        /* did the loop run to completion? */
        if (res_id == num_elems)
                return 0;

        /* Clean up as the resource list is not valid. */
        kfree(resources_list->kds_resources);
        kfree(resources_list->kds_access_bitmap);

        return -EINVAL;
}

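/*
 * kbasep_validate_kbase_pointer - check a user-supplied kbase_pointer
 *
 * Rejects NULL pointers, honouring the compat (32-bit) representation when
 * the context was created by a compat task.
 */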
static bool kbasep_validate_kbase_pointer(
                struct kbase_context *kctx, union kbase_pointer *p)
{
        if (kctx->is_compat) {
                if (p->compat_value == 0)
                        return false;
        } else {
                if (NULL == p->value)
                        return false;
        }
        return true;
}

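/*
 * kbase_external_buffer_lock - lock external resources via KDS
 *
 * Copies the user's resource list, exposes an anonymous file descriptor
 * whose release unlocks the resources, then blocks in kds_waitall() until
 * all requested KDS resources are acquired. On any failure the fd (if
 * created) is closed, which in turn frees the per-file data.
 */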
static int kbase_external_buffer_lock(struct kbase_context *kctx,
                struct kbase_uk_ext_buff_kds_data *args, u32 args_size)
{
        struct base_external_resource *ext_res_copy;
        size_t ext_resource_size;
        int ret = -EINVAL;
        int fd = -EBADF;
        struct base_external_resource __user *ext_res_user;
        int __user *file_desc_usr;
        struct kbasep_kds_resource_set_file_data *fdata;
        struct kbase_kds_resource_list_data resource_list_data;

        if (args_size != sizeof(struct kbase_uk_ext_buff_kds_data))
                return -EINVAL;

        /* Check user space has provided valid data */
        if (!kbasep_validate_kbase_pointer(kctx, &args->external_resource) ||
                        !kbasep_validate_kbase_pointer(kctx, &args->file_descriptor) ||
                        (0 == args->num_res) ||
                        (args->num_res > KBASE_MAXIMUM_EXT_RESOURCES))
                return -EINVAL;

        ext_resource_size = sizeof(struct base_external_resource) * args->num_res;

        KBASE_DEBUG_ASSERT(0 != ext_resource_size);
        ext_res_copy = kmalloc(ext_resource_size, GFP_KERNEL);

        if (!ext_res_copy)
                return -ENOMEM;
#ifdef CONFIG_COMPAT
        if (kctx->is_compat) {
                ext_res_user = compat_ptr(args->external_resource.compat_value);
                file_desc_usr = compat_ptr(args->file_descriptor.compat_value);
        } else {
#endif /* CONFIG_COMPAT */
                ext_res_user = args->external_resource.value;
                file_desc_usr = args->file_descriptor.value;
#ifdef CONFIG_COMPAT
        }
#endif /* CONFIG_COMPAT */

        /* Copy the external resources to lock from user space */
        if (copy_from_user(ext_res_copy, ext_res_user, ext_resource_size))
                goto out;

        /* Allocate data to be stored in the file */
        fdata = kmalloc(sizeof(*fdata), GFP_KERNEL);

        if (!fdata) {
                ret = -ENOMEM;
                goto out;
        }

        /* Parse given elements and create resource and access lists */
        ret = kbasep_kds_allocate_resource_list_data(kctx,
                        ext_res_copy, args->num_res, &resource_list_data);
        if (!ret) {
                long err;

                fdata->lock = NULL;

                fd = anon_inode_getfd("kds_ext", &kds_resource_fops, fdata, 0);

                err = copy_to_user(file_desc_usr, &fd, sizeof(fd));

                /* If the file descriptor was valid and we successfully copied
                 * it to user space, then we can try and lock the requested
                 * kds resources.
                 */
                if ((fd >= 0) && (0 == err)) {
                        struct kds_resource_set *lock;

                        lock = kds_waitall(args->num_res,
                                        resource_list_data.kds_access_bitmap,
                                        resource_list_data.kds_resources,
                                        KDS_WAIT_BLOCKING);

                        if (!lock) {
                                ret = -EINVAL;
                        } else if (IS_ERR(lock)) {
                                ret = PTR_ERR(lock);
                        } else {
                                ret = 0;
                                fdata->lock = lock;
                        }
                } else {
                        ret = -EINVAL;
                }

                kfree(resource_list_data.kds_resources);
                kfree(resource_list_data.kds_access_bitmap);
        }

        if (ret) {
                /* If the file was opened successfully then close it, which
                 * will clean up the file data; otherwise we clean up the
                 * file data ourselves.
                 */
                if (fd >= 0)
                        sys_close(fd);
                else
                        kfree(fdata);
        }
out:
        kfree(ext_res_copy);

        return ret;
}
#endif /* CONFIG_KDS */

#ifdef CONFIG_MALI_MIPE_ENABLED
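/*
 * kbase_create_timeline_objects - emit static timeline summary objects
 *
 * Describes the GPU, its LPUs (job slots) and address spaces, plus every
 * live context, into the tlstream summary so that a newly attached client
 * sees a complete picture of the device before body events start flowing.
 */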
static void kbase_create_timeline_objects(struct kbase_context *kctx)
{
        struct kbase_device             *kbdev = kctx->kbdev;
        unsigned int                    lpu_id;
        unsigned int                    as_nr;
        struct kbasep_kctx_list_element *element;

        /* Create LPU objects. */
        for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
                u32 *lpu =
                        &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
                kbase_tlstream_tl_summary_new_lpu(lpu, lpu_id, *lpu);
        }

        /* Create Address Space objects. */
        for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
                kbase_tlstream_tl_summary_new_as(&kbdev->as[as_nr], as_nr);

        /* Create GPU object and make it retain all LPUs and address spaces. */
        kbase_tlstream_tl_summary_new_gpu(
                        kbdev,
                        kbdev->gpu_props.props.raw_props.gpu_id,
                        kbdev->gpu_props.num_cores);

        for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
                void *lpu =
                        &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
                kbase_tlstream_tl_summary_lifelink_lpu_gpu(lpu, kbdev);
        }
        for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
                kbase_tlstream_tl_summary_lifelink_as_gpu(
                                &kbdev->as[as_nr],
                                kbdev);

        /* Create object for each known context. */
        mutex_lock(&kbdev->kctx_list_lock);
        list_for_each_entry(element, &kbdev->kctx_list, link) {
                kbase_tlstream_tl_summary_new_ctx(
                                element->kctx,
                                (u32)(element->kctx->id));
        }
        /* Before releasing the lock, reset the body stream buffers.
         * This prevents context creation messages from being directed to
         * both the summary and body streams. */
        kbase_tlstream_reset_body_streams();
        mutex_unlock(&kbdev->kctx_list_lock);
        /* Static objects are placed into the summary packet, which needs to
         * be transmitted first. Flush all streams to make it available to
         * user space. */
        kbase_tlstream_flush_streams();
}
#endif

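/*
 * kbase_api_handshake - negotiate the UK API version with userspace
 *
 * For a supported legacy major version the kernel pretends to be that
 * version; for the current major it reports the lowest common minor; for
 * anything else it reports its own version and lets userspace bail out.
 */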
static void kbase_api_handshake(struct uku_version_check_args *version)
{
        switch (version->major) {
#ifdef BASE_LEGACY_UK6_SUPPORT
        case 6:
                /* We are backwards compatible with version 6,
                 * so pretend to be the old version */
                version->major = 6;
                version->minor = 1;
                break;
#endif /* BASE_LEGACY_UK6_SUPPORT */
#ifdef BASE_LEGACY_UK7_SUPPORT
        case 7:
                /* We are backwards compatible with version 7,
                 * so pretend to be the old version */
                version->major = 7;
                version->minor = 1;
                break;
#endif /* BASE_LEGACY_UK7_SUPPORT */
#ifdef BASE_LEGACY_UK8_SUPPORT
        case 8:
                /* We are backwards compatible with version 8,
                 * so pretend to be the old version */
                version->major = 8;
                version->minor = 4;
                break;
#endif /* BASE_LEGACY_UK8_SUPPORT */
#ifdef BASE_LEGACY_UK9_SUPPORT
        case 9:
                /* We are backwards compatible with version 9,
                 * so pretend to be the old version */
                version->major = 9;
                version->minor = 0;
                break;
#endif /* BASE_LEGACY_UK9_SUPPORT */
        case BASE_UK_VERSION_MAJOR:
                /* set minor to be the lowest common */
                version->minor = min_t(int, BASE_UK_VERSION_MINOR,
                                (int)version->minor);
                break;
        default:
                /* We return our actual version regardless if it
                 * matches the version returned by userspace -
                 * userspace can bail if it can't handle this
                 * version */
                version->major = BASE_UK_VERSION_MAJOR;
                version->minor = BASE_UK_VERSION_MINOR;
                break;
        }
}


/**
 * enum mali_error - Mali error codes shared with userspace
 *
 * This is a subset of the common Mali errors that can be returned to
 * userspace. Values of matching user and kernel space enumerators MUST be
 * the same. MALI_ERROR_NONE is guaranteed to be 0.
 */
enum mali_error {
        MALI_ERROR_NONE = 0,
        MALI_ERROR_OUT_OF_GPU_MEMORY,
        MALI_ERROR_OUT_OF_MEMORY,
        MALI_ERROR_FUNCTION_FAILED,
};

#ifdef CONFIG_MALI_DEBUG
#define INACTIVE_WAIT_MS (5000)

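/*
 * kbase_set_driver_inactive - toggle the debug "driver inactive" state
 *
 * While the driver is marked inactive, kbase_dispatch() blocks new IOCTLs
 * on driver_inactive_wait; the msleep() gives IOCTLs that already passed
 * that check a grace period to complete.
 */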
void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive)
{
        kbdev->driver_inactive = inactive;
        wake_up(&kbdev->driver_inactive_wait);

        /* Wait for any running IOCTLs to complete */
        if (inactive)
                msleep(INACTIVE_WAIT_MS);
}
KBASE_EXPORT_TEST_API(kbase_set_driver_inactive);
#endif /* CONFIG_MALI_DEBUG */

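/*
 * kbase_dispatch - demultiplex a UK call from userspace
 *
 * @args points at a kernel copy of the ioctl payload, which starts with a
 * union uk_header naming the requested function. The version handshake must
 * happen first, then exactly one KBASE_FUNC_SET_FLAGS call to finish context
 * setup; only after that are the normal functions accepted. Per-call
 * failures are reported through ukh->ret; a non-zero return value means the
 * call itself was malformed.
 */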
static int kbase_dispatch(struct kbase_context *kctx, void * const args, u32 args_size)
{
        struct kbase_device *kbdev;
        union uk_header *ukh = args;
        u32 id;
        int ret = 0;

        KBASE_DEBUG_ASSERT(ukh != NULL);

        kbdev = kctx->kbdev;
        id = ukh->id;
        ukh->ret = MALI_ERROR_NONE; /* Be optimistic */

#ifdef CONFIG_MALI_DEBUG
        wait_event(kbdev->driver_inactive_wait,
                        kbdev->driver_inactive == false);
#endif /* CONFIG_MALI_DEBUG */

        if (UKP_FUNC_ID_CHECK_VERSION == id) {
                struct uku_version_check_args *version_check;

                if (args_size != sizeof(struct uku_version_check_args)) {
                        ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        return 0;
                }
                version_check = (struct uku_version_check_args *)args;
                kbase_api_handshake(version_check);
                /* save the proposed version number for later use */
                kctx->api_version = KBASE_API_VERSION(version_check->major,
                                version_check->minor);
                ukh->ret = MALI_ERROR_NONE;
                return 0;
        }

        /* block calls until version handshake */
        if (kctx->api_version == 0)
                return -EINVAL;

        if (!atomic_read(&kctx->setup_complete)) {
                struct kbase_uk_set_flags *kbase_set_flags;

                /* setup pending; try to signal that we'll do the setup.
                 * If setup was already in progress, fail this call.
                 */
                if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0)
                        return -EINVAL;

                /* an unexpected call leaves the context stuck in setup mode
                 * (KBASE_FUNC_SET_FLAGS is the only call accepted here)
                 */
                if (id != KBASE_FUNC_SET_FLAGS)
                        return -EINVAL;

                kbase_set_flags = (struct kbase_uk_set_flags *)args;

                /* if not matching the expected call, stay in setup mode */
                if (sizeof(*kbase_set_flags) != args_size)
                        goto bad_size;

                /* if bad flags, will stay stuck in setup mode */
                if (kbase_context_set_create_flags(kctx,
                                kbase_set_flags->create_flags) != 0)
                        ukh->ret = MALI_ERROR_FUNCTION_FAILED;

                atomic_set(&kctx->setup_complete, 1);
                return 0;
        }

        /* setup complete, perform normal operation */
        switch (id) {
        case KBASE_FUNC_MEM_ALLOC:
                {
                        struct kbase_uk_mem_alloc *mem = args;
                        struct kbase_va_region *reg;

                        if (sizeof(*mem) != args_size)
                                goto bad_size;

                        reg = kbase_mem_alloc(kctx, mem->va_pages,
                                        mem->commit_pages, mem->extent,
                                        &mem->flags, &mem->gpu_va,
                                        &mem->va_alignment);
                        if (!reg)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }
        case KBASE_FUNC_MEM_IMPORT: {
                        struct kbase_uk_mem_import *mem_import = args;
                        void __user *phandle;

                        if (sizeof(*mem_import) != args_size)
                                goto bad_size;
#ifdef CONFIG_COMPAT
                        if (kctx->is_compat)
                                phandle = compat_ptr(mem_import->phandle.compat_value);
                        else
#endif
                                phandle = mem_import->phandle.value;

                        if (mem_import->type == BASE_MEM_IMPORT_TYPE_INVALID) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

                        if (kbase_mem_import(kctx, mem_import->type, phandle,
                                                &mem_import->gpu_va,
                                                &mem_import->va_pages,
                                                &mem_import->flags)) {
                                mem_import->type = BASE_MEM_IMPORT_TYPE_INVALID;
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        }
                        break;
        }
        case KBASE_FUNC_MEM_ALIAS: {
                        struct kbase_uk_mem_alias *alias = args;
                        struct base_mem_aliasing_info __user *user_ai;
                        struct base_mem_aliasing_info *ai;

                        if (sizeof(*alias) != args_size)
                                goto bad_size;

                        if (alias->nents > 2048) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }
                        if (!alias->nents) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

#ifdef CONFIG_COMPAT
                        if (kctx->is_compat)
                                user_ai = compat_ptr(alias->ai.compat_value);
                        else
#endif
                                user_ai = alias->ai.value;

                        ai = vmalloc(sizeof(*ai) * alias->nents);

                        if (!ai) {
                                ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
                                break;
                        }

                        if (copy_from_user(ai, user_ai,
                                           sizeof(*ai) * alias->nents)) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                goto copy_failed;
                        }

                        alias->gpu_va = kbase_mem_alias(kctx, &alias->flags,
                                                        alias->stride,
                                                        alias->nents, ai,
                                                        &alias->va_pages);
                        if (!alias->gpu_va) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                goto no_alias;
                        }
no_alias:
copy_failed:
                        vfree(ai);
                        break;
                }
        case KBASE_FUNC_MEM_COMMIT:
                {
                        struct kbase_uk_mem_commit *commit = args;

                        if (sizeof(*commit) != args_size)
                                goto bad_size;

                        if (commit->gpu_addr & ~PAGE_MASK) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_COMMIT: commit->gpu_addr: passed parameter is invalid");
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

                        if (kbase_mem_commit(kctx, commit->gpu_addr,
                                        commit->pages,
                                        (base_backing_threshold_status *)
                                        &commit->result_subcode) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;

                        break;
                }

        case KBASE_FUNC_MEM_QUERY:
                {
                        struct kbase_uk_mem_query *query = args;

                        if (sizeof(*query) != args_size)
                                goto bad_size;

                        if (query->gpu_addr & ~PAGE_MASK) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->gpu_addr: passed parameter is invalid");
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }
                        if (query->query != KBASE_MEM_QUERY_COMMIT_SIZE &&
                            query->query != KBASE_MEM_QUERY_VA_SIZE &&
                            query->query != KBASE_MEM_QUERY_FLAGS) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->query = %lld unknown", (unsigned long long)query->query);
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

                        if (kbase_mem_query(kctx, query->gpu_addr,
                                        query->query, &query->value) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        else
                                ukh->ret = MALI_ERROR_NONE;
                        break;
                }

        case KBASE_FUNC_MEM_FLAGS_CHANGE:
                {
                        struct kbase_uk_mem_flags_change *fc = args;

                        if (sizeof(*fc) != args_size)
                                goto bad_size;

                        if ((fc->gpu_va & ~PAGE_MASK) && (fc->gpu_va >= PAGE_SIZE)) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FLAGS_CHANGE: fc->gpu_va: passed parameter is invalid");
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

                        if (kbase_mem_flags_change(kctx, fc->gpu_va,
                                        fc->flags, fc->mask) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;

                        break;
                }
        case KBASE_FUNC_MEM_FREE:
                {
                        struct kbase_uk_mem_free *mem = args;

                        if (sizeof(*mem) != args_size)
                                goto bad_size;

                        if ((mem->gpu_addr & ~PAGE_MASK) && (mem->gpu_addr >= PAGE_SIZE)) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FREE: mem->gpu_addr: passed parameter is invalid");
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

                        if (kbase_mem_free(kctx, mem->gpu_addr) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }

        case KBASE_FUNC_JOB_SUBMIT:
                {
                        struct kbase_uk_job_submit *job = args;

                        if (sizeof(*job) != args_size)
                                goto bad_size;

#ifdef BASE_LEGACY_UK6_SUPPORT
                        if (kbase_jd_submit(kctx, job, 0) != 0)
#else
                        if (kbase_jd_submit(kctx, job) != 0)
#endif /* BASE_LEGACY_UK6_SUPPORT */
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }

#ifdef BASE_LEGACY_UK6_SUPPORT
        case KBASE_FUNC_JOB_SUBMIT_UK6:
                {
                        struct kbase_uk_job_submit *job = args;

                        if (sizeof(*job) != args_size)
                                goto bad_size;

                        if (kbase_jd_submit(kctx, job, 1) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }
#endif

        case KBASE_FUNC_SYNC:
                {
                        struct kbase_uk_sync_now *sn = args;

                        if (sizeof(*sn) != args_size)
                                goto bad_size;

                        if (sn->sset.basep_sset.mem_handle & ~PAGE_MASK) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_SYNC: sn->sset.basep_sset.mem_handle: passed parameter is invalid");
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

#ifndef CONFIG_MALI_COH_USER
                        if (kbase_sync_now(kctx, &sn->sset) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
#endif
                        break;
                }

        case KBASE_FUNC_DISJOINT_QUERY:
                {
                        struct kbase_uk_disjoint_query *dquery = args;

                        if (sizeof(*dquery) != args_size)
                                goto bad_size;

                        /* Get the disjointness counter value. */
                        dquery->counter = kbase_disjoint_event_get(kctx->kbdev);
                        break;
                }

        case KBASE_FUNC_POST_TERM:
                {
                        kbase_event_close(kctx);
                        break;
                }

        case KBASE_FUNC_HWCNT_SETUP:
                {
                        struct kbase_uk_hwcnt_setup *setup = args;

                        if (sizeof(*setup) != args_size)
                                goto bad_size;

                        mutex_lock(&kctx->vinstr_cli_lock);
                        if (kbase_vinstr_legacy_hwc_setup(kbdev->vinstr_ctx,
                                        &kctx->vinstr_cli, setup) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        mutex_unlock(&kctx->vinstr_cli_lock);
                        break;
                }

        case KBASE_FUNC_HWCNT_DUMP:
                {
                        /* args ignored */
                        mutex_lock(&kctx->vinstr_cli_lock);
                        if (kbase_vinstr_hwc_dump(kctx->vinstr_cli,
                                        BASE_HWCNT_READER_EVENT_MANUAL) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        mutex_unlock(&kctx->vinstr_cli_lock);
                        break;
                }

        case KBASE_FUNC_HWCNT_CLEAR:
                {
                        /* args ignored */
                        mutex_lock(&kctx->vinstr_cli_lock);
                        if (kbase_vinstr_hwc_clear(kctx->vinstr_cli) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        mutex_unlock(&kctx->vinstr_cli_lock);
                        break;
                }

        case KBASE_FUNC_HWCNT_READER_SETUP:
                {
                        struct kbase_uk_hwcnt_reader_setup *setup = args;

                        if (sizeof(*setup) != args_size)
                                goto bad_size;

                        mutex_lock(&kctx->vinstr_cli_lock);
                        if (kbase_vinstr_hwcnt_reader_setup(kbdev->vinstr_ctx,
                                        setup) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        mutex_unlock(&kctx->vinstr_cli_lock);
                        break;
                }

        case KBASE_FUNC_GPU_PROPS_REG_DUMP:
                {
                        struct kbase_uk_gpuprops *setup = args;

                        if (sizeof(*setup) != args_size)
                                goto bad_size;

                        if (kbase_gpuprops_uk_get_props(kctx, setup) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }
        case KBASE_FUNC_FIND_CPU_OFFSET:
                {
                        struct kbase_uk_find_cpu_offset *find = args;

                        if (sizeof(*find) != args_size)
                                goto bad_size;

                        if (find->gpu_addr & ~PAGE_MASK) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_FIND_CPU_OFFSET: find->gpu_addr: passed parameter is invalid");
                                goto out_bad;
                        }

                        if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        } else {
                                int err;

                                err = kbasep_find_enclosing_cpu_mapping_offset(
                                                kctx,
                                                find->gpu_addr,
                                                (uintptr_t) find->cpu_addr,
                                                (size_t) find->size,
                                                &find->offset);

                                if (err)
                                        ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        }
                        break;
                }
        case KBASE_FUNC_GET_VERSION:
                {
                        struct kbase_uk_get_ddk_version *get_version = (struct kbase_uk_get_ddk_version *)args;

                        if (sizeof(*get_version) != args_size)
                                goto bad_size;

                        /* the version buffer size is checked by a compile-time assert */
                        memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
                        get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
                        get_version->rk_version = ROCKCHIP_VERSION;
                        break;
                }

        case KBASE_FUNC_STREAM_CREATE:
                {
#ifdef CONFIG_SYNC
                        struct kbase_uk_stream_create *screate = (struct kbase_uk_stream_create *)args;

                        if (sizeof(*screate) != args_size)
                                goto bad_size;

                        if (strnlen(screate->name, sizeof(screate->name)) >= sizeof(screate->name)) {
                                /* name not NUL terminated */
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

                        if (kbase_stream_create(screate->name, &screate->fd) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        else
                                ukh->ret = MALI_ERROR_NONE;
#else /* CONFIG_SYNC */
                        ukh->ret = MALI_ERROR_FUNCTION_FAILED;
#endif /* CONFIG_SYNC */
                        break;
                }
        case KBASE_FUNC_FENCE_VALIDATE:
                {
#ifdef CONFIG_SYNC
                        struct kbase_uk_fence_validate *fence_validate = (struct kbase_uk_fence_validate *)args;

                        if (sizeof(*fence_validate) != args_size)
                                goto bad_size;

                        if (kbase_fence_validate(fence_validate->fd) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        else
                                ukh->ret = MALI_ERROR_NONE;
#endif /* CONFIG_SYNC */
                        break;
                }

        case KBASE_FUNC_EXT_BUFFER_LOCK:
                {
#ifdef CONFIG_KDS
                        ret = kbase_external_buffer_lock(kctx,
                                (struct kbase_uk_ext_buff_kds_data *)args,
                                args_size);
                        switch (ret) {
                        case 0:
                                ukh->ret = MALI_ERROR_NONE;
                                break;
                        case -ENOMEM:
                                ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
                                break;
                        default:
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        }
#endif /* CONFIG_KDS */
                        break;
                }

        case KBASE_FUNC_SET_TEST_DATA:
                {
#if MALI_UNIT_TEST
                        struct kbase_uk_set_test_data *set_data = args;

                        shared_kernel_test_data = set_data->test_data;
                        shared_kernel_test_data.kctx.value = (void __user *)kctx;
                        shared_kernel_test_data.mm.value = (void __user *)current->mm;
                        ukh->ret = MALI_ERROR_NONE;
#endif /* MALI_UNIT_TEST */
                        break;
                }

        case KBASE_FUNC_INJECT_ERROR:
                {
#ifdef CONFIG_MALI_ERROR_INJECT
                        unsigned long flags;
                        struct kbase_error_params params = ((struct kbase_uk_error_params *)args)->params;

                        /* serialize register operations */
                        spin_lock_irqsave(&kbdev->reg_op_lock, flags);
                        if (job_atom_inject_error(&params) != 0)
                                ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
                        else
                                ukh->ret = MALI_ERROR_NONE;
                        spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
#endif /* CONFIG_MALI_ERROR_INJECT */
                        break;
                }

        case KBASE_FUNC_MODEL_CONTROL:
                {
#ifdef CONFIG_MALI_NO_MALI
                        unsigned long flags;
                        struct kbase_model_control_params params =
                                        ((struct kbase_uk_model_control_params *)args)->params;

                        /* serialize register operations */
                        spin_lock_irqsave(&kbdev->reg_op_lock, flags);
                        if (gpu_model_control(kbdev->model, &params) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        else
                                ukh->ret = MALI_ERROR_NONE;
                        spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
#endif /* CONFIG_MALI_NO_MALI */
                        break;
                }

#ifdef BASE_LEGACY_UK8_SUPPORT
        case KBASE_FUNC_KEEP_GPU_POWERED:
                {
                        dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_KEEP_GPU_POWERED: function is deprecated and disabled\n");
                        ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }
#endif /* BASE_LEGACY_UK8_SUPPORT */

        case KBASE_FUNC_GET_PROFILING_CONTROLS:
                {
                        struct kbase_uk_profiling_controls *controls =
                                        (struct kbase_uk_profiling_controls *)args;
                        u32 i;

                        if (sizeof(*controls) != args_size)
                                goto bad_size;

                        for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
                                controls->profiling_controls[i] = kbase_get_profiling_control(kbdev, i);

                        break;
                }

        /* used only for testing purposes; these controls are to be set by
         * gator through the gator API */
        case KBASE_FUNC_SET_PROFILING_CONTROLS:
                {
                        struct kbase_uk_profiling_controls *controls =
                                        (struct kbase_uk_profiling_controls *)args;
                        u32 i;

                        if (sizeof(*controls) != args_size)
                                goto bad_size;

                        for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
                                _mali_profiling_control(i, controls->profiling_controls[i]);

                        break;
                }

        case KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD:
                {
                        struct kbase_uk_debugfs_mem_profile_add *add_data =
                                        (struct kbase_uk_debugfs_mem_profile_add *)args;
                        char *buf;
                        char __user *user_buf;

                        if (sizeof(*add_data) != args_size)
                                goto bad_size;

                        if (add_data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
                                dev_err(kbdev->dev, "buffer too big");
                                goto out_bad;
                        }

#ifdef CONFIG_COMPAT
                        if (kctx->is_compat)
                                user_buf = compat_ptr(add_data->buf.compat_value);
                        else
#endif
                                user_buf = add_data->buf.value;

                        buf = kmalloc(add_data->len, GFP_KERNEL);
                        if (!buf)
                                goto out_bad;

                        if (0 != copy_from_user(buf, user_buf, add_data->len)) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                kfree(buf);
                                goto out_bad;
                        }

                        if (kbasep_mem_profile_debugfs_insert(kctx, buf,
                                                        add_data->len)) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                kfree(buf);
                                goto out_bad;
                        }

                        break;
                }

#ifdef CONFIG_MALI_NO_MALI
        case KBASE_FUNC_SET_PRFCNT_VALUES:
                {
                        struct kbase_uk_prfcnt_values *params =
                                        ((struct kbase_uk_prfcnt_values *)args);

                        gpu_model_set_dummy_prfcnt_sample(params->data,
                                        params->size);

                        break;
                }
#endif /* CONFIG_MALI_NO_MALI */

#ifdef CONFIG_MALI_MIPE_ENABLED
        case KBASE_FUNC_TLSTREAM_ACQUIRE:
                {
                        struct kbase_uk_tlstream_acquire *tlstream_acquire =
                                args;

                        if (sizeof(*tlstream_acquire) != args_size)
                                goto bad_size;

                        if (0 != kbase_tlstream_acquire(
                                                kctx,
                                                &tlstream_acquire->fd)) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        } else if (0 <= tlstream_acquire->fd) {
                                /* Summary stream was cleared during acquire.
                                 * Create static timeline objects that will be
                                 * read by client. */
                                kbase_create_timeline_objects(kctx);
                        }
                        break;
                }
        case KBASE_FUNC_TLSTREAM_FLUSH:
                {
                        struct kbase_uk_tlstream_flush *tlstream_flush =
                                args;

                        if (sizeof(*tlstream_flush) != args_size)
                                goto bad_size;

                        kbase_tlstream_flush_streams();
                        break;
                }
#if MALI_UNIT_TEST
        case KBASE_FUNC_TLSTREAM_TEST:
                {
                        struct kbase_uk_tlstream_test *tlstream_test = args;

                        if (sizeof(*tlstream_test) != args_size)
                                goto bad_size;

                        kbase_tlstream_test(
                                        tlstream_test->tpw_count,
                                        tlstream_test->msg_delay,
                                        tlstream_test->msg_count,
                                        tlstream_test->aux_msg);
                        break;
                }
        case KBASE_FUNC_TLSTREAM_STATS:
                {
                        struct kbase_uk_tlstream_stats *tlstream_stats = args;

                        if (sizeof(*tlstream_stats) != args_size)
                                goto bad_size;

                        kbase_tlstream_stats(
                                        &tlstream_stats->bytes_collected,
                                        &tlstream_stats->bytes_generated);
                        break;
                }
#endif /* MALI_UNIT_TEST */
#endif /* CONFIG_MALI_MIPE_ENABLED */

        case KBASE_FUNC_GET_CONTEXT_ID:
                {
                        struct kbase_uk_context_id *info = args;

                        info->id = kctx->id;
                        break;
                }

        default:
                dev_err(kbdev->dev, "unknown ioctl %u", id);
                goto out_bad;
        }

        return ret;

 bad_size:
        dev_err(kbdev->dev, "Wrong syscall size (%u) for %08x\n", args_size, id);
 out_bad:
        return -EINVAL;
}

static struct kbase_device *to_kbase_device(struct device *dev)
{
        return dev_get_drvdata(dev);
}

/*
 * API to acquire the device list mutex and
 * return a pointer to the device list head
 */
const struct list_head *kbase_dev_list_get(void)
{
        mutex_lock(&kbase_dev_list_lock);
        return &kbase_dev_list;
}
KBASE_EXPORT_TEST_API(kbase_dev_list_get);

/* API to release the device list mutex */
void kbase_dev_list_put(const struct list_head *dev_list)
{
        mutex_unlock(&kbase_dev_list_lock);
}
KBASE_EXPORT_TEST_API(kbase_dev_list_put);

/* Find a particular kbase device (as specified by minor number), or find
 * the "first" device if -1 is specified */
struct kbase_device *kbase_find_device(int minor)
{
        struct kbase_device *kbdev = NULL;
        struct list_head *entry;
        const struct list_head *dev_list = kbase_dev_list_get();

        list_for_each(entry, dev_list) {
                struct kbase_device *tmp;

                tmp = list_entry(entry, struct kbase_device, entry);
                if (tmp->mdev.minor == minor || minor == -1) {
                        kbdev = tmp;
                        get_device(kbdev->dev);
                        break;
                }
        }
        kbase_dev_list_put(dev_list);

        return kbdev;
}
EXPORT_SYMBOL(kbase_find_device);

void kbase_release_device(struct kbase_device *kbdev)
{
        put_device(kbdev->dev);
}
EXPORT_SYMBOL(kbase_release_device);

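/*
 * kbase_open - open() handler for the kbase device node
 *
 * Creates a new kbase context for the calling process, wires it to the
 * struct file, registers the per-context debugfs entries, and adds the
 * context to the device's kctx_list.
 */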
static int kbase_open(struct inode *inode, struct file *filp)
{
        struct kbase_device *kbdev = NULL;
        struct kbase_context *kctx;
        int ret = 0;
#ifdef CONFIG_DEBUG_FS
        char kctx_name[64];
#endif

        kbdev = kbase_find_device(iminor(inode));

        if (!kbdev)
                return -ENODEV;

        kctx = kbase_create_context(kbdev, is_compat_task());
        if (!kctx) {
                ret = -ENOMEM;
                goto out;
        }

        init_waitqueue_head(&kctx->event_queue);
        filp->private_data = kctx;
        kctx->filp = filp;

        kctx->infinite_cache_active = kbdev->infinite_cache_active_default;

#ifdef CONFIG_DEBUG_FS
        snprintf(kctx_name, 64, "%d_%d", kctx->tgid, kctx->id);

        kctx->kctx_dentry = debugfs_create_dir(kctx_name,
                        kbdev->debugfs_ctx_directory);

        if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
                ret = -ENOMEM;
                goto out;
        }

#ifdef CONFIG_MALI_COH_USER
        /* if cache is completely coherent at hardware level, then remove the
         * infinite cache control support from debugfs.
         */
#else
        debugfs_create_bool("infinite_cache", 0644, kctx->kctx_dentry,
                        &kctx->infinite_cache_active);
#endif /* CONFIG_MALI_COH_USER */

        mutex_init(&kctx->mem_profile_lock);

        kbasep_jd_debugfs_ctx_add(kctx);
        kbase_debug_mem_view_init(filp);

        kbase_debug_job_fault_context_init(kctx);

        kbase_mem_pool_debugfs_add(kctx->kctx_dentry, &kctx->mem_pool);

#endif /* CONFIG_DEBUG_FS */

        dev_dbg(kbdev->dev, "created base context\n");

        {
                struct kbasep_kctx_list_element *element;

                element = kzalloc(sizeof(*element), GFP_KERNEL);
                if (element) {
                        mutex_lock(&kbdev->kctx_list_lock);
                        element->kctx = kctx;
                        list_add(&element->link, &kbdev->kctx_list);
#ifdef CONFIG_MALI_MIPE_ENABLED
                        kbase_tlstream_tl_new_ctx(
                                        element->kctx,
                                        (u32)(element->kctx->id));
#endif
                        mutex_unlock(&kbdev->kctx_list_lock);
                } else {
                        /* we don't treat this as a fail - just warn about it */
                        dev_warn(kbdev->dev, "couldn't add kctx to kctx_list\n");
                }
        }
        return 0;

 out:
        kbase_release_device(kbdev);
        return ret;
}

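/*
 * kbase_release - release() handler for the kbase device node
 *
 * Tears down the per-context debugfs entries, removes the context from the
 * device's kctx_list, detaches any leftover vinstr (hwcnt) client, and
 * finally destroys the context and drops the device reference.
 */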
static int kbase_release(struct inode *inode, struct file *filp)
{
        struct kbase_context *kctx = filp->private_data;
        struct kbase_device *kbdev = kctx->kbdev;
        struct kbasep_kctx_list_element *element, *tmp;
        bool found_element = false;

#ifdef CONFIG_MALI_MIPE_ENABLED
        kbase_tlstream_tl_del_ctx(kctx);
#endif

#ifdef CONFIG_DEBUG_FS
        debugfs_remove_recursive(kctx->kctx_dentry);
        kbasep_mem_profile_debugfs_remove(kctx);
        kbase_debug_job_fault_context_term(kctx);
#endif

        mutex_lock(&kbdev->kctx_list_lock);
        list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
                if (element->kctx == kctx) {
                        list_del(&element->link);
                        kfree(element);
                        found_element = true;
                }
        }
        mutex_unlock(&kbdev->kctx_list_lock);
        if (!found_element)
                dev_warn(kbdev->dev, "kctx not in kctx_list\n");

        filp->private_data = NULL;

        mutex_lock(&kctx->vinstr_cli_lock);
        /* If this client was performing hwcnt dumping and did not explicitly
         * detach itself, remove it from the vinstr core now */
        if (kctx->vinstr_cli) {
                struct kbase_uk_hwcnt_setup setup;

                setup.dump_buffer = 0llu;
                kbase_vinstr_legacy_hwc_setup(
                                kbdev->vinstr_ctx, &kctx->vinstr_cli, &setup);
        }
        mutex_unlock(&kctx->vinstr_cli_lock);

        kbase_destroy_context(kctx);

        dev_dbg(kbdev->dev, "deleted base context\n");
        kbase_release_device(kbdev);
        return 0;
}

#define CALL_MAX_SIZE 536

1390 static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1391 {
        /* declared as a u64 array to force 8-byte alignment of the call buffer */
        u64 msg[(CALL_MAX_SIZE + 7) >> 3] = { 0xdeadbeefdeadbeefull };
1393         u32 size = _IOC_SIZE(cmd);
1394         struct kbase_context *kctx = filp->private_data;
1395
1396         if (size > CALL_MAX_SIZE)
1397                 return -ENOTTY;
1398
1399         if (0 != copy_from_user(&msg, (void __user *)arg, size)) {
1400                 dev_err(kctx->kbdev->dev, "failed to copy ioctl argument into kernel space\n");
1401                 return -EFAULT;
1402         }
1403
1404         if (kbase_dispatch(kctx, &msg, size) != 0)
1405                 return -EFAULT;
1406
1407         if (0 != copy_to_user((void __user *)arg, &msg, size)) {
1408                 dev_err(kctx->kbdev->dev, "failed to copy results of UK call back to user space\n");
1409                 return -EFAULT;
1410         }
1411         return 0;
1412 }
1413
1414 static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
1415 {
1416         struct kbase_context *kctx = filp->private_data;
1417         struct base_jd_event_v2 uevent;
1418         int out_count = 0;
1419
1420         if (count < sizeof(uevent))
1421                 return -ENOBUFS;
1422
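        /*
         * Copy out as many queued job events as fit in the user buffer.
         * Only the first event may block: if the queue is empty and the
         * file was not opened with O_NONBLOCK, sleep until an event is
         * pending; once at least one event has been copied, return what
         * we have so far.
         */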
1423         do {
1424                 while (kbase_event_dequeue(kctx, &uevent)) {
1425                         if (out_count > 0)
1426                                 goto out;
1427
1428                         if (filp->f_flags & O_NONBLOCK)
1429                                 return -EAGAIN;
1430
1431                         if (wait_event_interruptible(kctx->event_queue,
1432                                         kbase_event_pending(kctx)) != 0)
1433                                 return -ERESTARTSYS;
1434                 }
1435                 if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
1436                         if (out_count == 0)
1437                                 return -EPIPE;
1438                         goto out;
1439                 }
1440
1441                 if (copy_to_user(buf, &uevent, sizeof(uevent)) != 0)
1442                         return -EFAULT;
1443
1444                 buf += sizeof(uevent);
1445                 out_count++;
1446                 count -= sizeof(uevent);
1447         } while (count >= sizeof(uevent));
1448
1449  out:
1450         return out_count * sizeof(uevent);
1451 }
1452
1453 static unsigned int kbase_poll(struct file *filp, poll_table *wait)
1454 {
1455         struct kbase_context *kctx = filp->private_data;
1456
1457         poll_wait(filp, &kctx->event_queue, wait);
1458         if (kbase_event_pending(kctx))
1459                 return POLLIN | POLLRDNORM;
1460
1461         return 0;
1462 }
1463
1464 void kbase_event_wakeup(struct kbase_context *kctx)
1465 {
1466         KBASE_DEBUG_ASSERT(kctx);
1467
1468         wake_up_interruptible(&kctx->event_queue);
1469 }
1470
1471 KBASE_EXPORT_TEST_API(kbase_event_wakeup);
1472
1473 static int kbase_check_flags(int flags)
1474 {
        /* Enforce that the file descriptor was opened with O_CLOEXEC so that
         * it is always closed across an execve() and cannot leak into
         * unrelated programs.
         */
1478         if (0 == (flags & O_CLOEXEC))
1479                 return -EINVAL;
1480
1481         return 0;
1482 }
1483
1484 static unsigned long kbase_get_unmapped_area(struct file *filp,
1485                 const unsigned long addr, const unsigned long len,
1486                 const unsigned long pgoff, const unsigned long flags)
1487 {
1488 #ifdef CONFIG_64BIT
        /* based on get_unmapped_area(), but simplified slightly because some
         * values are known in advance */
1491         struct kbase_context *kctx = filp->private_data;
1492
1493         if (!kctx->is_compat && !addr &&
1494                 kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA)) {
1495                 struct mm_struct *mm = current->mm;
1496                 struct vm_area_struct *vma;
1497                 unsigned long low_limit, high_limit, gap_start, gap_end;
1498
                /* The hardware has a smaller VA range than userspace; ensure
                 * the page comes from a VA which can be used on the GPU */
1501
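                /*
                 * Top-down search for the highest gap below 2^33 that can
                 * hold len bytes, much like the kernel's
                 * unmapped_area_topdown(): the rb_subtree_gap augmentation
                 * of the VMA rb-tree lets whole subtrees that cannot
                 * contain a large enough gap be skipped.
                 */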
1502                 gap_end = (1UL<<33);
1503                 if (gap_end < len)
1504                         return -ENOMEM;
1505                 high_limit = gap_end - len;
1506                 low_limit = PAGE_SIZE + len;
1507
1508                 gap_start = mm->highest_vm_end;
1509                 if (gap_start <= high_limit)
1510                         goto found_highest;
1511
1512                 if (RB_EMPTY_ROOT(&mm->mm_rb))
1513                         return -ENOMEM;
1514                 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1515                 if (vma->rb_subtree_gap < len)
1516                         return -ENOMEM;
1517
1518                 while (true) {
1519                         gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
1520                         if (gap_start <= high_limit && vma->vm_rb.rb_right) {
1521                                 struct vm_area_struct *right =
1522                                         rb_entry(vma->vm_rb.rb_right,
1523                                                  struct vm_area_struct, vm_rb);
1524                                 if (right->rb_subtree_gap >= len) {
1525                                         vma = right;
1526                                         continue;
1527                                 }
1528                         }
1529 check_current:
1530                         gap_end = vma->vm_start;
1531                         if (gap_end < low_limit)
1532                                 return -ENOMEM;
1533                         if (gap_start <= high_limit &&
1534                             gap_end - gap_start >= len)
1535                                 goto found;
1536
1537                         if (vma->vm_rb.rb_left) {
1538                                 struct vm_area_struct *left =
1539                                         rb_entry(vma->vm_rb.rb_left,
1540                                                  struct vm_area_struct, vm_rb);
1541
1542                                 if (left->rb_subtree_gap >= len) {
1543                                         vma = left;
1544                                         continue;
1545                                 }
1546                         }
1547                         while (true) {
1548                                 struct rb_node *prev = &vma->vm_rb;
1549
1550                                 if (!rb_parent(prev))
1551                                         return -ENOMEM;
1552                                 vma = rb_entry(rb_parent(prev),
1553                                                 struct vm_area_struct, vm_rb);
1554                                 if (prev == vma->vm_rb.rb_right) {
1555                                         gap_start = vma->vm_prev ?
1556                                                 vma->vm_prev->vm_end : 0;
1557                                         goto check_current;
1558                                 }
1559                         }
1560                 }
1561
1562 found:
1563                 if (gap_end > (1UL<<33))
1564                         gap_end = (1UL<<33);
1565
1566 found_highest:
1567                 gap_end -= len;
1568
1569                 VM_BUG_ON(gap_end < PAGE_SIZE);
1570                 VM_BUG_ON(gap_end < gap_start);
1571                 return gap_end;
1572         }
1573 #endif
1574         /* No special requirements - fallback to the default version */
1575         return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
1576 }
1577
1578 static const struct file_operations kbase_fops = {
1579         .owner = THIS_MODULE,
1580         .open = kbase_open,
1581         .release = kbase_release,
1582         .read = kbase_read,
1583         .poll = kbase_poll,
1584         .unlocked_ioctl = kbase_ioctl,
1585         .compat_ioctl = kbase_ioctl,
1586         .mmap = kbase_mmap,
1587         .check_flags = kbase_check_flags,
1588         .get_unmapped_area = kbase_get_unmapped_area,
1589 };
1590
1591 #ifndef CONFIG_MALI_NO_MALI
1592 void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value)
1593 {
1594         writel(value, kbdev->reg + offset);
1595 }
1596
1597 u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset)
1598 {
1599         return readl(kbdev->reg + offset);
1600 }
1601 #endif /* !CONFIG_MALI_NO_MALI */
1602
1603
1604 /** Show callback for the @c power_policy sysfs file.
1605  *
1606  * This function is called to get the contents of the @c power_policy sysfs
1607  * file. This is a list of the available policies with the currently active one
1608  * surrounded by square brackets.
1609  *
1610  * @param dev   The device this sysfs file is for
1611  * @param attr  The attributes of the sysfs file
1612  * @param buf   The output buffer for the sysfs file contents
1613  *
1614  * @return The number of bytes output to @c buf.
1615  */
1616 static ssize_t show_policy(struct device *dev, struct device_attribute *attr, char *const buf)
1617 {
1618         struct kbase_device *kbdev;
1619         const struct kbase_pm_policy *current_policy;
1620         const struct kbase_pm_policy *const *policy_list;
1621         int policy_count;
1622         int i;
1623         ssize_t ret = 0;
1624
1625         kbdev = to_kbase_device(dev);
1626
1627         if (!kbdev)
1628                 return -ENODEV;
1629
1630         current_policy = kbase_pm_get_policy(kbdev);
1631
1632         policy_count = kbase_pm_list_policies(&policy_list);
1633
1634         for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
1635                 if (policy_list[i] == current_policy)
1636                         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
1637                 else
1638                         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
1639         }
1640
1641         if (ret < PAGE_SIZE - 1) {
1642                 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
1643         } else {
1644                 buf[PAGE_SIZE - 2] = '\n';
1645                 buf[PAGE_SIZE - 1] = '\0';
1646                 ret = PAGE_SIZE - 1;
1647         }
1648
1649         return ret;
1650 }
1651
1652 /** Store callback for the @c power_policy sysfs file.
1653  *
1654  * This function is called when the @c power_policy sysfs file is written to.
1655  * It matches the requested policy against the available policies and if a
1656  * matching policy is found calls @ref kbase_pm_set_policy to change the
1657  * policy.
1658  *
 * @param dev   The device this sysfs file is for
1660  * @param attr  The attributes of the sysfs file
1661  * @param buf   The value written to the sysfs file
1662  * @param count The number of bytes written to the sysfs file
1663  *
1664  * @return @c count if the function succeeded. An error code on failure.
1665  */
1666 static ssize_t set_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1667 {
1668         struct kbase_device *kbdev;
1669         const struct kbase_pm_policy *new_policy = NULL;
1670         const struct kbase_pm_policy *const *policy_list;
1671         int policy_count;
1672         int i;
1673
1674         kbdev = to_kbase_device(dev);
1675
1676         if (!kbdev)
1677                 return -ENODEV;
1678
1679         policy_count = kbase_pm_list_policies(&policy_list);
1680
1681         for (i = 0; i < policy_count; i++) {
1682                 if (sysfs_streq(policy_list[i]->name, buf)) {
1683                         new_policy = policy_list[i];
1684                         break;
1685                 }
1686         }
1687
1688         if (!new_policy) {
1689                 dev_err(dev, "power_policy: policy not found\n");
1690                 return -EINVAL;
1691         }
1692
1693         kbase_pm_set_policy(kbdev, new_policy);
1694
1695         return count;
1696 }
1697
1698 /** The sysfs file @c power_policy.
1699  *
1700  * This is used for obtaining information about the available policies,
1701  * determining which policy is currently active, and changing the active
1702  * policy.
1703  */
1704 static DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
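
/*
 * Example usage (a sketch; the sysfs path assumes the misc device is
 * registered as "mali0", and the set of policy names depends on the build):
 *
 *   # cat /sys/class/misc/mali0/device/power_policy
 *   demand [coarse_demand] always_on
 *   # echo always_on > /sys/class/misc/mali0/device/power_policy
 */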
1705
1706 /** Show callback for the @c core_availability_policy sysfs file.
1707  *
1708  * This function is called to get the contents of the @c core_availability_policy
1709  * sysfs file. This is a list of the available policies with the currently
1710  * active one surrounded by square brackets.
1711  *
1712  * @param dev   The device this sysfs file is for
1713  * @param attr  The attributes of the sysfs file
1714  * @param buf   The output buffer for the sysfs file contents
1715  *
1716  * @return The number of bytes output to @c buf.
1717  */
1718 static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char * const buf)
1719 {
1720         struct kbase_device *kbdev;
1721         const struct kbase_pm_ca_policy *current_policy;
1722         const struct kbase_pm_ca_policy *const *policy_list;
1723         int policy_count;
1724         int i;
1725         ssize_t ret = 0;
1726
1727         kbdev = to_kbase_device(dev);
1728
1729         if (!kbdev)
1730                 return -ENODEV;
1731
1732         current_policy = kbase_pm_ca_get_policy(kbdev);
1733
1734         policy_count = kbase_pm_ca_list_policies(&policy_list);
1735
1736         for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
1737                 if (policy_list[i] == current_policy)
1738                         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
1739                 else
1740                         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
1741         }
1742
1743         if (ret < PAGE_SIZE - 1) {
1744                 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
1745         } else {
1746                 buf[PAGE_SIZE - 2] = '\n';
1747                 buf[PAGE_SIZE - 1] = '\0';
1748                 ret = PAGE_SIZE - 1;
1749         }
1750
1751         return ret;
1752 }
1753
1754 /** Store callback for the @c core_availability_policy sysfs file.
1755  *
1756  * This function is called when the @c core_availability_policy sysfs file is
1757  * written to. It matches the requested policy against the available policies
 * and if a matching policy is found calls @ref kbase_pm_ca_set_policy to
 * change the policy.
1760  *
 * @param dev   The device this sysfs file is for
1762  * @param attr  The attributes of the sysfs file
1763  * @param buf   The value written to the sysfs file
1764  * @param count The number of bytes written to the sysfs file
1765  *
1766  * @return @c count if the function succeeded. An error code on failure.
1767  */
1768 static ssize_t set_ca_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1769 {
1770         struct kbase_device *kbdev;
1771         const struct kbase_pm_ca_policy *new_policy = NULL;
1772         const struct kbase_pm_ca_policy *const *policy_list;
1773         int policy_count;
1774         int i;
1775
1776         kbdev = to_kbase_device(dev);
1777
1778         if (!kbdev)
1779                 return -ENODEV;
1780
1781         policy_count = kbase_pm_ca_list_policies(&policy_list);
1782
1783         for (i = 0; i < policy_count; i++) {
1784                 if (sysfs_streq(policy_list[i]->name, buf)) {
1785                         new_policy = policy_list[i];
1786                         break;
1787                 }
1788         }
1789
1790         if (!new_policy) {
1791                 dev_err(dev, "core_availability_policy: policy not found\n");
1792                 return -EINVAL;
1793         }
1794
1795         kbase_pm_ca_set_policy(kbdev, new_policy);
1796
1797         return count;
1798 }
1799
1800 /** The sysfs file @c core_availability_policy
1801  *
1802  * This is used for obtaining information about the available policies,
1803  * determining which policy is currently active, and changing the active
1804  * policy.
1805  */
1806 static DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_policy);
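
/*
 * Example usage (illustrative; the policy names shown depend on which
 * core-availability policies are compiled in):
 *
 *   # cat /sys/class/misc/mali0/device/core_availability_policy
 *   [demand] fixed
 *   # echo fixed > /sys/class/misc/mali0/device/core_availability_policy
 */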
1807
1808 /** Show callback for the @c core_mask sysfs file.
1809  *
1810  * This function is called to get the contents of the @c core_mask sysfs
1811  * file.
1812  *
1813  * @param dev   The device this sysfs file is for
1814  * @param attr  The attributes of the sysfs file
1815  * @param buf   The output buffer for the sysfs file contents
1816  *
1817  * @return The number of bytes output to @c buf.
1818  */
1819 static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char * const buf)
1820 {
1821         struct kbase_device *kbdev;
1822         ssize_t ret = 0;
1823
1824         kbdev = to_kbase_device(dev);
1825
1826         if (!kbdev)
1827                 return -ENODEV;
1828
1829         ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1830                         "Current core mask (JS0) : 0x%llX\n",
1831                         kbdev->pm.debug_core_mask[0]);
1832         ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1833                         "Current core mask (JS1) : 0x%llX\n",
1834                         kbdev->pm.debug_core_mask[1]);
1835         ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1836                         "Current core mask (JS2) : 0x%llX\n",
1837                         kbdev->pm.debug_core_mask[2]);
1838         ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1839                         "Available core mask : 0x%llX\n",
1840                         kbdev->gpu_props.props.raw_props.shader_present);
1841
1842         return ret;
1843 }
1844
1845 /** Store callback for the @c core_mask sysfs file.
1846  *
1847  * This function is called when the @c core_mask sysfs file is written to.
1848  *
 * @param dev   The device this sysfs file is for
1850  * @param attr  The attributes of the sysfs file
1851  * @param buf   The value written to the sysfs file
1852  * @param count The number of bytes written to the sysfs file
1853  *
1854  * @return @c count if the function succeeded. An error code on failure.
1855  */
1856 static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1857 {
1858         struct kbase_device *kbdev;
1859         u64 new_core_mask[3];
1860         int items;
1861
1862         kbdev = to_kbase_device(dev);
1863
1864         if (!kbdev)
1865                 return -ENODEV;
1866
1867         items = sscanf(buf, "%llx %llx %llx",
1868                         &new_core_mask[0], &new_core_mask[1],
1869                         &new_core_mask[2]);
1870
1871         if (items == 1)
1872                 new_core_mask[1] = new_core_mask[2] = new_core_mask[0];
1873
1874         if (items == 1 || items == 3) {
1875                 u64 shader_present =
1876                                 kbdev->gpu_props.props.raw_props.shader_present;
1877                 u64 group0_core_mask =
1878                                 kbdev->gpu_props.props.coherency_info.group[0].
1879                                 core_mask;
1880
1881                 if ((new_core_mask[0] & shader_present) != new_core_mask[0] ||
1882                                 !(new_core_mask[0] & group0_core_mask) ||
1883                         (new_core_mask[1] & shader_present) !=
1884                                                 new_core_mask[1] ||
1885                                 !(new_core_mask[1] & group0_core_mask) ||
1886                         (new_core_mask[2] & shader_present) !=
1887                                                 new_core_mask[2] ||
1888                                 !(new_core_mask[2] & group0_core_mask)) {
                        dev_err(dev, "core_mask: invalid core specification\n");
1890                         return -EINVAL;
1891                 }
1892
1893                 if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] ||
1894                                 kbdev->pm.debug_core_mask[1] !=
1895                                                 new_core_mask[1] ||
1896                                 kbdev->pm.debug_core_mask[2] !=
1897                                                 new_core_mask[2]) {
1898                         unsigned long flags;
1899
1900                         spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
1901
1902                         kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0],
1903                                         new_core_mask[1], new_core_mask[2]);
1904
1905                         spin_unlock_irqrestore(&kbdev->pm.power_change_lock,
1906                                         flags);
1907                 }
1908
1909                 return count;
1910         }
1911
1912         dev_err(kbdev->dev, "Couldn't process set_core_mask write operation.\n"
1913                 "Use format <core_mask>\n"
1914                 "or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
1915         return -EINVAL;
1916 }
1917
1918 /** The sysfs file @c core_mask.
1919  *
1920  * This is used to restrict shader core availability for debugging purposes.
1921  * Reading it will show the current core mask and the mask of cores available.
1922  * Writing to it will set the current core mask.
1923  */
1924 static DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask);
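
/*
 * Example usage (illustrative; a valid mask must be a subset of the GPU's
 * shader_present mask and must intersect core group 0):
 *
 *   # echo 0xf > /sys/class/misc/mali0/device/core_mask
 *       (same mask for all three job slots)
 *   # echo "0xf 0x3 0x3" > /sys/class/misc/mali0/device/core_mask
 *       (separate masks for JS0, JS1 and JS2)
 */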
1925
/** Store callback for the @c js_timeouts sysfs file.
 *
 * This function is called when the @c js_timeouts sysfs file is written to.
 * The file expects eight values separated by whitespace, corresponding to
 * the JS_SOFT_STOP_TICKS, JS_SOFT_STOP_TICKS_CL, JS_HARD_STOP_TICKS_SS,
 * JS_HARD_STOP_TICKS_CL, JS_HARD_STOP_TICKS_DUMPING, JS_RESET_TICKS_SS,
 * JS_RESET_TICKS_CL and JS_RESET_TICKS_DUMPING configuration values (in
 * that order), with the difference that the js_timeouts values are
 * expressed in MILLISECONDS.
 *
 * The js_timeouts sysfs file allows the values currently in use by the
 * job scheduler to be overridden. Note that a value needs to be other
 * than 0 for it to override the current job scheduler value.
1938  *
 * @param dev   The device this sysfs file is for
1940  * @param attr  The attributes of the sysfs file
1941  * @param buf   The value written to the sysfs file
1942  * @param count The number of bytes written to the sysfs file
1943  *
1944  * @return @c count if the function succeeded. An error code on failure.
1945  */
1946 static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1947 {
1948         struct kbase_device *kbdev;
1949         int items;
1950         long js_soft_stop_ms;
1951         long js_soft_stop_ms_cl;
1952         long js_hard_stop_ms_ss;
1953         long js_hard_stop_ms_cl;
1954         long js_hard_stop_ms_dumping;
1955         long js_reset_ms_ss;
1956         long js_reset_ms_cl;
1957         long js_reset_ms_dumping;
1958
1959         kbdev = to_kbase_device(dev);
1960         if (!kbdev)
1961                 return -ENODEV;
1962
1963         items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld",
1964                         &js_soft_stop_ms, &js_soft_stop_ms_cl,
1965                         &js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
1966                         &js_hard_stop_ms_dumping, &js_reset_ms_ss,
1967                         &js_reset_ms_cl, &js_reset_ms_dumping);
1968
1969         if (items == 8) {
1970                 u64 ticks;
1971
1972                 if (js_soft_stop_ms >= 0) {
1973                         ticks = js_soft_stop_ms * 1000000ULL;
1974                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
1975                         kbdev->js_soft_stop_ticks = ticks;
1976                 } else {
1977                         kbdev->js_soft_stop_ticks = -1;
1978                 }
1979
1980                 if (js_soft_stop_ms_cl >= 0) {
1981                         ticks = js_soft_stop_ms_cl * 1000000ULL;
1982                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
1983                         kbdev->js_soft_stop_ticks_cl = ticks;
1984                 } else {
1985                         kbdev->js_soft_stop_ticks_cl = -1;
1986                 }
1987
1988                 if (js_hard_stop_ms_ss >= 0) {
1989                         ticks = js_hard_stop_ms_ss * 1000000ULL;
1990                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
1991                         kbdev->js_hard_stop_ticks_ss = ticks;
1992                 } else {
1993                         kbdev->js_hard_stop_ticks_ss = -1;
1994                 }
1995
1996                 if (js_hard_stop_ms_cl >= 0) {
1997                         ticks = js_hard_stop_ms_cl * 1000000ULL;
1998                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
1999                         kbdev->js_hard_stop_ticks_cl = ticks;
2000                 } else {
2001                         kbdev->js_hard_stop_ticks_cl = -1;
2002                 }
2003
2004                 if (js_hard_stop_ms_dumping >= 0) {
2005                         ticks = js_hard_stop_ms_dumping * 1000000ULL;
2006                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2007                         kbdev->js_hard_stop_ticks_dumping = ticks;
2008                 } else {
2009                         kbdev->js_hard_stop_ticks_dumping = -1;
2010                 }
2011
2012                 if (js_reset_ms_ss >= 0) {
2013                         ticks = js_reset_ms_ss * 1000000ULL;
2014                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2015                         kbdev->js_reset_ticks_ss = ticks;
2016                 } else {
2017                         kbdev->js_reset_ticks_ss = -1;
2018                 }
2019
2020                 if (js_reset_ms_cl >= 0) {
2021                         ticks = js_reset_ms_cl * 1000000ULL;
2022                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2023                         kbdev->js_reset_ticks_cl = ticks;
2024                 } else {
2025                         kbdev->js_reset_ticks_cl = -1;
2026                 }
2027
2028                 if (js_reset_ms_dumping >= 0) {
2029                         ticks = js_reset_ms_dumping * 1000000ULL;
2030                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2031                         kbdev->js_reset_ticks_dumping = ticks;
2032                 } else {
2033                         kbdev->js_reset_ticks_dumping = -1;
2034                 }
2035
2036                 kbdev->js_timeouts_updated = true;
2037
                dev_dbg(kbdev->dev, "Overriding JS_SOFT_STOP_TICKS with %lu ticks (%ld ms)\n",
                                (unsigned long)kbdev->js_soft_stop_ticks,
                                js_soft_stop_ms);
                dev_dbg(kbdev->dev, "Overriding JS_SOFT_STOP_TICKS_CL with %lu ticks (%ld ms)\n",
                                (unsigned long)kbdev->js_soft_stop_ticks_cl,
                                js_soft_stop_ms_cl);
                dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_SS with %lu ticks (%ld ms)\n",
                                (unsigned long)kbdev->js_hard_stop_ticks_ss,
                                js_hard_stop_ms_ss);
                dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_CL with %lu ticks (%ld ms)\n",
                                (unsigned long)kbdev->js_hard_stop_ticks_cl,
                                js_hard_stop_ms_cl);
                dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_DUMPING with %lu ticks (%ld ms)\n",
                                (unsigned long)
                                        kbdev->js_hard_stop_ticks_dumping,
                                js_hard_stop_ms_dumping);
                dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_SS with %lu ticks (%ld ms)\n",
                                (unsigned long)kbdev->js_reset_ticks_ss,
                                js_reset_ms_ss);
                dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_CL with %lu ticks (%ld ms)\n",
                                (unsigned long)kbdev->js_reset_ticks_cl,
                                js_reset_ms_cl);
                dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_DUMPING with %lu ticks (%ld ms)\n",
                                (unsigned long)kbdev->js_reset_ticks_dumping,
                                js_reset_ms_dumping);
2063
2064                 return count;
2065         }
2066
2067         dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n"
2068                         "Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> <hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> <reset_ms_dumping>\n"
2069                         "Write 0 for no change, -1 to restore default timeout\n");
2070         return -EINVAL;
2071 }
2072
2073 /** Show callback for the @c js_timeouts sysfs file.
2074  *
2075  * This function is called to get the contents of the @c js_timeouts sysfs
 * file. It returns the last values written to the js_timeouts sysfs file.
 * If the file has not been written to yet, the values shown are the
 * settings currently in use.
 *
2079  * @param dev   The device this sysfs file is for
2080  * @param attr  The attributes of the sysfs file
2081  * @param buf   The output buffer for the sysfs file contents
2082  *
2083  * @return The number of bytes output to @c buf.
2084  */
2085 static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *attr, char * const buf)
2086 {
2087         struct kbase_device *kbdev;
2088         ssize_t ret;
2089         u64 ms;
2090         unsigned long js_soft_stop_ms;
2091         unsigned long js_soft_stop_ms_cl;
2092         unsigned long js_hard_stop_ms_ss;
2093         unsigned long js_hard_stop_ms_cl;
2094         unsigned long js_hard_stop_ms_dumping;
2095         unsigned long js_reset_ms_ss;
2096         unsigned long js_reset_ms_cl;
2097         unsigned long js_reset_ms_dumping;
2098         unsigned long ticks;
2099         u32 scheduling_period_ns;
2100
2101         kbdev = to_kbase_device(dev);
2102         if (!kbdev)
2103                 return -ENODEV;
2104
2105         /* If no contexts have been scheduled since js_timeouts was last written
2106          * to, the new timeouts might not have been latched yet. So check if an
2107          * update is pending and use the new values if necessary. */
2108         if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns > 0)
2109                 scheduling_period_ns = kbdev->js_scheduling_period_ns;
2110         else
2111                 scheduling_period_ns = kbdev->js_data.scheduling_period_ns;
2112
2113         if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks > 0)
2114                 ticks = kbdev->js_soft_stop_ticks;
2115         else
2116                 ticks = kbdev->js_data.soft_stop_ticks;
2117         ms = (u64)ticks * scheduling_period_ns;
2118         do_div(ms, 1000000UL);
2119         js_soft_stop_ms = (unsigned long)ms;
2120
2121         if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks_cl > 0)
2122                 ticks = kbdev->js_soft_stop_ticks_cl;
2123         else
2124                 ticks = kbdev->js_data.soft_stop_ticks_cl;
2125         ms = (u64)ticks * scheduling_period_ns;
2126         do_div(ms, 1000000UL);
2127         js_soft_stop_ms_cl = (unsigned long)ms;
2128
2129         if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_ss > 0)
2130                 ticks = kbdev->js_hard_stop_ticks_ss;
2131         else
2132                 ticks = kbdev->js_data.hard_stop_ticks_ss;
2133         ms = (u64)ticks * scheduling_period_ns;
2134         do_div(ms, 1000000UL);
2135         js_hard_stop_ms_ss = (unsigned long)ms;
2136
2137         if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_cl > 0)
2138                 ticks = kbdev->js_hard_stop_ticks_cl;
2139         else
2140                 ticks = kbdev->js_data.hard_stop_ticks_cl;
2141         ms = (u64)ticks * scheduling_period_ns;
2142         do_div(ms, 1000000UL);
2143         js_hard_stop_ms_cl = (unsigned long)ms;
2144
2145         if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_dumping > 0)
2146                 ticks = kbdev->js_hard_stop_ticks_dumping;
2147         else
2148                 ticks = kbdev->js_data.hard_stop_ticks_dumping;
2149         ms = (u64)ticks * scheduling_period_ns;
2150         do_div(ms, 1000000UL);
2151         js_hard_stop_ms_dumping = (unsigned long)ms;
2152
2153         if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_ss > 0)
2154                 ticks = kbdev->js_reset_ticks_ss;
2155         else
2156                 ticks = kbdev->js_data.gpu_reset_ticks_ss;
2157         ms = (u64)ticks * scheduling_period_ns;
2158         do_div(ms, 1000000UL);
2159         js_reset_ms_ss = (unsigned long)ms;
2160
2161         if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_cl > 0)
2162                 ticks = kbdev->js_reset_ticks_cl;
2163         else
2164                 ticks = kbdev->js_data.gpu_reset_ticks_cl;
2165         ms = (u64)ticks * scheduling_period_ns;
2166         do_div(ms, 1000000UL);
2167         js_reset_ms_cl = (unsigned long)ms;
2168
2169         if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_dumping > 0)
2170                 ticks = kbdev->js_reset_ticks_dumping;
2171         else
2172                 ticks = kbdev->js_data.gpu_reset_ticks_dumping;
2173         ms = (u64)ticks * scheduling_period_ns;
2174         do_div(ms, 1000000UL);
2175         js_reset_ms_dumping = (unsigned long)ms;
2176
2177         ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
2178                         js_soft_stop_ms, js_soft_stop_ms_cl,
2179                         js_hard_stop_ms_ss, js_hard_stop_ms_cl,
2180                         js_hard_stop_ms_dumping, js_reset_ms_ss,
2181                         js_reset_ms_cl, js_reset_ms_dumping);
2182
2183         if (ret >= PAGE_SIZE) {
2184                 buf[PAGE_SIZE - 2] = '\n';
2185                 buf[PAGE_SIZE - 1] = '\0';
2186                 ret = PAGE_SIZE - 1;
2187         }
2188
2189         return ret;
2190 }
2191
2192 /** The sysfs file @c js_timeouts.
2193  *
2194  * This is used to override the current job scheduler values for
 * JS_SOFT_STOP_TICKS
 * JS_SOFT_STOP_TICKS_CL
2197  * JS_HARD_STOP_TICKS_SS
2198  * JS_HARD_STOP_TICKS_CL
2199  * JS_HARD_STOP_TICKS_DUMPING
2200  * JS_RESET_TICKS_SS
2201  * JS_RESET_TICKS_CL
2202  * JS_RESET_TICKS_DUMPING.
2203  */
2204 static DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts);
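
/*
 * Example usage (illustrative; the sysfs path assumes the misc device is
 * registered as "mali0"): soft-stop non-CL jobs after 500 ms and leave the
 * other seven timeouts unchanged:
 *
 *   # echo "500 0 0 0 0 0 0 0" > /sys/class/misc/mali0/device/js_timeouts
 *
 * Each millisecond value is converted to scheduler ticks as
 * ticks = ms * 1000000 / scheduling_period_ns, so assuming the default
 * 100 ms scheduling period, 500 ms becomes 5 ticks.
 */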
2205
2206 /**
2207  * set_js_scheduling_period - Store callback for the js_scheduling_period sysfs
2208  *                            file
2209  * @dev:   The device the sysfs file is for
2210  * @attr:  The attributes of the sysfs file
2211  * @buf:   The value written to the sysfs file
2212  * @count: The number of bytes written to the sysfs file
2213  *
2214  * This function is called when the js_scheduling_period sysfs file is written
 * to. It checks the data written, and if valid, updates the
 * js_scheduling_period value.
2217  *
2218  * Return: @c count if the function succeeded. An error code on failure.
2219  */
2220 static ssize_t set_js_scheduling_period(struct device *dev,
2221                 struct device_attribute *attr, const char *buf, size_t count)
2222 {
2223         struct kbase_device *kbdev;
2224         int ret;
2225         unsigned int js_scheduling_period;
2226         u32 new_scheduling_period_ns;
2227         u32 old_period;
2228         u64 ticks;
2229
2230         kbdev = to_kbase_device(dev);
2231         if (!kbdev)
2232                 return -ENODEV;
2233
2234         ret = kstrtouint(buf, 0, &js_scheduling_period);
2235         if (ret || !js_scheduling_period) {
2236                 dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n"
2237                                 "Use format <js_scheduling_period_ms>\n");
2238                 return -EINVAL;
2239         }
2240
2241         new_scheduling_period_ns = js_scheduling_period * 1000000;
2242
2243         /* Update scheduling timeouts */
2244         mutex_lock(&kbdev->js_data.runpool_mutex);
2245
2246         /* If no contexts have been scheduled since js_timeouts was last written
2247          * to, the new timeouts might not have been latched yet. So check if an
2248          * update is pending and use the new values if necessary. */
2249
2250         /* Use previous 'new' scheduling period as a base if present. */
2251         if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns)
2252                 old_period = kbdev->js_scheduling_period_ns;
2253         else
2254                 old_period = kbdev->js_data.scheduling_period_ns;
2255
2256         if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks > 0)
2257                 ticks = (u64)kbdev->js_soft_stop_ticks * old_period;
2258         else
2259                 ticks = (u64)kbdev->js_data.soft_stop_ticks *
2260                                 kbdev->js_data.scheduling_period_ns;
2261         do_div(ticks, new_scheduling_period_ns);
2262         kbdev->js_soft_stop_ticks = ticks ? ticks : 1;
2263
2264         if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks_cl > 0)
2265                 ticks = (u64)kbdev->js_soft_stop_ticks_cl * old_period;
2266         else
2267                 ticks = (u64)kbdev->js_data.soft_stop_ticks_cl *
2268                                 kbdev->js_data.scheduling_period_ns;
2269         do_div(ticks, new_scheduling_period_ns);
2270         kbdev->js_soft_stop_ticks_cl = ticks ? ticks : 1;
2271
2272         if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_ss > 0)
2273                 ticks = (u64)kbdev->js_hard_stop_ticks_ss * old_period;
2274         else
2275                 ticks = (u64)kbdev->js_data.hard_stop_ticks_ss *
2276                                 kbdev->js_data.scheduling_period_ns;
2277         do_div(ticks, new_scheduling_period_ns);
2278         kbdev->js_hard_stop_ticks_ss = ticks ? ticks : 1;
2279
2280         if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_cl > 0)
2281                 ticks = (u64)kbdev->js_hard_stop_ticks_cl * old_period;
2282         else
2283                 ticks = (u64)kbdev->js_data.hard_stop_ticks_cl *
2284                                 kbdev->js_data.scheduling_period_ns;
2285         do_div(ticks, new_scheduling_period_ns);
2286         kbdev->js_hard_stop_ticks_cl = ticks ? ticks : 1;
2287
2288         if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_dumping > 0)
2289                 ticks = (u64)kbdev->js_hard_stop_ticks_dumping * old_period;
2290         else
2291                 ticks = (u64)kbdev->js_data.hard_stop_ticks_dumping *
2292                                 kbdev->js_data.scheduling_period_ns;
2293         do_div(ticks, new_scheduling_period_ns);
2294         kbdev->js_hard_stop_ticks_dumping = ticks ? ticks : 1;
2295
2296         if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_ss > 0)
2297                 ticks = (u64)kbdev->js_reset_ticks_ss * old_period;
2298         else
2299                 ticks = (u64)kbdev->js_data.gpu_reset_ticks_ss *
2300                                 kbdev->js_data.scheduling_period_ns;
2301         do_div(ticks, new_scheduling_period_ns);
2302         kbdev->js_reset_ticks_ss = ticks ? ticks : 1;
2303
2304         if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_cl > 0)
2305                 ticks = (u64)kbdev->js_reset_ticks_cl * old_period;
2306         else
2307                 ticks = (u64)kbdev->js_data.gpu_reset_ticks_cl *
2308                                 kbdev->js_data.scheduling_period_ns;
2309         do_div(ticks, new_scheduling_period_ns);
2310         kbdev->js_reset_ticks_cl = ticks ? ticks : 1;
2311
2312         if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_dumping > 0)
2313                 ticks = (u64)kbdev->js_reset_ticks_dumping * old_period;
2314         else
2315                 ticks = (u64)kbdev->js_data.gpu_reset_ticks_dumping *
2316                                 kbdev->js_data.scheduling_period_ns;
2317         do_div(ticks, new_scheduling_period_ns);
2318         kbdev->js_reset_ticks_dumping = ticks ? ticks : 1;
2319
2320         kbdev->js_scheduling_period_ns = new_scheduling_period_ns;
2321         kbdev->js_timeouts_updated = true;
2322
2323         mutex_unlock(&kbdev->js_data.runpool_mutex);
2324
2325         dev_dbg(kbdev->dev, "JS scheduling period: %dms\n",
2326                         js_scheduling_period);
2327
2328         return count;
2329 }
2330
2331 /**
2332  * show_js_scheduling_period - Show callback for the js_scheduling_period sysfs
2333  *                             entry.
2334  * @dev:  The device this sysfs file is for.
2335  * @attr: The attributes of the sysfs file.
 * @buf:  The output buffer to receive the scheduling period.
2337  *
2338  * This function is called to get the current period used for the JS scheduling
2339  * period.
2340  *
2341  * Return: The number of bytes output to buf.
2342  */
2343 static ssize_t show_js_scheduling_period(struct device *dev,
2344                 struct device_attribute *attr, char * const buf)
2345 {
2346         struct kbase_device *kbdev;
2347         u32 period;
2348         ssize_t ret;
2349
2350         kbdev = to_kbase_device(dev);
2351         if (!kbdev)
2352                 return -ENODEV;
2353
2354         if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns > 0)
2355                 period = kbdev->js_scheduling_period_ns;
2356         else
2357                 period = kbdev->js_data.scheduling_period_ns;
2358
2359         ret = scnprintf(buf, PAGE_SIZE, "%d\n",
2360                         period / 1000000);
2361
2362         return ret;
2363 }
2364
2365 static DEVICE_ATTR(js_scheduling_period, S_IRUGO | S_IWUSR,
2366                 show_js_scheduling_period, set_js_scheduling_period);
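
/*
 * Example usage (illustrative): switch to a 50 ms scheduling period. The
 * stored tick counts are rescaled so that the absolute timeout durations
 * are preserved:
 *
 *   # echo 50 > /sys/class/misc/mali0/device/js_scheduling_period
 */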
2367
2368 #if !MALI_CUSTOMER_RELEASE
2369 /** Store callback for the @c force_replay sysfs file.
2370  *
 * @param dev   The device this sysfs file is for
2372  * @param attr  The attributes of the sysfs file
2373  * @param buf   The value written to the sysfs file
2374  * @param count The number of bytes written to the sysfs file
2375  *
2376  * @return @c count if the function succeeded. An error code on failure.
2377  */
2378 static ssize_t set_force_replay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2379 {
2380         struct kbase_device *kbdev;
2381
2382         kbdev = to_kbase_device(dev);
2383         if (!kbdev)
2384                 return -ENODEV;
2385
2386         if (!strncmp("limit=", buf, MIN(6, count))) {
                unsigned int force_replay_limit;
2388                 int items = sscanf(buf, "limit=%u", &force_replay_limit);
2389
2390                 if (items == 1) {
2391                         kbdev->force_replay_random = false;
2392                         kbdev->force_replay_limit = force_replay_limit;
2393                         kbdev->force_replay_count = 0;
2394
2395                         return count;
2396                 }
2397         } else if (!strncmp("random_limit", buf, MIN(12, count))) {
2398                 kbdev->force_replay_random = true;
2399                 kbdev->force_replay_count = 0;
2400
2401                 return count;
2402         } else if (!strncmp("norandom_limit", buf, MIN(14, count))) {
2403                 kbdev->force_replay_random = false;
2404                 kbdev->force_replay_limit = KBASEP_FORCE_REPLAY_DISABLED;
2405                 kbdev->force_replay_count = 0;
2406
2407                 return count;
2408         } else if (!strncmp("core_req=", buf, MIN(9, count))) {
2409                 unsigned int core_req;
2410                 int items = sscanf(buf, "core_req=%x", &core_req);
2411
2412                 if (items == 1) {
2413                         kbdev->force_replay_core_req = (base_jd_core_req)core_req;
2414
2415                         return count;
2416                 }
2417         }
2418         dev_err(kbdev->dev, "Couldn't process force_replay write operation.\nPossible settings: limit=<limit>, random_limit, norandom_limit, core_req=<core_req>\n");
2419         return -EINVAL;
2420 }
2421
2422 /** Show callback for the @c force_replay sysfs file.
2423  *
2424  * This function is called to get the contents of the @c force_replay sysfs
 * file. It returns the last value written to the force_replay sysfs file.
 * If the file has not been written to yet, the values will be 0.
2427  *
2428  * @param dev   The device this sysfs file is for
2429  * @param attr  The attributes of the sysfs file
2430  * @param buf   The output buffer for the sysfs file contents
2431  *
2432  * @return The number of bytes output to @c buf.
2433  */
2434 static ssize_t show_force_replay(struct device *dev,
2435                 struct device_attribute *attr, char * const buf)
2436 {
2437         struct kbase_device *kbdev;
2438         ssize_t ret;
2439
2440         kbdev = to_kbase_device(dev);
2441         if (!kbdev)
2442                 return -ENODEV;
2443
2444         if (kbdev->force_replay_random)
2445                 ret = scnprintf(buf, PAGE_SIZE,
2446                                 "limit=0\nrandom_limit\ncore_req=%x\n",
2447                                 kbdev->force_replay_core_req);
2448         else
2449                 ret = scnprintf(buf, PAGE_SIZE,
2450                                 "limit=%u\nnorandom_limit\ncore_req=%x\n",
2451                                 kbdev->force_replay_limit,
2452                                 kbdev->force_replay_core_req);
2453
2454         if (ret >= PAGE_SIZE) {
2455                 buf[PAGE_SIZE - 2] = '\n';
2456                 buf[PAGE_SIZE - 1] = '\0';
2457                 ret = PAGE_SIZE - 1;
2458         }
2459
2460         return ret;
2461 }
2462
/** The sysfs file @c force_replay. */
2466 static DEVICE_ATTR(force_replay, S_IRUGO | S_IWUSR, show_force_replay,
2467                 set_force_replay);
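
/*
 * Example usage (illustrative), matching the formats parsed above:
 *
 *   # echo "limit=3" > /sys/class/misc/mali0/device/force_replay
 *   # echo random_limit > /sys/class/misc/mali0/device/force_replay
 *   # echo norandom_limit > /sys/class/misc/mali0/device/force_replay
 *   # echo "core_req=b" > /sys/class/misc/mali0/device/force_replay  (hex)
 */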
2468 #endif /* !MALI_CUSTOMER_RELEASE */
2469
2470 #ifdef CONFIG_MALI_DEBUG
2471 static ssize_t set_js_softstop_always(struct device *dev,
2472                 struct device_attribute *attr, const char *buf, size_t count)
2473 {
2474         struct kbase_device *kbdev;
2475         int ret;
2476         int softstop_always;
2477
2478         kbdev = to_kbase_device(dev);
2479         if (!kbdev)
2480                 return -ENODEV;
2481
2482         ret = kstrtoint(buf, 0, &softstop_always);
2483         if (ret || ((softstop_always != 0) && (softstop_always != 1))) {
2484                 dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n"
2485                                 "Use format <soft_stop_always>\n");
2486                 return -EINVAL;
2487         }
2488
2489         kbdev->js_data.softstop_always = (bool) softstop_always;
2490         dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n",
2491                         (kbdev->js_data.softstop_always) ?
2492                         "Enabled" : "Disabled");
2493         return count;
2494 }
2495
2496 static ssize_t show_js_softstop_always(struct device *dev,
2497                 struct device_attribute *attr, char * const buf)
2498 {
2499         struct kbase_device *kbdev;
2500         ssize_t ret;
2501
2502         kbdev = to_kbase_device(dev);
2503         if (!kbdev)
2504                 return -ENODEV;
2505
2506         ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);
2507
2508         if (ret >= PAGE_SIZE) {
2509                 buf[PAGE_SIZE - 2] = '\n';
2510                 buf[PAGE_SIZE - 1] = '\0';
2511                 ret = PAGE_SIZE - 1;
2512         }
2513
2514         return ret;
2515 }
2516
/*
 * By default, soft-stops are disabled when only a single context is present.
 * The ability to enable soft-stop in that case is useful for debug and
 * unit-testing purposes (see the CL t6xx_stress_1 unit test for an example
 * that uses this feature).
 */
2522 static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
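
/*
 * Example usage (illustrative): enable soft-stop even when only a single
 * context is present, then read the setting back:
 *
 *   # echo 1 > /sys/class/misc/mali0/device/js_softstop_always
 *   # cat /sys/class/misc/mali0/device/js_softstop_always
 *   1
 */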
2523 #endif /* CONFIG_MALI_DEBUG */
2524
2525 #ifdef CONFIG_MALI_DEBUG
2526 typedef void (kbasep_debug_command_func) (struct kbase_device *);
2527
2528 enum kbasep_debug_command_code {
2529         KBASEP_DEBUG_COMMAND_DUMPTRACE,
2530
2531         /* This must be the last enum */
2532         KBASEP_DEBUG_COMMAND_COUNT
2533 };
2534
2535 struct kbasep_debug_command {
2536         char *str;
2537         kbasep_debug_command_func *func;
2538 };
2539
2540 /** Debug commands supported by the driver */
2541 static const struct kbasep_debug_command debug_commands[] = {
2542         {
2543          .str = "dumptrace",
2544          .func = &kbasep_trace_dump,
2545          }
2546 };
2547
2548 /** Show callback for the @c debug_command sysfs file.
2549  *
2550  * This function is called to get the contents of the @c debug_command sysfs
2551  * file. This is a list of the available debug commands, separated by newlines.
2552  *
2553  * @param dev   The device this sysfs file is for
2554  * @param attr  The attributes of the sysfs file
2555  * @param buf   The output buffer for the sysfs file contents
2556  *
2557  * @return The number of bytes output to @c buf.
2558  */
2559 static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char * const buf)
2560 {
2561         struct kbase_device *kbdev;
2562         int i;
2563         ssize_t ret = 0;
2564
2565         kbdev = to_kbase_device(dev);
2566
2567         if (!kbdev)
2568                 return -ENODEV;
2569
2570         for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
2571                 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);
2572
2573         if (ret >= PAGE_SIZE) {
2574                 buf[PAGE_SIZE - 2] = '\n';
2575                 buf[PAGE_SIZE - 1] = '\0';
2576                 ret = PAGE_SIZE - 1;
2577         }
2578
2579         return ret;
2580 }
2581
2582 /** Store callback for the @c debug_command sysfs file.
2583  *
2584  * This function is called when the @c debug_command sysfs file is written to.
2585  * It matches the requested command against the available commands, and if
2586  * a matching command is found calls the associated function from
2587  * @ref debug_commands to issue the command.
2588  *
 * @param dev   The device this sysfs file is for
2590  * @param attr  The attributes of the sysfs file
2591  * @param buf   The value written to the sysfs file
2592  * @param count The number of bytes written to the sysfs file
2593  *
2594  * @return @c count if the function succeeded. An error code on failure.
2595  */
2596 static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2597 {
2598         struct kbase_device *kbdev;
2599         int i;
2600
2601         kbdev = to_kbase_device(dev);
2602
2603         if (!kbdev)
2604                 return -ENODEV;
2605
2606         for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
2607                 if (sysfs_streq(debug_commands[i].str, buf)) {
2608                         debug_commands[i].func(kbdev);
2609                         return count;
2610                 }
2611         }
2612
2613         /* Debug Command not found */
2614         dev_err(dev, "debug_command: command not known\n");
2615         return -EINVAL;
2616 }
2617
2618 /** The sysfs file @c debug_command.
2619  *
2620  * This is used to issue general debug commands to the device driver.
2621  * Reading it will produce a list of debug commands, separated by newlines.
2622  * Writing to it with one of those commands will issue said command.
2623  */
2624 static DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug);
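
/*
 * Example usage (illustrative): list the available commands, then issue
 * the "dumptrace" command:
 *
 *   # cat /sys/class/misc/mali0/device/debug_command
 *   dumptrace
 *   # echo dumptrace > /sys/class/misc/mali0/device/debug_command
 */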
2625 #endif /* CONFIG_MALI_DEBUG */
2626
2627 /**
2628  * kbase_show_gpuinfo - Show callback for the gpuinfo sysfs entry.
2629  * @dev: The device this sysfs file is for.
2630  * @attr: The attributes of the sysfs file.
2631  * @buf: The output buffer to receive the GPU information.
2632  *
2633  * This function is called to get a description of the present Mali
2634  * GPU via the gpuinfo sysfs entry.  This includes the GPU family, the
2635  * number of cores, the hardware version and the raw product id.  For
2636  * example:
2637  *
2638  *    Mali-T60x MP4 r0p0 0x6956
2639  *
2640  * Return: The number of bytes output to buf.
2641  */
2642 static ssize_t kbase_show_gpuinfo(struct device *dev,
2643                                   struct device_attribute *attr, char *buf)
2644 {
2645         static const struct gpu_product_id_name {
2646                 unsigned id;
2647                 char *name;
2648         } gpu_product_id_names[] = {
2649                 { .id = GPU_ID_PI_T60X, .name = "Mali-T60x" },
2650                 { .id = GPU_ID_PI_T62X, .name = "Mali-T62x" },
2651                 { .id = GPU_ID_PI_T72X, .name = "Mali-T72x" },
2652                 { .id = GPU_ID_PI_T76X, .name = "Mali-T76x" },
2653                 { .id = GPU_ID_PI_T82X, .name = "Mali-T82x" },
2654                 { .id = GPU_ID_PI_T83X, .name = "Mali-T83x" },
2655                 { .id = GPU_ID_PI_T86X, .name = "Mali-T86x" },
2656                 { .id = GPU_ID_PI_TFRX, .name = "Mali-T88x" },
2657         };
2658         const char *product_name = "(Unknown Mali GPU)";
2659         struct kbase_device *kbdev;
2660         u32 gpu_id;
2661         unsigned product_id, product_id_mask;
2662         unsigned i;
2663         bool is_new_format;
2664
2665         kbdev = to_kbase_device(dev);
2666         if (!kbdev)
2667                 return -ENODEV;
2668
2669         gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
2670         product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
2671         is_new_format = GPU_ID_IS_NEW_FORMAT(product_id);
2672         product_id_mask =
2673                 (is_new_format ?
2674                         GPU_ID2_PRODUCT_MODEL :
2675                         GPU_ID_VERSION_PRODUCT_ID) >>
2676                 GPU_ID_VERSION_PRODUCT_ID_SHIFT;
2677
2678         for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
2679                 const struct gpu_product_id_name *p = &gpu_product_id_names[i];
2680
2681                 if ((GPU_ID_IS_NEW_FORMAT(p->id) == is_new_format) &&
2682                     (p->id & product_id_mask) ==
2683                     (product_id & product_id_mask)) {
2684                         product_name = p->name;
2685                         break;
2686                 }
2687         }
2688
2689         return scnprintf(buf, PAGE_SIZE, "%s MP%d r%dp%d 0x%04X\n",
2690                 product_name, kbdev->gpu_props.num_cores,
2691                 (gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT,
2692                 (gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT,
2693                 product_id);
2694 }
2695 static DEVICE_ATTR(gpuinfo, S_IRUGO, kbase_show_gpuinfo, NULL);
2696
2697 /**
2698  * set_dvfs_period - Store callback for the dvfs_period sysfs file.
 * @dev:   The device this sysfs file is for
2700  * @attr:  The attributes of the sysfs file
2701  * @buf:   The value written to the sysfs file
2702  * @count: The number of bytes written to the sysfs file
2703  *
 * This function is called when the dvfs_period sysfs file is written to. It
 * checks the data written, and if valid, updates the DVFS period variable.
2706  *
2707  * Return: @c count if the function succeeded. An error code on failure.
2708  */
2709 static ssize_t set_dvfs_period(struct device *dev,
2710                 struct device_attribute *attr, const char *buf, size_t count)
2711 {
2712         struct kbase_device *kbdev;
2713         int ret;
2714         int dvfs_period;
2715
2716         kbdev = to_kbase_device(dev);
2717         if (!kbdev)
2718                 return -ENODEV;
2719
2720         ret = kstrtoint(buf, 0, &dvfs_period);
2721         if (ret || dvfs_period <= 0) {
2722                 dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n"
2723                                 "Use format <dvfs_period_ms>\n");
2724                 return -EINVAL;
2725         }
2726
2727         kbdev->pm.dvfs_period = dvfs_period;
2728         dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period);
2729
2730         return count;
2731 }
2732
2733 /**
2734  * show_dvfs_period - Show callback for the dvfs_period sysfs entry.
2735  * @dev:  The device this sysfs file is for.
2736  * @attr: The attributes of the sysfs file.
 * @buf:  The output buffer to receive the current DVFS period.
2738  *
2739  * This function is called to get the current period used for the DVFS sample
2740  * timer.
2741  *
2742  * Return: The number of bytes output to buf.
2743  */
2744 static ssize_t show_dvfs_period(struct device *dev,
2745                 struct device_attribute *attr, char * const buf)
2746 {
2747         struct kbase_device *kbdev;
2748         ssize_t ret;
2749
2750         kbdev = to_kbase_device(dev);
2751         if (!kbdev)
2752                 return -ENODEV;
2753
2754         ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period);
2755
2756         return ret;
2757 }
2758
2759 static DEVICE_ATTR(dvfs_period, S_IRUGO | S_IWUSR, show_dvfs_period,
2760                 set_dvfs_period);
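
/*
 * Example usage (the sysfs path is illustrative and varies per platform):
 * set the DVFS sampling period to 100 ms and read it back.
 *
 *   $ echo 100 > /sys/devices/platform/mali.0/dvfs_period
 *   $ cat /sys/devices/platform/mali.0/dvfs_period
 *   100
 */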
2761
2762 /**
2763  * set_pm_poweroff - Store callback for the pm_poweroff sysfs file.
 * @dev:   The device this sysfs file is for
2765  * @attr:  The attributes of the sysfs file
2766  * @buf:   The value written to the sysfs file
2767  * @count: The number of bytes written to the sysfs file
2768  *
2769  * This function is called when the pm_poweroff sysfs file is written to.
2770  *
2771  * This file contains three values separated by whitespace. The values
2772  * are gpu_poweroff_time (the period of the poweroff timer, in ns),
2773  * poweroff_shader_ticks (the number of poweroff timer ticks before an idle
2774  * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer
2775  * ticks before the GPU is powered off), in that order.
2776  *
 * Return: count if the function succeeded. An error code on failure.
2778  */
2779 static ssize_t set_pm_poweroff(struct device *dev,
2780                 struct device_attribute *attr, const char *buf, size_t count)
2781 {
2782         struct kbase_device *kbdev;
2783         int items;
	u64 gpu_poweroff_time;
	unsigned int poweroff_shader_ticks, poweroff_gpu_ticks;
2786
2787         kbdev = to_kbase_device(dev);
2788         if (!kbdev)
2789                 return -ENODEV;
2790
2791         items = sscanf(buf, "%llu %u %u", &gpu_poweroff_time,
2792                         &poweroff_shader_ticks,
2793                         &poweroff_gpu_ticks);
2794         if (items != 3) {
2795                 dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n"
2796                                 "Use format <gpu_poweroff_time_ns> <poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
2797                 return -EINVAL;
2798         }
2799
2800         kbdev->pm.gpu_poweroff_time = HR_TIMER_DELAY_NSEC(gpu_poweroff_time);
2801         kbdev->pm.poweroff_shader_ticks = poweroff_shader_ticks;
2802         kbdev->pm.poweroff_gpu_ticks = poweroff_gpu_ticks;
2803
2804         return count;
2805 }
2806
2807 /**
2808  * show_pm_poweroff - Show callback for the pm_poweroff sysfs entry.
2809  * @dev:  The device this sysfs file is for.
2810  * @attr: The attributes of the sysfs file.
 * @buf:  The output buffer to receive the poweroff parameters.
 *
 * This function is called to get the current poweroff timer period and the
 * shader and GPU poweroff tick counts.
2815  *
2816  * Return: The number of bytes output to buf.
2817  */
2818 static ssize_t show_pm_poweroff(struct device *dev,
2819                 struct device_attribute *attr, char * const buf)
2820 {
2821         struct kbase_device *kbdev;
2822         ssize_t ret;
2823
2824         kbdev = to_kbase_device(dev);
2825         if (!kbdev)
2826                 return -ENODEV;
2827
2828         ret = scnprintf(buf, PAGE_SIZE, "%llu %u %u\n",
2829                         ktime_to_ns(kbdev->pm.gpu_poweroff_time),
2830                         kbdev->pm.poweroff_shader_ticks,
2831                         kbdev->pm.poweroff_gpu_ticks);
2832
2833         return ret;
2834 }
2835
2836 static DEVICE_ATTR(pm_poweroff, S_IRUGO | S_IWUSR, show_pm_poweroff,
2837                 set_pm_poweroff);
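
/*
 * Example usage (path and values are illustrative): use a 500000 ns (500 us)
 * poweroff timer, power off idle shaders after 2 ticks and the whole GPU
 * after 4 ticks.
 *
 *   $ echo "500000 2 4" > /sys/devices/platform/mali.0/pm_poweroff
 */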
2838
2839 /**
2840  * set_reset_timeout - Store callback for the reset_timeout sysfs file.
 * @dev:   The device this sysfs file is for
2842  * @attr:  The attributes of the sysfs file
2843  * @buf:   The value written to the sysfs file
2844  * @count: The number of bytes written to the sysfs file
2845  *
2846  * This function is called when the reset_timeout sysfs file is written to. It
2847  * checks the data written, and if valid updates the reset timeout.
2848  *
 * Return: count if the function succeeded. An error code on failure.
2850  */
2851 static ssize_t set_reset_timeout(struct device *dev,
2852                 struct device_attribute *attr, const char *buf, size_t count)
2853 {
2854         struct kbase_device *kbdev;
2855         int ret;
2856         int reset_timeout;
2857
2858         kbdev = to_kbase_device(dev);
2859         if (!kbdev)
2860                 return -ENODEV;
2861
2862         ret = kstrtoint(buf, 0, &reset_timeout);
2863         if (ret || reset_timeout <= 0) {
2864                 dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n"
2865                                 "Use format <reset_timeout_ms>\n");
2866                 return -EINVAL;
2867         }
2868
2869         kbdev->reset_timeout_ms = reset_timeout;
2870         dev_dbg(kbdev->dev, "Reset timeout: %dms\n", reset_timeout);
2871
2872         return count;
2873 }
2874
2875 /**
2876  * show_reset_timeout - Show callback for the reset_timeout sysfs entry.
2877  * @dev:  The device this sysfs file is for.
2878  * @attr: The attributes of the sysfs file.
 * @buf:  The output buffer to receive the current reset timeout.
2880  *
2881  * This function is called to get the current reset timeout.
2882  *
2883  * Return: The number of bytes output to buf.
2884  */
2885 static ssize_t show_reset_timeout(struct device *dev,
2886                 struct device_attribute *attr, char * const buf)
2887 {
2888         struct kbase_device *kbdev;
2889         ssize_t ret;
2890
2891         kbdev = to_kbase_device(dev);
2892         if (!kbdev)
2893                 return -ENODEV;
2894
2895         ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms);
2896
2897         return ret;
2898 }
2899
2900 static DEVICE_ATTR(reset_timeout, S_IRUGO | S_IWUSR, show_reset_timeout,
2901                 set_reset_timeout);
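
/*
 * Example usage (illustrative path): set the GPU reset timeout to 500 ms.
 *
 *   $ echo 500 > /sys/devices/platform/mali.0/reset_timeout
 */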
2902
2903
2904
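/**
 * show_mem_pool_size - Show callback for the mem_pool_size sysfs entry.
 * @dev:  The device this sysfs file is for.
 * @attr: The attributes of the sysfs file.
 * @buf:  The output buffer to receive the pool size.
 *
 * This function is called to get the current size of the device memory pool.
 *
 * Return: The number of bytes output to buf.
 */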
2905 static ssize_t show_mem_pool_size(struct device *dev,
2906                 struct device_attribute *attr, char * const buf)
2907 {
2908         struct kbase_device *kbdev;
2909         ssize_t ret;
2910
2911         kbdev = to_kbase_device(dev);
2912         if (!kbdev)
2913                 return -ENODEV;
2914
2915         ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
2916                         kbase_mem_pool_size(&kbdev->mem_pool));
2917
2918         return ret;
2919 }
2920
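/**
 * set_mem_pool_size - Store callback for the mem_pool_size sysfs file.
 * @dev:   The device this sysfs file is for
 * @attr:  The attributes of the sysfs file
 * @buf:   The value written to the sysfs file
 * @count: The number of bytes written to the sysfs file
 *
 * This function is called when the mem_pool_size sysfs file is written to.
 * The device memory pool is trimmed to the new size.
 *
 * Return: count if the function succeeded. An error code on failure.
 */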
2921 static ssize_t set_mem_pool_size(struct device *dev,
2922                 struct device_attribute *attr, const char *buf, size_t count)
2923 {
2924         struct kbase_device *kbdev;
	unsigned long new_size;
2926         int err;
2927
2928         kbdev = to_kbase_device(dev);
2929         if (!kbdev)
2930                 return -ENODEV;
2931
	err = kstrtoul(buf, 0, &new_size);
2933         if (err)
2934                 return err;
2935
2936         kbase_mem_pool_trim(&kbdev->mem_pool, new_size);
2937
2938         return count;
2939 }
2940
2941 static DEVICE_ATTR(mem_pool_size, S_IRUGO | S_IWUSR, show_mem_pool_size,
2942                 set_mem_pool_size);
2943
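/**
 * show_mem_pool_max_size - Show callback for the mem_pool_max_size sysfs
 *                          entry.
 * @dev:  The device this sysfs file is for.
 * @attr: The attributes of the sysfs file.
 * @buf:  The output buffer to receive the pool maximum size.
 *
 * This function is called to get the maximum size of the device memory pool.
 *
 * Return: The number of bytes output to buf.
 */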
2944 static ssize_t show_mem_pool_max_size(struct device *dev,
2945                 struct device_attribute *attr, char * const buf)
2946 {
2947         struct kbase_device *kbdev;
2948         ssize_t ret;
2949
2950         kbdev = to_kbase_device(dev);
2951         if (!kbdev)
2952                 return -ENODEV;
2953
2954         ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
2955                         kbase_mem_pool_max_size(&kbdev->mem_pool));
2956
2957         return ret;
2958 }
2959
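/**
 * set_mem_pool_max_size - Store callback for the mem_pool_max_size sysfs
 *                         file.
 * @dev:   The device this sysfs file is for
 * @attr:  The attributes of the sysfs file
 * @buf:   The value written to the sysfs file
 * @count: The number of bytes written to the sysfs file
 *
 * This function is called when the mem_pool_max_size sysfs file is written
 * to. The maximum size of the device memory pool is updated.
 *
 * Return: count if the function succeeded. An error code on failure.
 */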
2960 static ssize_t set_mem_pool_max_size(struct device *dev,
2961                 struct device_attribute *attr, const char *buf, size_t count)
2962 {
2963         struct kbase_device *kbdev;
	unsigned long new_max_size;
2965         int err;
2966
2967         kbdev = to_kbase_device(dev);
2968         if (!kbdev)
2969                 return -ENODEV;
2970
	err = kstrtoul(buf, 0, &new_max_size);
	if (err)
		return err;
2974
2975         kbase_mem_pool_set_max_size(&kbdev->mem_pool, new_max_size);
2976
2977         return count;
2978 }
2979
2980 static DEVICE_ATTR(mem_pool_max_size, S_IRUGO | S_IWUSR, show_mem_pool_max_size,
2981                 set_mem_pool_max_size);
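
/*
 * Example usage (the sysfs path is illustrative, and the interpretation of
 * the values as page counts is an assumption based on the kbase_mem_pool
 * interface): cap the pool and then drain whatever it currently holds.
 *
 *   $ echo 4096 > /sys/devices/platform/mali.0/mem_pool_max_size
 *   $ echo 0 > /sys/devices/platform/mali.0/mem_pool_size
 */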
2982
2983
2984
2985 static int kbasep_secure_mode_init(struct kbase_device *kbdev)
2986 {
2987
2988 #ifdef SECURE_CALLBACKS
2989         kbdev->secure_ops = SECURE_CALLBACKS;
2990         kbdev->secure_mode_support = false;
2991
2992         if (kbdev->secure_ops) {
2993                 int err;
2994
2995                 /* Make sure secure mode is disabled on startup */
2996                 err = kbdev->secure_ops->secure_mode_disable(kbdev);
2997
2998                 /* secure_mode_disable() returns -EINVAL if not supported */
2999                 kbdev->secure_mode_support = (err != -EINVAL);
3000         }
3001 #endif
3002
3003         return 0;
3004 }
3005
3006 #ifdef CONFIG_MALI_NO_MALI
3007 static int kbase_common_reg_map(struct kbase_device *kbdev)
3008 {
3009         return 0;
3010 }
3011 static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
3012 {
3013 }
3014 #else /* CONFIG_MALI_NO_MALI */
3015 static int kbase_common_reg_map(struct kbase_device *kbdev)
3016 {
3017         int err = -ENOMEM;
3018
3019         if (!request_mem_region(kbdev->reg_start, kbdev->reg_size, dev_name(kbdev->dev))) {
3020                 dev_err(kbdev->dev, "Register window unavailable\n");
3021                 err = -EIO;
3022                 goto out_region;
3023         }
3024
3025         kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size);
3026         if (!kbdev->reg) {
3027                 dev_err(kbdev->dev, "Can't remap register window\n");
3028                 err = -EINVAL;
3029                 goto out_ioremap;
3030         }
3031
3032         return 0;
3033
3034  out_ioremap:
3035         release_mem_region(kbdev->reg_start, kbdev->reg_size);
3036  out_region:
3037         return err;
3038 }
3039
3040 static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
3041 {
3042         iounmap(kbdev->reg);
3043         release_mem_region(kbdev->reg_start, kbdev->reg_size);
3044 }
3045 #endif /* CONFIG_MALI_NO_MALI */
3046
3047
3048 #ifdef CONFIG_DEBUG_FS
3049
3050 #if KBASE_GPU_RESET_EN
3051 #include <mali_kbase_hwaccess_jm.h>
3052
3053 static void trigger_quirks_reload(struct kbase_device *kbdev)
3054 {
3055         kbase_pm_context_active(kbdev);
3056         if (kbase_prepare_to_reset_gpu(kbdev))
3057                 kbase_reset_gpu(kbdev);
3058         kbase_pm_context_idle(kbdev);
3059 }
3060
3061 #define MAKE_QUIRK_ACCESSORS(type) \
3062 static int type##_quirks_set(void *data, u64 val) \
3063 { \
3064         struct kbase_device *kbdev; \
3065         kbdev = (struct kbase_device *)data; \
3066         kbdev->hw_quirks_##type = (u32)val; \
3067         trigger_quirks_reload(kbdev); \
3068         return 0;\
3069 } \
3070 \
3071 static int type##_quirks_get(void *data, u64 *val) \
3072 { \
3073         struct kbase_device *kbdev;\
3074         kbdev = (struct kbase_device *)data;\
3075         *val = kbdev->hw_quirks_##type;\
3076         return 0;\
3077 } \
3078 DEFINE_SIMPLE_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get,\
3079                 type##_quirks_set, "%llu\n")
3080
3081 MAKE_QUIRK_ACCESSORS(sc);
3082 MAKE_QUIRK_ACCESSORS(tiler);
3083 MAKE_QUIRK_ACCESSORS(mmu);
3084
3085 #endif /* KBASE_GPU_RESET_EN */
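
/*
 * For reference, MAKE_QUIRK_ACCESSORS(sc) above expands (in sketch form) to:
 *
 *   static int sc_quirks_set(void *data, u64 val)  - stores the written value
 *           in kbdev->hw_quirks_sc and triggers a GPU reset so the new quirks
 *           value is reloaded into the hardware;
 *   static int sc_quirks_get(void *data, u64 *val) - reads the value back;
 *   DEFINE_SIMPLE_ATTRIBUTE(fops_sc_quirks, ...)   - the debugfs fops.
 *
 * The fops are wired up to debugfs files in kbase_device_debugfs_init()
 * below, so the quirks can be inspected and overridden at runtime, e.g.
 * (path is illustrative):
 *
 *   $ cat /sys/kernel/debug/mali0/quirks_sc
 *   $ echo 0 > /sys/kernel/debug/mali0/quirks_sc
 */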
3086
3087 static int kbasep_secure_mode_seq_show(struct seq_file *m, void *p)
3088 {
3089         struct kbase_device *kbdev = m->private;
3090
3091         if (!kbdev->secure_mode_support)
3092                 seq_puts(m, "unsupported\n");
3093         else
3094                 seq_printf(m, "%s\n", kbdev->secure_mode ? "Y" : "N");
3095
3096         return 0;
3097 }
3098
3099 static int kbasep_secure_mode_debugfs_open(struct inode *in, struct file *file)
3100 {
3101         return single_open(file, kbasep_secure_mode_seq_show, in->i_private);
3102 }
3103
3104 static const struct file_operations kbasep_secure_mode_debugfs_fops = {
3105         .open = kbasep_secure_mode_debugfs_open,
3106         .read = seq_read,
3107         .llseek = seq_lseek,
3108         .release = single_release,
3109 };
3110
3111 static int kbase_device_debugfs_init(struct kbase_device *kbdev)
3112 {
3113         struct dentry *debugfs_ctx_defaults_directory;
3114         int err;
3115
3116         kbdev->mali_debugfs_directory = debugfs_create_dir(kbdev->devname,
3117                         NULL);
3118         if (!kbdev->mali_debugfs_directory) {
3119                 dev_err(kbdev->dev, "Couldn't create mali debugfs directory\n");
3120                 err = -ENOMEM;
3121                 goto out;
3122         }
3123
3124         kbdev->debugfs_ctx_directory = debugfs_create_dir("ctx",
3125                         kbdev->mali_debugfs_directory);
3126         if (!kbdev->debugfs_ctx_directory) {
3127                 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx directory\n");
3128                 err = -ENOMEM;
3129                 goto out;
3130         }
3131
3132         debugfs_ctx_defaults_directory = debugfs_create_dir("defaults",
3133                         kbdev->debugfs_ctx_directory);
3134         if (!debugfs_ctx_defaults_directory) {
3135                 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx defaults directory\n");
3136                 err = -ENOMEM;
3137                 goto out;
3138         }
3139
3140 #if !MALI_CUSTOMER_RELEASE
3141         kbasep_regs_dump_debugfs_add(kbdev);
3142 #endif /* !MALI_CUSTOMER_RELEASE */
3143
3144         kbase_debug_job_fault_debugfs_init(kbdev);
3145         kbasep_gpu_memory_debugfs_init(kbdev);
3146 #if KBASE_GPU_RESET_EN
3147         debugfs_create_file("quirks_sc", 0644,
3148                         kbdev->mali_debugfs_directory, kbdev,
3149                         &fops_sc_quirks);
3150         debugfs_create_file("quirks_tiler", 0644,
3151                         kbdev->mali_debugfs_directory, kbdev,
3152                         &fops_tiler_quirks);
3153         debugfs_create_file("quirks_mmu", 0644,
3154                         kbdev->mali_debugfs_directory, kbdev,
3155                         &fops_mmu_quirks);
3156 #endif /* KBASE_GPU_RESET_EN */
3157
3158 #ifndef CONFIG_MALI_COH_USER
3159         debugfs_create_bool("infinite_cache", 0644,
3160                         debugfs_ctx_defaults_directory,
3161                         &kbdev->infinite_cache_active_default);
3162 #endif /* CONFIG_MALI_COH_USER */
3163
3164         debugfs_create_size_t("mem_pool_max_size", 0644,
3165                         debugfs_ctx_defaults_directory,
3166                         &kbdev->mem_pool_max_size_default);
3167
3168 #if KBASE_TRACE_ENABLE
3169         kbasep_trace_debugfs_init(kbdev);
3170 #endif /* KBASE_TRACE_ENABLE */
3171
3172 #ifdef CONFIG_MALI_TRACE_TIMELINE
3173         kbasep_trace_timeline_debugfs_init(kbdev);
3174 #endif /* CONFIG_MALI_TRACE_TIMELINE */
3175
3176         debugfs_create_file("secure_mode", S_IRUGO,
3177                         kbdev->mali_debugfs_directory, kbdev,
3178                         &kbasep_secure_mode_debugfs_fops);
3179
3180         return 0;
3181
3182 out:
3183         debugfs_remove_recursive(kbdev->mali_debugfs_directory);
3184         return err;
3185 }
3186
3187 static void kbase_device_debugfs_term(struct kbase_device *kbdev)
3188 {
3189         debugfs_remove_recursive(kbdev->mali_debugfs_directory);
3190 }
3191
3192 #else /* CONFIG_DEBUG_FS */
3193 static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
3194 {
3195         return 0;
3196 }
3197
3198 static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }
3199 #endif /* CONFIG_DEBUG_FS */
3200
3201 static void kbase_device_coherency_init(struct kbase_device *kbdev, u32 gpu_id)
3202 {
3203 #ifdef CONFIG_OF
3204         u32 supported_coherency_bitmap =
3205                 kbdev->gpu_props.props.raw_props.coherency_mode;
3206         const void *coherency_override_dts;
3207         u32 override_coherency;
3208 #endif /* CONFIG_OF */
3209
3210         kbdev->system_coherency = COHERENCY_NONE;
3211
3212         /* device tree may override the coherency */
3213 #ifdef CONFIG_OF
3214         coherency_override_dts = of_get_property(kbdev->dev->of_node,
3215                                                 "system-coherency",
3216                                                 NULL);
3217         if (coherency_override_dts) {
3218
3219                 override_coherency = be32_to_cpup(coherency_override_dts);
3220
3221                 if ((override_coherency <= COHERENCY_NONE) &&
3222                         (supported_coherency_bitmap &
3223                          COHERENCY_FEATURE_BIT(override_coherency))) {
3224
3225                         kbdev->system_coherency = override_coherency;
3226
3227                         dev_info(kbdev->dev,
3228                                 "Using coherency mode %u set from dtb",
3229                                 override_coherency);
		} else {
			dev_warn(kbdev->dev,
				"Ignoring unsupported coherency mode %u set from dtb",
				override_coherency);
		}
3234         }
3235
3236 #endif /* CONFIG_OF */
3237
3238         kbdev->gpu_props.props.raw_props.coherency_mode =
3239                 kbdev->system_coherency;
3240 }
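
/*
 * Example device-tree override (a sketch; the node name, unit address and
 * coherency value are placeholders for illustration):
 *
 *   gpu@ffa30000 {
 *           compatible = "arm,mali-midgard";
 *           system-coherency = <31>;
 *   };
 *
 * The override is only applied if the value is no greater than
 * COHERENCY_NONE and the matching COHERENCY_FEATURE_BIT is set in the
 * coherency_mode reported by the GPU; otherwise it is ignored with a
 * warning.
 */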
3241
3242 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3243
/* Callback used by the kbase bus logger client to initiate a GPU reset
 * when the bus log is restarted. The GPU reset is used as a reference
 * point in HW bus log analyses.
3247  */
3248 static void kbase_logging_started_cb(void *data)
3249 {
3250         struct kbase_device *kbdev = (struct kbase_device *)data;
3251
3252         if (kbase_prepare_to_reset_gpu(kbdev))
3253                 kbase_reset_gpu(kbdev);
3254         dev_info(kbdev->dev, "KBASE - Bus logger restarted\n");
3255 }
3256 #endif
3257
3258
3259 static int kbase_common_device_init(struct kbase_device *kbdev)
3260 {
3261         int err;
3262         struct mali_base_gpu_core_props *core_props;
3263         enum {
3264                 inited_mem = (1u << 0),
3265                 inited_js = (1u << 1),
3266                 inited_pm_runtime_init = (1u << 6),
3267 #ifdef CONFIG_MALI_DEVFREQ
3268                 inited_devfreq = (1u << 9),
3269 #endif /* CONFIG_MALI_DEVFREQ */
3270 #ifdef CONFIG_MALI_MIPE_ENABLED
3271                 inited_tlstream = (1u << 10),
3272 #endif /* CONFIG_MALI_MIPE_ENABLED */
3273                 inited_backend_early = (1u << 11),
3274                 inited_backend_late = (1u << 12),
3275                 inited_device = (1u << 13),
3276                 inited_vinstr = (1u << 19),
3277                 inited_ipa = (1u << 20),
3278                 inited_job_fault = (1u << 21)
3279         };
3280
3281         int inited = 0;
3282         u32 gpu_id;
3283 #if defined(CONFIG_MALI_PLATFORM_VEXPRESS)
3284         u32 ve_logic_tile = 0;
3285 #endif /* CONFIG_MALI_PLATFORM_VEXPRESS */
3286
3287         dev_set_drvdata(kbdev->dev, kbdev);
3288
3289         err = kbase_backend_early_init(kbdev);
3290         if (err)
3291                 goto out_partial;
3292         inited |= inited_backend_early;
3293
3294         scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name,
3295                         kbase_dev_nr);
3296
3297         kbase_disjoint_init(kbdev);
3298
3299         /* obtain min/max configured gpu frequencies */
3300         core_props = &(kbdev->gpu_props.props.core_props);
3301
	/* For Versatile Express platforms, the min and max values of the GPU
	 * frequency depend on the type of the logic tile; these values may not
	 * be known at build time, so a platform config file with the wrong GPU
	 * frequency values may be included. To ensure the correct min and max
	 * GPU frequencies are used, the type of the logic tile is read from
	 * the corresponding register on the platform and the frequency values
	 * are assigned accordingly.
	 */
3309 #if defined(CONFIG_MALI_PLATFORM_VEXPRESS)
3310         ve_logic_tile = kbase_get_platform_logic_tile_type();
3311
3312         switch (ve_logic_tile) {
3313         case 0x217:
3314                 /* Virtex 6, HBI0217 */
3315                 core_props->gpu_freq_khz_min = VE_VIRTEX6_GPU_FREQ_MIN;
3316                 core_props->gpu_freq_khz_max = VE_VIRTEX6_GPU_FREQ_MAX;
3317                 break;
3318         case 0x247:
3319                 /* Virtex 7, HBI0247 */
3320                 core_props->gpu_freq_khz_min = VE_VIRTEX7_GPU_FREQ_MIN;
3321                 core_props->gpu_freq_khz_max = VE_VIRTEX7_GPU_FREQ_MAX;
3322                 break;
3323         default:
3324                 /* all other logic tiles, i.e., Virtex 5 HBI0192
3325                  * or unsuccessful reading from the platform -
3326                  * fall back to the config_platform default */
3327                 core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN;
3328                 core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
3329                 break;
3330         }
3331 #else
	core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN;
	core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
3334 #endif /* CONFIG_MALI_PLATFORM_VEXPRESS */
3335
3336         kbdev->gpu_props.irq_throttle_time_us = DEFAULT_IRQ_THROTTLE_TIME_US;
3337
3338         err = kbase_device_init(kbdev);
3339         if (err) {
3340                 dev_err(kbdev->dev, "Can't initialize device (%d)\n", err);
3341                 goto out_partial;
3342         }
3343
3344         inited |= inited_device;
3345
3346         if (kbdev->pm.callback_power_runtime_init) {
3347                 err = kbdev->pm.callback_power_runtime_init(kbdev);
3348                 if (err)
3349                         goto out_partial;
3350
3351                 inited |= inited_pm_runtime_init;
3352         }
3353
3354         err = kbase_mem_init(kbdev);
3355         if (err)
3356                 goto out_partial;
3357
3358         inited |= inited_mem;
3359
3360         gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
3361         gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
3362         gpu_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
3363
3364         kbase_device_coherency_init(kbdev, gpu_id);
3365
3366         err = kbasep_secure_mode_init(kbdev);
3367         if (err)
3368                 goto out_partial;
3369
3370         err = kbasep_js_devdata_init(kbdev);
3371         if (err)
3372                 goto out_partial;
3373
3374         inited |= inited_js;
3375
3376 #ifdef CONFIG_MALI_MIPE_ENABLED
3377         err = kbase_tlstream_init();
3378         if (err) {
3379                 dev_err(kbdev->dev, "Couldn't initialize timeline stream\n");
3380                 goto out_partial;
3381         }
3382         inited |= inited_tlstream;
3383 #endif /* CONFIG_MALI_MIPE_ENABLED */
3384
3385         err = kbase_backend_late_init(kbdev);
3386         if (err)
3387                 goto out_partial;
3388         inited |= inited_backend_late;
3389
3390 #ifdef CONFIG_MALI_DEVFREQ
3391         err = kbase_devfreq_init(kbdev);
3392         if (err) {
3393                 dev_err(kbdev->dev, "Couldn't initialize devfreq\n");
3394                 goto out_partial;
3395         }
3396         inited |= inited_devfreq;
3397 #endif /* CONFIG_MALI_DEVFREQ */
3398
3399         kbdev->vinstr_ctx = kbase_vinstr_init(kbdev);
3400         if (!kbdev->vinstr_ctx) {
3401                 dev_err(kbdev->dev, "Can't initialize virtual instrumentation core\n");
3402                 goto out_partial;
3403         }
3404
3405         inited |= inited_vinstr;
3406
3407         kbdev->ipa_ctx = kbase_ipa_init(kbdev);
3408         if (!kbdev->ipa_ctx) {
3409                 dev_err(kbdev->dev, "Can't initialize IPA\n");
3410                 goto out_partial;
3411         }
3412
3413         inited |= inited_ipa;
3414
3415         err = kbase_debug_job_fault_dev_init(kbdev);
3416         if (err)
3417                 goto out_partial;
3418
3419         inited |= inited_job_fault;
3420
3421         err = kbase_device_debugfs_init(kbdev);
3422         if (err)
3423                 goto out_partial;
3424
	/* Initialise the kctx list */
3426         mutex_init(&kbdev->kctx_list_lock);
3427         INIT_LIST_HEAD(&kbdev->kctx_list);
3428
3429         kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
3430         kbdev->mdev.name = kbdev->devname;
3431         kbdev->mdev.fops = &kbase_fops;
3432         kbdev->mdev.parent = get_device(kbdev->dev);
3433
3434         err = misc_register(&kbdev->mdev);
3435         if (err) {
3436                 dev_err(kbdev->dev, "Couldn't register misc dev %s\n", kbdev->devname);
3437                 goto out_misc;
3438         }
3439
3440         {
3441                 const struct list_head *dev_list = kbase_dev_list_get();
3442
3443                 list_add(&kbdev->entry, &kbase_dev_list);
3444                 kbase_dev_list_put(dev_list);
3445         }
3446
3447         dev_info(kbdev->dev, "Probed as %s\n", dev_name(kbdev->mdev.this_device));
3448
3449         kbase_dev_nr++;
3450
3451         return 0;
3452
3453 out_misc:
3454         put_device(kbdev->dev);
3455         kbase_device_debugfs_term(kbdev);
3456 out_partial:
3457         if (inited & inited_job_fault)
3458                 kbase_debug_job_fault_dev_term(kbdev);
3459         if (inited & inited_ipa)
3460                 kbase_ipa_term(kbdev->ipa_ctx);
3461         if (inited & inited_vinstr)
3462                 kbase_vinstr_term(kbdev->vinstr_ctx);
3463 #ifdef CONFIG_MALI_DEVFREQ
3464         if (inited & inited_devfreq)
3465                 kbase_devfreq_term(kbdev);
3466 #endif /* CONFIG_MALI_DEVFREQ */
3467         if (inited & inited_backend_late)
3468                 kbase_backend_late_term(kbdev);
3469 #ifdef CONFIG_MALI_MIPE_ENABLED
3470         if (inited & inited_tlstream)
3471                 kbase_tlstream_term();
3472 #endif /* CONFIG_MALI_MIPE_ENABLED */
3473
3474         if (inited & inited_js)
3475                 kbasep_js_devdata_halt(kbdev);
3476
3477         if (inited & inited_mem)
3478                 kbase_mem_halt(kbdev);
3479
3480         if (inited & inited_js)
3481                 kbasep_js_devdata_term(kbdev);
3482
3483         if (inited & inited_mem)
3484                 kbase_mem_term(kbdev);
3485
3486         if (inited & inited_pm_runtime_init) {
3487                 if (kbdev->pm.callback_power_runtime_term)
3488                         kbdev->pm.callback_power_runtime_term(kbdev);
3489         }
3490
3491         if (inited & inited_device)
3492                 kbase_device_term(kbdev);
3493
3494         if (inited & inited_backend_early)
3495                 kbase_backend_early_term(kbdev);
3496
3497         return err;
3498 }
3499
3500
3501 static struct attribute *kbase_attrs[] = {
3502 #ifdef CONFIG_MALI_DEBUG
3503         &dev_attr_debug_command.attr,
3504         &dev_attr_js_softstop_always.attr,
3505 #endif
3506 #if !MALI_CUSTOMER_RELEASE
3507         &dev_attr_force_replay.attr,
3508 #endif
3509         &dev_attr_js_timeouts.attr,
3510         &dev_attr_gpuinfo.attr,
3511         &dev_attr_dvfs_period.attr,
3512         &dev_attr_pm_poweroff.attr,
3513         &dev_attr_reset_timeout.attr,
3514         &dev_attr_js_scheduling_period.attr,
3515         &dev_attr_power_policy.attr,
3516         &dev_attr_core_availability_policy.attr,
3517         &dev_attr_core_mask.attr,
3518         &dev_attr_mem_pool_size.attr,
3519         &dev_attr_mem_pool_max_size.attr,
3520         NULL
3521 };
3522
3523 static const struct attribute_group kbase_attr_group = {
3524         .attrs = kbase_attrs,
3525 };
3526
3527 static int kbase_common_device_remove(struct kbase_device *kbdev);
3528
3529 static int kbase_platform_device_probe(struct platform_device *pdev)
3530 {
3531         struct kbase_device *kbdev;
3532         struct resource *reg_res;
3533         int err = 0;
3534         int i;
3535
	printk(KERN_INFO "arm_release_ver of this mali_ko is '%s', rk_ko_ver is '%d', built at '%s', on '%s'.\n",
3537            MALI_RELEASE_NAME,
3538            ROCKCHIP_VERSION,
3539            __TIME__,
3540            __DATE__);
3541
3542 #ifdef CONFIG_OF
3543         err = kbase_platform_early_init();
3544         if (err) {
3545                 dev_err(&pdev->dev, "Early platform initialization failed\n");
3546                 return err;
3547         }
3548 #endif
3549
3550         kbdev = kbase_device_alloc();
3551         if (!kbdev) {
3552                 dev_err(&pdev->dev, "Can't allocate device\n");
3553                 err = -ENOMEM;
3554                 goto out;
3555         }
3556 #ifdef CONFIG_MALI_NO_MALI
3557         err = gpu_device_create(kbdev);
3558         if (err) {
3559                 dev_err(&pdev->dev, "Can't initialize dummy model\n");
3560                 goto out_midg;
3561         }
3562 #endif /* CONFIG_MALI_NO_MALI */
3563
3564         kbdev->dev = &pdev->dev;
3565         /* 3 IRQ resources */
3566         for (i = 0; i < 3; i++) {
3567                 struct resource *irq_res;
3568                 int irqtag;
3569
3570                 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
3571                 if (!irq_res) {
3572                         dev_err(kbdev->dev, "No IRQ resource at index %d\n", i);
3573                         err = -ENOENT;
3574                         goto out_platform_irq;
3575                 }
3576
3577 #ifdef CONFIG_OF
3578                 if (!strcmp(irq_res->name, "JOB")) {
3579                         irqtag = JOB_IRQ_TAG;
3580                 } else if (!strcmp(irq_res->name, "MMU")) {
3581                         irqtag = MMU_IRQ_TAG;
3582                 } else if (!strcmp(irq_res->name, "GPU")) {
3583                         irqtag = GPU_IRQ_TAG;
3584                 } else {
3585                         dev_err(&pdev->dev, "Invalid irq res name: '%s'\n",
3586                                 irq_res->name);
3587                         err = -EINVAL;
3588                         goto out_irq_name;
3589                 }
3590 #else
3591                 irqtag = i;
3592 #endif /* CONFIG_OF */
3593                 kbdev->irqs[irqtag].irq = irq_res->start;
3594                 kbdev->irqs[irqtag].flags = (irq_res->flags & IRQF_TRIGGER_MASK);
3595         }
	/* The first memory resource is the physical address of the GPU
	 * registers.
	 */
	reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!reg_res) {
		dev_err(kbdev->dev, "Invalid register resource\n");
		err = -ENOENT;
		goto out_platform_mem;
	}

	kbdev->reg_start = reg_res->start;
	kbdev->reg_size = resource_size(reg_res);

	err = kbase_common_reg_map(kbdev);
	if (err)
		goto out_reg_map;
3611
3612 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
3613                         && defined(CONFIG_REGULATOR)
3614         kbdev->regulator = regulator_get_optional(kbdev->dev, "mali");
3615         if (IS_ERR_OR_NULL(kbdev->regulator)) {
3616                 err = PTR_ERR(kbdev->regulator);
3617
3618                 kbdev->regulator = NULL;
3619                 if (err == -EPROBE_DEFER)
3620                         goto out_regulator;
3621                 dev_info(kbdev->dev, "Continuing without Mali regulator control\n");
3622                 /* Allow probe to continue without regulator */
3623         }
3624 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
3625
3626 #ifdef CONFIG_MALI_PLATFORM_DEVICETREE
3627         pm_runtime_enable(kbdev->dev);
3628 #endif
3629
3630         kbdev->clock = clk_get(kbdev->dev, "clk_mali");
3631         if (IS_ERR_OR_NULL(kbdev->clock)) {
3632                 err = PTR_ERR(kbdev->clock);
3633
3634                 kbdev->clock = NULL;
3635                 if (err == -EPROBE_DEFER)
3636                         goto out_clock_prepare;
3637                 dev_info(kbdev->dev, "Continuing without Mali clock control\n");
3638                 /* Allow probe to continue without clock. */
3639         } else {
3640                 err = clk_prepare_enable(kbdev->clock);
3641                 if (err) {
3642                         dev_err(kbdev->dev,
3643                                 "Failed to prepare and enable clock (%d)\n", err);
3644                         goto out_clock_prepare;
3645                 }
3646         }
3647
3648 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) && defined(CONFIG_OF) \
3649                         && defined(CONFIG_PM_OPP)
3650         /* Register the OPPs if they are available in device tree */
3651         if (of_init_opp_table(kbdev->dev) < 0)
3652                 dev_dbg(kbdev->dev, "OPP table not found\n");
3653 #endif
3654
3655
3656         err = kbase_common_device_init(kbdev);
3657         if (err) {
3658                 dev_err(kbdev->dev, "Failed kbase_common_device_init\n");
3659                 goto out_common_init;
3660         }
3661
3662         err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
3663         if (err) {
3664                 dev_err(&pdev->dev, "Failed to create sysfs entries\n");
3665                 goto out_sysfs;
3666         }
3667
3668 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3669         err = bl_core_client_register(kbdev->devname,
3670                                                 kbase_logging_started_cb,
3671                                                 kbdev, &kbdev->buslogger,
3672                                                 THIS_MODULE, NULL);
3673         if (err) {
3674                 dev_err(kbdev->dev, "Couldn't register bus log client\n");
3675                 goto out_bl_core_register;
3676         }
3677
3678         bl_core_set_threshold(kbdev->buslogger, 1024*1024*1024);
3679 #endif
3680         return 0;
3681
3682 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3683 out_bl_core_register:
3684         sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
3685 #endif
3686
3687 out_sysfs:
3688         kbase_common_device_remove(kbdev);
3689 out_common_init:
3690 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
3691         of_free_opp_table(kbdev->dev);
3692 #endif
3693         clk_disable_unprepare(kbdev->clock);
3694 out_clock_prepare:
3695         clk_put(kbdev->clock);
3696 #ifdef CONFIG_MALI_PLATFORM_DEVICETREE
3697         pm_runtime_disable(kbdev->dev);
3698 #endif
3699 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
3700                         && defined(CONFIG_REGULATOR)
3701 out_regulator:
3702         regulator_put(kbdev->regulator);
3703 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
	kbase_common_reg_unmap(kbdev);
3705 out_reg_map:
3706 out_platform_mem:
3707 #ifdef CONFIG_OF
3708 out_irq_name:
3709 #endif
3710 out_platform_irq:
3711 #ifdef CONFIG_MALI_NO_MALI
3712         gpu_device_destroy(kbdev);
3713 out_midg:
3714 #endif /* CONFIG_MALI_NO_MALI */
3715         kbase_device_free(kbdev);
3716 out:
3717         return err;
3718 }
3719
3720 static int kbase_common_device_remove(struct kbase_device *kbdev)
3721 {
3722         kbase_debug_job_fault_dev_term(kbdev);
3723         kbase_ipa_term(kbdev->ipa_ctx);
3724         kbase_vinstr_term(kbdev->vinstr_ctx);
3725         sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
3726
3727 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3728         if (kbdev->buslogger)
3729                 bl_core_client_unregister(kbdev->buslogger);
3730 #endif
3731
3732 #ifdef CONFIG_DEBUG_FS
3733         debugfs_remove_recursive(kbdev->mali_debugfs_directory);
3734 #endif
3735 #ifdef CONFIG_MALI_DEVFREQ
3736         kbase_devfreq_term(kbdev);
3737 #endif
3738
3739         kbase_backend_late_term(kbdev);
3740
3741         if (kbdev->pm.callback_power_runtime_term)
3742                 kbdev->pm.callback_power_runtime_term(kbdev);
3743 #ifdef CONFIG_MALI_PLATFORM_DEVICETREE
3744         pm_runtime_disable(kbdev->dev);
3745 #endif
3746
3747 #ifdef CONFIG_MALI_MIPE_ENABLED
3748         kbase_tlstream_term();
3749 #endif /* CONFIG_MALI_MIPE_ENABLED */
3750
3751         kbasep_js_devdata_halt(kbdev);
3752         kbase_mem_halt(kbdev);
3753
3754         kbasep_js_devdata_term(kbdev);
3755         kbase_mem_term(kbdev);
3756         kbase_backend_early_term(kbdev);
3757
3758         {
3759                 const struct list_head *dev_list = kbase_dev_list_get();
3760
3761                 list_del(&kbdev->entry);
3762                 kbase_dev_list_put(dev_list);
3763         }
3764         misc_deregister(&kbdev->mdev);
3765         put_device(kbdev->dev);
	kbase_common_reg_unmap(kbdev);
3767         kbase_device_term(kbdev);
3768         if (kbdev->clock) {
3769                 clk_disable_unprepare(kbdev->clock);
3770                 clk_put(kbdev->clock);
3771                 kbdev->clock = NULL;
3772         }
3773 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
3774                         && defined(CONFIG_REGULATOR)
3775         regulator_put(kbdev->regulator);
3776 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
3777 #ifdef CONFIG_MALI_NO_MALI
3778         gpu_device_destroy(kbdev);
3779 #endif /* CONFIG_MALI_NO_MALI */
3780         kbase_device_free(kbdev);
3781
3782         return 0;
3783 }
3784
3785 static int kbase_platform_device_remove(struct platform_device *pdev)
3786 {
3787         struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
3788
3789         if (!kbdev)
3790                 return -ENODEV;
3791
3792         return kbase_common_device_remove(kbdev);
3793 }
3794
3795 /** Suspend callback from the OS.
3796  *
3797  * This is called by Linux when the device should suspend.
3798  *
3799  * @param dev  The device to suspend
3800  *
3801  * @return A standard Linux error code
3802  */
3803 static int kbase_device_suspend(struct device *dev)
3804 {
3805         struct kbase_device *kbdev = to_kbase_device(dev);
3806
3807         if (!kbdev)
3808                 return -ENODEV;
3809
3810 #if defined(CONFIG_PM_DEVFREQ) && \
3811                 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3812         devfreq_suspend_device(kbdev->devfreq);
3813 #endif
3814
3815         kbase_pm_suspend(kbdev);
3816         return 0;
3817 }
3818
3819 /** Resume callback from the OS.
3820  *
3821  * This is called by Linux when the device should resume from suspension.
3822  *
3823  * @param dev  The device to resume
3824  *
3825  * @return A standard Linux error code
3826  */
3827 static int kbase_device_resume(struct device *dev)
3828 {
3829         struct kbase_device *kbdev = to_kbase_device(dev);
3830
3831         if (!kbdev)
3832                 return -ENODEV;
3833
3834         kbase_pm_resume(kbdev);
3835
3836 #if defined(CONFIG_PM_DEVFREQ) && \
3837                 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3838         devfreq_resume_device(kbdev->devfreq);
3839 #endif
3840         return 0;
3841 }
3842
3843 /** Runtime suspend callback from the OS.
3844  *
3845  * This is called by Linux when the device should prepare for a condition in which it will
3846  * not be able to communicate with the CPU(s) and RAM due to power management.
3847  *
3848  * @param dev  The device to suspend
3849  *
3850  * @return A standard Linux error code
3851  */
3852 #ifdef KBASE_PM_RUNTIME
3853 static int kbase_device_runtime_suspend(struct device *dev)
3854 {
3855         struct kbase_device *kbdev = to_kbase_device(dev);
3856
3857         if (!kbdev)
3858                 return -ENODEV;
3859
3860 #if defined(CONFIG_PM_DEVFREQ) && \
3861                 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3862         devfreq_suspend_device(kbdev->devfreq);
3863 #endif
3864
3865         if (kbdev->pm.backend.callback_power_runtime_off) {
3866                 kbdev->pm.backend.callback_power_runtime_off(kbdev);
3867                 dev_dbg(dev, "runtime suspend\n");
3868         }
3869         return 0;
3870 }
3871 #endif /* KBASE_PM_RUNTIME */
3872
3873 /** Runtime resume callback from the OS.
3874  *
3875  * This is called by Linux when the device should go into a fully active state.
3876  *
 * @param dev  The device to resume
3878  *
3879  * @return A standard Linux error code
3880  */
3881
3882 #ifdef KBASE_PM_RUNTIME
3883 int kbase_device_runtime_resume(struct device *dev)
3884 {
3885         int ret = 0;
3886         struct kbase_device *kbdev = to_kbase_device(dev);
3887
3888         if (!kbdev)
3889                 return -ENODEV;
3890
3891         if (kbdev->pm.backend.callback_power_runtime_on) {
3892                 ret = kbdev->pm.backend.callback_power_runtime_on(kbdev);
3893                 dev_dbg(dev, "runtime resume\n");
3894         }
3895
3896 #if defined(CONFIG_PM_DEVFREQ) && \
3897                 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3898         devfreq_resume_device(kbdev->devfreq);
3899 #endif
3900
3901         return ret;
3902 }
3903 #endif /* KBASE_PM_RUNTIME */
3904
3905
3906 #ifdef KBASE_PM_RUNTIME
3907 /**
3908  * kbase_device_runtime_idle - Runtime idle callback from the OS.
3909  * @dev: The device to suspend
3910  *
3911  * This is called by Linux when the device appears to be inactive and it might
3912  * be placed into a low power state.
3913  *
3914  * Return: 0 if device can be suspended, non-zero to avoid runtime autosuspend,
3915  * otherwise a standard Linux error code
3916  */
3917 static int kbase_device_runtime_idle(struct device *dev)
3918 {
3919         struct kbase_device *kbdev = to_kbase_device(dev);
3920
3921         if (!kbdev)
3922                 return -ENODEV;
3923
3924         /* Use platform specific implementation if it exists. */
3925         if (kbdev->pm.backend.callback_power_runtime_idle)
3926                 return kbdev->pm.backend.callback_power_runtime_idle(kbdev);
3927
3928         return 0;
3929 }
3930 #endif /* KBASE_PM_RUNTIME */
3931
3932 /** The power management operations for the platform driver.
3933  */
3934 static const struct dev_pm_ops kbase_pm_ops = {
3935         .suspend = kbase_device_suspend,
3936         .resume = kbase_device_resume,
3937 #ifdef KBASE_PM_RUNTIME
3938         .runtime_suspend = kbase_device_runtime_suspend,
3939         .runtime_resume = kbase_device_runtime_resume,
3940         .runtime_idle = kbase_device_runtime_idle,
3941 #endif /* KBASE_PM_RUNTIME */
3942 };
3943
3944 #ifdef CONFIG_OF
3945 static const struct of_device_id kbase_dt_ids[] = {
3946         { .compatible = "arm,malit7xx" },
3947         { .compatible = "arm,mali-midgard" },
3948         { /* sentinel */ }
3949 };
3950 MODULE_DEVICE_TABLE(of, kbase_dt_ids);
3951 #endif
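
/*
 * Example device-tree node matched by the table above (a sketch; addresses,
 * interrupt numbers and the clock specifier are placeholders). With
 * CONFIG_OF the probe code requires one "reg" entry for the GPU register
 * window and three interrupts named "JOB", "MMU" and "GPU"; the clock, if
 * present, must be named "clk_mali":
 *
 *   gpu: gpu@ffa30000 {
 *           compatible = "arm,mali-midgard";
 *           reg = <0xffa30000 0x10000>;
 *           interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
 *                        <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
 *                        <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
 *           interrupt-names = "JOB", "MMU", "GPU";
 *           clocks = <&cru ACLK_GPU>;
 *           clock-names = "clk_mali";
 *   };
 */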
3952
3953 static struct platform_driver kbase_platform_driver = {
3954         .probe = kbase_platform_device_probe,
3955         .remove = kbase_platform_device_remove,
3956         .driver = {
3957                    .name = kbase_drv_name,
3958                    .owner = THIS_MODULE,
3959                    .pm = &kbase_pm_ops,
3960                    .of_match_table = of_match_ptr(kbase_dt_ids),
3961         },
3962 };
3963
3964 /*
 * When using Device Tree, the driver no longer provides a shortcut to
 * create the Mali platform device.
3967  */
3968 #ifdef CONFIG_OF
3969 module_platform_driver(kbase_platform_driver);
3970 #else
3971
3972 static int __init rockchip_gpu_init_driver(void)
3973 {
3974         return platform_driver_register(&kbase_platform_driver);
3975 }
3976 late_initcall(rockchip_gpu_init_driver);
3977
3978 static int __init kbase_driver_init(void)
3979 {
3980         int ret;
3981
3982         ret = kbase_platform_early_init();
3983         if (ret)
3984                 return ret;
3985
3986 #ifndef CONFIG_MACH_MANTA
3987 #ifdef CONFIG_MALI_PLATFORM_FAKE
3988         ret = kbase_platform_fake_register();
3989         if (ret)
3990                 return ret;
3991 #endif
3992 #endif
3993         ret = platform_driver_register(&kbase_platform_driver);
3994 #ifndef CONFIG_MACH_MANTA
3995 #ifdef CONFIG_MALI_PLATFORM_FAKE
3996         if (ret)
3997                 kbase_platform_fake_unregister();
3998 #endif
3999 #endif
4000         return ret;
4001 }
4002
4003 static void __exit kbase_driver_exit(void)
4004 {
4005         platform_driver_unregister(&kbase_platform_driver);
4006 #ifndef CONFIG_MACH_MANTA
4007 #ifdef CONFIG_MALI_PLATFORM_FAKE
4008         kbase_platform_fake_unregister();
4009 #endif
4010 #endif
4011 }
4012
4013 module_init(kbase_driver_init);
4014 module_exit(kbase_driver_exit);
4015
4016 #endif /* CONFIG_OF */
4017
4018 MODULE_LICENSE("GPL");
4019 MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \
4020                 __stringify(BASE_UK_VERSION_MAJOR) "." \
4021                 __stringify(BASE_UK_VERSION_MINOR) ")");
4022
4023 #if defined(CONFIG_MALI_GATOR_SUPPORT) || defined(CONFIG_MALI_SYSTEM_TRACE)
4024 #define CREATE_TRACE_POINTS
4025 #endif
4026
4027 #ifdef CONFIG_MALI_GATOR_SUPPORT
4028 /* Create the trace points (otherwise we just get code to call a tracepoint) */
4029 #include "mali_linux_trace.h"
4030
4031 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_job_slots_event);
4032 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_status);
4033 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_on);
4034 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_off);
4035 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_page_fault_insert_pages);
4036 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_in_use);
4037 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_released);
4038 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_total_alloc_pages_change);
4039 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_sw_counter);
4040
4041 void kbase_trace_mali_pm_status(u32 event, u64 value)
4042 {
4043         trace_mali_pm_status(event, value);
4044 }
4045
4046 void kbase_trace_mali_pm_power_off(u32 event, u64 value)
4047 {
4048         trace_mali_pm_power_off(event, value);
4049 }
4050
4051 void kbase_trace_mali_pm_power_on(u32 event, u64 value)
4052 {
4053         trace_mali_pm_power_on(event, value);
4054 }
4055
4056 void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id)
4057 {
4058         trace_mali_job_slots_event(event, (kctx != NULL ? kctx->tgid : 0), (kctx != NULL ? kctx->pid : 0), atom_id);
4059 }
4060
4061 void kbase_trace_mali_page_fault_insert_pages(int event, u32 value)
4062 {
4063         trace_mali_page_fault_insert_pages(event, value);
4064 }
4065
4066 void kbase_trace_mali_mmu_as_in_use(int event)
4067 {
4068         trace_mali_mmu_as_in_use(event);
4069 }
4070
4071 void kbase_trace_mali_mmu_as_released(int event)
4072 {
4073         trace_mali_mmu_as_released(event);
4074 }
4075
4076 void kbase_trace_mali_total_alloc_pages_change(long long int event)
4077 {
4078         trace_mali_total_alloc_pages_change(event);
4079 }
4080 #endif /* CONFIG_MALI_GATOR_SUPPORT */
4081 #ifdef CONFIG_MALI_SYSTEM_TRACE
4082 #include "mali_linux_kbase_trace.h"
4083 #endif