MALI: rockchip: upgrade midgard DDK to r11p0-00rel0
drivers/gpu/arm/midgard/mali_kbase_core_linux.c
/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */

#define ENABLE_DEBUG_LOG
#include "platform/rk/custom_log.h"

#include <mali_kbase.h>
#include <mali_kbase_hwaccess_gpuprops.h>
#include <mali_kbase_config_defaults.h>
#include <mali_kbase_uku.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_instr.h>
#include <mali_kbase_gator.h>
#include <backend/gpu/mali_kbase_js_affinity.h>
#include <mali_kbase_mem_linux.h>
#ifdef CONFIG_MALI_DEVFREQ
#include <backend/gpu/mali_kbase_devfreq.h>
#endif /* CONFIG_MALI_DEVFREQ */
#ifdef CONFIG_MALI_NO_MALI
#include "mali_kbase_model_linux.h"
#endif /* CONFIG_MALI_NO_MALI */
#include "mali_kbase_mem_profile_debugfs_buf_size.h"
#include "mali_kbase_debug_mem_view.h"
#include "mali_kbase_mem.h"
#include "mali_kbase_mem_pool_debugfs.h"
#if !MALI_CUSTOMER_RELEASE
#include "mali_kbase_regs_dump_debugfs.h"
#endif /* !MALI_CUSTOMER_RELEASE */
#include <mali_kbase_hwaccess_backend.h>
#include <mali_kbase_hwaccess_jm.h>
#include <backend/gpu/mali_kbase_device_internal.h>

#ifdef CONFIG_KDS
#include <linux/kds.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#endif /* CONFIG_KDS */

#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/list.h>
#include <linux/semaphore.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/compat.h>       /* is_compat_task */
#include <linux/mman.h>
#include <linux/version.h>
#include <linux/security.h>
#ifdef CONFIG_MALI_PLATFORM_DEVICETREE
#include <linux/pm_runtime.h>
#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */
#include <mali_kbase_hw.h>
#include <platform/mali_kbase_platform_common.h>
#ifdef CONFIG_MALI_PLATFORM_FAKE
#include <platform/mali_kbase_platform_fake.h>
#endif /* CONFIG_MALI_PLATFORM_FAKE */
#ifdef CONFIG_SYNC
#include <mali_kbase_sync.h>
#endif /* CONFIG_SYNC */
#ifdef CONFIG_PM_DEVFREQ
#include <linux/devfreq.h>
#endif /* CONFIG_PM_DEVFREQ */
#include <linux/clk.h>
#include <linux/delay.h>

#include <mali_kbase_config.h>

#ifdef CONFIG_MACH_MANTA
#include <plat/devs.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
#include <linux/pm_opp.h>
#else
#include <linux/opp.h>
#endif

#include <mali_kbase_tlstream.h>

/* GPU IRQ Tags */
#define JOB_IRQ_TAG     0
#define MMU_IRQ_TAG     1
#define GPU_IRQ_TAG     2

#if MALI_UNIT_TEST
static struct kbase_exported_test_data shared_kernel_test_data;
EXPORT_SYMBOL(shared_kernel_test_data);
#endif /* MALI_UNIT_TEST */

#define KBASE_DRV_NAME "mali"
/** rk_ext : version of the rk_ext interface implemented by mali_ko, aka rk_ko_ver. */
#define ROCKCHIP_VERSION    (13)

static const char kbase_drv_name[] = KBASE_DRV_NAME;

static int kbase_dev_nr;

static DEFINE_MUTEX(kbase_dev_list_lock);
static LIST_HEAD(kbase_dev_list);

#define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
static inline void __compile_time_asserts(void)
{
        CSTD_COMPILE_TIME_ASSERT(sizeof(KERNEL_SIDE_DDK_VERSION_STRING) <= KBASE_GET_VERSION_BUFFER_SIZE);
}

#ifdef CONFIG_KDS

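/**
 * struct kbasep_kds_resource_set_file_data - per-fd KDS lock state.
 * @lock: KDS resource set held for as long as the exported fd stays open.
 */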
struct kbasep_kds_resource_set_file_data {
        struct kds_resource_set *lock;
};

static int kds_resource_release(struct inode *inode, struct file *file);

static const struct file_operations kds_resource_fops = {
        .release = kds_resource_release
};

struct kbase_kds_resource_list_data {
        struct kds_resource **kds_resources;
        unsigned long *kds_access_bitmap;
        int num_elems;
};

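/**
 * kds_resource_release - release handler for a "kds_ext" anonymous file.
 * @inode: inode of the file being released.
 * @file:  file whose private_data holds the per-fd resource set data.
 *
 * Drops the KDS resource set (if one was ever locked) and frees the
 * per-file data allocated by kbase_external_buffer_lock().
 */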
static int kds_resource_release(struct inode *inode, struct file *file)
{
        struct kbasep_kds_resource_set_file_data *data;

        data = (struct kbasep_kds_resource_set_file_data *)file->private_data;
        if (NULL != data) {
                if (NULL != data->lock)
                        kds_resource_set_release(&data->lock);

                kfree(data);
        }
        return 0;
}

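/**
 * kbasep_kds_allocate_resource_list_data - build the KDS resource and
 * exclusive-access lists for a set of external resources.
 * @kctx:           kbase context that owns the GPU regions.
 * @ext_res:        array of external resources copied in from user space.
 * @num_elems:      number of entries in @ext_res.
 * @resources_list: output lists, allocated here; freed internally on
 *                  failure, by the caller on success.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or -EINVAL if any
 * resource cannot be resolved to a KDS resource.
 */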
static int kbasep_kds_allocate_resource_list_data(struct kbase_context *kctx, struct base_external_resource *ext_res, int num_elems, struct kbase_kds_resource_list_data *resources_list)
{
        struct base_external_resource *res = ext_res;
        int res_id;

        /* assume we have to wait for all */

        KBASE_DEBUG_ASSERT(0 != num_elems);
        resources_list->kds_resources = kmalloc_array(num_elems,
                        sizeof(struct kds_resource *), GFP_KERNEL);

        if (NULL == resources_list->kds_resources)
                return -ENOMEM;

        KBASE_DEBUG_ASSERT(0 != num_elems);
        resources_list->kds_access_bitmap = kzalloc(
                        sizeof(unsigned long) *
                        ((num_elems + BITS_PER_LONG - 1) / BITS_PER_LONG),
                        GFP_KERNEL);

        if (NULL == resources_list->kds_access_bitmap) {
                kfree(resources_list->kds_resources);
                return -ENOMEM;
        }

        kbase_gpu_vm_lock(kctx);
        for (res_id = 0; res_id < num_elems; res_id++, res++) {
                int exclusive;
                struct kbase_va_region *reg;
                struct kds_resource *kds_res = NULL;

                exclusive = res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE;
                reg = kbase_region_tracker_find_region_enclosing_address(kctx, res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);

                /* did we find a matching region object? */
                if (NULL == reg || (reg->flags & KBASE_REG_FREE))
                        break;

                /* no need to check reg->alloc as only regions with an alloc
                 * have a size, and kbase_region_tracker_find_region_enclosing_address
                 * only returns regions with size > 0 */
                switch (reg->gpu_alloc->type) {
#if defined(CONFIG_UMP) && defined(CONFIG_KDS)
                case KBASE_MEM_TYPE_IMPORTED_UMP:
                        kds_res = ump_dd_kds_resource_get(reg->gpu_alloc->imported.ump_handle);
                        break;
#endif /* defined(CONFIG_UMP) && defined(CONFIG_KDS) */
                default:
                        break;
                }

                /* no kds resource for the region ? */
                if (!kds_res)
                        break;

                resources_list->kds_resources[res_id] = kds_res;

                if (exclusive)
                        set_bit(res_id, resources_list->kds_access_bitmap);
        }
        kbase_gpu_vm_unlock(kctx);

        /* did the loop run to completion? */
        if (res_id == num_elems)
                return 0;

        /* Clean up as the resource list is not valid. */
        kfree(resources_list->kds_resources);
        kfree(resources_list->kds_access_bitmap);

        return -EINVAL;
}

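/**
 * kbasep_validate_kbase_pointer - check that a user pointer union is
 * non-NULL in whichever representation (compat or native) the context uses.
 * @kctx: context, used to pick the representation.
 * @p:    pointer union to validate.
 */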
static bool kbasep_validate_kbase_pointer(
                struct kbase_context *kctx, union kbase_pointer *p)
{
        if (kctx->is_compat) {
                if (p->compat_value == 0)
                        return false;
        } else {
                if (NULL == p->value)
                        return false;
        }
        return true;
}

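/**
 * kbase_external_buffer_lock - lock a set of external buffers through KDS.
 * @kctx:      calling kbase context.
 * @args:      ioctl payload describing the resources to lock.
 * @args_size: size of @args as passed from user space.
 *
 * Exports an anonymous "kds_ext" file descriptor to user space; the KDS
 * lock is held until that descriptor is closed.
 *
 * Return: 0 on success or a negative error code.
 */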
static int kbase_external_buffer_lock(struct kbase_context *kctx,
                struct kbase_uk_ext_buff_kds_data *args, u32 args_size)
{
        struct base_external_resource *ext_res_copy;
        size_t ext_resource_size;
        int ret = -EINVAL;
        int fd = -EBADF;
        struct base_external_resource __user *ext_res_user;
        int __user *file_desc_usr;
        struct kbasep_kds_resource_set_file_data *fdata;
        struct kbase_kds_resource_list_data resource_list_data;

        if (args_size != sizeof(struct kbase_uk_ext_buff_kds_data))
                return -EINVAL;

        /* Check user space has provided valid data */
        if (!kbasep_validate_kbase_pointer(kctx, &args->external_resource) ||
                        !kbasep_validate_kbase_pointer(kctx, &args->file_descriptor) ||
                        (0 == args->num_res) ||
                        (args->num_res > KBASE_MAXIMUM_EXT_RESOURCES))
                return -EINVAL;

        ext_resource_size = sizeof(struct base_external_resource) * args->num_res;

        KBASE_DEBUG_ASSERT(0 != ext_resource_size);
        ext_res_copy = kmalloc(ext_resource_size, GFP_KERNEL);

        if (!ext_res_copy)
                return -EINVAL;
#ifdef CONFIG_COMPAT
        if (kctx->is_compat) {
                ext_res_user = compat_ptr(args->external_resource.compat_value);
                file_desc_usr = compat_ptr(args->file_descriptor.compat_value);
        } else {
#endif /* CONFIG_COMPAT */
                ext_res_user = args->external_resource.value;
                file_desc_usr = args->file_descriptor.value;
#ifdef CONFIG_COMPAT
        }
#endif /* CONFIG_COMPAT */

        /* Copy the external resources to lock from user space */
        if (copy_from_user(ext_res_copy, ext_res_user, ext_resource_size))
                goto out;

        /* Allocate data to be stored in the file */
        fdata = kmalloc(sizeof(*fdata), GFP_KERNEL);

        if (!fdata) {
                ret = -ENOMEM;
                goto out;
        }

        /* Parse given elements and create resource and access lists */
        ret = kbasep_kds_allocate_resource_list_data(kctx,
                        ext_res_copy, args->num_res, &resource_list_data);
        if (!ret) {
                long err;

                fdata->lock = NULL;

                fd = anon_inode_getfd("kds_ext", &kds_resource_fops, fdata, 0);

                err = copy_to_user(file_desc_usr, &fd, sizeof(fd));

                /* If the file descriptor was valid and we successfully copied
                 * it to user space, then we can try and lock the requested
                 * kds resources.
                 */
                if ((fd >= 0) && (0 == err)) {
                        struct kds_resource_set *lock;

                        lock = kds_waitall(args->num_res,
                                        resource_list_data.kds_access_bitmap,
                                        resource_list_data.kds_resources,
                                        KDS_WAIT_BLOCKING);

                        if (!lock) {
                                ret = -EINVAL;
                        } else if (IS_ERR(lock)) {
                                ret = PTR_ERR(lock);
                        } else {
                                ret = 0;
                                fdata->lock = lock;
                        }
                } else {
                        ret = -EINVAL;
                }

                kfree(resource_list_data.kds_resources);
                kfree(resource_list_data.kds_access_bitmap);
        }

        if (ret) {
                /* If the file was opened successfully then close it, which
                 * will clean up the file data; otherwise we clean up the
                 * file data ourselves.
                 */
                if (fd >= 0)
                        sys_close(fd);
                else
                        kfree(fdata);
        }
out:
        kfree(ext_res_copy);

        return ret;
}
#endif /* CONFIG_KDS */

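/**
 * kbase_create_timeline_objects - summarize static GPU objects for the
 * timeline stream.
 * @kctx: context on whose behalf the timeline was acquired.
 *
 * Emits summary-stream messages describing the LPUs (one per job slot),
 * the address spaces, the GPU itself and every live context, so that a
 * newly attached timeline client sees a consistent snapshot before any
 * body-stream events arrive.
 */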
static void kbase_create_timeline_objects(struct kbase_context *kctx)
{
        struct kbase_device             *kbdev = kctx->kbdev;
        unsigned int                    lpu_id;
        unsigned int                    as_nr;
        struct kbasep_kctx_list_element *element;

        /* Create LPU objects. */
        for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
                u32 *lpu =
                        &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
                kbase_tlstream_tl_summary_new_lpu(lpu, lpu_id, *lpu);
        }

        /* Create Address Space objects. */
        for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
                kbase_tlstream_tl_summary_new_as(&kbdev->as[as_nr], as_nr);

        /* Create GPU object and make it retain all LPUs and address spaces. */
        kbase_tlstream_tl_summary_new_gpu(
                        kbdev,
                        kbdev->gpu_props.props.raw_props.gpu_id,
                        kbdev->gpu_props.num_cores);

        for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
                void *lpu =
                        &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
                kbase_tlstream_tl_summary_lifelink_lpu_gpu(lpu, kbdev);
        }
        for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
                kbase_tlstream_tl_summary_lifelink_as_gpu(
                                &kbdev->as[as_nr],
                                kbdev);

        /* Create object for each known context. */
        mutex_lock(&kbdev->kctx_list_lock);
        list_for_each_entry(element, &kbdev->kctx_list, link) {
                kbase_tlstream_tl_summary_new_ctx(
                                element->kctx,
                                (u32)(element->kctx->id),
                                (u32)(element->kctx->tgid));
        }
        /* Before releasing the lock, reset the body stream buffers.
         * This prevents context creation messages from being directed to
         * both the summary and body streams. */
        kbase_tlstream_reset_body_streams();
        mutex_unlock(&kbdev->kctx_list_lock);
        /* Static objects are placed into the summary packet, which needs to
         * be transmitted first. Flush all streams to make it available to
         * user space. */
        kbase_tlstream_flush_streams();
}

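/**
 * kbase_api_handshake - negotiate the user/kernel (UK) API version.
 * @version: in/out; the version proposed by user space, adjusted to the
 *           version the kernel will actually speak.
 */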
static void kbase_api_handshake(struct uku_version_check_args *version)
{
        switch (version->major) {
#ifdef BASE_LEGACY_UK6_SUPPORT
        case 6:
                /* We are backwards compatible with version 6,
                 * so pretend to be the old version */
                version->major = 6;
                version->minor = 1;
                break;
#endif /* BASE_LEGACY_UK6_SUPPORT */
#ifdef BASE_LEGACY_UK7_SUPPORT
        case 7:
                /* We are backwards compatible with version 7,
                 * so pretend to be the old version */
                version->major = 7;
                version->minor = 1;
                break;
#endif /* BASE_LEGACY_UK7_SUPPORT */
#ifdef BASE_LEGACY_UK8_SUPPORT
        case 8:
                /* We are backwards compatible with version 8,
                 * so pretend to be the old version */
                version->major = 8;
                version->minor = 4;
                break;
#endif /* BASE_LEGACY_UK8_SUPPORT */
#ifdef BASE_LEGACY_UK9_SUPPORT
        case 9:
                /* We are backwards compatible with version 9,
                 * so pretend to be the old version */
                version->major = 9;
                version->minor = 0;
                break;
#endif /* BASE_LEGACY_UK9_SUPPORT */
        case BASE_UK_VERSION_MAJOR:
                /* set minor to be the lowest common */
                version->minor = min_t(int, BASE_UK_VERSION_MINOR,
                                (int)version->minor);
                break;
        default:
                /* We return our actual version regardless of whether it
                 * matches the version requested by userspace -
                 * userspace can bail if it can't handle this
                 * version */
                version->major = BASE_UK_VERSION_MAJOR;
                version->minor = BASE_UK_VERSION_MINOR;
                break;
        }
}

/**
 * enum mali_error - Mali error codes shared with userspace
 *
 * This is a subset of the common Mali errors that can be returned to
 * userspace. Values of matching user and kernel space enumerators MUST
 * be the same. MALI_ERROR_NONE is guaranteed to be 0.
 */
enum mali_error {
        MALI_ERROR_NONE = 0,
        MALI_ERROR_OUT_OF_GPU_MEMORY,
        MALI_ERROR_OUT_OF_MEMORY,
        MALI_ERROR_FUNCTION_FAILED,
};

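/*
 * One bit per driver subsystem, recording how far initialization got.
 * The probe/remove paths (not shown in this excerpt) use this mask to
 * tear down exactly what was brought up.
 */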
enum {
        inited_mem = (1u << 0),
        inited_js = (1u << 1),
        inited_pm_runtime_init = (1u << 2),
#ifdef CONFIG_MALI_DEVFREQ
        inited_devfreq = (1u << 3),
#endif /* CONFIG_MALI_DEVFREQ */
        inited_tlstream = (1u << 4),
        inited_backend_early = (1u << 5),
        inited_backend_late = (1u << 6),
        inited_device = (1u << 7),
        inited_vinstr = (1u << 8),
#ifndef CONFIG_MALI_PRFCNT_SET_SECONDARY
        inited_ipa = (1u << 9),
#endif /* CONFIG_MALI_PRFCNT_SET_SECONDARY */
        inited_job_fault = (1u << 10),
        inited_misc_register = (1u << 11),
        inited_get_device = (1u << 12),
        inited_sysfs_group = (1u << 13),
        inited_dev_list = (1u << 14),
        inited_debugfs = (1u << 15),
        inited_gpu_device = (1u << 16),
        inited_registers_map = (1u << 17),
        inited_power_control = (1u << 19),
        inited_buslogger = (1u << 20)
};


#ifdef CONFIG_MALI_DEBUG
#define INACTIVE_WAIT_MS (5000)

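/**
 * kbase_set_driver_inactive - test-only hook to stall or resume the driver.
 * @kbdev:    device to mark.
 * @inactive: true to block new IOCTLs, false to let them run again.
 *
 * When marking the driver inactive, sleeps for INACTIVE_WAIT_MS so that
 * IOCTLs already past the gate in kbase_dispatch() can drain.
 */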
void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive)
{
        kbdev->driver_inactive = inactive;
        wake_up(&kbdev->driver_inactive_wait);

        /* Wait for any running IOCTLs to complete */
        if (inactive)
                msleep(INACTIVE_WAIT_MS);
}
KBASE_EXPORT_TEST_API(kbase_set_driver_inactive);
#endif /* CONFIG_MALI_DEBUG */

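/**
 * kbase_dispatch - main user/kernel (UK) call dispatcher.
 * @kctx:      context issuing the call.
 * @args:      call payload, beginning with a union uk_header.
 * @args_size: payload size in bytes.
 *
 * The version handshake and the SET_FLAGS setup call are handled first;
 * every other function ID is rejected until both have completed. Most
 * per-call failures are reported through ukh->ret rather than the
 * return value.
 *
 * Return: 0 if the call was dispatched (even if it failed and reported
 * the error via ukh->ret), -EINVAL for malformed requests.
 */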
static int kbase_dispatch(struct kbase_context *kctx, void * const args, u32 args_size)
{
        struct kbase_device *kbdev;
        union uk_header *ukh = args;
        u32 id;
        int ret = 0;

        KBASE_DEBUG_ASSERT(ukh != NULL);

        kbdev = kctx->kbdev;
        id = ukh->id;
        ukh->ret = MALI_ERROR_NONE; /* Be optimistic */

#ifdef CONFIG_MALI_DEBUG
        wait_event(kbdev->driver_inactive_wait,
                        kbdev->driver_inactive == false);
#endif /* CONFIG_MALI_DEBUG */

        if (UKP_FUNC_ID_CHECK_VERSION == id) {
                struct uku_version_check_args *version_check;

                if (args_size != sizeof(struct uku_version_check_args)) {
                        ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        return 0;
                }
                version_check = (struct uku_version_check_args *)args;
                kbase_api_handshake(version_check);
                /* save the proposed version number for later use */
                kctx->api_version = KBASE_API_VERSION(version_check->major,
                                version_check->minor);
                ukh->ret = MALI_ERROR_NONE;
                return 0;
        }

        /* block calls until version handshake */
        if (kctx->api_version == 0)
                return -EINVAL;

        if (!atomic_read(&kctx->setup_complete)) {
                struct kbase_uk_set_flags *kbase_set_flags;

                /* setup pending, try to signal that we'll do the setup,
                 * if setup was already in progress, err this call
                 */
                if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0)
                        return -EINVAL;

                /* an unexpected call leaves the context stuck in setup mode;
                 * KBASE_FUNC_SET_FLAGS is the only call accepted here
                 */
                if (id != KBASE_FUNC_SET_FLAGS)
                        return -EINVAL;

                kbase_set_flags = (struct kbase_uk_set_flags *)args;

                /* if not matching the expected call, stay in setup mode */
                if (sizeof(*kbase_set_flags) != args_size)
                        goto bad_size;

                /* if bad flags, will stay stuck in setup mode */
                if (kbase_context_set_create_flags(kctx,
                                kbase_set_flags->create_flags) != 0)
                        ukh->ret = MALI_ERROR_FUNCTION_FAILED;

                atomic_set(&kctx->setup_complete, 1);
                return 0;
        }

        /* setup complete, perform normal operation */
        switch (id) {
        case KBASE_FUNC_MEM_JIT_INIT:
                {
                        struct kbase_uk_mem_jit_init *jit_init = args;

                        if (sizeof(*jit_init) != args_size)
                                goto bad_size;

                        if (kbase_region_tracker_init_jit(kctx,
                                        jit_init->va_pages))
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }
        case KBASE_FUNC_MEM_ALLOC:
                {
                        struct kbase_uk_mem_alloc *mem = args;
                        struct kbase_va_region *reg;

                        if (sizeof(*mem) != args_size)
                                goto bad_size;

#if defined(CONFIG_64BIT)
                        if (!kctx->is_compat) {
                                /* force SAME_VA if a 64-bit client */
                                mem->flags |= BASE_MEM_SAME_VA;
                        }
#endif

                        reg = kbase_mem_alloc(kctx, mem->va_pages,
                                        mem->commit_pages, mem->extent,
                                        &mem->flags, &mem->gpu_va,
                                        &mem->va_alignment);
                        if (!reg)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }
        case KBASE_FUNC_MEM_IMPORT: {
                        struct kbase_uk_mem_import *mem_import = args;
                        void __user *phandle;

                        if (sizeof(*mem_import) != args_size)
                                goto bad_size;
#ifdef CONFIG_COMPAT
                        if (kctx->is_compat)
                                phandle = compat_ptr(mem_import->phandle.compat_value);
                        else
#endif
                                phandle = mem_import->phandle.value;

                        if (mem_import->type == BASE_MEM_IMPORT_TYPE_INVALID) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

                        if (kbase_mem_import(kctx, mem_import->type, phandle,
                                                &mem_import->gpu_va,
                                                &mem_import->va_pages,
                                                &mem_import->flags)) {
                                mem_import->type = BASE_MEM_IMPORT_TYPE_INVALID;
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        }
                        break;
        }
        case KBASE_FUNC_MEM_ALIAS: {
                        struct kbase_uk_mem_alias *alias = args;
                        struct base_mem_aliasing_info __user *user_ai;
                        struct base_mem_aliasing_info *ai;

                        if (sizeof(*alias) != args_size)
                                goto bad_size;

                        if (alias->nents > 2048) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }
                        if (!alias->nents) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

#ifdef CONFIG_COMPAT
                        if (kctx->is_compat)
                                user_ai = compat_ptr(alias->ai.compat_value);
                        else
#endif
                                user_ai = alias->ai.value;

                        ai = vmalloc(sizeof(*ai) * alias->nents);

                        if (!ai) {
                                ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
                                break;
                        }

                        if (copy_from_user(ai, user_ai,
                                           sizeof(*ai) * alias->nents)) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                goto copy_failed;
                        }

                        alias->gpu_va = kbase_mem_alias(kctx, &alias->flags,
                                                        alias->stride,
                                                        alias->nents, ai,
                                                        &alias->va_pages);
                        if (!alias->gpu_va) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                goto no_alias;
                        }
no_alias:
copy_failed:
                        vfree(ai);
                        break;
                }
        case KBASE_FUNC_MEM_COMMIT:
                {
                        struct kbase_uk_mem_commit *commit = args;

                        if (sizeof(*commit) != args_size)
                                goto bad_size;

                        if (commit->gpu_addr & ~PAGE_MASK) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_COMMIT: commit->gpu_addr: passed parameter is invalid");
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

                        if (kbase_mem_commit(kctx, commit->gpu_addr,
                                        commit->pages,
                                        (base_backing_threshold_status *)
                                        &commit->result_subcode) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;

                        break;
                }

        case KBASE_FUNC_MEM_QUERY:
                {
                        struct kbase_uk_mem_query *query = args;

                        if (sizeof(*query) != args_size)
                                goto bad_size;

                        if (query->gpu_addr & ~PAGE_MASK) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->gpu_addr: passed parameter is invalid");
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }
                        if (query->query != KBASE_MEM_QUERY_COMMIT_SIZE &&
                            query->query != KBASE_MEM_QUERY_VA_SIZE &&
                                query->query != KBASE_MEM_QUERY_FLAGS) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->query = %lld unknown", (unsigned long long)query->query);
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

                        if (kbase_mem_query(kctx, query->gpu_addr,
                                        query->query, &query->value) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        else
                                ukh->ret = MALI_ERROR_NONE;
                        break;
                }

        case KBASE_FUNC_MEM_FLAGS_CHANGE:
                {
                        struct kbase_uk_mem_flags_change *fc = args;

                        if (sizeof(*fc) != args_size)
                                goto bad_size;

                        if ((fc->gpu_va & ~PAGE_MASK) && (fc->gpu_va >= PAGE_SIZE)) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FLAGS_CHANGE: mem->gpu_va: passed parameter is invalid");
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

                        if (kbase_mem_flags_change(kctx, fc->gpu_va,
                                        fc->flags, fc->mask) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;

                        break;
                }
        case KBASE_FUNC_MEM_FREE:
                {
                        struct kbase_uk_mem_free *mem = args;

                        if (sizeof(*mem) != args_size)
                                goto bad_size;

                        if ((mem->gpu_addr & ~PAGE_MASK) && (mem->gpu_addr >= PAGE_SIZE)) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FREE: mem->gpu_addr: passed parameter is invalid");
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

                        if (kbase_mem_free(kctx, mem->gpu_addr) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }

        case KBASE_FUNC_JOB_SUBMIT:
                {
                        struct kbase_uk_job_submit *job = args;

                        if (sizeof(*job) != args_size)
                                goto bad_size;

#ifdef BASE_LEGACY_UK6_SUPPORT
                        if (kbase_jd_submit(kctx, job, 0) != 0)
#else
                        if (kbase_jd_submit(kctx, job) != 0)
#endif /* BASE_LEGACY_UK6_SUPPORT */
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }

#ifdef BASE_LEGACY_UK6_SUPPORT
        case KBASE_FUNC_JOB_SUBMIT_UK6:
                {
                        struct kbase_uk_job_submit *job = args;

                        if (sizeof(*job) != args_size)
                                goto bad_size;

                        if (kbase_jd_submit(kctx, job, 1) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }
#endif

        case KBASE_FUNC_SYNC:
                {
                        struct kbase_uk_sync_now *sn = args;

                        if (sizeof(*sn) != args_size)
                                goto bad_size;

                        if (sn->sset.basep_sset.mem_handle.basep.handle & ~PAGE_MASK) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_SYNC: sn->sset.basep_sset.mem_handle: passed parameter is invalid");
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

#ifndef CONFIG_MALI_COH_USER
                        if (kbase_sync_now(kctx, &sn->sset) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
#endif
                        break;
                }

        case KBASE_FUNC_DISJOINT_QUERY:
                {
                        struct kbase_uk_disjoint_query *dquery = args;

                        if (sizeof(*dquery) != args_size)
                                goto bad_size;

                        /* Get the disjointness counter value. */
                        dquery->counter = kbase_disjoint_event_get(kctx->kbdev);
                        break;
                }

        case KBASE_FUNC_POST_TERM:
                {
                        kbase_event_close(kctx);
                        break;
                }

        case KBASE_FUNC_HWCNT_SETUP:
                {
                        struct kbase_uk_hwcnt_setup *setup = args;

                        if (sizeof(*setup) != args_size)
                                goto bad_size;

                        mutex_lock(&kctx->vinstr_cli_lock);
                        if (kbase_vinstr_legacy_hwc_setup(kbdev->vinstr_ctx,
                                        &kctx->vinstr_cli, setup) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        mutex_unlock(&kctx->vinstr_cli_lock);
                        break;
                }

        case KBASE_FUNC_HWCNT_DUMP:
                {
                        /* args ignored */
                        mutex_lock(&kctx->vinstr_cli_lock);
                        if (kbase_vinstr_hwc_dump(kctx->vinstr_cli,
                                        BASE_HWCNT_READER_EVENT_MANUAL) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        mutex_unlock(&kctx->vinstr_cli_lock);
                        break;
                }

        case KBASE_FUNC_HWCNT_CLEAR:
                {
                        /* args ignored */
                        mutex_lock(&kctx->vinstr_cli_lock);
                        if (kbase_vinstr_hwc_clear(kctx->vinstr_cli) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        mutex_unlock(&kctx->vinstr_cli_lock);
                        break;
                }

        case KBASE_FUNC_HWCNT_READER_SETUP:
                {
                        struct kbase_uk_hwcnt_reader_setup *setup = args;

                        if (sizeof(*setup) != args_size)
                                goto bad_size;

                        mutex_lock(&kctx->vinstr_cli_lock);
                        if (kbase_vinstr_hwcnt_reader_setup(kbdev->vinstr_ctx,
                                        setup) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        mutex_unlock(&kctx->vinstr_cli_lock);
                        break;
                }

        case KBASE_FUNC_GPU_PROPS_REG_DUMP:
                {
                        struct kbase_uk_gpuprops *setup = args;

                        if (sizeof(*setup) != args_size)
                                goto bad_size;

                        if (kbase_gpuprops_uk_get_props(kctx, setup) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }
        case KBASE_FUNC_FIND_CPU_OFFSET:
                {
                        struct kbase_uk_find_cpu_offset *find = args;

                        if (sizeof(*find) != args_size)
                                goto bad_size;

                        if (find->gpu_addr & ~PAGE_MASK) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_FIND_CPU_OFFSET: find->gpu_addr: passed parameter is invalid");
                                goto out_bad;
                        }

                        if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        } else {
                                int err;

                                err = kbasep_find_enclosing_cpu_mapping_offset(
                                                kctx,
                                                find->gpu_addr,
                                                (uintptr_t) find->cpu_addr,
                                                (size_t) find->size,
                                                &find->offset);

                                if (err)
                                        ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        }
                        break;
                }
        case KBASE_FUNC_GET_VERSION:
                {
                        struct kbase_uk_get_ddk_version *get_version = (struct kbase_uk_get_ddk_version *)args;

                        if (sizeof(*get_version) != args_size)
                                goto bad_size;

                        /* version buffer size check is done in a compile-time assert */
                        memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
                        get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
                        get_version->rk_version = ROCKCHIP_VERSION;
                        break;
                }

        case KBASE_FUNC_STREAM_CREATE:
                {
#ifdef CONFIG_SYNC
                        struct kbase_uk_stream_create *screate = (struct kbase_uk_stream_create *)args;

                        if (sizeof(*screate) != args_size)
                                goto bad_size;

                        if (strnlen(screate->name, sizeof(screate->name)) >= sizeof(screate->name)) {
                                /* not NULL terminated */
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

                        if (kbase_stream_create(screate->name, &screate->fd) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        else
                                ukh->ret = MALI_ERROR_NONE;
#else /* CONFIG_SYNC */
                        ukh->ret = MALI_ERROR_FUNCTION_FAILED;
#endif /* CONFIG_SYNC */
                        break;
                }
        case KBASE_FUNC_FENCE_VALIDATE:
                {
#ifdef CONFIG_SYNC
                        struct kbase_uk_fence_validate *fence_validate = (struct kbase_uk_fence_validate *)args;

                        if (sizeof(*fence_validate) != args_size)
                                goto bad_size;

                        if (kbase_fence_validate(fence_validate->fd) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        else
                                ukh->ret = MALI_ERROR_NONE;
#endif /* CONFIG_SYNC */
                        break;
                }

        case KBASE_FUNC_EXT_BUFFER_LOCK:
                {
#ifdef CONFIG_KDS
                        ret = kbase_external_buffer_lock(kctx,
                                (struct kbase_uk_ext_buff_kds_data *)args,
                                args_size);
                        switch (ret) {
                        case 0:
                                ukh->ret = MALI_ERROR_NONE;
                                break;
                        case -ENOMEM:
                                ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
                                break;
                        default:
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        }
#endif /* CONFIG_KDS */
                        break;
                }

        case KBASE_FUNC_SET_TEST_DATA:
                {
#if MALI_UNIT_TEST
                        struct kbase_uk_set_test_data *set_data = args;

                        shared_kernel_test_data = set_data->test_data;
                        shared_kernel_test_data.kctx.value = (void __user *)kctx;
                        shared_kernel_test_data.mm.value = (void __user *)current->mm;
                        ukh->ret = MALI_ERROR_NONE;
#endif /* MALI_UNIT_TEST */
                        break;
                }

        case KBASE_FUNC_INJECT_ERROR:
                {
#ifdef CONFIG_MALI_ERROR_INJECT
                        unsigned long flags;
                        struct kbase_error_params params = ((struct kbase_uk_error_params *)args)->params;

                        spin_lock_irqsave(&kbdev->reg_op_lock, flags);
                        if (job_atom_inject_error(&params) != 0)
                                ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
                        else
                                ukh->ret = MALI_ERROR_NONE;
                        spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
#endif /* CONFIG_MALI_ERROR_INJECT */
                        break;
                }

        case KBASE_FUNC_MODEL_CONTROL:
                {
#ifdef CONFIG_MALI_NO_MALI
                        unsigned long flags;
                        struct kbase_model_control_params params =
                                        ((struct kbase_uk_model_control_params *)args)->params;

                        spin_lock_irqsave(&kbdev->reg_op_lock, flags);
                        if (gpu_model_control(kbdev->model, &params) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        else
                                ukh->ret = MALI_ERROR_NONE;
                        spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
#endif /* CONFIG_MALI_NO_MALI */
                        break;
                }

#ifdef BASE_LEGACY_UK8_SUPPORT
        case KBASE_FUNC_KEEP_GPU_POWERED:
                {
                        dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_KEEP_GPU_POWERED: function is deprecated and disabled\n");
                        ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }
#endif /* BASE_LEGACY_UK8_SUPPORT */

        case KBASE_FUNC_GET_PROFILING_CONTROLS:
                {
                        struct kbase_uk_profiling_controls *controls =
                                        (struct kbase_uk_profiling_controls *)args;
                        u32 i;

                        if (sizeof(*controls) != args_size)
                                goto bad_size;

                        for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
                                controls->profiling_controls[i] = kbase_get_profiling_control(kbdev, i);

                        break;
                }

        /* used only for testing purposes; these controls are to be set by gator through gator API */
        case KBASE_FUNC_SET_PROFILING_CONTROLS:
                {
                        struct kbase_uk_profiling_controls *controls =
                                        (struct kbase_uk_profiling_controls *)args;
                        u32 i;

                        if (sizeof(*controls) != args_size)
                                goto bad_size;

                        for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
                                _mali_profiling_control(i, controls->profiling_controls[i]);

                        break;
                }

        case KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD:
                {
                        struct kbase_uk_debugfs_mem_profile_add *add_data =
                                        (struct kbase_uk_debugfs_mem_profile_add *)args;
                        char *buf;
                        char __user *user_buf;

                        if (sizeof(*add_data) != args_size)
                                goto bad_size;

                        if (add_data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
                                dev_err(kbdev->dev, "buffer too big\n");
                                goto out_bad;
                        }

#ifdef CONFIG_COMPAT
                        if (kctx->is_compat)
                                user_buf = compat_ptr(add_data->buf.compat_value);
                        else
#endif
                                user_buf = add_data->buf.value;

                        buf = kmalloc(add_data->len, GFP_KERNEL);
                        if (!buf)
                                goto out_bad;

                        if (0 != copy_from_user(buf, user_buf, add_data->len)) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                kfree(buf);
                                goto out_bad;
                        }

                        if (kbasep_mem_profile_debugfs_insert(kctx, buf,
                                                        add_data->len)) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                kfree(buf);
                                goto out_bad;
                        }

                        break;
                }

#ifdef CONFIG_MALI_NO_MALI
        case KBASE_FUNC_SET_PRFCNT_VALUES:
                {

                        struct kbase_uk_prfcnt_values *params =
                          ((struct kbase_uk_prfcnt_values *)args);
                        gpu_model_set_dummy_prfcnt_sample(params->data,
                                        params->size);

                        break;
                }
#endif /* CONFIG_MALI_NO_MALI */

        case KBASE_FUNC_TLSTREAM_ACQUIRE:
                {
                        struct kbase_uk_tlstream_acquire *tlstream_acquire =
                                args;

                        if (sizeof(*tlstream_acquire) != args_size)
                                goto bad_size;

                        if (0 != kbase_tlstream_acquire(
                                                kctx,
                                                &tlstream_acquire->fd)) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        } else if (0 <= tlstream_acquire->fd) {
                                /* Summary stream was cleared during acquire.
                                 * Create static timeline objects that will be
                                 * read by client. */
                                kbase_create_timeline_objects(kctx);
                        }
                        break;
                }
        case KBASE_FUNC_TLSTREAM_FLUSH:
                {
                        struct kbase_uk_tlstream_flush *tlstream_flush =
                                args;

                        if (sizeof(*tlstream_flush) != args_size)
                                goto bad_size;

                        kbase_tlstream_flush_streams();
                        break;
                }
#if MALI_UNIT_TEST
        case KBASE_FUNC_TLSTREAM_TEST:
                {
                        struct kbase_uk_tlstream_test *tlstream_test = args;

                        if (sizeof(*tlstream_test) != args_size)
                                goto bad_size;

                        kbase_tlstream_test(
                                        tlstream_test->tpw_count,
                                        tlstream_test->msg_delay,
                                        tlstream_test->msg_count,
                                        tlstream_test->aux_msg);
                        break;
                }
        case KBASE_FUNC_TLSTREAM_STATS:
                {
                        struct kbase_uk_tlstream_stats *tlstream_stats = args;

                        if (sizeof(*tlstream_stats) != args_size)
                                goto bad_size;

                        kbase_tlstream_stats(
                                        &tlstream_stats->bytes_collected,
                                        &tlstream_stats->bytes_generated);
                        break;
                }
#endif /* MALI_UNIT_TEST */

        case KBASE_FUNC_GET_CONTEXT_ID:
                {
                        struct kbase_uk_context_id *info = args;

                        info->id = kctx->id;
                        break;
                }

        case KBASE_FUNC_SOFT_EVENT_UPDATE:
                {
                        struct kbase_uk_soft_event_update *update = args;

                        if (sizeof(*update) != args_size)
                                goto bad_size;

                        if (((update->new_status != BASE_JD_SOFT_EVENT_SET) &&
                            (update->new_status != BASE_JD_SOFT_EVENT_RESET)) ||
                            (update->flags != 0))
                                goto out_bad;

                        if (kbasep_write_soft_event_status(
                                                kctx, update->evt,
                                                update->new_status) != 0) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

                        if (update->new_status == BASE_JD_SOFT_EVENT_SET)
                                kbasep_complete_triggered_soft_events(
                                                kctx, update->evt);

                        break;
                }

        default:
                dev_err(kbdev->dev, "unknown ioctl %u\n", id);
                goto out_bad;
        }

        return ret;

 bad_size:
        dev_err(kbdev->dev, "Wrong syscall size (%d) for %08x\n", args_size, id);
 out_bad:
        return -EINVAL;
}

static struct kbase_device *to_kbase_device(struct device *dev)
{
        return dev_get_drvdata(dev);
}

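/**
 * assign_irqs - map the platform device's three IRQ resources to tags.
 * @pdev: platform device carrying the JOB, MMU and GPU interrupts.
 *
 * With device tree the resources are matched by name; without it they
 * are assumed to appear in JOB, MMU, GPU order.
 *
 * Return: 0 on success or a negative error code.
 */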
static int assign_irqs(struct platform_device *pdev)
{
        struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
        int i;

        if (!kbdev)
                return -ENODEV;

        /* 3 IRQ resources */
        for (i = 0; i < 3; i++) {
                struct resource *irq_res;
                int irqtag;

                irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
                if (!irq_res) {
                        dev_err(kbdev->dev, "No IRQ resource at index %d\n", i);
                        return -ENOENT;
                }

#ifdef CONFIG_OF
                if (!strcmp(irq_res->name, "JOB")) {
                        irqtag = JOB_IRQ_TAG;
                } else if (!strcmp(irq_res->name, "MMU")) {
                        irqtag = MMU_IRQ_TAG;
                } else if (!strcmp(irq_res->name, "GPU")) {
                        irqtag = GPU_IRQ_TAG;
                } else {
                        dev_err(&pdev->dev, "Invalid irq res name: '%s'\n",
                                irq_res->name);
                        return -EINVAL;
                }
#else
                irqtag = i;
#endif /* CONFIG_OF */
                kbdev->irqs[irqtag].irq = irq_res->start;
                kbdev->irqs[irqtag].flags = irq_res->flags & IRQF_TRIGGER_MASK;
        }

        return 0;
}

/*
 * API to acquire device list mutex and
 * return pointer to the device list head
 */
const struct list_head *kbase_dev_list_get(void)
{
        mutex_lock(&kbase_dev_list_lock);
        return &kbase_dev_list;
}
KBASE_EXPORT_TEST_API(kbase_dev_list_get);

/* API to release the device list mutex */
void kbase_dev_list_put(const struct list_head *dev_list)
{
        mutex_unlock(&kbase_dev_list_lock);
}
KBASE_EXPORT_TEST_API(kbase_dev_list_put);

/* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */
struct kbase_device *kbase_find_device(int minor)
{
        struct kbase_device *kbdev = NULL;
        struct list_head *entry;
        const struct list_head *dev_list = kbase_dev_list_get();

        list_for_each(entry, dev_list) {
                struct kbase_device *tmp;

                tmp = list_entry(entry, struct kbase_device, entry);
                if (tmp->mdev.minor == minor || minor == -1) {
                        kbdev = tmp;
                        get_device(kbdev->dev);
                        break;
                }
        }
        kbase_dev_list_put(dev_list);

        return kbdev;
}
EXPORT_SYMBOL(kbase_find_device);

void kbase_release_device(struct kbase_device *kbdev)
{
        put_device(kbdev->dev);
}
EXPORT_SYMBOL(kbase_release_device);

1365 static int kbase_open(struct inode *inode, struct file *filp)
1366 {
1367         struct kbase_device *kbdev = NULL;
1368         struct kbase_context *kctx;
1369         int ret = 0;
1370 #ifdef CONFIG_DEBUG_FS
1371         char kctx_name[64];
1372 #endif
1373
1374         kbdev = kbase_find_device(iminor(inode));
1375
1376         if (!kbdev)
1377                 return -ENODEV;
1378
1379         kctx = kbase_create_context(kbdev, is_compat_task());
1380         if (!kctx) {
1381                 ret = -ENOMEM;
1382                 goto out;
1383         }
1384
1385         init_waitqueue_head(&kctx->event_queue);
1386         filp->private_data = kctx;
1387         kctx->filp = filp;
1388
1389         kctx->infinite_cache_active = kbdev->infinite_cache_active_default;
1390
1391 #ifdef CONFIG_DEBUG_FS
1392         snprintf(kctx_name, sizeof(kctx_name), "%d_%d", kctx->tgid, kctx->id);
1393
1394         kctx->kctx_dentry = debugfs_create_dir(kctx_name,
1395                         kbdev->debugfs_ctx_directory);
1396
1397         if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
1398                 ret = -ENOMEM;
1399                 goto out;
1400         }
1401
1402 #ifdef CONFIG_MALI_COH_USER
1403          /* if cache is completely coherent at hardware level, then remove the
1404           * infinite cache control support from debugfs.
1405           */
1406 #else
1407         debugfs_create_bool("infinite_cache", 0644, kctx->kctx_dentry,
1408                         (bool *)&kctx->infinite_cache_active);
1409 #endif /* CONFIG_MALI_COH_USER */
1410
1411         mutex_init(&kctx->mem_profile_lock);
1412
1413         kbasep_jd_debugfs_ctx_add(kctx);
1414         kbase_debug_mem_view_init(filp);
1415
1416         kbase_debug_job_fault_context_init(kctx);
1417
1418         kbase_mem_pool_debugfs_add(kctx->kctx_dentry, &kctx->mem_pool);
1419
1420         kbase_jit_debugfs_add(kctx);
1421 #endif /* CONFIG_DEBUG_FS */
1422
1423         dev_dbg(kbdev->dev, "created base context\n");
1424
1425         {
1426                 struct kbasep_kctx_list_element *element;
1427
1428                 element = kzalloc(sizeof(*element), GFP_KERNEL);
1429                 if (element) {
1430                         mutex_lock(&kbdev->kctx_list_lock);
1431                         element->kctx = kctx;
1432                         list_add(&element->link, &kbdev->kctx_list);
1433                         kbase_tlstream_tl_new_ctx(
1434                                         element->kctx,
1435                                         (u32)(element->kctx->id),
1436                                         (u32)(element->kctx->tgid));
1437                         mutex_unlock(&kbdev->kctx_list_lock);
1438                 } else {
1439                         /* we don't treat this as a fail - just warn about it */
1440                         dev_warn(kbdev->dev, "couldn't add kctx to kctx_list\n");
1441                 }
1442         }
1443         return 0;
1444
1445  out:
1446         kbase_release_device(kbdev);
1447         return ret;
1448 }
1449
1450 static int kbase_release(struct inode *inode, struct file *filp)
1451 {
1452         struct kbase_context *kctx = filp->private_data;
1453         struct kbase_device *kbdev = kctx->kbdev;
1454         struct kbasep_kctx_list_element *element, *tmp;
1455         bool found_element = false;
1456
1457         kbase_tlstream_tl_del_ctx(kctx);
1458
1459 #ifdef CONFIG_DEBUG_FS
1460         debugfs_remove_recursive(kctx->kctx_dentry);
1461         kbasep_mem_profile_debugfs_remove(kctx);
1462         kbase_debug_job_fault_context_term(kctx);
1463 #endif
1464
1465         mutex_lock(&kbdev->kctx_list_lock);
1466         list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
1467                 if (element->kctx == kctx) {
1468                         list_del(&element->link);
1469                         kfree(element);
1470                         found_element = true;
1471                 }
1472         }
1473         mutex_unlock(&kbdev->kctx_list_lock);
1474         if (!found_element)
1475                 dev_warn(kbdev->dev, "kctx not in kctx_list\n");
1476
1477         filp->private_data = NULL;
1478
1479         mutex_lock(&kctx->vinstr_cli_lock);
1480         /* If this client was performing hwcnt dumping and did not explicitly
1481          * detach itself, remove it from the vinstr core now */
1482         if (kctx->vinstr_cli) {
1483                 struct kbase_uk_hwcnt_setup setup;
1484
1485                 setup.dump_buffer = 0llu;
1486                 kbase_vinstr_legacy_hwc_setup(
1487                                 kbdev->vinstr_ctx, &kctx->vinstr_cli, &setup);
1488         }
1489         mutex_unlock(&kctx->vinstr_cli_lock);
1490
1491         kbase_destroy_context(kctx);
1492
1493         dev_dbg(kbdev->dev, "deleted base context\n");
1494         kbase_release_device(kbdev);
1495         return 0;
1496 }
1497
1498 #define CALL_MAX_SIZE 536
1499
1500 static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1501 {
1502         u64 msg[(CALL_MAX_SIZE + 7) >> 3] = { 0xdeadbeefdeadbeefull };  /* u64 storage gives 8-byte alignment for the UK message */
1503         u32 size = _IOC_SIZE(cmd);
1504         struct kbase_context *kctx = filp->private_data;
1505
1506         if (size > CALL_MAX_SIZE)
1507                 return -ENOTTY;
1508
1509         if (0 != copy_from_user(&msg, (void __user *)arg, size)) {
1510                 dev_err(kctx->kbdev->dev, "failed to copy ioctl argument into kernel space\n");
1511                 return -EFAULT;
1512         }
1513
1514         if (kbase_dispatch(kctx, &msg, size) != 0)
1515                 return -EFAULT;
1516
1517         if (0 != copy_to_user((void __user *)arg, &msg, size)) {
1518                 dev_err(kctx->kbdev->dev, "failed to copy results of UK call back to user space\n");
1519                 return -EFAULT;
1520         }
1521         return 0;
1522 }
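
/*
 * Illustrative userspace view of the convention implemented above (a sketch;
 * "union uk_msg" is a hypothetical stand-in for the real UKU structures):
 * the payload size is encoded in the ioctl command, and the same buffer
 * carries the request in and the reply out.
 *
 *	union uk_msg msg = { 0 };
 *	...fill in request fields...
 *	if (ioctl(fd, cmd, &msg) == 0)
 *		...reply fields have been written back into msg...
 */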
1523
1524 static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
1525 {
1526         struct kbase_context *kctx = filp->private_data;
1527         struct base_jd_event_v2 uevent;
1528         int out_count = 0;
1529
1530         if (count < sizeof(uevent))
1531                 return -ENOBUFS;
1532
1533         do {
1534                 while (kbase_event_dequeue(kctx, &uevent)) {
1535                         if (out_count > 0)
1536                                 goto out;
1537
1538                         if (filp->f_flags & O_NONBLOCK)
1539                                 return -EAGAIN;
1540
1541                         if (wait_event_interruptible(kctx->event_queue,
1542                                         kbase_event_pending(kctx)) != 0)
1543                                 return -ERESTARTSYS;
1544                 }
1545                 if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
1546                         if (out_count == 0)
1547                                 return -EPIPE;
1548                         goto out;
1549                 }
1550
1551                 if (copy_to_user(buf, &uevent, sizeof(uevent)) != 0)
1552                         return -EFAULT;
1553
1554                 buf += sizeof(uevent);
1555                 out_count++;
1556                 count -= sizeof(uevent);
1557         } while (count >= sizeof(uevent));
1558
1559  out:
1560         return out_count * sizeof(uevent);
1561 }
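
/*
 * Illustrative userspace pairing for kbase_read() (a sketch; error handling
 * abbreviated): events are fixed-size struct base_jd_event_v2 records, so
 * the buffer must hold at least one record, and read() fails with EPIPE once
 * the driver has queued BASE_JD_EVENT_DRV_TERMINATED.
 *
 *	struct base_jd_event_v2 evs[8];
 *	ssize_t n = read(fd, evs, sizeof(evs));
 *	size_t i;
 *
 *	for (i = 0; n > 0 && i < n / sizeof(evs[0]); i++)
 *		...handle evs[i]...
 */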
1562
1563 static unsigned int kbase_poll(struct file *filp, poll_table *wait)
1564 {
1565         struct kbase_context *kctx = filp->private_data;
1566
1567         poll_wait(filp, &kctx->event_queue, wait);
1568         if (kbase_event_pending(kctx))
1569                 return POLLIN | POLLRDNORM;
1570
1571         return 0;
1572 }
1573
1574 void kbase_event_wakeup(struct kbase_context *kctx)
1575 {
1576         KBASE_DEBUG_ASSERT(kctx);
1577
1578         wake_up_interruptible(&kctx->event_queue);
1579 }
1580
1581 KBASE_EXPORT_TEST_API(kbase_event_wakeup);
1582
1583 static int kbase_check_flags(int flags)
1584 {
1585         /* Enforce that the driver keeps the O_CLOEXEC flag so that the file
1586          * descriptor is always closed across execve() and never leaks to a child.
1587          */
1588         if (0 == (flags & O_CLOEXEC))
1589                 return -EINVAL;
1590
1591         return 0;
1592 }
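
/*
 * The expected userspace pattern is therefore to open the device with
 * O_CLOEXEC from the start (device node name illustrative; the driver
 * registers a misc device, typically visible as /dev/mali0):
 *
 *	int fd = open("/dev/mali0", O_RDWR | O_CLOEXEC);
 */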
1593
1594 #ifdef CONFIG_64BIT
1595 /* The following function is taken from the kernel and just
1596  * renamed. As it's not exported to modules we must copy-paste it here.
1597  */
1598
1599 static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info
1600                 *info)
1601 {
1602         struct mm_struct *mm = current->mm;
1603         struct vm_area_struct *vma;
1604         unsigned long length, low_limit, high_limit, gap_start, gap_end;
1605
1606         /* Adjust search length to account for worst case alignment overhead */
1607         length = info->length + info->align_mask;
1608         if (length < info->length)
1609                 return -ENOMEM;
1610
1611         /*
1612          * Adjust search limits by the desired length.
1613          * See implementation comment at top of unmapped_area().
1614          */
1615         gap_end = info->high_limit;
1616         if (gap_end < length)
1617                 return -ENOMEM;
1618         high_limit = gap_end - length;
1619
1620         if (info->low_limit > high_limit)
1621                 return -ENOMEM;
1622         low_limit = info->low_limit + length;
1623
1624         /* Check highest gap, which does not precede any rbtree node */
1625         gap_start = mm->highest_vm_end;
1626         if (gap_start <= high_limit)
1627                 goto found_highest;
1628
1629         /* Check if rbtree root looks promising */
1630         if (RB_EMPTY_ROOT(&mm->mm_rb))
1631                 return -ENOMEM;
1632         vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1633         if (vma->rb_subtree_gap < length)
1634                 return -ENOMEM;
1635
1636         while (true) {
1637                 /* Visit right subtree if it looks promising */
1638                 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
1639                 if (gap_start <= high_limit && vma->vm_rb.rb_right) {
1640                         struct vm_area_struct *right =
1641                                 rb_entry(vma->vm_rb.rb_right,
1642                                          struct vm_area_struct, vm_rb);
1643                         if (right->rb_subtree_gap >= length) {
1644                                 vma = right;
1645                                 continue;
1646                         }
1647                 }
1648
1649 check_current:
1650                 /* Check if current node has a suitable gap */
1651                 gap_end = vma->vm_start;
1652                 if (gap_end < low_limit)
1653                         return -ENOMEM;
1654                 if (gap_start <= high_limit && gap_end - gap_start >= length)
1655                         goto found;
1656
1657                 /* Visit left subtree if it looks promising */
1658                 if (vma->vm_rb.rb_left) {
1659                         struct vm_area_struct *left =
1660                                 rb_entry(vma->vm_rb.rb_left,
1661                                          struct vm_area_struct, vm_rb);
1662                         if (left->rb_subtree_gap >= length) {
1663                                 vma = left;
1664                                 continue;
1665                         }
1666                 }
1667
1668                 /* Go back up the rbtree to find next candidate node */
1669                 while (true) {
1670                         struct rb_node *prev = &vma->vm_rb;
1671                         if (!rb_parent(prev))
1672                                 return -ENOMEM;
1673                         vma = rb_entry(rb_parent(prev),
1674                                        struct vm_area_struct, vm_rb);
1675                         if (prev == vma->vm_rb.rb_right) {
1676                                 gap_start = vma->vm_prev ?
1677                                         vma->vm_prev->vm_end : 0;
1678                                 goto check_current;
1679                         }
1680                 }
1681         }
1682
1683 found:
1684         /* We found a suitable gap. Clip it with the original high_limit. */
1685         if (gap_end > info->high_limit)
1686                 gap_end = info->high_limit;
1687
1688 found_highest:
1689         /* Compute highest gap address at the desired alignment */
1690         gap_end -= info->length;
1691         gap_end -= (gap_end - info->align_offset) & info->align_mask;
1692
1693         VM_BUG_ON(gap_end < info->low_limit);
1694         VM_BUG_ON(gap_end < gap_start);
1695         return gap_end;
1696 }
1697
1698
1699 static unsigned long kbase_get_unmapped_area(struct file *filp,
1700                 const unsigned long addr, const unsigned long len,
1701                 const unsigned long pgoff, const unsigned long flags)
1702 {
1703         /* based on get_unmapped_area(), but simplified slightly because some
1704          * values are known in advance */
1705         struct kbase_context *kctx = filp->private_data;
1706         struct mm_struct *mm = current->mm;
1707         struct vm_unmapped_area_info info;
1708
1709         /* err on fixed address */
1710         if ((flags & MAP_FIXED) || addr)
1711                 return -EINVAL;
1712
1713         /* too big? */
1714         if (len > TASK_SIZE - SZ_2M)
1715                 return -ENOMEM;
1716
1717         if (kctx->is_compat)
1718                 return current->mm->get_unmapped_area(filp, addr, len, pgoff,
1719                                 flags);
1720
1721         if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA)) {
1722                 info.high_limit = kctx->same_va_end << PAGE_SHIFT;
1723                 info.align_mask = 0;
1724                 info.align_offset = 0;
1725         } else {
1726                 info.high_limit = min_t(unsigned long, mm->mmap_base,
1727                                         (kctx->same_va_end << PAGE_SHIFT));
1728                 if (len >= SZ_2M) {
1729                         info.align_offset = SZ_2M;
1730                         info.align_mask = SZ_2M - 1;
1731                 } else {
1732                         info.align_mask = 0;
1733                         info.align_offset = 0;
1734                 }
1735         }
1736
1737         info.flags = 0;
1738         info.length = len;
1739         info.low_limit = SZ_2M;
1740         return kbase_unmapped_area_topdown(&info);
1741 }
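
/*
 * Worked example of the 2MB alignment above (address illustrative): with
 * info.align_offset = SZ_2M and info.align_mask = SZ_2M - 1, the final
 * "gap_end -= (gap_end - info->align_offset) & info->align_mask" step in
 * kbase_unmapped_area_topdown() rounds the candidate address down to a 2MB
 * boundary, e.g. a gap_end of 0x76543210 becomes 0x76400000.
 */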
1742 #endif
1743
1744 static const struct file_operations kbase_fops = {
1745         .owner = THIS_MODULE,
1746         .open = kbase_open,
1747         .release = kbase_release,
1748         .read = kbase_read,
1749         .poll = kbase_poll,
1750         .unlocked_ioctl = kbase_ioctl,
1751         .compat_ioctl = kbase_ioctl,
1752         .mmap = kbase_mmap,
1753         .check_flags = kbase_check_flags,
1754 #ifdef CONFIG_64BIT
1755         .get_unmapped_area = kbase_get_unmapped_area,
1756 #endif
1757 };
1758
1759 #ifndef CONFIG_MALI_NO_MALI
1760 void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value)
1761 {
1762         writel(value, kbdev->reg + offset);
1763 }
1764
1765 u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset)
1766 {
1767         return readl(kbdev->reg + offset);
1768 }
1769 #endif /* !CONFIG_MALI_NO_MALI */
1770
1771
1772 /** Show callback for the @c power_policy sysfs file.
1773  *
1774  * This function is called to get the contents of the @c power_policy sysfs
1775  * file. This is a list of the available policies with the currently active one
1776  * surrounded by square brackets.
1777  *
1778  * @param dev   The device this sysfs file is for
1779  * @param attr  The attributes of the sysfs file
1780  * @param buf   The output buffer for the sysfs file contents
1781  *
1782  * @return The number of bytes output to @c buf.
1783  */
1784 static ssize_t show_policy(struct device *dev, struct device_attribute *attr, char *const buf)
1785 {
1786         struct kbase_device *kbdev;
1787         const struct kbase_pm_policy *current_policy;
1788         const struct kbase_pm_policy *const *policy_list;
1789         int policy_count;
1790         int i;
1791         ssize_t ret = 0;
1792
1793         kbdev = to_kbase_device(dev);
1794
1795         if (!kbdev)
1796                 return -ENODEV;
1797
1798         current_policy = kbase_pm_get_policy(kbdev);
1799
1800         policy_count = kbase_pm_list_policies(&policy_list);
1801
1802         for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
1803                 if (policy_list[i] == current_policy)
1804                         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
1805                 else
1806                         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
1807         }
1808
1809         if (ret < PAGE_SIZE - 1) {
1810                 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
1811         } else {
1812                 buf[PAGE_SIZE - 2] = '\n';
1813                 buf[PAGE_SIZE - 1] = '\0';
1814                 ret = PAGE_SIZE - 1;
1815         }
1816
1817         return ret;
1818 }
1819
1820 /** Store callback for the @c power_policy sysfs file.
1821  *
1822  * This function is called when the @c power_policy sysfs file is written to.
1823  * It matches the requested policy against the available policies and, if a
1824  * matching policy is found, calls @ref kbase_pm_set_policy to change the
1825  * policy.
1826  *
1827  * @param dev   The device this sysfs file is for
1828  * @param attr  The attributes of the sysfs file
1829  * @param buf   The value written to the sysfs file
1830  * @param count The number of bytes written to the sysfs file
1831  *
1832  * @return @c count if the function succeeded. An error code on failure.
1833  */
1834 static ssize_t set_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1835 {
1836         struct kbase_device *kbdev;
1837         const struct kbase_pm_policy *new_policy = NULL;
1838         const struct kbase_pm_policy *const *policy_list;
1839         int policy_count;
1840         int i;
1841
1842         kbdev = to_kbase_device(dev);
1843
1844         if (!kbdev)
1845                 return -ENODEV;
1846
1847         policy_count = kbase_pm_list_policies(&policy_list);
1848
1849         for (i = 0; i < policy_count; i++) {
1850                 if (sysfs_streq(policy_list[i]->name, buf)) {
1851                         new_policy = policy_list[i];
1852                         break;
1853                 }
1854         }
1855
1856         if (!new_policy) {
1857                 dev_err(dev, "power_policy: policy not found\n");
1858                 return -EINVAL;
1859         }
1860
1861         kbase_pm_set_policy(kbdev, new_policy);
1862
1863         return count;
1864 }
1865
1866 /** The sysfs file @c power_policy.
1867  *
1868  * This is used for obtaining information about the available policies,
1869  * determining which policy is currently active, and changing the active
1870  * policy.
1871  */
1872 static DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
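
/*
 * Example interaction with the power_policy file (sysfs path and policy
 * names are illustrative; the available set is whatever
 * kbase_pm_list_policies() reports on the target build):
 *
 *	# cat /sys/class/misc/mali0/device/power_policy
 *	[demand] coarse_demand always_on
 *	# echo always_on > /sys/class/misc/mali0/device/power_policy
 */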
1873
1874 /** Show callback for the @c core_availability_policy sysfs file.
1875  *
1876  * This function is called to get the contents of the @c core_availability_policy
1877  * sysfs file. This is a list of the available policies with the currently
1878  * active one surrounded by square brackets.
1879  *
1880  * @param dev   The device this sysfs file is for
1881  * @param attr  The attributes of the sysfs file
1882  * @param buf   The output buffer for the sysfs file contents
1883  *
1884  * @return The number of bytes output to @c buf.
1885  */
1886 static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char * const buf)
1887 {
1888         struct kbase_device *kbdev;
1889         const struct kbase_pm_ca_policy *current_policy;
1890         const struct kbase_pm_ca_policy *const *policy_list;
1891         int policy_count;
1892         int i;
1893         ssize_t ret = 0;
1894
1895         kbdev = to_kbase_device(dev);
1896
1897         if (!kbdev)
1898                 return -ENODEV;
1899
1900         current_policy = kbase_pm_ca_get_policy(kbdev);
1901
1902         policy_count = kbase_pm_ca_list_policies(&policy_list);
1903
1904         for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
1905                 if (policy_list[i] == current_policy)
1906                         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
1907                 else
1908                         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
1909         }
1910
1911         if (ret < PAGE_SIZE - 1) {
1912                 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
1913         } else {
1914                 buf[PAGE_SIZE - 2] = '\n';
1915                 buf[PAGE_SIZE - 1] = '\0';
1916                 ret = PAGE_SIZE - 1;
1917         }
1918
1919         return ret;
1920 }
1921
1922 /** Store callback for the @c core_availability_policy sysfs file.
1923  *
1924  * This function is called when the @c core_availability_policy sysfs file is
1925  * written to. It matches the requested policy against the available policies
1926  * and, if a matching policy is found, calls @ref kbase_pm_ca_set_policy to
1927  * change the policy.
1928  *
1929  * @param dev   The device this sysfs file is for
1930  * @param attr  The attributes of the sysfs file
1931  * @param buf   The value written to the sysfs file
1932  * @param count The number of bytes written to the sysfs file
1933  *
1934  * @return @c count if the function succeeded. An error code on failure.
1935  */
1936 static ssize_t set_ca_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1937 {
1938         struct kbase_device *kbdev;
1939         const struct kbase_pm_ca_policy *new_policy = NULL;
1940         const struct kbase_pm_ca_policy *const *policy_list;
1941         int policy_count;
1942         int i;
1943
1944         kbdev = to_kbase_device(dev);
1945
1946         if (!kbdev)
1947                 return -ENODEV;
1948
1949         policy_count = kbase_pm_ca_list_policies(&policy_list);
1950
1951         for (i = 0; i < policy_count; i++) {
1952                 if (sysfs_streq(policy_list[i]->name, buf)) {
1953                         new_policy = policy_list[i];
1954                         break;
1955                 }
1956         }
1957
1958         if (!new_policy) {
1959                 dev_err(dev, "core_availability_policy: policy not found\n");
1960                 return -EINVAL;
1961         }
1962
1963         kbase_pm_ca_set_policy(kbdev, new_policy);
1964
1965         return count;
1966 }
1967
1968 /** The sysfs file @c core_availability_policy
1969  *
1970  * This is used for obtaining information about the available policies,
1971  * determining which policy is currently active, and changing the active
1972  * policy.
1973  */
1974 static DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_policy);
1975
1976 /** Show callback for the @c core_mask sysfs file.
1977  *
1978  * This function is called to get the contents of the @c core_mask sysfs
1979  * file.
1980  *
1981  * @param dev   The device this sysfs file is for
1982  * @param attr  The attributes of the sysfs file
1983  * @param buf   The output buffer for the sysfs file contents
1984  *
1985  * @return The number of bytes output to @c buf.
1986  */
1987 static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char * const buf)
1988 {
1989         struct kbase_device *kbdev;
1990         ssize_t ret = 0;
1991
1992         kbdev = to_kbase_device(dev);
1993
1994         if (!kbdev)
1995                 return -ENODEV;
1996
1997         ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1998                         "Current core mask (JS0) : 0x%llX\n",
1999                         kbdev->pm.debug_core_mask[0]);
2000         ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2001                         "Current core mask (JS1) : 0x%llX\n",
2002                         kbdev->pm.debug_core_mask[1]);
2003         ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2004                         "Current core mask (JS2) : 0x%llX\n",
2005                         kbdev->pm.debug_core_mask[2]);
2006         ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2007                         "Available core mask : 0x%llX\n",
2008                         kbdev->gpu_props.props.raw_props.shader_present);
2009
2010         return ret;
2011 }
2012
2013 /** Store callback for the @c core_mask sysfs file.
2014  *
2015  * This function is called when the @c core_mask sysfs file is written to.
2016  *
2017  * @param dev   The device this sysfs file is for
2018  * @param attr  The attributes of the sysfs file
2019  * @param buf   The value written to the sysfs file
2020  * @param count The number of bytes written to the sysfs file
2021  *
2022  * @return @c count if the function succeeded. An error code on failure.
2023  */
2024 static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2025 {
2026         struct kbase_device *kbdev;
2027         u64 new_core_mask[3];
2028         int items;
2029
2030         kbdev = to_kbase_device(dev);
2031
2032         if (!kbdev)
2033                 return -ENODEV;
2034
2035         items = sscanf(buf, "%llx %llx %llx",
2036                         &new_core_mask[0], &new_core_mask[1],
2037                         &new_core_mask[2]);
2038
2039         if (items == 1)
2040                 new_core_mask[1] = new_core_mask[2] = new_core_mask[0];
2041
2042         if (items == 1 || items == 3) {
2043                 u64 shader_present =
2044                                 kbdev->gpu_props.props.raw_props.shader_present;
2045                 u64 group0_core_mask =
2046                                 kbdev->gpu_props.props.coherency_info.group[0].
2047                                 core_mask;
2048
2049                 if ((new_core_mask[0] & shader_present) != new_core_mask[0] ||
2050                                 !(new_core_mask[0] & group0_core_mask) ||
2051                         (new_core_mask[1] & shader_present) !=
2052                                                 new_core_mask[1] ||
2053                                 !(new_core_mask[1] & group0_core_mask) ||
2054                         (new_core_mask[2] & shader_present) !=
2055                                                 new_core_mask[2] ||
2056                                 !(new_core_mask[2] & group0_core_mask)) {
2057                         dev_err(dev, "core_mask: invalid core specification\n");
2058                         return -EINVAL;
2059                 }
2060
2061                 if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] ||
2062                                 kbdev->pm.debug_core_mask[1] !=
2063                                                 new_core_mask[1] ||
2064                                 kbdev->pm.debug_core_mask[2] !=
2065                                                 new_core_mask[2]) {
2066                         unsigned long flags;
2067
2068                         spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
2069
2070                         kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0],
2071                                         new_core_mask[1], new_core_mask[2]);
2072
2073                         spin_unlock_irqrestore(&kbdev->pm.power_change_lock,
2074                                         flags);
2075                 }
2076
2077                 return count;
2078         }
2079
2080         dev_err(kbdev->dev, "Couldn't process set_core_mask write operation.\n"
2081                 "Use format <core_mask>\n"
2082                 "or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
2083         return -EINVAL;
2084 }
2085
2086 /** The sysfs file @c core_mask.
2087  *
2088  * This is used to restrict shader core availability for debugging purposes.
2089  * Reading it will show the current core mask and the mask of cores available.
2090  * Writing to it will set the current core mask.
2091  */
2092 static DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask);
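
/*
 * Example interaction with the core_mask file (path and masks illustrative):
 * a single mask applies to all three job slots, or three masks can be given
 * for JS0, JS1 and JS2 individually.
 *
 *	# echo 0x3 > /sys/class/misc/mali0/device/core_mask
 *	# echo "0xf 0x3 0x3" > /sys/class/misc/mali0/device/core_mask
 */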
2093
2094 /**
2095  * set_soft_event_timeout() - Store callback for the soft_event_timeout sysfs
2096  * file.
2097  *
2098  * @dev: The device this sysfs file is for.
2099  * @attr: The attributes of the sysfs file.
2100  * @buf: The value written to the sysfs file.
2101  * @count: The number of bytes written to the sysfs file.
2102  *
2103  * This allows setting the timeout for software event jobs. Waiting jobs will
2104  * be cancelled after this period expires. This is expressed in milliseconds.
2105  *
2106  * Return: count if the function succeeded. An error code on failure.
2107  */
2108 static ssize_t set_soft_event_timeout(struct device *dev,
2109                                       struct device_attribute *attr,
2110                                       const char *buf, size_t count)
2111 {
2112         struct kbase_device *kbdev;
2113         int soft_event_timeout_ms;
2114
2115         kbdev = to_kbase_device(dev);
2116         if (!kbdev)
2117                 return -ENODEV;
2118
2119         if ((kstrtoint(buf, 0, &soft_event_timeout_ms) != 0) ||
2120             (soft_event_timeout_ms <= 0))
2121                 return -EINVAL;
2122
2123         atomic_set(&kbdev->js_data.soft_event_timeout_ms,
2124                    soft_event_timeout_ms);
2125
2126         return count;
2127 }
2128
2129 /**
2130  * show_soft_event_timeout() - Show callback for the soft_event_timeout sysfs
2131  * file.
2132  *
2133  * This will return the timeout for the software event jobs.
2134  *
2135  * @dev: The device this sysfs file is for.
2136  * @attr: The attributes of the sysfs file.
2137  * @buf: The output buffer for the sysfs file contents.
2138  *
2139  * Return: The number of bytes output to buf.
2140  */
2141 static ssize_t show_soft_event_timeout(struct device *dev,
2142                                        struct device_attribute *attr,
2143                                        char * const buf)
2144 {
2145         struct kbase_device *kbdev;
2146
2147         kbdev = to_kbase_device(dev);
2148         if (!kbdev)
2149                 return -ENODEV;
2150
2151         return scnprintf(buf, PAGE_SIZE, "%i\n",
2152                          atomic_read(&kbdev->js_data.soft_event_timeout_ms));
2153 }
2154
2155 static DEVICE_ATTR(soft_event_timeout, S_IRUGO | S_IWUSR,
2156                    show_soft_event_timeout, set_soft_event_timeout);
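
/*
 * Example (path and value illustrative): cancel waiting soft event jobs
 * after three seconds.
 *
 *	# echo 3000 > /sys/class/misc/mali0/device/soft_event_timeout
 */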
2157
2158 /** Store callback for the @c js_timeouts sysfs file.
2159  *
2160  * This function is called when the @c js_timeouts sysfs file is written to.
2161  * The file accepts eight whitespace-separated values, matching the
2162  * JS_SOFT_STOP_TICKS, JS_SOFT_STOP_TICKS_CL, JS_HARD_STOP_TICKS_SS,
2163  * JS_HARD_STOP_TICKS_CL, JS_HARD_STOP_TICKS_DUMPING, JS_RESET_TICKS_SS,
2164  * JS_RESET_TICKS_CL and JS_RESET_TICKS_DUMPING configuration values (in
2165  * that order), except expressed in MILLISECONDS. The js_timeouts file
2166  * allows the values currently in use by the job scheduler to be
2167  * overridden.
2168  *
2169  * A value must be non-zero to take effect; writing -1 restores the default.
2170  *
2171  * @param dev   The device this sysfs file is for
2172  * @param attr  The attributes of the sysfs file
2173  * @param buf   The value written to the sysfs file
2174  * @param count The number of bytes written to the sysfs file
2175  *
2176  * @return @c count if the function succeeded. An error code on failure.
2177  */
2178 static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2179 {
2180         struct kbase_device *kbdev;
2181         int items;
2182         long js_soft_stop_ms;
2183         long js_soft_stop_ms_cl;
2184         long js_hard_stop_ms_ss;
2185         long js_hard_stop_ms_cl;
2186         long js_hard_stop_ms_dumping;
2187         long js_reset_ms_ss;
2188         long js_reset_ms_cl;
2189         long js_reset_ms_dumping;
2190
2191         kbdev = to_kbase_device(dev);
2192         if (!kbdev)
2193                 return -ENODEV;
2194
2195         items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld",
2196                         &js_soft_stop_ms, &js_soft_stop_ms_cl,
2197                         &js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
2198                         &js_hard_stop_ms_dumping, &js_reset_ms_ss,
2199                         &js_reset_ms_cl, &js_reset_ms_dumping);
2200
2201         if (items == 8) {
2202                 u64 ticks;
2203
2204                 if (js_soft_stop_ms >= 0) {
2205                         ticks = js_soft_stop_ms * 1000000ULL;
2206                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2207                         kbdev->js_soft_stop_ticks = ticks;
2208                 } else {
2209                         kbdev->js_soft_stop_ticks = -1;
2210                 }
2211
2212                 if (js_soft_stop_ms_cl >= 0) {
2213                         ticks = js_soft_stop_ms_cl * 1000000ULL;
2214                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2215                         kbdev->js_soft_stop_ticks_cl = ticks;
2216                 } else {
2217                         kbdev->js_soft_stop_ticks_cl = -1;
2218                 }
2219
2220                 if (js_hard_stop_ms_ss >= 0) {
2221                         ticks = js_hard_stop_ms_ss * 1000000ULL;
2222                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2223                         kbdev->js_hard_stop_ticks_ss = ticks;
2224                 } else {
2225                         kbdev->js_hard_stop_ticks_ss = -1;
2226                 }
2227
2228                 if (js_hard_stop_ms_cl >= 0) {
2229                         ticks = js_hard_stop_ms_cl * 1000000ULL;
2230                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2231                         kbdev->js_hard_stop_ticks_cl = ticks;
2232                 } else {
2233                         kbdev->js_hard_stop_ticks_cl = -1;
2234                 }
2235
2236                 if (js_hard_stop_ms_dumping >= 0) {
2237                         ticks = js_hard_stop_ms_dumping * 1000000ULL;
2238                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2239                         kbdev->js_hard_stop_ticks_dumping = ticks;
2240                 } else {
2241                         kbdev->js_hard_stop_ticks_dumping = -1;
2242                 }
2243
2244                 if (js_reset_ms_ss >= 0) {
2245                         ticks = js_reset_ms_ss * 1000000ULL;
2246                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2247                         kbdev->js_reset_ticks_ss = ticks;
2248                 } else {
2249                         kbdev->js_reset_ticks_ss = -1;
2250                 }
2251
2252                 if (js_reset_ms_cl >= 0) {
2253                         ticks = js_reset_ms_cl * 1000000ULL;
2254                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2255                         kbdev->js_reset_ticks_cl = ticks;
2256                 } else {
2257                         kbdev->js_reset_ticks_cl = -1;
2258                 }
2259
2260                 if (js_reset_ms_dumping >= 0) {
2261                         ticks = js_reset_ms_dumping * 1000000ULL;
2262                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2263                         kbdev->js_reset_ticks_dumping = ticks;
2264                 } else {
2265                         kbdev->js_reset_ticks_dumping = -1;
2266                 }
2267
2268                 kbdev->js_timeouts_updated = true;
2269
2270                 dev_dbg(kbdev->dev, "Overriding JS_SOFT_STOP_TICKS with %lu ticks (%lu ms)\n",
2271                                 (unsigned long)kbdev->js_soft_stop_ticks,
2272                                 js_soft_stop_ms);
2273                 dev_dbg(kbdev->dev, "Overriding JS_SOFT_STOP_TICKS_CL with %lu ticks (%lu ms)\n",
2274                                 (unsigned long)kbdev->js_soft_stop_ticks_cl,
2275                                 js_soft_stop_ms_cl);
2276                 dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_SS with %lu ticks (%lu ms)\n",
2277                                 (unsigned long)kbdev->js_hard_stop_ticks_ss,
2278                                 js_hard_stop_ms_ss);
2279                 dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_CL with %lu ticks (%lu ms)\n",
2280                                 (unsigned long)kbdev->js_hard_stop_ticks_cl,
2281                                 js_hard_stop_ms_cl);
2282                 dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_DUMPING with %lu ticks (%lu ms)\n",
2283                                 (unsigned long)
2284                                         kbdev->js_hard_stop_ticks_dumping,
2285                                 js_hard_stop_ms_dumping);
2286                 dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_SS with %lu ticks (%lu ms)\n",
2287                                 (unsigned long)kbdev->js_reset_ticks_ss,
2288                                 js_reset_ms_ss);
2289                 dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_CL with %lu ticks (%lu ms)\n",
2290                                 (unsigned long)kbdev->js_reset_ticks_cl,
2291                                 js_reset_ms_cl);
2292                 dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_DUMPING with %lu ticks (%lu ms)\n",
2293                                 (unsigned long)kbdev->js_reset_ticks_dumping,
2294                                 js_reset_ms_dumping);
2295
2296                 return count;
2297         }
2298
2299         dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n"
2300                         "Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> <hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> <reset_ms_dumping>\n"
2301                         "Write 0 for no change, -1 to restore default timeout\n");
2302         return -EINVAL;
2303 }
2304
2305 /** Show callback for the @c js_timeouts sysfs file.
2306  *
2307  * This function is called to get the contents of the @c js_timeouts sysfs
2308  * file. It returns the last set values written to the js_timeouts sysfs file.
2309  * If the file has not been written yet, the values returned are the
2310  * current settings in use.
2311  * @param dev   The device this sysfs file is for
2312  * @param attr  The attributes of the sysfs file
2313  * @param buf   The output buffer for the sysfs file contents
2314  *
2315  * @return The number of bytes output to @c buf.
2316  */
2317 static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *attr, char * const buf)
2318 {
2319         struct kbase_device *kbdev;
2320         ssize_t ret;
2321         u64 ms;
2322         unsigned long js_soft_stop_ms;
2323         unsigned long js_soft_stop_ms_cl;
2324         unsigned long js_hard_stop_ms_ss;
2325         unsigned long js_hard_stop_ms_cl;
2326         unsigned long js_hard_stop_ms_dumping;
2327         unsigned long js_reset_ms_ss;
2328         unsigned long js_reset_ms_cl;
2329         unsigned long js_reset_ms_dumping;
2330         unsigned long ticks;
2331         u32 scheduling_period_ns;
2332
2333         kbdev = to_kbase_device(dev);
2334         if (!kbdev)
2335                 return -ENODEV;
2336
2337         /* If no contexts have been scheduled since js_timeouts was last written
2338          * to, the new timeouts might not have been latched yet. So check if an
2339          * update is pending and use the new values if necessary. */
2340         if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns > 0)
2341                 scheduling_period_ns = kbdev->js_scheduling_period_ns;
2342         else
2343                 scheduling_period_ns = kbdev->js_data.scheduling_period_ns;
2344
2345         if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks > 0)
2346                 ticks = kbdev->js_soft_stop_ticks;
2347         else
2348                 ticks = kbdev->js_data.soft_stop_ticks;
2349         ms = (u64)ticks * scheduling_period_ns;
2350         do_div(ms, 1000000UL);
2351         js_soft_stop_ms = (unsigned long)ms;
2352
2353         if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks_cl > 0)
2354                 ticks = kbdev->js_soft_stop_ticks_cl;
2355         else
2356                 ticks = kbdev->js_data.soft_stop_ticks_cl;
2357         ms = (u64)ticks * scheduling_period_ns;
2358         do_div(ms, 1000000UL);
2359         js_soft_stop_ms_cl = (unsigned long)ms;
2360
2361         if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_ss > 0)
2362                 ticks = kbdev->js_hard_stop_ticks_ss;
2363         else
2364                 ticks = kbdev->js_data.hard_stop_ticks_ss;
2365         ms = (u64)ticks * scheduling_period_ns;
2366         do_div(ms, 1000000UL);
2367         js_hard_stop_ms_ss = (unsigned long)ms;
2368
2369         if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_cl > 0)
2370                 ticks = kbdev->js_hard_stop_ticks_cl;
2371         else
2372                 ticks = kbdev->js_data.hard_stop_ticks_cl;
2373         ms = (u64)ticks * scheduling_period_ns;
2374         do_div(ms, 1000000UL);
2375         js_hard_stop_ms_cl = (unsigned long)ms;
2376
2377         if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_dumping > 0)
2378                 ticks = kbdev->js_hard_stop_ticks_dumping;
2379         else
2380                 ticks = kbdev->js_data.hard_stop_ticks_dumping;
2381         ms = (u64)ticks * scheduling_period_ns;
2382         do_div(ms, 1000000UL);
2383         js_hard_stop_ms_dumping = (unsigned long)ms;
2384
2385         if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_ss > 0)
2386                 ticks = kbdev->js_reset_ticks_ss;
2387         else
2388                 ticks = kbdev->js_data.gpu_reset_ticks_ss;
2389         ms = (u64)ticks * scheduling_period_ns;
2390         do_div(ms, 1000000UL);
2391         js_reset_ms_ss = (unsigned long)ms;
2392
2393         if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_cl > 0)
2394                 ticks = kbdev->js_reset_ticks_cl;
2395         else
2396                 ticks = kbdev->js_data.gpu_reset_ticks_cl;
2397         ms = (u64)ticks * scheduling_period_ns;
2398         do_div(ms, 1000000UL);
2399         js_reset_ms_cl = (unsigned long)ms;
2400
2401         if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_dumping > 0)
2402                 ticks = kbdev->js_reset_ticks_dumping;
2403         else
2404                 ticks = kbdev->js_data.gpu_reset_ticks_dumping;
2405         ms = (u64)ticks * scheduling_period_ns;
2406         do_div(ms, 1000000UL);
2407         js_reset_ms_dumping = (unsigned long)ms;
2408
2409         ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
2410                         js_soft_stop_ms, js_soft_stop_ms_cl,
2411                         js_hard_stop_ms_ss, js_hard_stop_ms_cl,
2412                         js_hard_stop_ms_dumping, js_reset_ms_ss,
2413                         js_reset_ms_cl, js_reset_ms_dumping);
2414
2415         if (ret >= PAGE_SIZE) {
2416                 buf[PAGE_SIZE - 2] = '\n';
2417                 buf[PAGE_SIZE - 1] = '\0';
2418                 ret = PAGE_SIZE - 1;
2419         }
2420
2421         return ret;
2422 }
2423
2424 /** The sysfs file @c js_timeouts.
2425  *
2426  * This is used to override the current job scheduler values for
2427  * JS_SOFT_STOP_TICKS
2428  * JS_SOFT_STOP_TICKS_CL
2429  * JS_HARD_STOP_TICKS_SS
2430  * JS_HARD_STOP_TICKS_CL
2431  * JS_HARD_STOP_TICKS_DUMPING
2432  * JS_RESET_TICKS_SS
2433  * JS_RESET_TICKS_CL
2434  * JS_RESET_TICKS_DUMPING.
2435  */
2436 static DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts);
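
/*
 * Example (path and values illustrative): the eight fields are the
 * millisecond equivalents of the timeouts listed above; 0 leaves a value
 * unchanged and -1 restores its default.
 *
 *	# echo "500 500 5000 5000 15000 0 0 -1" > \
 *		/sys/class/misc/mali0/device/js_timeouts
 */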
2437
2438 /**
2439  * set_js_scheduling_period - Store callback for the js_scheduling_period sysfs
2440  *                            file
2441  * @dev:   The device the sysfs file is for
2442  * @attr:  The attributes of the sysfs file
2443  * @buf:   The value written to the sysfs file
2444  * @count: The number of bytes written to the sysfs file
2445  *
2446  * This function is called when the js_scheduling_period sysfs file is written
2447  * to. It checks the data written and, if valid, updates the
2448  * js_scheduling_period value.
2449  *
2450  * Return: @c count if the function succeeded. An error code on failure.
2451  */
2452 static ssize_t set_js_scheduling_period(struct device *dev,
2453                 struct device_attribute *attr, const char *buf, size_t count)
2454 {
2455         struct kbase_device *kbdev;
2456         int ret;
2457         unsigned int js_scheduling_period;
2458         u32 new_scheduling_period_ns;
2459         u32 old_period;
2460         u64 ticks;
2461
2462         kbdev = to_kbase_device(dev);
2463         if (!kbdev)
2464                 return -ENODEV;
2465
2466         ret = kstrtouint(buf, 0, &js_scheduling_period);
2467         if (ret || !js_scheduling_period) {
2468                 dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n"
2469                                 "Use format <js_scheduling_period_ms>\n");
2470                 return -EINVAL;
2471         }
2472
2473         new_scheduling_period_ns = js_scheduling_period * 1000000;
2474
2475         /* Update scheduling timeouts */
2476         mutex_lock(&kbdev->js_data.runpool_mutex);
2477
2478         /* If no contexts have been scheduled since js_timeouts was last written
2479          * to, the new timeouts might not have been latched yet. So check if an
2480          * update is pending and use the new values if necessary. */
2481
2482         /* Use previous 'new' scheduling period as a base if present. */
2483         if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns)
2484                 old_period = kbdev->js_scheduling_period_ns;
2485         else
2486                 old_period = kbdev->js_data.scheduling_period_ns;
2487
2488         if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks > 0)
2489                 ticks = (u64)kbdev->js_soft_stop_ticks * old_period;
2490         else
2491                 ticks = (u64)kbdev->js_data.soft_stop_ticks *
2492                                 kbdev->js_data.scheduling_period_ns;
2493         do_div(ticks, new_scheduling_period_ns);
2494         kbdev->js_soft_stop_ticks = ticks ? ticks : 1;
2495
2496         if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks_cl > 0)
2497                 ticks = (u64)kbdev->js_soft_stop_ticks_cl * old_period;
2498         else
2499                 ticks = (u64)kbdev->js_data.soft_stop_ticks_cl *
2500                                 kbdev->js_data.scheduling_period_ns;
2501         do_div(ticks, new_scheduling_period_ns);
2502         kbdev->js_soft_stop_ticks_cl = ticks ? ticks : 1;
2503
2504         if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_ss > 0)
2505                 ticks = (u64)kbdev->js_hard_stop_ticks_ss * old_period;
2506         else
2507                 ticks = (u64)kbdev->js_data.hard_stop_ticks_ss *
2508                                 kbdev->js_data.scheduling_period_ns;
2509         do_div(ticks, new_scheduling_period_ns);
2510         kbdev->js_hard_stop_ticks_ss = ticks ? ticks : 1;
2511
2512         if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_cl > 0)
2513                 ticks = (u64)kbdev->js_hard_stop_ticks_cl * old_period;
2514         else
2515                 ticks = (u64)kbdev->js_data.hard_stop_ticks_cl *
2516                                 kbdev->js_data.scheduling_period_ns;
2517         do_div(ticks, new_scheduling_period_ns);
2518         kbdev->js_hard_stop_ticks_cl = ticks ? ticks : 1;
2519
2520         if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_dumping > 0)
2521                 ticks = (u64)kbdev->js_hard_stop_ticks_dumping * old_period;
2522         else
2523                 ticks = (u64)kbdev->js_data.hard_stop_ticks_dumping *
2524                                 kbdev->js_data.scheduling_period_ns;
2525         do_div(ticks, new_scheduling_period_ns);
2526         kbdev->js_hard_stop_ticks_dumping = ticks ? ticks : 1;
2527
2528         if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_ss > 0)
2529                 ticks = (u64)kbdev->js_reset_ticks_ss * old_period;
2530         else
2531                 ticks = (u64)kbdev->js_data.gpu_reset_ticks_ss *
2532                                 kbdev->js_data.scheduling_period_ns;
2533         do_div(ticks, new_scheduling_period_ns);
2534         kbdev->js_reset_ticks_ss = ticks ? ticks : 1;
2535
2536         if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_cl > 0)
2537                 ticks = (u64)kbdev->js_reset_ticks_cl * old_period;
2538         else
2539                 ticks = (u64)kbdev->js_data.gpu_reset_ticks_cl *
2540                                 kbdev->js_data.scheduling_period_ns;
2541         do_div(ticks, new_scheduling_period_ns);
2542         kbdev->js_reset_ticks_cl = ticks ? ticks : 1;
2543
2544         if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_dumping > 0)
2545                 ticks = (u64)kbdev->js_reset_ticks_dumping * old_period;
2546         else
2547                 ticks = (u64)kbdev->js_data.gpu_reset_ticks_dumping *
2548                                 kbdev->js_data.scheduling_period_ns;
2549         do_div(ticks, new_scheduling_period_ns);
2550         kbdev->js_reset_ticks_dumping = ticks ? ticks : 1;
2551
2552         kbdev->js_scheduling_period_ns = new_scheduling_period_ns;
2553         kbdev->js_timeouts_updated = true;
2554
2555         mutex_unlock(&kbdev->js_data.runpool_mutex);
2556
2557         dev_dbg(kbdev->dev, "JS scheduling period: %dms\n",
2558                         js_scheduling_period);
2559
2560         return count;
2561 }
2562
2563 /**
2564  * show_js_scheduling_period - Show callback for the js_scheduling_period sysfs
2565  *                             entry.
2566  * @dev:  The device this sysfs file is for.
2567  * @attr: The attributes of the sysfs file.
2568  * @buf:  The output buffer for the sysfs file contents.
2569  *
2570  * This function is called to get the current period used for JS
2571  * scheduling, in milliseconds.
2572  *
2573  * Return: The number of bytes output to buf.
2574  */
2575 static ssize_t show_js_scheduling_period(struct device *dev,
2576                 struct device_attribute *attr, char * const buf)
2577 {
2578         struct kbase_device *kbdev;
2579         u32 period;
2580         ssize_t ret;
2581
2582         kbdev = to_kbase_device(dev);
2583         if (!kbdev)
2584                 return -ENODEV;
2585
2586         if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns > 0)
2587                 period = kbdev->js_scheduling_period_ns;
2588         else
2589                 period = kbdev->js_data.scheduling_period_ns;
2590
2591         ret = scnprintf(buf, PAGE_SIZE, "%d\n",
2592                         period / 1000000);
2593
2594         return ret;
2595 }
2596
2597 static DEVICE_ATTR(js_scheduling_period, S_IRUGO | S_IWUSR,
2598                 show_js_scheduling_period, set_js_scheduling_period);
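
/*
 * Example (path and value illustrative): set a 100ms scheduling period. The
 * store callback above rescales all tick-based timeouts so that their
 * wall-clock durations are preserved under the new period.
 *
 *	# echo 100 > /sys/class/misc/mali0/device/js_scheduling_period
 */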
2599
2600 #if !MALI_CUSTOMER_RELEASE
2601 /** Store callback for the @c force_replay sysfs file.
2602  *
2603  * @param dev   The device this sysfs file is for
2604  * @param attr  The attributes of the sysfs file
2605  * @param buf   The value written to the sysfs file
2606  * @param count The number of bytes written to the sysfs file
2607  *
2608  * @return @c count if the function succeeded. An error code on failure.
2609  */
2610 static ssize_t set_force_replay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2611 {
2612         struct kbase_device *kbdev;
2613
2614         kbdev = to_kbase_device(dev);
2615         if (!kbdev)
2616                 return -ENODEV;
2617
2618         if (!strncmp("limit=", buf, MIN(6, count))) {
2619                 unsigned int force_replay_limit;
2620                 int items = sscanf(buf, "limit=%u", &force_replay_limit);
2621
2622                 if (items == 1) {
2623                         kbdev->force_replay_random = false;
2624                         kbdev->force_replay_limit = force_replay_limit;
2625                         kbdev->force_replay_count = 0;
2626
2627                         return count;
2628                 }
2629         } else if (!strncmp("random_limit", buf, MIN(12, count))) {
2630                 kbdev->force_replay_random = true;
2631                 kbdev->force_replay_count = 0;
2632
2633                 return count;
2634         } else if (!strncmp("norandom_limit", buf, MIN(14, count))) {
2635                 kbdev->force_replay_random = false;
2636                 kbdev->force_replay_limit = KBASEP_FORCE_REPLAY_DISABLED;
2637                 kbdev->force_replay_count = 0;
2638
2639                 return count;
2640         } else if (!strncmp("core_req=", buf, MIN(9, count))) {
2641                 unsigned int core_req;
2642                 int items = sscanf(buf, "core_req=%x", &core_req);
2643
2644                 if (items == 1) {
2645                         kbdev->force_replay_core_req = (base_jd_core_req)core_req;
2646
2647                         return count;
2648                 }
2649         }
2650         dev_err(kbdev->dev, "Couldn't process force_replay write operation.\nPossible settings: limit=<limit>, random_limit, norandom_limit, core_req=<core_req>\n");
2651         return -EINVAL;
2652 }
2653
2654 /** Show callback for the @c force_replay sysfs file.
2655  *
2656  * This function is called to get the contents of the @c force_replay sysfs
2657  * file. It returns the last set value written to the force_replay sysfs file.
2658  * If the file has not been written yet, the values will be 0.
2659  *
2660  * @param dev   The device this sysfs file is for
2661  * @param attr  The attributes of the sysfs file
2662  * @param buf   The output buffer for the sysfs file contents
2663  *
2664  * @return The number of bytes output to @c buf.
2665  */
2666 static ssize_t show_force_replay(struct device *dev,
2667                 struct device_attribute *attr, char * const buf)
2668 {
2669         struct kbase_device *kbdev;
2670         ssize_t ret;
2671
2672         kbdev = to_kbase_device(dev);
2673         if (!kbdev)
2674                 return -ENODEV;
2675
2676         if (kbdev->force_replay_random)
2677                 ret = scnprintf(buf, PAGE_SIZE,
2678                                 "limit=0\nrandom_limit\ncore_req=%x\n",
2679                                 kbdev->force_replay_core_req);
2680         else
2681                 ret = scnprintf(buf, PAGE_SIZE,
2682                                 "limit=%u\nnorandom_limit\ncore_req=%x\n",
2683                                 kbdev->force_replay_limit,
2684                                 kbdev->force_replay_core_req);
2685
2686         if (ret >= PAGE_SIZE) {
2687                 buf[PAGE_SIZE - 2] = '\n';
2688                 buf[PAGE_SIZE - 1] = '\0';
2689                 ret = PAGE_SIZE - 1;
2690         }
2691
2692         return ret;
2693 }
2694
2695 /** The sysfs file @c force_replay.
2696  *
2697  */
2698 static DEVICE_ATTR(force_replay, S_IRUGO | S_IWUSR, show_force_replay,
2699                 set_force_replay);
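
/*
 * Example writes (path and values illustrative), matching the settings
 * parsed in set_force_replay():
 *
 *	# echo "limit=5" > /sys/class/misc/mali0/device/force_replay
 *	# echo "random_limit" > /sys/class/misc/mali0/device/force_replay
 *	# echo "core_req=7" > /sys/class/misc/mali0/device/force_replay
 */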
2700 #endif /* !MALI_CUSTOMER_RELEASE */
2701
2702 #ifdef CONFIG_MALI_DEBUG
2703 static ssize_t set_js_softstop_always(struct device *dev,
2704                 struct device_attribute *attr, const char *buf, size_t count)
2705 {
2706         struct kbase_device *kbdev;
2707         int ret;
2708         int softstop_always;
2709
2710         kbdev = to_kbase_device(dev);
2711         if (!kbdev)
2712                 return -ENODEV;
2713
2714         ret = kstrtoint(buf, 0, &softstop_always);
2715         if (ret || ((softstop_always != 0) && (softstop_always != 1))) {
2716                 dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n"
2717                                 "Use format <soft_stop_always>\n");
2718                 return -EINVAL;
2719         }
2720
2721         kbdev->js_data.softstop_always = (bool) softstop_always;
2722         dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n",
2723                         (kbdev->js_data.softstop_always) ?
2724                         "Enabled" : "Disabled");
2725         return count;
2726 }
2727
2728 static ssize_t show_js_softstop_always(struct device *dev,
2729                 struct device_attribute *attr, char * const buf)
2730 {
2731         struct kbase_device *kbdev;
2732         ssize_t ret;
2733
2734         kbdev = to_kbase_device(dev);
2735         if (!kbdev)
2736                 return -ENODEV;
2737
2738         ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);
2739
2740         if (ret >= PAGE_SIZE) {
2741                 buf[PAGE_SIZE - 2] = '\n';
2742                 buf[PAGE_SIZE - 1] = '\0';
2743                 ret = PAGE_SIZE - 1;
2744         }
2745
2746         return ret;
2747 }
2748
2749 /*
2750  * By default, soft-stops are disabled when only a single context is present.
2751  * Enabling soft-stops in that case is intended for debug and unit-testing
2752  * purposes (see the CL t6xx_stress_1 unit-test for an example of its use).
2753  */
2754 static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
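/*
 * Example usage (hypothetical sysfs path): enable soft-stops even when only
 * a single context is present:
 *
 *   echo 1 > /sys/devices/platform/ffa30000.gpu/js_softstop_always
 */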
2755 #endif /* CONFIG_MALI_DEBUG */
2756
2757 #ifdef CONFIG_MALI_DEBUG
2758 typedef void (kbasep_debug_command_func) (struct kbase_device *);
2759
2760 enum kbasep_debug_command_code {
2761         KBASEP_DEBUG_COMMAND_DUMPTRACE,
2762
2763         /* This must be the last enum */
2764         KBASEP_DEBUG_COMMAND_COUNT
2765 };
2766
2767 struct kbasep_debug_command {
2768         const char *str;
2769         kbasep_debug_command_func *func;
2770 };
2771
2772 /** Debug commands supported by the driver */
2773 static const struct kbasep_debug_command debug_commands[] = {
2774         {
2775                 .str = "dumptrace",
2776                 .func = &kbasep_trace_dump,
2777         }
2778 };
2779
2780 /** Show callback for the @c debug_command sysfs file.
2781  *
2782  * This function is called to get the contents of the @c debug_command sysfs
2783  * file. This is a list of the available debug commands, separated by newlines.
2784  *
2785  * @param dev   The device this sysfs file is for
2786  * @param attr  The attributes of the sysfs file
2787  * @param buf   The output buffer for the sysfs file contents
2788  *
2789  * @return The number of bytes output to @c buf.
2790  */
2791 static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char * const buf)
2792 {
2793         struct kbase_device *kbdev;
2794         int i;
2795         ssize_t ret = 0;
2796
2797         kbdev = to_kbase_device(dev);
2798
2799         if (!kbdev)
2800                 return -ENODEV;
2801
2802         for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
2803                 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);
2804
2805         if (ret >= PAGE_SIZE) {
2806                 buf[PAGE_SIZE - 2] = '\n';
2807                 buf[PAGE_SIZE - 1] = '\0';
2808                 ret = PAGE_SIZE - 1;
2809         }
2810
2811         return ret;
2812 }
2813
2814 /** Store callback for the @c debug_command sysfs file.
2815  *
2816  * This function is called when the @c debug_command sysfs file is written to.
2817  * It matches the requested command against the available commands, and if
2818  * a matching command is found calls the associated function from
2819  * @ref debug_commands to issue the command.
2820  *
2821  * @param dev   The device this sysfs file is for
2822  * @param attr  The attributes of the sysfs file
2823  * @param buf   The value written to the sysfs file
2824  * @param count The number of bytes written to the sysfs file
2825  *
2826  * @return @c count if the function succeeded. An error code on failure.
2827  */
2828 static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2829 {
2830         struct kbase_device *kbdev;
2831         int i;
2832
2833         kbdev = to_kbase_device(dev);
2834
2835         if (!kbdev)
2836                 return -ENODEV;
2837
2838         for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
2839                 if (sysfs_streq(debug_commands[i].str, buf)) {
2840                         debug_commands[i].func(kbdev);
2841                         return count;
2842                 }
2843         }
2844
2845         /* Debug Command not found */
2846         dev_err(dev, "debug_command: command not known\n");
2847         return -EINVAL;
2848 }
2849
2850 /** The sysfs file @c debug_command.
2851  *
2852  * This is used to issue general debug commands to the device driver.
2853  * Reading it will produce a list of debug commands, separated by newlines.
2854  * Writing to it with one of those commands will issue said command.
2855  */
2856 static DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug);
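/*
 * Example usage (hypothetical sysfs path): list the supported commands, then
 * issue one:
 *
 *   cat /sys/devices/platform/ffa30000.gpu/debug_command
 *   echo dumptrace > /sys/devices/platform/ffa30000.gpu/debug_command
 */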
2857 #endif /* CONFIG_MALI_DEBUG */
2858
2859 /**
2860  * kbase_show_gpuinfo - Show callback for the gpuinfo sysfs entry.
2861  * @dev: The device this sysfs file is for.
2862  * @attr: The attributes of the sysfs file.
2863  * @buf: The output buffer to receive the GPU information.
2864  *
2865  * This function is called to get a description of the present Mali
2866  * GPU via the gpuinfo sysfs entry.  This includes the GPU family, the
2867  * number of cores, the hardware version and the raw product id.  For
2868  * example:
2869  *
2870  *    Mali-T60x MP4 r0p0 0x6956
2871  *
2872  * Return: The number of bytes output to buf.
2873  */
2874 static ssize_t kbase_show_gpuinfo(struct device *dev,
2875                                   struct device_attribute *attr, char *buf)
2876 {
2877         static const struct gpu_product_id_name {
2878                 unsigned int id;
2879                 const char *name;
2880         } gpu_product_id_names[] = {
2881                 { .id = GPU_ID_PI_T60X, .name = "Mali-T60x" },
2882                 { .id = GPU_ID_PI_T62X, .name = "Mali-T62x" },
2883                 { .id = GPU_ID_PI_T72X, .name = "Mali-T72x" },
2884                 { .id = GPU_ID_PI_T76X, .name = "Mali-T76x" },
2885                 { .id = GPU_ID_PI_T82X, .name = "Mali-T82x" },
2886                 { .id = GPU_ID_PI_T83X, .name = "Mali-T83x" },
2887                 { .id = GPU_ID_PI_T86X, .name = "Mali-T86x" },
2888                 { .id = GPU_ID_PI_TFRX, .name = "Mali-T88x" },
2889                 { .id = GPU_ID2_PRODUCT_TMIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
2890                   .name = "Mali-TMIx" },
2891         };
2892         const char *product_name = "(Unknown Mali GPU)";
2893         struct kbase_device *kbdev;
2894         u32 gpu_id;
2895         unsigned int product_id, product_id_mask;
2896         unsigned int i;
2897         bool is_new_format;
2898
2899         kbdev = to_kbase_device(dev);
2900         if (!kbdev)
2901                 return -ENODEV;
2902
2903         gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
2904         product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
2905         is_new_format = GPU_ID_IS_NEW_FORMAT(product_id);
2906         product_id_mask =
2907                 (is_new_format ?
2908                         GPU_ID2_PRODUCT_MODEL :
2909                         GPU_ID_VERSION_PRODUCT_ID) >>
2910                 GPU_ID_VERSION_PRODUCT_ID_SHIFT;
2911
2912         for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
2913                 const struct gpu_product_id_name *p = &gpu_product_id_names[i];
2914
2915                 if ((GPU_ID_IS_NEW_FORMAT(p->id) == is_new_format) &&
2916                     (p->id & product_id_mask) ==
2917                     (product_id & product_id_mask)) {
2918                         product_name = p->name;
2919                         break;
2920                 }
2921         }
2922
2923         return scnprintf(buf, PAGE_SIZE, "%s MP%d r%dp%d 0x%04X\n",
2924                 product_name, kbdev->gpu_props.num_cores,
2925                 (gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT,
2926                 (gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT,
2927                 product_id);
2928 }
2929 static DEVICE_ATTR(gpuinfo, S_IRUGO, kbase_show_gpuinfo, NULL);
2930
2931 /**
2932  * set_dvfs_period - Store callback for the dvfs_period sysfs file.
2933  * @dev:   The device this sysfs file is for
2934  * @attr:  The attributes of the sysfs file
2935  * @buf:   The value written to the sysfs file
2936  * @count: The number of bytes written to the sysfs file
2937  *
2938  * This function is called when the dvfs_period sysfs file is written to. It
2939  * checks the data written and, if valid, updates the DVFS period.
2940  *
2941  * Return: @c count if the function succeeded. An error code on failure.
2942  */
2943 static ssize_t set_dvfs_period(struct device *dev,
2944                 struct device_attribute *attr, const char *buf, size_t count)
2945 {
2946         struct kbase_device *kbdev;
2947         int ret;
2948         int dvfs_period;
2949
2950         kbdev = to_kbase_device(dev);
2951         if (!kbdev)
2952                 return -ENODEV;
2953
2954         ret = kstrtoint(buf, 0, &dvfs_period);
2955         if (ret || dvfs_period <= 0) {
2956                 dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n"
2957                                 "Use format <dvfs_period_ms>\n");
2958                 return -EINVAL;
2959         }
2960
2961         kbdev->pm.dvfs_period = dvfs_period;
2962         dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period);
2963
2964         return count;
2965 }
2966
2967 /**
2968  * show_dvfs_period - Show callback for the dvfs_period sysfs entry.
2969  * @dev:  The device this sysfs file is for.
2970  * @attr: The attributes of the sysfs file.
2971  * @buf:  The output buffer for the DVFS period.
2972  *
2973  * This function is called to get the current period used for the DVFS sample
2974  * timer.
2975  *
2976  * Return: The number of bytes output to buf.
2977  */
2978 static ssize_t show_dvfs_period(struct device *dev,
2979                 struct device_attribute *attr, char * const buf)
2980 {
2981         struct kbase_device *kbdev;
2982         ssize_t ret;
2983
2984         kbdev = to_kbase_device(dev);
2985         if (!kbdev)
2986                 return -ENODEV;
2987
2988         ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period);
2989
2990         return ret;
2991 }
2992
2993 static DEVICE_ATTR(dvfs_period, S_IRUGO | S_IWUSR, show_dvfs_period,
2994                 set_dvfs_period);
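/*
 * Example usage (hypothetical sysfs path): sample for DVFS every 100 ms:
 *
 *   echo 100 > /sys/devices/platform/ffa30000.gpu/dvfs_period
 */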
2995
2996 /**
2997  * set_pm_poweroff - Store callback for the pm_poweroff sysfs file.
2998  * @dev:   The device this sysfs file is for
2999  * @attr:  The attributes of the sysfs file
3000  * @buf:   The value written to the sysfs file
3001  * @count: The number of bytes written to the sysfs file
3002  *
3003  * This function is called when the pm_poweroff sysfs file is written to.
3004  *
3005  * This file contains three values separated by whitespace. The values
3006  * are gpu_poweroff_time (the period of the poweroff timer, in ns),
3007  * poweroff_shader_ticks (the number of poweroff timer ticks before an idle
3008  * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer
3009  * ticks before the GPU is powered off), in that order.
3010  *
3011  * Return: @c count if the function succeeded. An error code on failure.
3012  */
3013 static ssize_t set_pm_poweroff(struct device *dev,
3014                 struct device_attribute *attr, const char *buf, size_t count)
3015 {
3016         struct kbase_device *kbdev;
3017         int items;
3018         s64 gpu_poweroff_time;
3019         int poweroff_shader_ticks, poweroff_gpu_ticks;
3020
3021         kbdev = to_kbase_device(dev);
3022         if (!kbdev)
3023                 return -ENODEV;
3024
3025         items = sscanf(buf, "%lld %d %d", &gpu_poweroff_time,
3026                         &poweroff_shader_ticks,
3027                         &poweroff_gpu_ticks);
3028         if (items != 3) {
3029                 dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n"
3030                                 "Use format <gpu_poweroff_time_ns> <poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
3031                 return -EINVAL;
3032         }
3033
3034         kbdev->pm.gpu_poweroff_time = HR_TIMER_DELAY_NSEC(gpu_poweroff_time);
3035         kbdev->pm.poweroff_shader_ticks = poweroff_shader_ticks;
3036         kbdev->pm.poweroff_gpu_ticks = poweroff_gpu_ticks;
3037
3038         return count;
3039 }
3040
3041 /**
3042  * show_pm_poweroff - Show callback for the pm_poweroff sysfs entry.
3043  * @dev:  The device this sysfs file is for.
3044  * @attr: The attributes of the sysfs file.
3045  * @buf:  The output buffer for the power-off settings.
3046  *
3047  * This function is called to get the current power-off timer period and
3048  * tick counts, in the same format accepted by set_pm_poweroff().
3049  *
3050  * Return: The number of bytes output to buf.
3051  */
3052 static ssize_t show_pm_poweroff(struct device *dev,
3053                 struct device_attribute *attr, char * const buf)
3054 {
3055         struct kbase_device *kbdev;
3056         ssize_t ret;
3057
3058         kbdev = to_kbase_device(dev);
3059         if (!kbdev)
3060                 return -ENODEV;
3061
3062         ret = scnprintf(buf, PAGE_SIZE, "%llu %u %u\n",
3063                         ktime_to_ns(kbdev->pm.gpu_poweroff_time),
3064                         kbdev->pm.poweroff_shader_ticks,
3065                         kbdev->pm.poweroff_gpu_ticks);
3066
3067         return ret;
3068 }
3069
3070 static DEVICE_ATTR(pm_poweroff, S_IRUGO | S_IWUSR, show_pm_poweroff,
3071                 set_pm_poweroff);
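/*
 * Example usage (hypothetical sysfs path and values): a 400000 ns (400 us)
 * poweroff timer, shaders powered off after 1 idle tick, the whole GPU
 * after 2 ticks:
 *
 *   echo 400000 1 2 > /sys/devices/platform/ffa30000.gpu/pm_poweroff
 */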
3072
3073 /**
3074  * set_reset_timeout - Store callback for the reset_timeout sysfs file.
3075  * @dev:   The device this sysfs file is for
3076  * @attr:  The attributes of the sysfs file
3077  * @buf:   The value written to the sysfs file
3078  * @count: The number of bytes written to the sysfs file
3079  *
3080  * This function is called when the reset_timeout sysfs file is written to. It
3081  * checks the data written, and if valid updates the reset timeout.
3082  *
3083  * Return: @c count if the function succeeded. An error code on failure.
3084  */
3085 static ssize_t set_reset_timeout(struct device *dev,
3086                 struct device_attribute *attr, const char *buf, size_t count)
3087 {
3088         struct kbase_device *kbdev;
3089         int ret;
3090         int reset_timeout;
3091
3092         kbdev = to_kbase_device(dev);
3093         if (!kbdev)
3094                 return -ENODEV;
3095
3096         ret = kstrtoint(buf, 0, &reset_timeout);
3097         if (ret || reset_timeout <= 0) {
3098                 dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n"
3099                                 "Use format <reset_timeout_ms>\n");
3100                 return -EINVAL;
3101         }
3102
3103         kbdev->reset_timeout_ms = reset_timeout;
3104         dev_dbg(kbdev->dev, "Reset timeout: %dms\n", reset_timeout);
3105
3106         return count;
3107 }
3108
3109 /**
3110  * show_reset_timeout - Show callback for the reset_timeout sysfs entry.
3111  * @dev:  The device this sysfs file is for.
3112  * @attr: The attributes of the sysfs file.
3113  * @buf:  The output buffer for the reset timeout.
3114  *
3115  * This function is called to get the current reset timeout.
3116  *
3117  * Return: The number of bytes output to buf.
3118  */
3119 static ssize_t show_reset_timeout(struct device *dev,
3120                 struct device_attribute *attr, char * const buf)
3121 {
3122         struct kbase_device *kbdev;
3123         ssize_t ret;
3124
3125         kbdev = to_kbase_device(dev);
3126         if (!kbdev)
3127                 return -ENODEV;
3128
3129         ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms);
3130
3131         return ret;
3132 }
3133
3134 static DEVICE_ATTR(reset_timeout, S_IRUGO | S_IWUSR, show_reset_timeout,
3135                 set_reset_timeout);
3136
3137
3138
3139 static ssize_t show_mem_pool_size(struct device *dev,
3140                 struct device_attribute *attr, char * const buf)
3141 {
3142         struct kbase_device *kbdev;
3143         ssize_t ret;
3144
3145         kbdev = to_kbase_device(dev);
3146         if (!kbdev)
3147                 return -ENODEV;
3148
3149         ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
3150                         kbase_mem_pool_size(&kbdev->mem_pool));
3151
3152         return ret;
3153 }
3154
3155 static ssize_t set_mem_pool_size(struct device *dev,
3156                 struct device_attribute *attr, const char *buf, size_t count)
3157 {
3158         struct kbase_device *kbdev;
3159         unsigned long new_size;
3160         int err;
3161
3162         kbdev = to_kbase_device(dev);
3163         if (!kbdev)
3164                 return -ENODEV;
3165
3166         err = kstrtoul(buf, 0, &new_size);
3167         if (err)
3168                 return err;
3169
3170         kbase_mem_pool_trim(&kbdev->mem_pool, new_size);
3171
3172         return count;
3173 }
3174
3175 static DEVICE_ATTR(mem_pool_size, S_IRUGO | S_IWUSR, show_mem_pool_size,
3176                 set_mem_pool_size);
3177
3178 static ssize_t show_mem_pool_max_size(struct device *dev,
3179                 struct device_attribute *attr, char * const buf)
3180 {
3181         struct kbase_device *kbdev;
3182         ssize_t ret;
3183
3184         kbdev = to_kbase_device(dev);
3185         if (!kbdev)
3186                 return -ENODEV;
3187
3188         ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
3189                         kbase_mem_pool_max_size(&kbdev->mem_pool));
3190
3191         return ret;
3192 }
3193
3194 static ssize_t set_mem_pool_max_size(struct device *dev,
3195                 struct device_attribute *attr, const char *buf, size_t count)
3196 {
3197         struct kbase_device *kbdev;
3198         unsigned long new_max_size;
3199         int err;
3200
3201         kbdev = to_kbase_device(dev);
3202         if (!kbdev)
3203                 return -ENODEV;
3204
3205         err = kstrtoul(buf, 0, &new_max_size);
3206         if (err)
3207                 return err;
3208
3209         kbase_mem_pool_set_max_size(&kbdev->mem_pool, new_max_size);
3210
3211         return count;
3212 }
3213
3214 static DEVICE_ATTR(mem_pool_max_size, S_IRUGO | S_IWUSR, show_mem_pool_max_size,
3215                 set_mem_pool_max_size);
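/*
 * Example usage (hypothetical sysfs path): cap the global memory pool at
 * 4096 pages, then trim it down to 512 pages immediately:
 *
 *   echo 4096 > /sys/devices/platform/ffa30000.gpu/mem_pool_max_size
 *   echo 512 > /sys/devices/platform/ffa30000.gpu/mem_pool_size
 */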
3216
3217
3218 static int kbasep_secure_mode_enable(struct kbase_device *kbdev)
3219 {
3220         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
3221                 GPU_COMMAND_SET_PROTECTED_MODE, NULL);
3222         return 0;
3223 }
3224
3225 static int kbasep_secure_mode_disable(struct kbase_device *kbdev)
3226 {
3227         if (!kbase_prepare_to_reset_gpu_locked(kbdev))
3228                 return -EBUSY;
3229
3230         kbase_reset_gpu_locked(kbdev);
3231
3232         return 0;
3233 }
3234
3235 static struct kbase_secure_ops kbasep_secure_ops = {
3236         .secure_mode_enable = kbasep_secure_mode_enable,
3237         .secure_mode_disable = kbasep_secure_mode_disable,
3238 };
3239
3240 static void kbasep_secure_mode_init(struct kbase_device *kbdev)
3241 {
3242         if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
3243                 /* Use native secure ops */
3244                 kbdev->secure_ops = &kbasep_secure_ops;
3245                 kbdev->secure_mode_support = true;
3246         }
3247 #ifdef SECURE_CALLBACKS
3248         else {
3249                 kbdev->secure_ops = SECURE_CALLBACKS;
3250                 kbdev->secure_mode_support = false;
3251
3252                 if (kbdev->secure_ops) {
3253                         int err;
3254
3255                         /* Make sure secure mode is disabled on startup */
3256                         err = kbdev->secure_ops->secure_mode_disable(kbdev);
3257
3258                         /* secure_mode_disable() returns -EINVAL if not
3259                          * supported
3260                          */
3261                         kbdev->secure_mode_support = (err != -EINVAL);
3262                 }
3263         }
3264 #endif
3265 }
3266
3267 #ifdef CONFIG_MALI_NO_MALI
3268 static int kbase_common_reg_map(struct kbase_device *kbdev)
3269 {
3270         return 0;
3271 }
3272 static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
3273 {
3274 }
3275 #else /* CONFIG_MALI_NO_MALI */
3276 static int kbase_common_reg_map(struct kbase_device *kbdev)
3277 {
3278         int err = -ENOMEM;
3279
3280         if (!request_mem_region(kbdev->reg_start, kbdev->reg_size, dev_name(kbdev->dev))) {
3281                 dev_err(kbdev->dev, "Register window unavailable\n");
3282                 err = -EIO;
3283                 goto out_region;
3284         }
3285
3286         kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size);
3287         if (!kbdev->reg) {
3288                 dev_err(kbdev->dev, "Can't remap register window\n");
3289                 err = -EINVAL;
3290                 goto out_ioremap;
3291         }
3292
3293         return 0;
3294
3295  out_ioremap:
3296         release_mem_region(kbdev->reg_start, kbdev->reg_size);
3297  out_region:
3298         return err;
3299 }
3300
3301 static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
3302 {
3303         if (kbdev->reg) {
3304                 iounmap(kbdev->reg);
3305                 release_mem_region(kbdev->reg_start, kbdev->reg_size);
3306                 kbdev->reg = NULL;
3307                 kbdev->reg_start = 0;
3308                 kbdev->reg_size = 0;
3309         }
3310 }
3311 #endif /* CONFIG_MALI_NO_MALI */
3312
3313 static int registers_map(struct kbase_device * const kbdev)
3314 {
3315         struct platform_device *pdev = to_platform_device(kbdev->dev);
3316         struct resource *reg_res;
3317         int err;
3318
3319         /* The first memory resource is the physical address of the
3320          * GPU registers. */
3321         reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3322         if (!reg_res) {
3323                 dev_err(kbdev->dev, "Invalid register resource\n");
3324                 return -ENOENT;
3325         }
3326
3327         kbdev->reg_start = reg_res->start;
3328         kbdev->reg_size = resource_size(reg_res);
3329
3330         err = kbase_common_reg_map(kbdev);
3331         if (err) {
3332                 dev_err(kbdev->dev, "Failed to map registers\n");
3333                 return err;
3334         }
3335
3336         return 0;
3337 }
3338
3339 static void registers_unmap(struct kbase_device *kbdev)
3340 {
3341         kbase_common_reg_unmap(kbdev);
3342 }
3343
3344 static int power_control_init(struct platform_device *pdev)
3345 {
3346         struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
3347         int err = 0;
3348
3349         if (!kbdev)
3350                 return -ENODEV;
3351
3352 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
3353                         && defined(CONFIG_REGULATOR)
3354         kbdev->regulator = regulator_get_optional(kbdev->dev, "mali");
3355         if (IS_ERR_OR_NULL(kbdev->regulator)) {
3356                 err = PTR_ERR(kbdev->regulator);
3357                 kbdev->regulator = NULL;
3358                 if (err == -EPROBE_DEFER) {
3359                         dev_err(&pdev->dev, "Failed to get regulator\n");
3360                         return err;
3361                 }
3362                 dev_info(kbdev->dev,
3363                         "Continuing without Mali regulator control\n");
3364                 /* Allow probe to continue without regulator */
3365         }
3366 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
3367
3368         kbdev->clock = clk_get(kbdev->dev, "clk_mali");
3369         if (IS_ERR_OR_NULL(kbdev->clock)) {
3370                 err = PTR_ERR(kbdev->clock);
3371                 kbdev->clock = NULL;
3372                 if (err == -EPROBE_DEFER) {
3373                         dev_err(&pdev->dev, "Failed to get clock\n");
3374                         goto fail;
3375                 }
3376                 dev_info(kbdev->dev, "Continuing without Mali clock control\n");
3377                 /* Allow probe to continue without clock. */
3378         } else {
3379                 err = clk_prepare_enable(kbdev->clock);
3380                 if (err) {
3381                         dev_err(kbdev->dev,
3382                                 "Failed to prepare and enable clock (%d)\n",
3383                                 err);
3384                         goto fail;
3385                 }
3386         }
3387
3388 #if defined(CONFIG_OF) && defined(CONFIG_PM_OPP)
3389         /* Register the OPPs if they are available in device tree */
3390 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
3391         err = dev_pm_opp_of_add_table(kbdev->dev);
3392 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
3393         err = of_init_opp_table(kbdev->dev);
3394 #else
3395         err = 0;
3396 #endif /* LINUX_VERSION_CODE */
3397         if (err)
3398                 dev_dbg(kbdev->dev, "OPP table not found\n");
3399 #endif /* CONFIG_OF && CONFIG_PM_OPP */
3400
3401         return 0;
3402
3403 fail:
3404         if (kbdev->clock) {
3405                 clk_put(kbdev->clock);
3406                 kbdev->clock = NULL;
3407         }
3408
3409 #ifdef CONFIG_REGULATOR
3410         if (kbdev->regulator) {
3411                 regulator_put(kbdev->regulator);
3412                 kbdev->regulator = NULL;
3413         }
3414 #endif
3416
3417         return err;
3418 }
3419
3420 static void power_control_term(struct kbase_device *kbdev)
3421 {
3422 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
3423         dev_pm_opp_of_remove_table(kbdev->dev);
3424 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
3425         of_free_opp_table(kbdev->dev);
3426 #endif
3427
3428         if (kbdev->clock) {
3429                 clk_disable_unprepare(kbdev->clock);
3430                 clk_put(kbdev->clock);
3431                 kbdev->clock = NULL;
3432         }
3433
3434 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
3435                         && defined(CONFIG_REGULATOR)
3436         if (kbdev->regulator) {
3437                 regulator_put(kbdev->regulator);
3438                 kbdev->regulator = NULL;
3439         }
3440 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
3441 }
3442
3443 #ifdef CONFIG_DEBUG_FS
3444
3445 #if KBASE_GPU_RESET_EN
3446 #include <mali_kbase_hwaccess_jm.h>
3447
3448 static void trigger_quirks_reload(struct kbase_device *kbdev)
3449 {
3450         kbase_pm_context_active(kbdev);
3451         if (kbase_prepare_to_reset_gpu(kbdev))
3452                 kbase_reset_gpu(kbdev);
3453         kbase_pm_context_idle(kbdev);
3454 }
3455
3456 #define MAKE_QUIRK_ACCESSORS(type) \
3457 static int type##_quirks_set(void *data, u64 val) \
3458 { \
3459         struct kbase_device *kbdev; \
3460         kbdev = (struct kbase_device *)data; \
3461         kbdev->hw_quirks_##type = (u32)val; \
3462         trigger_quirks_reload(kbdev); \
3463         return 0;\
3464 } \
3465 \
3466 static int type##_quirks_get(void *data, u64 *val) \
3467 { \
3468         struct kbase_device *kbdev;\
3469         kbdev = (struct kbase_device *)data;\
3470         *val = kbdev->hw_quirks_##type;\
3471         return 0;\
3472 } \
3473 DEFINE_SIMPLE_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get,\
3474                 type##_quirks_set, "%llu\n")
3475
3476 MAKE_QUIRK_ACCESSORS(sc);
3477 MAKE_QUIRK_ACCESSORS(tiler);
3478 MAKE_QUIRK_ACCESSORS(mmu);
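/*
 * For reference, MAKE_QUIRK_ACCESSORS(sc) above expands to sc_quirks_set(),
 * sc_quirks_get() and fops_sc_quirks, which kbase_device_debugfs_init()
 * below wires up to the "quirks_sc" debugfs file. Example usage, assuming
 * a hypothetical "mali0" device name:
 *
 *   echo 0x10 > /sys/kernel/debug/mali0/quirks_sc  # also resets the GPU
 */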
3479
3480 #endif /* KBASE_GPU_RESET_EN */
3481
3482 static int kbasep_secure_mode_seq_show(struct seq_file *m, void *p)
3483 {
3484         struct kbase_device *kbdev = m->private;
3485
3486         if (!kbdev->secure_mode_support)
3487                 seq_puts(m, "unsupported\n");
3488         else
3489                 seq_printf(m, "%s\n", kbdev->secure_mode ? "Y" : "N");
3490
3491         return 0;
3492 }
3493
3494 static int kbasep_secure_mode_debugfs_open(struct inode *in, struct file *file)
3495 {
3496         return single_open(file, kbasep_secure_mode_seq_show, in->i_private);
3497 }
3498
3499 static const struct file_operations kbasep_secure_mode_debugfs_fops = {
3500         .open = kbasep_secure_mode_debugfs_open,
3501         .read = seq_read,
3502         .llseek = seq_lseek,
3503         .release = single_release,
3504 };
3505
3506 static int kbase_device_debugfs_init(struct kbase_device *kbdev)
3507 {
3508         struct dentry *debugfs_ctx_defaults_directory;
3509         int err;
3510
3511         kbdev->mali_debugfs_directory = debugfs_create_dir(kbdev->devname,
3512                         NULL);
3513         if (!kbdev->mali_debugfs_directory) {
3514                 dev_err(kbdev->dev, "Couldn't create mali debugfs directory\n");
3515                 err = -ENOMEM;
3516                 goto out;
3517         }
3518
3519         kbdev->debugfs_ctx_directory = debugfs_create_dir("ctx",
3520                         kbdev->mali_debugfs_directory);
3521         if (!kbdev->debugfs_ctx_directory) {
3522                 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx directory\n");
3523                 err = -ENOMEM;
3524                 goto out;
3525         }
3526
3527         debugfs_ctx_defaults_directory = debugfs_create_dir("defaults",
3528                         kbdev->debugfs_ctx_directory);
3529         if (!debugfs_ctx_defaults_directory) {
3530                 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx defaults directory\n");
3531                 err = -ENOMEM;
3532                 goto out;
3533         }
3534
3535 #if !MALI_CUSTOMER_RELEASE
3536         kbasep_regs_dump_debugfs_add(kbdev);
3537 #endif /* !MALI_CUSTOMER_RELEASE */
3538
3539         kbase_debug_job_fault_debugfs_init(kbdev);
3540         kbasep_gpu_memory_debugfs_init(kbdev);
3541 #if KBASE_GPU_RESET_EN
3542         debugfs_create_file("quirks_sc", 0644,
3543                         kbdev->mali_debugfs_directory, kbdev,
3544                         &fops_sc_quirks);
3545         debugfs_create_file("quirks_tiler", 0644,
3546                         kbdev->mali_debugfs_directory, kbdev,
3547                         &fops_tiler_quirks);
3548         debugfs_create_file("quirks_mmu", 0644,
3549                         kbdev->mali_debugfs_directory, kbdev,
3550                         &fops_mmu_quirks);
3551 #endif /* KBASE_GPU_RESET_EN */
3552
3553 #ifndef CONFIG_MALI_COH_USER
3554         debugfs_create_bool("infinite_cache", 0644,
3555                         debugfs_ctx_defaults_directory,
3556                         (bool*)&(kbdev->infinite_cache_active_default));
3557 #endif /* CONFIG_MALI_COH_USER */
3558
3559         debugfs_create_size_t("mem_pool_max_size", 0644,
3560                         debugfs_ctx_defaults_directory,
3561                         &kbdev->mem_pool_max_size_default);
3562
3563 #if KBASE_TRACE_ENABLE
3564         kbasep_trace_debugfs_init(kbdev);
3565 #endif /* KBASE_TRACE_ENABLE */
3566
3567 #ifdef CONFIG_MALI_TRACE_TIMELINE
3568         kbasep_trace_timeline_debugfs_init(kbdev);
3569 #endif /* CONFIG_MALI_TRACE_TIMELINE */
3570
3571         debugfs_create_file("secure_mode", S_IRUGO,
3572                         kbdev->mali_debugfs_directory, kbdev,
3573                         &kbasep_secure_mode_debugfs_fops);
3574
3575         return 0;
3576
3577 out:
3578         debugfs_remove_recursive(kbdev->mali_debugfs_directory);
3579         return err;
3580 }
3581
3582 static void kbase_device_debugfs_term(struct kbase_device *kbdev)
3583 {
3584         debugfs_remove_recursive(kbdev->mali_debugfs_directory);
3585 }
3586
3587 #else /* CONFIG_DEBUG_FS */
3588 static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
3589 {
3590         return 0;
3591 }
3592
3593 static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }
3594 #endif /* CONFIG_DEBUG_FS */
3595
3596 static void kbase_device_coherency_init(struct kbase_device *kbdev, u32 gpu_id)
3597 {
3598 #ifdef CONFIG_OF
3599         u32 supported_coherency_bitmap =
3600                 kbdev->gpu_props.props.raw_props.coherency_mode;
3601         const void *coherency_override_dts;
3602         u32 override_coherency;
3603 #endif /* CONFIG_OF */
3604
3605         kbdev->system_coherency = COHERENCY_NONE;
3606
3607         /* device tree may override the coherency */
3608 #ifdef CONFIG_OF
3609         coherency_override_dts = of_get_property(kbdev->dev->of_node,
3610                                                 "system-coherency",
3611                                                 NULL);
3612         if (coherency_override_dts) {
3613
3614                 override_coherency = be32_to_cpup(coherency_override_dts);
3615
3616                 if ((override_coherency <= COHERENCY_NONE) &&
3617                         (supported_coherency_bitmap &
3618                          COHERENCY_FEATURE_BIT(override_coherency))) {
3619
3620                         kbdev->system_coherency = override_coherency;
3621
3622                         dev_info(kbdev->dev,
3623                                 "Using coherency mode %u set from dtb",
3624                                 override_coherency);
3625                 } else {
3626                         dev_warn(kbdev->dev,
3627                                 "Ignoring unsupported coherency mode %u set from dtb",
3628                                 override_coherency);
3629                 }
3629         }
3630
3631 #endif /* CONFIG_OF */
3632
3633         kbdev->gpu_props.props.raw_props.coherency_mode =
3634                 kbdev->system_coherency;
3635 }
3636
3637 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3638
3639 /* Callback used by the kbase bus logger client, to initiate a GPU reset
3640  * when the bus log is restarted.  GPU reset is used as reference point
3641  * in HW bus log analyses.
3642  */
3643 static void kbase_logging_started_cb(void *data)
3644 {
3645         struct kbase_device *kbdev = (struct kbase_device *)data;
3646
3647         if (kbase_prepare_to_reset_gpu(kbdev))
3648                 kbase_reset_gpu(kbdev);
3649         dev_info(kbdev->dev, "KBASE - Bus logger restarted\n");
3650 }
3651 #endif
3652
3653
3654 static struct attribute *kbase_attrs[] = {
3655 #ifdef CONFIG_MALI_DEBUG
3656         &dev_attr_debug_command.attr,
3657         &dev_attr_js_softstop_always.attr,
3658 #endif
3659 #if !MALI_CUSTOMER_RELEASE
3660         &dev_attr_force_replay.attr,
3661 #endif
3662         &dev_attr_js_timeouts.attr,
3663         &dev_attr_soft_event_timeout.attr,
3664         &dev_attr_gpuinfo.attr,
3665         &dev_attr_dvfs_period.attr,
3666         &dev_attr_pm_poweroff.attr,
3667         &dev_attr_reset_timeout.attr,
3668         &dev_attr_js_scheduling_period.attr,
3669         &dev_attr_power_policy.attr,
3670         &dev_attr_core_availability_policy.attr,
3671         &dev_attr_core_mask.attr,
3672         &dev_attr_mem_pool_size.attr,
3673         &dev_attr_mem_pool_max_size.attr,
3674         NULL
3675 };
3676
3677 static const struct attribute_group kbase_attr_group = {
3678         .attrs = kbase_attrs,
3679 };
3680
3681 static int kbase_platform_device_remove(struct platform_device *pdev)
3682 {
3683         struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
3684         const struct list_head *dev_list;
3685
3686         if (!kbdev)
3687                 return -ENODEV;
3688
3689 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3690         if (kbdev->inited_subsys & inited_buslogger) {
3691                 bl_core_client_unregister(kbdev->buslogger);
3692                 kbdev->inited_subsys &= ~inited_buslogger;
3693         }
3694 #endif
3695
3696         if (kbdev->inited_subsys & inited_sysfs_group) {
3697                 sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
3698                 kbdev->inited_subsys &= ~inited_sysfs_group;
3699         }
3700
3701         if (kbdev->inited_subsys & inited_dev_list) {
3702                 dev_list = kbase_dev_list_get();
3703                 list_del(&kbdev->entry);
3704                 kbase_dev_list_put(dev_list);
3705                 kbdev->inited_subsys &= ~inited_dev_list;
3706         }
3707
3708         if (kbdev->inited_subsys & inited_misc_register) {
3709                 misc_deregister(&kbdev->mdev);
3710                 kbdev->inited_subsys &= ~inited_misc_register;
3711         }
3712
3713         if (kbdev->inited_subsys & inited_get_device) {
3714                 put_device(kbdev->dev);
3715                 kbdev->inited_subsys &= ~inited_get_device;
3716         }
3717
3718         if (kbdev->inited_subsys & inited_debugfs) {
3719                 kbase_device_debugfs_term(kbdev);
3720                 kbdev->inited_subsys &= ~inited_debugfs;
3721         }
3722
3723         if (kbdev->inited_subsys & inited_job_fault) {
3724                 kbase_debug_job_fault_dev_term(kbdev);
3725                 kbdev->inited_subsys &= ~inited_job_fault;
3726         }
3727
3728 #ifndef CONFIG_MALI_PRFCNT_SET_SECONDARY
3729         if (kbdev->inited_subsys & inited_ipa) {
3730                 kbase_ipa_term(kbdev->ipa_ctx);
3731                 kbdev->inited_subsys &= ~inited_ipa;
3732         }
3733 #endif /* CONFIG_MALI_PRFCNT_SET_SECONDARY */
3734
3735         if (kbdev->inited_subsys & inited_vinstr) {
3736                 kbase_vinstr_term(kbdev->vinstr_ctx);
3737                 kbdev->inited_subsys &= ~inited_vinstr;
3738         }
3739
3740 #ifdef CONFIG_MALI_DEVFREQ
3741         if (kbdev->inited_subsys & inited_devfreq) {
3742                 kbase_devfreq_term(kbdev);
3743                 kbdev->inited_subsys &= ~inited_devfreq;
3744         }
3745 #endif
3746
3747         if (kbdev->inited_subsys & inited_backend_late) {
3748                 kbase_backend_late_term(kbdev);
3749                 kbdev->inited_subsys &= ~inited_backend_late;
3750         }
3751
3752         if (kbdev->inited_subsys & inited_tlstream) {
3753                 kbase_tlstream_term();
3754                 kbdev->inited_subsys &= ~inited_tlstream;
3755         }
3756
3757         /* Bring job and mem sys to a halt before we continue termination */
3758
3759         if (kbdev->inited_subsys & inited_js)
3760                 kbasep_js_devdata_halt(kbdev);
3761
3762         if (kbdev->inited_subsys & inited_mem)
3763                 kbase_mem_halt(kbdev);
3764
3765         if (kbdev->inited_subsys & inited_js) {
3766                 kbasep_js_devdata_term(kbdev);
3767                 kbdev->inited_subsys &= ~inited_js;
3768         }
3769
3770         if (kbdev->inited_subsys & inited_mem) {
3771                 kbase_mem_term(kbdev);
3772                 kbdev->inited_subsys &= ~inited_mem;
3773         }
3774
3775         if (kbdev->inited_subsys & inited_pm_runtime_init) {
3776                 kbdev->pm.callback_power_runtime_term(kbdev);
3777                 kbdev->inited_subsys &= ~inited_pm_runtime_init;
3778         }
3779
3780         if (kbdev->inited_subsys & inited_device) {
3781                 kbase_device_term(kbdev);
3782                 kbdev->inited_subsys &= ~inited_device;
3783         }
3784
3785         if (kbdev->inited_subsys & inited_backend_early) {
3786                 kbase_backend_early_term(kbdev);
3787                 kbdev->inited_subsys &= ~inited_backend_early;
3788         }
3789
3790
3791         if (kbdev->inited_subsys & inited_power_control) {
3792                 power_control_term(kbdev);
3793                 kbdev->inited_subsys &= ~inited_power_control;
3794         }
3795
3796         if (kbdev->inited_subsys & inited_registers_map) {
3797                 registers_unmap(kbdev);
3798                 kbdev->inited_subsys &= ~inited_registers_map;
3799         }
3800
3801 #ifdef CONFIG_MALI_NO_MALI
3802         if (kbdev->inited_subsys & inited_gpu_device) {
3803                 gpu_device_destroy(kbdev);
3804                 kbdev->inited_subsys &= ~inited_gpu_device;
3805         }
3806 #endif /* CONFIG_MALI_NO_MALI */
3807
3808         if (kbdev->inited_subsys != 0)
3809                 dev_err(kbdev->dev, "Missing subsystem termination\n");
3810
3811         kbase_device_free(kbdev);
3812
3813         return 0;
3814 }
3815
3816 static int kbase_platform_device_probe(struct platform_device *pdev)
3817 {
3818         struct kbase_device *kbdev;
3819         struct mali_base_gpu_core_props *core_props;
3820         u32 gpu_id;
3821         const struct list_head *dev_list;
3822         int err = 0;
3823
3824 #ifdef CONFIG_OF
3825         err = kbase_platform_early_init();
3826         if (err) {
3827                 dev_err(&pdev->dev, "Early platform initialization failed\n");
3828                 kbase_platform_device_remove(pdev);
3829                 return err;
3830         }
3831 #endif
3832
3833         kbdev = kbase_device_alloc();
3834         if (!kbdev) {
3835                 dev_err(&pdev->dev, "Allocate device failed\n");
3836                 kbase_platform_device_remove(pdev);
3837                 return -ENOMEM;
3838         }
3839
3840         kbdev->dev = &pdev->dev;
3841         dev_set_drvdata(kbdev->dev, kbdev);
3842
3843 #ifdef CONFIG_MALI_NO_MALI
3844         err = gpu_device_create(kbdev);
3845         if (err) {
3846                 dev_err(&pdev->dev, "Dummy model initialization failed\n");
3847                 kbase_platform_device_remove(pdev);
3848                 return err;
3849         }
3850         kbdev->inited_subsys |= inited_gpu_device;
3851 #endif /* CONFIG_MALI_NO_MALI */
3852
3853         err = assign_irqs(pdev);
3854         if (err) {
3855                 dev_err(&pdev->dev, "IRQ search failed\n");
3856                 kbase_platform_device_remove(pdev);
3857                 return err;
3858         }
3859
3860         err = registers_map(kbdev);
3861         if (err) {
3862                 dev_err(&pdev->dev, "Register map failed\n");
3863                 kbase_platform_device_remove(pdev);
3864                 return err;
3865         }
3866         kbdev->inited_subsys |= inited_registers_map;
3867
3868         err = power_control_init(pdev);
3869         if (err) {
3870                 dev_err(&pdev->dev, "Power control initialization failed\n");
3871                 kbase_platform_device_remove(pdev);
3872                 return err;
3873         }
3874         kbdev->inited_subsys |= inited_power_control;
3875
3876
3877         err = kbase_backend_early_init(kbdev);
3878         if (err) {
3879                 dev_err(kbdev->dev, "Early backend initialization failed\n");
3880                 kbase_platform_device_remove(pdev);
3881                 return err;
3882         }
3883         kbdev->inited_subsys |= inited_backend_early;
3884
3885         scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name,
3886                         kbase_dev_nr);
3887
3888         kbase_disjoint_init(kbdev);
3889
3890         /* obtain min/max configured gpu frequencies */
3891         core_props = &(kbdev->gpu_props.props.core_props);
3892         core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN;
3893         core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
3894
3895         kbdev->gpu_props.irq_throttle_time_us = DEFAULT_IRQ_THROTTLE_TIME_US;
3896
3897         err = kbase_device_init(kbdev);
3898         if (err) {
3899                 dev_err(kbdev->dev, "Device initialization failed (%d)\n", err);
3900                 kbase_platform_device_remove(pdev);
3901                 return err;
3902         }
3903         kbdev->inited_subsys |= inited_device;
3904
3905         if (kbdev->pm.callback_power_runtime_init) {
3906                 err = kbdev->pm.callback_power_runtime_init(kbdev);
3907                 if (err) {
3908                         dev_err(kbdev->dev,
3909                                 "Runtime PM initialization failed\n");
3910                         kbase_platform_device_remove(pdev);
3911                         return err;
3912                 }
3913                 kbdev->inited_subsys |= inited_pm_runtime_init;
3914         }
3915
3916         err = kbase_mem_init(kbdev);
3917         if (err) {
3918                 dev_err(kbdev->dev, "Memory subsystem initialization failed\n");
3919                 kbase_platform_device_remove(pdev);
3920                 return err;
3921         }
3922         kbdev->inited_subsys |= inited_mem;
3923
3924         gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
3925         gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
3926         gpu_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
3927
3928         kbase_device_coherency_init(kbdev, gpu_id);
3929
3930         kbasep_secure_mode_init(kbdev);
3931
3932         err = kbasep_js_devdata_init(kbdev);
3933         if (err) {
3934                 dev_err(kbdev->dev, "JS devdata initialization failed\n");
3935                 kbase_platform_device_remove(pdev);
3936                 return err;
3937         }
3938         kbdev->inited_subsys |= inited_js;
3939
3940         err = kbase_tlstream_init();
3941         if (err) {
3942                 dev_err(kbdev->dev, "Timeline stream initialization failed\n");
3943                 kbase_platform_device_remove(pdev);
3944                 return err;
3945         }
3946         kbdev->inited_subsys |= inited_tlstream;
3947
3948         err = kbase_backend_late_init(kbdev);
3949         if (err) {
3950                 dev_err(kbdev->dev, "Late backend initialization failed\n");
3951                 kbase_platform_device_remove(pdev);
3952                 return err;
3953         }
3954         kbdev->inited_subsys |= inited_backend_late;
3955
3956 #ifdef CONFIG_MALI_DEVFREQ
3957         err = kbase_devfreq_init(kbdev);
3958         if (err) {
3959                 dev_err(kbdev->dev, "Devfreq initialization failed\n");
3960                 kbase_platform_device_remove(pdev);
3961                 return err;
3962         }
3963         kbdev->inited_subsys |= inited_devfreq;
3964 #endif /* CONFIG_MALI_DEVFREQ */
3965
3966         kbdev->vinstr_ctx = kbase_vinstr_init(kbdev);
3967         if (!kbdev->vinstr_ctx) {
3968                 dev_err(kbdev->dev,
3969                         "Virtual instrumentation initialization failed\n");
3970                 kbase_platform_device_remove(pdev);
3971                 return -EINVAL;
3972         }
3973         kbdev->inited_subsys |= inited_vinstr;
3974
3975 #ifndef CONFIG_MALI_PRFCNT_SET_SECONDARY
3976         kbdev->ipa_ctx = kbase_ipa_init(kbdev);
3977         if (!kbdev->ipa_ctx) {
3978                 dev_err(kbdev->dev, "IPA initialization failed\n");
3979                 kbase_platform_device_remove(pdev);
3980                 return -EINVAL;
3981         }
3982
3983         kbdev->inited_subsys |= inited_ipa;
3984 #endif  /* CONFIG_MALI_PRFCNT_SET_SECONDARY */
3985
3986         err = kbase_debug_job_fault_dev_init(kbdev);
3987         if (err) {
3988                 dev_err(kbdev->dev, "Job fault debug initialization failed\n");
3989                 kbase_platform_device_remove(pdev);
3990                 return err;
3991         }
3992         kbdev->inited_subsys |= inited_job_fault;
3993
3994         err = kbase_device_debugfs_init(kbdev);
3995         if (err) {
3996                 dev_err(kbdev->dev, "DebugFS initialization failed");
3997                 kbase_platform_device_remove(pdev);
3998                 return err;
3999         }
4000         kbdev->inited_subsys |= inited_debugfs;
4001
4002         /* initialize the kctx list */
4003         mutex_init(&kbdev->kctx_list_lock);
4004         INIT_LIST_HEAD(&kbdev->kctx_list);
4005
4006         kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
4007         kbdev->mdev.name = kbdev->devname;
4008         kbdev->mdev.fops = &kbase_fops;
4009         kbdev->mdev.parent = get_device(kbdev->dev);
4010         kbdev->inited_subsys |= inited_get_device;
4011
4012         err = misc_register(&kbdev->mdev);
4013         if (err) {
4014                 dev_err(kbdev->dev, "Misc device registration failed for %s\n",
4015                         kbdev->devname);
4016                 kbase_platform_device_remove(pdev);
4017                 return err;
4018         }
4019         kbdev->inited_subsys |= inited_misc_register;
4020
4021         dev_list = kbase_dev_list_get();
4022         list_add(&kbdev->entry, &kbase_dev_list);
4023         kbase_dev_list_put(dev_list);
4024         kbdev->inited_subsys |= inited_dev_list;
4025
4026         err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
4027         if (err) {
4028                 dev_err(&pdev->dev, "SysFS group creation failed\n");
4029                 kbase_platform_device_remove(pdev);
4030                 return err;
4031         }
4032         kbdev->inited_subsys |= inited_sysfs_group;
4033
4034 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
4035         err = bl_core_client_register(kbdev->devname,
4036                                                 kbase_logging_started_cb,
4037                                                 kbdev, &kbdev->buslogger,
4038                                                 THIS_MODULE, NULL);
4039         if (err == 0) {
4040                 kbdev->inited_subsys |= inited_buslogger;
4041                 bl_core_set_threshold(kbdev->buslogger, 1024*1024*1024);
4042         } else {
4043                 dev_warn(kbdev->dev, "Bus log client registration failed\n");
4044                 err = 0;
4045         }
4046 #endif
4047
4048         dev_info(kbdev->dev,
4049                         "Probed as %s\n", dev_name(kbdev->mdev.this_device));
4050
4051         kbase_dev_nr++;
4052
4053         return err;
4054 }
4055
4056 /** Suspend callback from the OS.
4057  *
4058  * This is called by Linux when the device should suspend.
4059  *
4060  * @param dev  The device to suspend
4061  *
4062  * @return A standard Linux error code
4063  */
4064 static int kbase_device_suspend(struct device *dev)
4065 {
4066         struct kbase_device *kbdev = to_kbase_device(dev);
4067
4068         if (!kbdev)
4069                 return -ENODEV;
4070
4071 #if defined(CONFIG_PM_DEVFREQ) && \
4072                 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
4073         devfreq_suspend_device(kbdev->devfreq);
4074 #endif
4075
4076         kbase_pm_suspend(kbdev);
4077         return 0;
4078 }
4079
4080 /** Resume callback from the OS.
4081  *
4082  * This is called by Linux when the device should resume from suspension.
4083  *
4084  * @param dev  The device to resume
4085  *
4086  * @return A standard Linux error code
4087  */
4088 static int kbase_device_resume(struct device *dev)
4089 {
4090         struct kbase_device *kbdev = to_kbase_device(dev);
4091
4092         if (!kbdev)
4093                 return -ENODEV;
4094
4095         kbase_pm_resume(kbdev);
4096
4097 #if defined(CONFIG_PM_DEVFREQ) && \
4098                 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
4099         devfreq_resume_device(kbdev->devfreq);
4100 #endif
4101         return 0;
4102 }
4103
4104 /** Runtime suspend callback from the OS.
4105  *
4106  * This is called by Linux when the device should prepare for a condition in which it will
4107  * not be able to communicate with the CPU(s) and RAM due to power management.
4108  *
4109  * @param dev  The device to suspend
4110  *
4111  * @return A standard Linux error code
4112  */
4113 #ifdef KBASE_PM_RUNTIME
4114 static int kbase_device_runtime_suspend(struct device *dev)
4115 {
4116         struct kbase_device *kbdev = to_kbase_device(dev);
4117
4118         if (!kbdev)
4119                 return -ENODEV;
4120
4121 #if defined(CONFIG_PM_DEVFREQ) && \
4122                 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
4123         devfreq_suspend_device(kbdev->devfreq);
4124 #endif
4125
4126         if (kbdev->pm.backend.callback_power_runtime_off) {
4127                 kbdev->pm.backend.callback_power_runtime_off(kbdev);
4128                 dev_dbg(dev, "runtime suspend\n");
4129         }
4130         return 0;
4131 }
4132 #endif /* KBASE_PM_RUNTIME */
4133
4134 /** Runtime resume callback from the OS.
4135  *
4136  * This is called by Linux when the device should go into a fully active state.
4137  *
4138  * @param dev  The device to resume
4139  *
4140  * @return A standard Linux error code
4141  */
4142
4143 #ifdef KBASE_PM_RUNTIME
4144 static int kbase_device_runtime_resume(struct device *dev)
4145 {
4146         int ret = 0;
4147         struct kbase_device *kbdev = to_kbase_device(dev);
4148
4149         if (!kbdev)
4150                 return -ENODEV;
4151
4152         if (kbdev->pm.backend.callback_power_runtime_on) {
4153                 ret = kbdev->pm.backend.callback_power_runtime_on(kbdev);
4154                 dev_dbg(dev, "runtime resume\n");
4155         }
4156
4157 #if defined(CONFIG_PM_DEVFREQ) && \
4158                 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
4159         devfreq_resume_device(kbdev->devfreq);
4160 #endif
4161
4162         return ret;
4163 }
4164 #endif /* KBASE_PM_RUNTIME */
4165
4166
4167 #ifdef KBASE_PM_RUNTIME
4168 /**
4169  * kbase_device_runtime_idle - Runtime idle callback from the OS.
4170  * @dev: The device to check
4171  *
4172  * This is called by Linux when the device appears to be inactive and it might
4173  * be placed into a low power state.
4174  *
4175  * Return: 0 if device can be suspended, non-zero to avoid runtime autosuspend,
4176  * otherwise a standard Linux error code
4177  */
4178 static int kbase_device_runtime_idle(struct device *dev)
4179 {
4180         struct kbase_device *kbdev = to_kbase_device(dev);
4181
4182         if (!kbdev)
4183                 return -ENODEV;
4184
4185         /* Use platform specific implementation if it exists. */
4186         if (kbdev->pm.backend.callback_power_runtime_idle)
4187                 return kbdev->pm.backend.callback_power_runtime_idle(kbdev);
4188
4189         return 0;
4190 }
4191 #endif /* KBASE_PM_RUNTIME */
4192
4193 /** The power management operations for the platform driver.
4194  */
4195 static const struct dev_pm_ops kbase_pm_ops = {
4196         .suspend = kbase_device_suspend,
4197         .resume = kbase_device_resume,
4198 #ifdef KBASE_PM_RUNTIME
4199         .runtime_suspend = kbase_device_runtime_suspend,
4200         .runtime_resume = kbase_device_runtime_resume,
4201         .runtime_idle = kbase_device_runtime_idle,
4202 #endif /* KBASE_PM_RUNTIME */
4203 };
4204
4205 #ifdef CONFIG_OF
4206 static const struct of_device_id kbase_dt_ids[] = {
4207         { .compatible = "arm,malit7xx" },
4208         { .compatible = "arm,mali-midgard" },
4209         { /* sentinel */ }
4210 };
4211 MODULE_DEVICE_TABLE(of, kbase_dt_ids);
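/*
 * A minimal device tree node matched by the table above (a sketch only;
 * real bindings also describe clocks, the regulator and an OPP table):
 *
 *      gpu@ffa30000 {
 *              compatible = "arm,mali-midgard";
 *              reg = <0xffa30000 0x10000>;
 *              interrupts = <...>, <...>, <...>;
 *              interrupt-names = "JOB", "MMU", "GPU";
 *      };
 */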
4212 #endif
4213
4214 static struct platform_driver kbase_platform_driver = {
4215         .probe = kbase_platform_device_probe,
4216         .remove = kbase_platform_device_remove,
4217         .driver = {
4218                    .name = kbase_drv_name,
4219                    .owner = THIS_MODULE,
4220                    .pm = &kbase_pm_ops,
4221                    .of_match_table = of_match_ptr(kbase_dt_ids),
4222         },
4223 };
4224
4225 /*
4226  * When using Device Tree, the driver no longer provides a shortcut to
4227  * create the Mali platform device.
4228  */
4229 #ifdef CONFIG_OF
4230 module_platform_driver(kbase_platform_driver);
4231 #else
4232
4233 static int __init rockchip_gpu_init_driver(void)
4234 {
4235         return platform_driver_register(&kbase_platform_driver);
4236 }
4237 late_initcall(rockchip_gpu_init_driver);
4238
4239 static int __init kbase_driver_init(void)
4240 {
4241         int ret;
4242
4243         ret = kbase_platform_early_init();
4244         if (ret)
4245                 return ret;
4246
4247 #ifndef CONFIG_MACH_MANTA
4248 #ifdef CONFIG_MALI_PLATFORM_FAKE
4249         ret = kbase_platform_fake_register();
4250         if (ret)
4251                 return ret;
4252 #endif
4253 #endif
4254         ret = platform_driver_register(&kbase_platform_driver);
4255 #ifndef CONFIG_MACH_MANTA
4256 #ifdef CONFIG_MALI_PLATFORM_FAKE
4257         if (ret)
4258                 kbase_platform_fake_unregister();
4259 #endif
4260 #endif
4261         return ret;
4262 }
4263
4264 static void __exit kbase_driver_exit(void)
4265 {
4266         platform_driver_unregister(&kbase_platform_driver);
4267 #ifndef CONFIG_MACH_MANTA
4268 #ifdef CONFIG_MALI_PLATFORM_FAKE
4269         kbase_platform_fake_unregister();
4270 #endif
4271 #endif
4272 }
4273
4274 module_init(kbase_driver_init);
4275 module_exit(kbase_driver_exit);
4276
4277 #endif /* CONFIG_OF */
4278
4279 MODULE_LICENSE("GPL");
4280 MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \
4281                 __stringify(BASE_UK_VERSION_MAJOR) "." \
4282                 __stringify(BASE_UK_VERSION_MINOR) ")");
4283
4284 #if defined(CONFIG_MALI_GATOR_SUPPORT) || defined(CONFIG_MALI_SYSTEM_TRACE)
4285 #define CREATE_TRACE_POINTS
4286 #endif
4287
4288 #ifdef CONFIG_MALI_GATOR_SUPPORT
4289 /* Create the trace points (otherwise we just get code to call a tracepoint) */
4290 #include "mali_linux_trace.h"
4291
4292 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_job_slots_event);
4293 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_status);
4294 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_on);
4295 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_off);
4296 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_page_fault_insert_pages);
4297 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_in_use);
4298 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_released);
4299 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_total_alloc_pages_change);
4300 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_sw_counter);
4301
4302 void kbase_trace_mali_pm_status(u32 event, u64 value)
4303 {
4304         trace_mali_pm_status(event, value);
4305 }
4306
4307 void kbase_trace_mali_pm_power_off(u32 event, u64 value)
4308 {
4309         trace_mali_pm_power_off(event, value);
4310 }
4311
4312 void kbase_trace_mali_pm_power_on(u32 event, u64 value)
4313 {
4314         trace_mali_pm_power_on(event, value);
4315 }
4316
4317 void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id)
4318 {
4319         trace_mali_job_slots_event(event, (kctx != NULL ? kctx->tgid : 0), (kctx != NULL ? kctx->pid : 0), atom_id);
4320 }
4321
4322 void kbase_trace_mali_page_fault_insert_pages(int event, u32 value)
4323 {
4324         trace_mali_page_fault_insert_pages(event, value);
4325 }
4326
4327 void kbase_trace_mali_mmu_as_in_use(int event)
4328 {
4329         trace_mali_mmu_as_in_use(event);
4330 }
4331
4332 void kbase_trace_mali_mmu_as_released(int event)
4333 {
4334         trace_mali_mmu_as_released(event);
4335 }
4336
4337 void kbase_trace_mali_total_alloc_pages_change(long long int event)
4338 {
4339         trace_mali_total_alloc_pages_change(event);
4340 }
4341 #endif /* CONFIG_MALI_GATOR_SUPPORT */
4342 #ifdef CONFIG_MALI_SYSTEM_TRACE
4343 #include "mali_linux_kbase_trace.h"
4344 #endif