/*
 *
 * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */

#define ENABLE_DEBUG_LOG
#include "platform/rk/custom_log.h"

#include <mali_kbase.h>
#include <mali_kbase_hwaccess_gpuprops.h>
#include <mali_kbase_config_defaults.h>
#include <mali_kbase_uku.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_instr.h>
#include <mali_kbase_gator.h>
#include <backend/gpu/mali_kbase_js_affinity.h>
#include <mali_kbase_mem_linux.h>
#ifdef CONFIG_MALI_DEVFREQ
#include <backend/gpu/mali_kbase_devfreq.h>
#endif /* CONFIG_MALI_DEVFREQ */
#ifdef CONFIG_MALI_NO_MALI
#include "mali_kbase_model_linux.h"
#endif /* CONFIG_MALI_NO_MALI */
#include "mali_kbase_mem_profile_debugfs_buf_size.h"
#include "mali_kbase_debug_mem_view.h"
#include "mali_kbase_mem.h"
#include "mali_kbase_mem_pool_debugfs.h"
#if !MALI_CUSTOMER_RELEASE
#include "mali_kbase_regs_dump_debugfs.h"
#endif /* !MALI_CUSTOMER_RELEASE */
#include <mali_kbase_hwaccess_backend.h>
#include <mali_kbase_hwaccess_jm.h>
#include <backend/gpu/mali_kbase_device_internal.h>

#ifdef CONFIG_KDS
#include <linux/kds.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#endif /* CONFIG_KDS */

#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/list.h>
#include <linux/semaphore.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/compat.h>	/* is_compat_task */
#include <linux/mman.h>
#include <linux/version.h>
#include <linux/security.h>
#ifdef CONFIG_MALI_PLATFORM_DEVICETREE
#include <linux/pm_runtime.h>
#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */
#include <mali_kbase_hw.h>
#include <platform/mali_kbase_platform_common.h>
#ifdef CONFIG_MALI_PLATFORM_FAKE
#include <platform/mali_kbase_platform_fake.h>
#endif /* CONFIG_MALI_PLATFORM_FAKE */
#ifdef CONFIG_SYNC
#include <mali_kbase_sync.h>
#endif /* CONFIG_SYNC */
#ifdef CONFIG_PM_DEVFREQ
#include <linux/devfreq.h>
#endif /* CONFIG_PM_DEVFREQ */
#include <linux/clk.h>
#include <linux/delay.h>

#include <mali_kbase_config.h>

#ifdef CONFIG_MACH_MANTA
#include <plat/devs.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
#include <linux/pm_opp.h>
#else
#include <linux/opp.h>
#endif

#if defined(CONFIG_MALI_MIPE_ENABLED)
#include <mali_kbase_tlstream.h>
#endif

/* GPU IRQ Tags */
#define JOB_IRQ_TAG	0
#define MMU_IRQ_TAG	1
#define GPU_IRQ_TAG	2

#if MALI_UNIT_TEST
static struct kbase_exported_test_data shared_kernel_test_data;
EXPORT_SYMBOL(shared_kernel_test_data);
#endif /* MALI_UNIT_TEST */

#define KBASE_DRV_NAME "mali"
/** rk_ext: version of the rk extensions in this mali_ko, a.k.a. rk_ko_ver. */
#define ROCKCHIP_VERSION    (13)

static const char kbase_drv_name[] = KBASE_DRV_NAME;

static int kbase_dev_nr;

static DEFINE_MUTEX(kbase_dev_list_lock);
static LIST_HEAD(kbase_dev_list);

#define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
static inline void __compile_time_asserts(void)
{
	CSTD_COMPILE_TIME_ASSERT(sizeof(KERNEL_SIDE_DDK_VERSION_STRING) <= KBASE_GET_VERSION_BUFFER_SIZE);
}
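
/*
 * Note: CSTD_COMPILE_TIME_ASSERT turns a constant-expression check into a
 * build failure rather than a runtime one. A minimal sketch of the classic
 * idiom (the real macro lives in the shared cutils headers and may differ):
 *
 *	#define CSTD_COMPILE_TIME_ASSERT(expr) \
 *		((void)sizeof(char[(expr) ? 1 : -1]))
 *
 * If the version string ever outgrows KBASE_GET_VERSION_BUFFER_SIZE, the
 * array size goes negative and compilation fails at this line.
 */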

#ifdef CONFIG_KDS

struct kbasep_kds_resource_set_file_data {
	struct kds_resource_set *lock;
};

static int kds_resource_release(struct inode *inode, struct file *file);

static const struct file_operations kds_resource_fops = {
	.release = kds_resource_release
};

struct kbase_kds_resource_list_data {
	struct kds_resource **kds_resources;
	unsigned long *kds_access_bitmap;
	int num_elems;
};

static int kds_resource_release(struct inode *inode, struct file *file)
{
	struct kbasep_kds_resource_set_file_data *data;

	data = (struct kbasep_kds_resource_set_file_data *)file->private_data;
	if (NULL != data) {
		if (NULL != data->lock)
			kds_resource_set_release(&data->lock);

		kfree(data);
	}
	return 0;
}

static int kbasep_kds_allocate_resource_list_data(struct kbase_context *kctx, struct base_external_resource *ext_res, int num_elems, struct kbase_kds_resource_list_data *resources_list)
{
	struct base_external_resource *res = ext_res;
	int res_id;

	/* assume we have to wait for all */

	KBASE_DEBUG_ASSERT(0 != num_elems);
	resources_list->kds_resources = kmalloc_array(num_elems,
			sizeof(struct kds_resource *), GFP_KERNEL);

	if (NULL == resources_list->kds_resources)
		return -ENOMEM;

	KBASE_DEBUG_ASSERT(0 != num_elems);
	resources_list->kds_access_bitmap = kzalloc(
			sizeof(unsigned long) *
			((num_elems + BITS_PER_LONG - 1) / BITS_PER_LONG),
			GFP_KERNEL);
	if (NULL == resources_list->kds_access_bitmap) {
		/* The bitmap allocation failed; free the resources array
		 * allocated above (not the NULL bitmap) to avoid a leak. */
		kfree(resources_list->kds_resources);
		return -ENOMEM;
	}

	kbase_gpu_vm_lock(kctx);
	for (res_id = 0; res_id < num_elems; res_id++, res++) {
		int exclusive;
		struct kbase_va_region *reg;
		struct kds_resource *kds_res = NULL;

		exclusive = res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE;
		reg = kbase_region_tracker_find_region_enclosing_address(kctx, res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);

		/* did we find a matching region object? */
		if (NULL == reg || (reg->flags & KBASE_REG_FREE))
			break;

		/* no need to check reg->alloc, as only regions with an alloc
		 * have a size, and kbase_region_tracker_find_region_enclosing_address
		 * only returns regions with size > 0 */
		switch (reg->gpu_alloc->type) {
#if defined(CONFIG_UMP) && defined(CONFIG_KDS)
		case KBASE_MEM_TYPE_IMPORTED_UMP:
			kds_res = ump_dd_kds_resource_get(reg->gpu_alloc->imported.ump_handle);
			break;
#endif /* defined(CONFIG_UMP) && defined(CONFIG_KDS) */
		default:
			break;
		}

		/* no kds resource for the region? */
		if (!kds_res)
			break;

		resources_list->kds_resources[res_id] = kds_res;

		if (exclusive)
			set_bit(res_id, resources_list->kds_access_bitmap);
	}
	kbase_gpu_vm_unlock(kctx);

	/* did the loop run to completion? */
	if (res_id == num_elems)
		return 0;

	/* Clean up as the resource list is not valid. */
	kfree(resources_list->kds_resources);
	kfree(resources_list->kds_access_bitmap);

	return -EINVAL;
}

static bool kbasep_validate_kbase_pointer(
		struct kbase_context *kctx, union kbase_pointer *p)
{
	if (kctx->is_compat) {
		if (p->compat_value == 0)
			return false;
	} else {
		if (NULL == p->value)
			return false;
	}
	return true;
}
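
/*
 * kbasep_validate_kbase_pointer() relies on union kbase_pointer carrying a
 * user pointer at either of two widths. A sketch of the layout assumed here
 * (the authoritative definition lives in the uku/base headers):
 *
 *	union kbase_pointer {
 *		void __user *value;	// native user pointer
 *		u32 compat_value;	// 32-bit pointer from a compat task
 *		u64 sizer;		// forces a fixed 8-byte size
 *	};
 *
 * A 32-bit client on a 64-bit kernel (kctx->is_compat) writes compat_value,
 * so NULL checks and dereferences must pick the matching member.
 */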

static int kbase_external_buffer_lock(struct kbase_context *kctx,
		struct kbase_uk_ext_buff_kds_data *args, u32 args_size)
{
	struct base_external_resource *ext_res_copy;
	size_t ext_resource_size;
	int ret = -EINVAL;
	int fd = -EBADF;
	struct base_external_resource __user *ext_res_user;
	int __user *file_desc_usr;
	struct kbasep_kds_resource_set_file_data *fdata;
	struct kbase_kds_resource_list_data resource_list_data;

	if (args_size != sizeof(struct kbase_uk_ext_buff_kds_data))
		return -EINVAL;

	/* Check user space has provided valid data */
	if (!kbasep_validate_kbase_pointer(kctx, &args->external_resource) ||
			!kbasep_validate_kbase_pointer(kctx, &args->file_descriptor) ||
			(0 == args->num_res) ||
			(args->num_res > KBASE_MAXIMUM_EXT_RESOURCES))
		return -EINVAL;

	ext_resource_size = sizeof(struct base_external_resource) * args->num_res;

	KBASE_DEBUG_ASSERT(0 != ext_resource_size);
	ext_res_copy = kmalloc(ext_resource_size, GFP_KERNEL);

	if (!ext_res_copy)
		return -EINVAL;
#ifdef CONFIG_COMPAT
	if (kctx->is_compat) {
		ext_res_user = compat_ptr(args->external_resource.compat_value);
		file_desc_usr = compat_ptr(args->file_descriptor.compat_value);
	} else {
#endif /* CONFIG_COMPAT */
		ext_res_user = args->external_resource.value;
		file_desc_usr = args->file_descriptor.value;
#ifdef CONFIG_COMPAT
	}
#endif /* CONFIG_COMPAT */

	/* Copy the external resources to lock from user space */
	if (copy_from_user(ext_res_copy, ext_res_user, ext_resource_size))
		goto out;

	/* Allocate data to be stored in the file */
	fdata = kmalloc(sizeof(*fdata), GFP_KERNEL);

	if (!fdata) {
		ret = -ENOMEM;
		goto out;
	}

	/* Parse given elements and create resource and access lists */
	ret = kbasep_kds_allocate_resource_list_data(kctx,
			ext_res_copy, args->num_res, &resource_list_data);
	if (!ret) {
		long err;

		fdata->lock = NULL;

		fd = anon_inode_getfd("kds_ext", &kds_resource_fops, fdata, 0);

		err = copy_to_user(file_desc_usr, &fd, sizeof(fd));

		/* If the file descriptor was valid and we successfully copied
		 * it to user space, then we can try and lock the requested
		 * kds resources.
		 */
		if ((fd >= 0) && (0 == err)) {
			struct kds_resource_set *lock;

			lock = kds_waitall(args->num_res,
					resource_list_data.kds_access_bitmap,
					resource_list_data.kds_resources,
					KDS_WAIT_BLOCKING);

			if (!lock) {
				ret = -EINVAL;
			} else if (IS_ERR(lock)) {
				ret = PTR_ERR(lock);
			} else {
				ret = 0;
				fdata->lock = lock;
			}
		} else {
			ret = -EINVAL;
		}

		kfree(resource_list_data.kds_resources);
		kfree(resource_list_data.kds_access_bitmap);
	}

	if (ret) {
		/* If the file was opened successfully then close it, which
		 * will clean up the file data; otherwise we clean up the
		 * file data ourselves.
		 */
		if (fd >= 0)
			sys_close(fd);
		else
			kfree(fdata);
	}
out:
	kfree(ext_res_copy);

	return ret;
}
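
/*
 * Lifetime note (summary of the paths above): the anonymous fd returned to
 * user space owns the kds_resource_set. The locks taken by kds_waitall()
 * are stored in fdata->lock and dropped by kds_resource_release() when the
 * client closes the fd, so close(fd) is the userspace unlock primitive.
 */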
#endif /* CONFIG_KDS */

#ifdef CONFIG_MALI_MIPE_ENABLED
static void kbase_create_timeline_objects(struct kbase_context *kctx)
{
	struct kbase_device             *kbdev = kctx->kbdev;
	unsigned int                    lpu_id;
	unsigned int                    as_nr;
	struct kbasep_kctx_list_element *element;

	/* Create LPU objects. */
	for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
		u32 *lpu =
			&kbdev->gpu_props.props.raw_props.js_features[lpu_id];
		kbase_tlstream_tl_summary_new_lpu(lpu, lpu_id, *lpu);
	}

	/* Create Address Space objects. */
	for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
		kbase_tlstream_tl_summary_new_as(&kbdev->as[as_nr], as_nr);

	/* Create GPU object and make it retain all LPUs and address spaces. */
	kbase_tlstream_tl_summary_new_gpu(
			kbdev,
			kbdev->gpu_props.props.raw_props.gpu_id,
			kbdev->gpu_props.num_cores);

	for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
		void *lpu =
			&kbdev->gpu_props.props.raw_props.js_features[lpu_id];
		kbase_tlstream_tl_summary_lifelink_lpu_gpu(lpu, kbdev);
	}
	for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
		kbase_tlstream_tl_summary_lifelink_as_gpu(
				&kbdev->as[as_nr],
				kbdev);

	/* Create object for each known context. */
	mutex_lock(&kbdev->kctx_list_lock);
	list_for_each_entry(element, &kbdev->kctx_list, link) {
		kbase_tlstream_tl_summary_new_ctx(
				element->kctx,
				(u32)(element->kctx->id));
	}
	/* Before releasing the lock, reset the body stream buffers.
	 * This prevents a context creation message from being directed to
	 * both the summary and body streams. */
	kbase_tlstream_reset_body_streams();
	mutex_unlock(&kbdev->kctx_list_lock);
	/* Static objects are placed into the summary packet, which needs to
	 * be transmitted first. Flush all streams to make it available to
	 * user space. */
	kbase_tlstream_flush_streams();
}
#endif

static void kbase_api_handshake(struct uku_version_check_args *version)
{
	switch (version->major) {
#ifdef BASE_LEGACY_UK6_SUPPORT
	case 6:
		/* We are backwards compatible with version 6,
		 * so pretend to be the old version */
		version->major = 6;
		version->minor = 1;
		break;
#endif /* BASE_LEGACY_UK6_SUPPORT */
#ifdef BASE_LEGACY_UK7_SUPPORT
	case 7:
		/* We are backwards compatible with version 7,
		 * so pretend to be the old version */
		version->major = 7;
		version->minor = 1;
		break;
#endif /* BASE_LEGACY_UK7_SUPPORT */
#ifdef BASE_LEGACY_UK8_SUPPORT
	case 8:
		/* We are backwards compatible with version 8,
		 * so pretend to be the old version */
		version->major = 8;
		version->minor = 4;
		break;
#endif /* BASE_LEGACY_UK8_SUPPORT */
#ifdef BASE_LEGACY_UK9_SUPPORT
	case 9:
		/* We are backwards compatible with version 9,
		 * so pretend to be the old version */
		version->major = 9;
		version->minor = 0;
		break;
#endif /* BASE_LEGACY_UK9_SUPPORT */
	case BASE_UK_VERSION_MAJOR:
		/* set minor to be the lowest common */
		version->minor = min_t(int, BASE_UK_VERSION_MINOR,
				(int)version->minor);
		break;
	default:
		/* We return our actual version regardless of whether it
		 * matches the version proposed by userspace - userspace can
		 * bail if it can't handle this version */
		version->major = BASE_UK_VERSION_MAJOR;
		version->minor = BASE_UK_VERSION_MINOR;
		break;
	}
}
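
/*
 * Worked example of the handshake above (values are illustrative): a client
 * built against UK 8.x proposes major = 8. With BASE_LEGACY_UK8_SUPPORT
 * compiled in, the kernel answers major = 8, minor = 4, and both sides then
 * speak the 8.4 protocol. A client proposing the current
 * BASE_UK_VERSION_MAJOR instead gets min(BASE_UK_VERSION_MINOR, proposed
 * minor), and any other major is answered with the kernel's own version so
 * the client can decide whether to bail.
 */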

/**
 * enum mali_error - Mali error codes shared with userspace
 *
 * This is a subset of the common Mali errors, restricted to those that can
 * be returned to userspace. The values of matching user and kernel space
 * enumerators MUST be the same. MALI_ERROR_NONE is guaranteed to be 0.
 */
enum mali_error {
	MALI_ERROR_NONE = 0,
	MALI_ERROR_OUT_OF_GPU_MEMORY,
	MALI_ERROR_OUT_OF_MEMORY,
	MALI_ERROR_FUNCTION_FAILED,
};

#ifdef CONFIG_MALI_DEBUG
#define INACTIVE_WAIT_MS (5000)

void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive)
{
	kbdev->driver_inactive = inactive;
	wake_up(&kbdev->driver_inactive_wait);

	/* Wait for any running IOCTLs to complete */
	if (inactive)
		msleep(INACTIVE_WAIT_MS);
}
KBASE_EXPORT_TEST_API(kbase_set_driver_inactive);
#endif /* CONFIG_MALI_DEBUG */

static int kbase_dispatch(struct kbase_context *kctx, void * const args, u32 args_size)
{
	struct kbase_device *kbdev;
	union uk_header *ukh = args;
	u32 id;
	int ret = 0;

	KBASE_DEBUG_ASSERT(ukh != NULL);

	kbdev = kctx->kbdev;
	id = ukh->id;
	ukh->ret = MALI_ERROR_NONE; /* Be optimistic */

#ifdef CONFIG_MALI_DEBUG
	wait_event(kbdev->driver_inactive_wait,
			kbdev->driver_inactive == false);
#endif /* CONFIG_MALI_DEBUG */

	if (UKP_FUNC_ID_CHECK_VERSION == id) {
		struct uku_version_check_args *version_check;

		if (args_size != sizeof(struct uku_version_check_args)) {
			ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			return 0;
		}
		version_check = (struct uku_version_check_args *)args;
		kbase_api_handshake(version_check);
		/* save the proposed version number for later use */
		kctx->api_version = KBASE_API_VERSION(version_check->major,
				version_check->minor);
		ukh->ret = MALI_ERROR_NONE;
		return 0;
	}

	/* block calls until version handshake */
	if (kctx->api_version == 0)
		return -EINVAL;

	if (!atomic_read(&kctx->setup_complete)) {
		struct kbase_uk_set_flags *kbase_set_flags;

		/* setup pending, try to signal that we'll do the setup;
		 * if setup was already in progress, fail this call
		 */
		if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0)
			return -EINVAL;

		/* if unexpected call, will stay stuck in setup mode
		 * (is it the only call we accept?)
		 */
		if (id != KBASE_FUNC_SET_FLAGS)
			return -EINVAL;

		kbase_set_flags = (struct kbase_uk_set_flags *)args;

		/* if not matching the expected call, stay in setup mode */
		if (sizeof(*kbase_set_flags) != args_size)
			goto bad_size;

		/* if bad flags, will stay stuck in setup mode */
		if (kbase_context_set_create_flags(kctx,
				kbase_set_flags->create_flags) != 0)
			ukh->ret = MALI_ERROR_FUNCTION_FAILED;

		atomic_set(&kctx->setup_complete, 1);
		return 0;
	}

	/* setup complete, perform normal operation */
	switch (id) {
	case KBASE_FUNC_MEM_ALLOC:
		{
			struct kbase_uk_mem_alloc *mem = args;
			struct kbase_va_region *reg;

			if (sizeof(*mem) != args_size)
				goto bad_size;

			reg = kbase_mem_alloc(kctx, mem->va_pages,
					mem->commit_pages, mem->extent,
					&mem->flags, &mem->gpu_va,
					&mem->va_alignment);
			if (!reg)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
	case KBASE_FUNC_MEM_IMPORT:
		{
			struct kbase_uk_mem_import *mem_import = args;
			void __user *phandle;

			if (sizeof(*mem_import) != args_size)
				goto bad_size;
#ifdef CONFIG_COMPAT
			if (kctx->is_compat)
				phandle = compat_ptr(mem_import->phandle.compat_value);
			else
#endif
				phandle = mem_import->phandle.value;

			if (mem_import->type == BASE_MEM_IMPORT_TYPE_INVALID) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (kbase_mem_import(kctx, mem_import->type, phandle,
						&mem_import->gpu_va,
						&mem_import->va_pages,
						&mem_import->flags)) {
				mem_import->type = BASE_MEM_IMPORT_TYPE_INVALID;
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			}
			break;
		}
	case KBASE_FUNC_MEM_ALIAS:
		{
			struct kbase_uk_mem_alias *alias = args;
			struct base_mem_aliasing_info __user *user_ai;
			struct base_mem_aliasing_info *ai;

			if (sizeof(*alias) != args_size)
				goto bad_size;

			if (alias->nents > 2048) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}
			if (!alias->nents) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

#ifdef CONFIG_COMPAT
			if (kctx->is_compat)
				user_ai = compat_ptr(alias->ai.compat_value);
			else
#endif
				user_ai = alias->ai.value;

			ai = vmalloc(sizeof(*ai) * alias->nents);

			if (!ai) {
				ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
				break;
			}

			if (copy_from_user(ai, user_ai,
					   sizeof(*ai) * alias->nents)) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				goto copy_failed;
			}

			alias->gpu_va = kbase_mem_alias(kctx, &alias->flags,
							alias->stride,
							alias->nents, ai,
							&alias->va_pages);
			if (!alias->gpu_va) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				goto no_alias;
			}
no_alias:
copy_failed:
			vfree(ai);
			break;
		}
	case KBASE_FUNC_MEM_COMMIT:
		{
			struct kbase_uk_mem_commit *commit = args;

			if (sizeof(*commit) != args_size)
				goto bad_size;

			if (commit->gpu_addr & ~PAGE_MASK) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_COMMIT: commit->gpu_addr: passed parameter is invalid");
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (kbase_mem_commit(kctx, commit->gpu_addr,
					commit->pages,
					(base_backing_threshold_status *)
					&commit->result_subcode) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;

			break;
		}

	case KBASE_FUNC_MEM_QUERY:
		{
			struct kbase_uk_mem_query *query = args;

			if (sizeof(*query) != args_size)
				goto bad_size;

			if (query->gpu_addr & ~PAGE_MASK) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->gpu_addr: passed parameter is invalid");
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}
			if (query->query != KBASE_MEM_QUERY_COMMIT_SIZE &&
			    query->query != KBASE_MEM_QUERY_VA_SIZE &&
			    query->query != KBASE_MEM_QUERY_FLAGS) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->query = %lld unknown", (unsigned long long)query->query);
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (kbase_mem_query(kctx, query->gpu_addr,
					query->query, &query->value) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			else
				ukh->ret = MALI_ERROR_NONE;
			break;
		}

	case KBASE_FUNC_MEM_FLAGS_CHANGE:
		{
			struct kbase_uk_mem_flags_change *fc = args;

			if (sizeof(*fc) != args_size)
				goto bad_size;

			if ((fc->gpu_va & ~PAGE_MASK) && (fc->gpu_va >= PAGE_SIZE)) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FLAGS_CHANGE: mem->gpu_va: passed parameter is invalid");
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (kbase_mem_flags_change(kctx, fc->gpu_va,
					fc->flags, fc->mask) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;

			break;
		}
	case KBASE_FUNC_MEM_FREE:
		{
			struct kbase_uk_mem_free *mem = args;

			if (sizeof(*mem) != args_size)
				goto bad_size;

			if ((mem->gpu_addr & ~PAGE_MASK) && (mem->gpu_addr >= PAGE_SIZE)) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FREE: mem->gpu_addr: passed parameter is invalid");
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (kbase_mem_free(kctx, mem->gpu_addr) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}

	case KBASE_FUNC_JOB_SUBMIT:
		{
			struct kbase_uk_job_submit *job = args;

			if (sizeof(*job) != args_size)
				goto bad_size;

#ifdef BASE_LEGACY_UK6_SUPPORT
			if (kbase_jd_submit(kctx, job, 0) != 0)
#else
			if (kbase_jd_submit(kctx, job) != 0)
#endif /* BASE_LEGACY_UK6_SUPPORT */
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}

#ifdef BASE_LEGACY_UK6_SUPPORT
	case KBASE_FUNC_JOB_SUBMIT_UK6:
		{
			struct kbase_uk_job_submit *job = args;

			if (sizeof(*job) != args_size)
				goto bad_size;

			if (kbase_jd_submit(kctx, job, 1) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
#endif

	case KBASE_FUNC_SYNC:
		{
			struct kbase_uk_sync_now *sn = args;

			if (sizeof(*sn) != args_size)
				goto bad_size;

			if (sn->sset.basep_sset.mem_handle & ~PAGE_MASK) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_SYNC: sn->sset.basep_sset.mem_handle: passed parameter is invalid");
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

#ifndef CONFIG_MALI_COH_USER
			if (kbase_sync_now(kctx, &sn->sset) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
#endif
			break;
		}

	case KBASE_FUNC_DISJOINT_QUERY:
		{
			struct kbase_uk_disjoint_query *dquery = args;

			if (sizeof(*dquery) != args_size)
				goto bad_size;

			/* Get the disjointness counter value. */
			dquery->counter = kbase_disjoint_event_get(kctx->kbdev);
			break;
		}

	case KBASE_FUNC_POST_TERM:
		{
			kbase_event_close(kctx);
			break;
		}

	case KBASE_FUNC_HWCNT_SETUP:
		{
			struct kbase_uk_hwcnt_setup *setup = args;

			if (sizeof(*setup) != args_size)
				goto bad_size;

			mutex_lock(&kctx->vinstr_cli_lock);
			if (kbase_vinstr_legacy_hwc_setup(kbdev->vinstr_ctx,
					&kctx->vinstr_cli, setup) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			mutex_unlock(&kctx->vinstr_cli_lock);
			break;
		}

	case KBASE_FUNC_HWCNT_DUMP:
		{
			/* args ignored */
			mutex_lock(&kctx->vinstr_cli_lock);
			if (kbase_vinstr_hwc_dump(kctx->vinstr_cli,
					BASE_HWCNT_READER_EVENT_MANUAL) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			mutex_unlock(&kctx->vinstr_cli_lock);
			break;
		}

	case KBASE_FUNC_HWCNT_CLEAR:
		{
			/* args ignored */
			mutex_lock(&kctx->vinstr_cli_lock);
			if (kbase_vinstr_hwc_clear(kctx->vinstr_cli) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			mutex_unlock(&kctx->vinstr_cli_lock);
			break;
		}

	case KBASE_FUNC_HWCNT_READER_SETUP:
		{
			struct kbase_uk_hwcnt_reader_setup *setup = args;

			if (sizeof(*setup) != args_size)
				goto bad_size;

			mutex_lock(&kctx->vinstr_cli_lock);
			if (kbase_vinstr_hwcnt_reader_setup(kbdev->vinstr_ctx,
					setup) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			mutex_unlock(&kctx->vinstr_cli_lock);
			break;
		}

	case KBASE_FUNC_GPU_PROPS_REG_DUMP:
		{
			struct kbase_uk_gpuprops *setup = args;

			if (sizeof(*setup) != args_size)
				goto bad_size;

			if (kbase_gpuprops_uk_get_props(kctx, setup) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
	case KBASE_FUNC_FIND_CPU_OFFSET:
		{
			struct kbase_uk_find_cpu_offset *find = args;

			if (sizeof(*find) != args_size)
				goto bad_size;

			if (find->gpu_addr & ~PAGE_MASK) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_FIND_CPU_OFFSET: find->gpu_addr: passed parameter is invalid");
				goto out_bad;
			}

			if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			} else {
				int err;

				err = kbasep_find_enclosing_cpu_mapping_offset(
						kctx,
						find->gpu_addr,
						(uintptr_t) find->cpu_addr,
						(size_t) find->size,
						&find->offset);

				if (err)
					ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			}
			break;
		}
	case KBASE_FUNC_GET_VERSION:
		{
			struct kbase_uk_get_ddk_version *get_version = (struct kbase_uk_get_ddk_version *)args;

			if (sizeof(*get_version) != args_size)
				goto bad_size;

			/* version buffer size check is made in compile time assert */
			memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
			get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
			get_version->rk_version = ROCKCHIP_VERSION;
			break;
		}

	case KBASE_FUNC_STREAM_CREATE:
		{
#ifdef CONFIG_SYNC
			struct kbase_uk_stream_create *screate = (struct kbase_uk_stream_create *)args;

			if (sizeof(*screate) != args_size)
				goto bad_size;

			if (strnlen(screate->name, sizeof(screate->name)) >= sizeof(screate->name)) {
				/* not NULL terminated */
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (kbase_stream_create(screate->name, &screate->fd) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			else
				ukh->ret = MALI_ERROR_NONE;
#else /* CONFIG_SYNC */
			ukh->ret = MALI_ERROR_FUNCTION_FAILED;
#endif /* CONFIG_SYNC */
			break;
		}
	case KBASE_FUNC_FENCE_VALIDATE:
		{
#ifdef CONFIG_SYNC
			struct kbase_uk_fence_validate *fence_validate = (struct kbase_uk_fence_validate *)args;

			if (sizeof(*fence_validate) != args_size)
				goto bad_size;

			if (kbase_fence_validate(fence_validate->fd) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			else
				ukh->ret = MALI_ERROR_NONE;
#endif /* CONFIG_SYNC */
			break;
		}

	case KBASE_FUNC_EXT_BUFFER_LOCK:
		{
#ifdef CONFIG_KDS
			ret = kbase_external_buffer_lock(kctx,
				(struct kbase_uk_ext_buff_kds_data *)args,
				args_size);
			switch (ret) {
			case 0:
				ukh->ret = MALI_ERROR_NONE;
				break;
			case -ENOMEM:
				ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
				break;
			default:
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			}
#endif /* CONFIG_KDS */
			break;
		}

	case KBASE_FUNC_SET_TEST_DATA:
		{
#if MALI_UNIT_TEST
			struct kbase_uk_set_test_data *set_data = args;

			shared_kernel_test_data = set_data->test_data;
			shared_kernel_test_data.kctx.value = (void __user *)kctx;
			shared_kernel_test_data.mm.value = (void __user *)current->mm;
			ukh->ret = MALI_ERROR_NONE;
#endif /* MALI_UNIT_TEST */
			break;
		}

	case KBASE_FUNC_INJECT_ERROR:
		{
#ifdef CONFIG_MALI_ERROR_INJECT
			unsigned long flags;
			struct kbase_error_params params = ((struct kbase_uk_error_params *)args)->params;

			/* mutex lock */
			spin_lock_irqsave(&kbdev->reg_op_lock, flags);
			if (job_atom_inject_error(&params) != 0)
				ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
			else
				ukh->ret = MALI_ERROR_NONE;
			spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
			/* mutex unlock */
#endif /* CONFIG_MALI_ERROR_INJECT */
			break;
		}

	case KBASE_FUNC_MODEL_CONTROL:
		{
#ifdef CONFIG_MALI_NO_MALI
			unsigned long flags;
			struct kbase_model_control_params params =
					((struct kbase_uk_model_control_params *)args)->params;

			/* mutex lock */
			spin_lock_irqsave(&kbdev->reg_op_lock, flags);
			if (gpu_model_control(kbdev->model, &params) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			else
				ukh->ret = MALI_ERROR_NONE;
			spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
			/* mutex unlock */
#endif /* CONFIG_MALI_NO_MALI */
			break;
		}

#ifdef BASE_LEGACY_UK8_SUPPORT
	case KBASE_FUNC_KEEP_GPU_POWERED:
		{
			dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_KEEP_GPU_POWERED: function is deprecated and disabled\n");
			ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
#endif /* BASE_LEGACY_UK8_SUPPORT */

	case KBASE_FUNC_GET_PROFILING_CONTROLS:
		{
			struct kbase_uk_profiling_controls *controls =
					(struct kbase_uk_profiling_controls *)args;
			u32 i;

			if (sizeof(*controls) != args_size)
				goto bad_size;

			for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
				controls->profiling_controls[i] = kbase_get_profiling_control(kbdev, i);

			break;
		}

	/* used only for testing purposes; these controls are to be set by gator through gator API */
	case KBASE_FUNC_SET_PROFILING_CONTROLS:
		{
			struct kbase_uk_profiling_controls *controls =
					(struct kbase_uk_profiling_controls *)args;
			u32 i;

			if (sizeof(*controls) != args_size)
				goto bad_size;

			for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
				_mali_profiling_control(i, controls->profiling_controls[i]);

			break;
		}

	case KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD:
		{
			struct kbase_uk_debugfs_mem_profile_add *add_data =
					(struct kbase_uk_debugfs_mem_profile_add *)args;
			char *buf;
			char __user *user_buf;

			if (sizeof(*add_data) != args_size)
				goto bad_size;

			if (add_data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
				dev_err(kbdev->dev, "buffer too big");
				goto out_bad;
			}

#ifdef CONFIG_COMPAT
			if (kctx->is_compat)
				user_buf = compat_ptr(add_data->buf.compat_value);
			else
#endif
				user_buf = add_data->buf.value;

			buf = kmalloc(add_data->len, GFP_KERNEL);
			if (!buf)
				goto out_bad;

			if (0 != copy_from_user(buf, user_buf, add_data->len)) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				kfree(buf);
				goto out_bad;
			}

			if (kbasep_mem_profile_debugfs_insert(kctx, buf,
							add_data->len)) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				kfree(buf);
				goto out_bad;
			}

			break;
		}

#ifdef CONFIG_MALI_NO_MALI
	case KBASE_FUNC_SET_PRFCNT_VALUES:
		{
			struct kbase_uk_prfcnt_values *params =
			  ((struct kbase_uk_prfcnt_values *)args);

			gpu_model_set_dummy_prfcnt_sample(params->data,
					params->size);

			break;
		}
#endif /* CONFIG_MALI_NO_MALI */

#ifdef CONFIG_MALI_MIPE_ENABLED
	case KBASE_FUNC_TLSTREAM_ACQUIRE:
		{
			struct kbase_uk_tlstream_acquire *tlstream_acquire =
				args;

			if (sizeof(*tlstream_acquire) != args_size)
				goto bad_size;

			if (0 != kbase_tlstream_acquire(
						kctx,
						&tlstream_acquire->fd)) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			} else if (0 <= tlstream_acquire->fd) {
				/* Summary stream was cleared during acquire.
				 * Create static timeline objects that will be
				 * read by client. */
				kbase_create_timeline_objects(kctx);
			}
			break;
		}
	case KBASE_FUNC_TLSTREAM_FLUSH:
		{
			struct kbase_uk_tlstream_flush *tlstream_flush =
				args;

			if (sizeof(*tlstream_flush) != args_size)
				goto bad_size;

			kbase_tlstream_flush_streams();
			break;
		}
#if MALI_UNIT_TEST
	case KBASE_FUNC_TLSTREAM_TEST:
		{
			struct kbase_uk_tlstream_test *tlstream_test = args;

			if (sizeof(*tlstream_test) != args_size)
				goto bad_size;

			kbase_tlstream_test(
					tlstream_test->tpw_count,
					tlstream_test->msg_delay,
					tlstream_test->msg_count,
					tlstream_test->aux_msg);
			break;
		}
	case KBASE_FUNC_TLSTREAM_STATS:
		{
			struct kbase_uk_tlstream_stats *tlstream_stats = args;

			if (sizeof(*tlstream_stats) != args_size)
				goto bad_size;

			kbase_tlstream_stats(
					&tlstream_stats->bytes_collected,
					&tlstream_stats->bytes_generated);
			break;
		}
#endif /* MALI_UNIT_TEST */
#endif /* CONFIG_MALI_MIPE_ENABLED */

	case KBASE_FUNC_GET_CONTEXT_ID:
		{
			struct kbase_uk_context_id *info = args;

			info->id = kctx->id;
			break;
		}

	default:
		dev_err(kbdev->dev, "unknown ioctl %u", id);
		goto out_bad;
	}

	return ret;

 bad_size:
	dev_err(kbdev->dev, "Wrong syscall size (%d) for %08x\n", args_size, id);
 out_bad:
	return -EINVAL;
}
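
/*
 * Summary of the call ordering kbase_dispatch() enforces on every client
 * (derived from the checks above, not an extension of them):
 *
 *	1. UKP_FUNC_ID_CHECK_VERSION - mandatory handshake; every other
 *	   call returns -EINVAL while kctx->api_version is still 0.
 *	2. KBASE_FUNC_SET_FLAGS      - one-shot context setup, guarded by
 *	   the setup_in_progress/setup_complete atomics.
 *	3. Any other KBASE_FUNC_*    - normal operation.
 */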

static struct kbase_device *to_kbase_device(struct device *dev)
{
	return dev_get_drvdata(dev);
}

/*
 * API to acquire the device list mutex and
 * return a pointer to the device list head
 */
const struct list_head *kbase_dev_list_get(void)
{
	mutex_lock(&kbase_dev_list_lock);
	return &kbase_dev_list;
}
KBASE_EXPORT_TEST_API(kbase_dev_list_get);

/* API to release the device list mutex */
void kbase_dev_list_put(const struct list_head *dev_list)
{
	mutex_unlock(&kbase_dev_list_lock);
}
KBASE_EXPORT_TEST_API(kbase_dev_list_put);
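
/*
 * Typical get/put pairing (a sketch; kbase_find_device() below is the
 * canonical in-tree user):
 *
 *	const struct list_head *dev_list = kbase_dev_list_get();
 *	struct list_head *entry;
 *
 *	list_for_each(entry, dev_list) {
 *		struct kbase_device *kbdev =
 *			list_entry(entry, struct kbase_device, entry);
 *		...
 *	}
 *	kbase_dev_list_put(dev_list);
 */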

/* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */
struct kbase_device *kbase_find_device(int minor)
{
	struct kbase_device *kbdev = NULL;
	struct list_head *entry;
	const struct list_head *dev_list = kbase_dev_list_get();

	list_for_each(entry, dev_list) {
		struct kbase_device *tmp;

		tmp = list_entry(entry, struct kbase_device, entry);
		if (tmp->mdev.minor == minor || minor == -1) {
			kbdev = tmp;
			get_device(kbdev->dev);
			break;
		}
	}
	kbase_dev_list_put(dev_list);

	return kbdev;
}
EXPORT_SYMBOL(kbase_find_device);

void kbase_release_device(struct kbase_device *kbdev)
{
	put_device(kbdev->dev);
}
EXPORT_SYMBOL(kbase_release_device);

static int kbase_open(struct inode *inode, struct file *filp)
{
	struct kbase_device *kbdev = NULL;
	struct kbase_context *kctx;
	int ret = 0;
#ifdef CONFIG_DEBUG_FS
	char kctx_name[64];
#endif

	kbdev = kbase_find_device(iminor(inode));

	if (!kbdev)
		return -ENODEV;

	kctx = kbase_create_context(kbdev, is_compat_task());
	if (!kctx) {
		ret = -ENOMEM;
		goto out;
	}

	init_waitqueue_head(&kctx->event_queue);
	filp->private_data = kctx;
	kctx->filp = filp;

	kctx->infinite_cache_active = kbdev->infinite_cache_active_default;

#ifdef CONFIG_DEBUG_FS
	snprintf(kctx_name, 64, "%d_%d", kctx->tgid, kctx->id);

	kctx->kctx_dentry = debugfs_create_dir(kctx_name,
			kbdev->debugfs_ctx_directory);

	if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
		ret = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_MALI_COH_USER
	/* if cache is completely coherent at hardware level, then remove the
	 * infinite cache control support from debugfs.
	 */
#else
	debugfs_create_bool("infinite_cache", 0644, kctx->kctx_dentry,
			(bool *)&(kctx->infinite_cache_active));
#endif /* CONFIG_MALI_COH_USER */

	mutex_init(&kctx->mem_profile_lock);

	kbasep_jd_debugfs_ctx_add(kctx);
	kbase_debug_mem_view_init(filp);

	kbase_debug_job_fault_context_init(kctx);

	kbase_mem_pool_debugfs_add(kctx->kctx_dentry, &kctx->mem_pool);

#endif /* CONFIG_DEBUG_FS */
1313
1314         dev_dbg(kbdev->dev, "created base context\n");
1315
1316         {
1317                 struct kbasep_kctx_list_element *element;
1318
1319                 element = kzalloc(sizeof(*element), GFP_KERNEL);
1320                 if (element) {
1321                         mutex_lock(&kbdev->kctx_list_lock);
1322                         element->kctx = kctx;
1323                         list_add(&element->link, &kbdev->kctx_list);
1324 #ifdef CONFIG_MALI_MIPE_ENABLED
1325                         kbase_tlstream_tl_new_ctx(
1326                                         element->kctx,
1327                                         (u32)(element->kctx->id));
1328 #endif
1329                         mutex_unlock(&kbdev->kctx_list_lock);
1330                 } else {
1331                         /* we don't treat this as a fail - just warn about it */
1332                         dev_warn(kbdev->dev, "couldn't add kctx to kctx_list\n");
1333                 }
1334         }
1335         return 0;
1336
1337  out:
1338         kbase_release_device(kbdev);
1339         return ret;
1340 }
1341
1342 static int kbase_release(struct inode *inode, struct file *filp)
1343 {
1344         struct kbase_context *kctx = filp->private_data;
1345         struct kbase_device *kbdev = kctx->kbdev;
1346         struct kbasep_kctx_list_element *element, *tmp;
1347         bool found_element = false;
1348
1349 #ifdef CONFIG_MALI_MIPE_ENABLED
1350         kbase_tlstream_tl_del_ctx(kctx);
1351 #endif
1352
1353 #ifdef CONFIG_DEBUG_FS
1354         debugfs_remove_recursive(kctx->kctx_dentry);
1355         kbasep_mem_profile_debugfs_remove(kctx);
1356         kbase_debug_job_fault_context_term(kctx);
1357 #endif
1358
1359         mutex_lock(&kbdev->kctx_list_lock);
1360         list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
1361                 if (element->kctx == kctx) {
1362                         list_del(&element->link);
1363                         kfree(element);
1364                         found_element = true;
1365                 }
1366         }
1367         mutex_unlock(&kbdev->kctx_list_lock);
1368         if (!found_element)
1369                 dev_warn(kbdev->dev, "kctx not in kctx_list\n");
1370
1371         filp->private_data = NULL;
1372
1373         mutex_lock(&kctx->vinstr_cli_lock);
1374         /* If this client was performing hwcnt dumping and did not explicitly
1375          * detach itself, remove it from the vinstr core now */
1376         if (kctx->vinstr_cli) {
1377                 struct kbase_uk_hwcnt_setup setup;
1378
1379                 setup.dump_buffer = 0llu;
1380                 kbase_vinstr_legacy_hwc_setup(
1381                                 kbdev->vinstr_ctx, &kctx->vinstr_cli, &setup);
1382         }
1383         mutex_unlock(&kctx->vinstr_cli_lock);
1384
1385         kbase_destroy_context(kctx);
1386
1387         dev_dbg(kbdev->dev, "deleted base context\n");
1388         kbase_release_device(kbdev);
1389         return 0;
1390 }
1391
1392 #define CALL_MAX_SIZE 536
1393
1394 static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1395 {
1396         u64 msg[(CALL_MAX_SIZE + 7) >> 3] = { 0xdeadbeefdeadbeefull };  /* alignment fixup */
1397         u32 size = _IOC_SIZE(cmd);
1398         struct kbase_context *kctx = filp->private_data;
1399
1400         if (size > CALL_MAX_SIZE)
1401                 return -ENOTTY;
1402
1403         if (0 != copy_from_user(&msg, (void __user *)arg, size)) {
1404                 dev_err(kctx->kbdev->dev, "failed to copy ioctl argument into kernel space\n");
1405                 return -EFAULT;
1406         }
1407
1408         if (kbase_dispatch(kctx, &msg, size) != 0)
1409                 return -EFAULT;
1410
1411         if (0 != copy_to_user((void __user *)arg, &msg, size)) {
1412                 dev_err(kctx->kbdev->dev, "failed to copy results of UK call back to user space\n");
1413                 return -EFAULT;
1414         }
1415         return 0;
1416 }
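/*
 * A sketch of the user-space side of this request/response protocol: the
 * same buffer is copied in, dispatched, and copied back out in place, so
 * every UK call is synchronous and at most CALL_MAX_SIZE bytes. The device
 * path, command encoding and message layout below are illustrative
 * assumptions, not definitions from this file:
 *
 *   int fd = open("/dev/mali0", O_RDWR | O_CLOEXEC); // O_CLOEXEC is
 *                                                    // enforced by
 *                                                    // kbase_check_flags()
 *   char msg[CALL_MAX_SIZE];                         // UK call header + args
 *   ioctl(fd, cmd, msg);                             // msg now holds the
 *                                                    // reply in place
 */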
1417
1418 static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
1419 {
1420         struct kbase_context *kctx = filp->private_data;
1421         struct base_jd_event_v2 uevent;
1422         int out_count = 0;
1423
1424         if (count < sizeof(uevent))
1425                 return -ENOBUFS;
1426
1427         do {
1428                 while (kbase_event_dequeue(kctx, &uevent)) {
1429                         if (out_count > 0)
1430                                 goto out;
1431
1432                         if (filp->f_flags & O_NONBLOCK)
1433                                 return -EAGAIN;
1434
1435                         if (wait_event_interruptible(kctx->event_queue,
1436                                         kbase_event_pending(kctx)) != 0)
1437                                 return -ERESTARTSYS;
1438                 }
1439                 if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
1440                         if (out_count == 0)
1441                                 return -EPIPE;
1442                         goto out;
1443                 }
1444
1445                 if (copy_to_user(buf, &uevent, sizeof(uevent)) != 0)
1446                         return -EFAULT;
1447
1448                 buf += sizeof(uevent);
1449                 out_count++;
1450                 count -= sizeof(uevent);
1451         } while (count >= sizeof(uevent));
1452
1453  out:
1454         return out_count * sizeof(uevent);
1455 }
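/*
 * A sketch of the consuming loop these read() semantics support (the event
 * buffer size and error handling are illustrative assumptions):
 *
 *   struct base_jd_event_v2 events[8];
 *   ssize_t n = read(fd, events, sizeof(events));
 *
 * On success n is a multiple of sizeof(struct base_jd_event_v2); a count
 * smaller than one event yields -ENOBUFS, and with O_NONBLOCK an empty
 * queue returns -EAGAIN instead of blocking.
 */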
1456
1457 static unsigned int kbase_poll(struct file *filp, poll_table *wait)
1458 {
1459         struct kbase_context *kctx = filp->private_data;
1460
1461         poll_wait(filp, &kctx->event_queue, wait);
1462         if (kbase_event_pending(kctx))
1463                 return POLLIN | POLLRDNORM;
1464
1465         return 0;
1466 }
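/*
 * kbase_poll() pairs with kbase_read() above: user space can wait for
 * POLLIN and then drain the event queue without blocking. A sketch (the
 * timeout value is illustrative):
 *
 *   struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *   if (poll(&pfd, 1, 100) > 0)
 *           read(fd, events, sizeof(events));
 */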
1467
1468 void kbase_event_wakeup(struct kbase_context *kctx)
1469 {
1470         KBASE_DEBUG_ASSERT(kctx);
1471
1472         wake_up_interruptible(&kctx->event_queue);
1473 }
1474
1475 KBASE_EXPORT_TEST_API(kbase_event_wakeup);
1476
1477 static int kbase_check_flags(int flags)
1478 {
1479         /* Enforce that the driver keeps the O_CLOEXEC flag so that execve() always
1480          * closes the file descriptor in a child process.
1481          */
1482         if (0 == (flags & O_CLOEXEC))
1483                 return -EINVAL;
1484
1485         return 0;
1486 }
1487
1488 #ifdef CONFIG_64BIT
1489 /* The following function is taken from the kernel and merely renamed.
1490  * Since it is not exported to modules, we must copy it here.
1491  */
1492
1493 static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info
1494                 *info)
1495 {
1496         struct mm_struct *mm = current->mm;
1497         struct vm_area_struct *vma;
1498         unsigned long length, low_limit, high_limit, gap_start, gap_end;
1499
1500         /* Adjust search length to account for worst case alignment overhead */
1501         length = info->length + info->align_mask;
1502         if (length < info->length)
1503                 return -ENOMEM;
1504
1505         /*
1506          * Adjust search limits by the desired length.
1507          * See implementation comment at top of unmapped_area().
1508          */
1509         gap_end = info->high_limit;
1510         if (gap_end < length)
1511                 return -ENOMEM;
1512         high_limit = gap_end - length;
1513
1514         if (info->low_limit > high_limit)
1515                 return -ENOMEM;
1516         low_limit = info->low_limit + length;
1517
1518         /* Check highest gap, which does not precede any rbtree node */
1519         gap_start = mm->highest_vm_end;
1520         if (gap_start <= high_limit)
1521                 goto found_highest;
1522
1523         /* Check if rbtree root looks promising */
1524         if (RB_EMPTY_ROOT(&mm->mm_rb))
1525                 return -ENOMEM;
1526         vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1527         if (vma->rb_subtree_gap < length)
1528                 return -ENOMEM;
1529
1530         while (true) {
1531                 /* Visit right subtree if it looks promising */
1532                 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
1533                 if (gap_start <= high_limit && vma->vm_rb.rb_right) {
1534                         struct vm_area_struct *right =
1535                                 rb_entry(vma->vm_rb.rb_right,
1536                                          struct vm_area_struct, vm_rb);
1537                         if (right->rb_subtree_gap >= length) {
1538                                 vma = right;
1539                                 continue;
1540                         }
1541                 }
1542
1543 check_current:
1544                 /* Check if current node has a suitable gap */
1545                 gap_end = vma->vm_start;
1546                 if (gap_end < low_limit)
1547                         return -ENOMEM;
1548                 if (gap_start <= high_limit && gap_end - gap_start >= length)
1549                         goto found;
1550
1551                 /* Visit left subtree if it looks promising */
1552                 if (vma->vm_rb.rb_left) {
1553                         struct vm_area_struct *left =
1554                                 rb_entry(vma->vm_rb.rb_left,
1555                                          struct vm_area_struct, vm_rb);
1556                         if (left->rb_subtree_gap >= length) {
1557                                 vma = left;
1558                                 continue;
1559                         }
1560                 }
1561
1562                 /* Go back up the rbtree to find next candidate node */
1563                 while (true) {
1564                         struct rb_node *prev = &vma->vm_rb;
1565                         if (!rb_parent(prev))
1566                                 return -ENOMEM;
1567                         vma = rb_entry(rb_parent(prev),
1568                                        struct vm_area_struct, vm_rb);
1569                         if (prev == vma->vm_rb.rb_right) {
1570                                 gap_start = vma->vm_prev ?
1571                                         vma->vm_prev->vm_end : 0;
1572                                 goto check_current;
1573                         }
1574                 }
1575         }
1576
1577 found:
1578         /* We found a suitable gap. Clip it with the original high_limit. */
1579         if (gap_end > info->high_limit)
1580                 gap_end = info->high_limit;
1581
1582 found_highest:
1583         /* Compute highest gap address at the desired alignment */
1584         gap_end -= info->length;
1585         gap_end -= (gap_end - info->align_offset) & info->align_mask;
1586
1587         VM_BUG_ON(gap_end < info->low_limit);
1588         VM_BUG_ON(gap_end < gap_start);
1589         return gap_end;
1590 }
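/*
 * Worked example of the final alignment step above, assuming the 2MB case
 * set up by kbase_get_unmapped_area() below (align_mask = SZ_2M - 1 =
 * 0x1fffff, align_offset = SZ_2M = 0x200000): if gap_end is 0x00301000
 * after subtracting info->length, then
 *   (0x00301000 - 0x200000) & 0x1fffff = 0x101000
 *   0x00301000 - 0x101000 = 0x200000
 * i.e. the returned address is rounded down to a 2MB boundary while
 * remaining inside the gap.
 */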
1591
1592
1593 static unsigned long kbase_get_unmapped_area(struct file *filp,
1594                 const unsigned long addr, const unsigned long len,
1595                 const unsigned long pgoff, const unsigned long flags)
1596 {
1597         /* Based on get_unmapped_area(), but simplified slightly because
1598          * some values are known in advance. */
1599         struct kbase_context *kctx = filp->private_data;
1600         struct mm_struct *mm = current->mm;
1601         struct vm_unmapped_area_info info;
1602
1603         /* err on fixed address */
1604         if ((flags & MAP_FIXED) || addr)
1605                 return -EINVAL;
1606
1607         /* too big? */
1608         if (len > TASK_SIZE - SZ_2M)
1609                 return -ENOMEM;
1610
1611         if (kctx->is_compat)
1612                 return current->mm->get_unmapped_area(filp, addr, len, pgoff,
1613                                 flags);
1614
1615         if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA)) {
1616                 info.high_limit = 1ul << 33;
1617                 info.align_mask = 0;
1618                 info.align_offset = 0;
1619         } else {
1620                 info.high_limit = mm->mmap_base;
1621                 if (len >= SZ_2M) {
1622                         info.align_offset = SZ_2M;
1623                         info.align_mask = SZ_2M - 1;
1624                 } else {
1625                         info.align_mask = 0;
1626                         info.align_offset = 0;
1627                 }
1628         }
1629
1630         info.flags = 0;
1631         info.length = len;
1632         info.low_limit = SZ_2M;
1633         return kbase_unmapped_area_topdown(&info);
1634 }
1635 #endif
1636
1637 static const struct file_operations kbase_fops = {
1638         .owner = THIS_MODULE,
1639         .open = kbase_open,
1640         .release = kbase_release,
1641         .read = kbase_read,
1642         .poll = kbase_poll,
1643         .unlocked_ioctl = kbase_ioctl,
1644         .compat_ioctl = kbase_ioctl,
1645         .mmap = kbase_mmap,
1646         .check_flags = kbase_check_flags,
1647 #ifdef CONFIG_64BIT
1648         .get_unmapped_area = kbase_get_unmapped_area,
1649 #endif
1650 };
1651
1652 #ifndef CONFIG_MALI_NO_MALI
1653 void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value)
1654 {
1655         writel(value, kbdev->reg + offset);
1656 }
1657
1658 u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset)
1659 {
1660         return readl(kbdev->reg + offset);
1661 }
1662 #endif /* !CONFIG_MALI_NO_MALI */
1663
1664
1665 /** Show callback for the @c power_policy sysfs file.
1666  *
1667  * This function is called to get the contents of the @c power_policy sysfs
1668  * file. This is a list of the available policies with the currently active one
1669  * surrounded by square brackets.
1670  *
1671  * @param dev   The device this sysfs file is for
1672  * @param attr  The attributes of the sysfs file
1673  * @param buf   The output buffer for the sysfs file contents
1674  *
1675  * @return The number of bytes output to @c buf.
1676  */
1677 static ssize_t show_policy(struct device *dev, struct device_attribute *attr, char *const buf)
1678 {
1679         struct kbase_device *kbdev;
1680         const struct kbase_pm_policy *current_policy;
1681         const struct kbase_pm_policy *const *policy_list;
1682         int policy_count;
1683         int i;
1684         ssize_t ret = 0;
1685
1686         kbdev = to_kbase_device(dev);
1687
1688         if (!kbdev)
1689                 return -ENODEV;
1690
1691         current_policy = kbase_pm_get_policy(kbdev);
1692
1693         policy_count = kbase_pm_list_policies(&policy_list);
1694
1695         for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
1696                 if (policy_list[i] == current_policy)
1697                         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
1698                 else
1699                         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
1700         }
1701
1702         if (ret < PAGE_SIZE - 1) {
1703                 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
1704         } else {
1705                 buf[PAGE_SIZE - 2] = '\n';
1706                 buf[PAGE_SIZE - 1] = '\0';
1707                 ret = PAGE_SIZE - 1;
1708         }
1709
1710         return ret;
1711 }
1712
1713 /** Store callback for the @c power_policy sysfs file.
1714  *
1715  * This function is called when the @c power_policy sysfs file is written to.
1716  * It matches the requested policy against the available policies and if a
1717  * matching policy is found calls @ref kbase_pm_set_policy to change the
1718  * policy.
1719  *
1720  * @param dev   The device this sysfs file is for
1721  * @param attr  The attributes of the sysfs file
1722  * @param buf   The value written to the sysfs file
1723  * @param count The number of bytes written to the sysfs file
1724  *
1725  * @return @c count if the function succeeded. An error code on failure.
1726  */
1727 static ssize_t set_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1728 {
1729         struct kbase_device *kbdev;
1730         const struct kbase_pm_policy *new_policy = NULL;
1731         const struct kbase_pm_policy *const *policy_list;
1732         int policy_count;
1733         int i;
1734
1735         kbdev = to_kbase_device(dev);
1736
1737         if (!kbdev)
1738                 return -ENODEV;
1739
1740         policy_count = kbase_pm_list_policies(&policy_list);
1741
1742         for (i = 0; i < policy_count; i++) {
1743                 if (sysfs_streq(policy_list[i]->name, buf)) {
1744                         new_policy = policy_list[i];
1745                         break;
1746                 }
1747         }
1748
1749         if (!new_policy) {
1750                 dev_err(dev, "power_policy: policy not found\n");
1751                 return -EINVAL;
1752         }
1753
1754         kbase_pm_set_policy(kbdev, new_policy);
1755
1756         return count;
1757 }
1758
1759 /** The sysfs file @c power_policy.
1760  *
1761  * This is used for obtaining information about the available policies,
1762  * determining which policy is currently active, and changing the active
1763  * policy.
1764  */
1765 static DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
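/*
 * Example interaction from user space (the sysfs path and the set of policy
 * names are illustrative; the actual names come from
 * kbase_pm_list_policies()):
 *
 *   $ cat /sys/class/misc/mali0/device/power_policy
 *   [demand] coarse_demand always_on
 *   $ echo always_on > /sys/class/misc/mali0/device/power_policy
 */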
1766
1767 /** Show callback for the @c core_availability_policy sysfs file.
1768  *
1769  * This function is called to get the contents of the @c core_availability_policy
1770  * sysfs file. This is a list of the available policies with the currently
1771  * active one surrounded by square brackets.
1772  *
1773  * @param dev   The device this sysfs file is for
1774  * @param attr  The attributes of the sysfs file
1775  * @param buf   The output buffer for the sysfs file contents
1776  *
1777  * @return The number of bytes output to @c buf.
1778  */
1779 static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char * const buf)
1780 {
1781         struct kbase_device *kbdev;
1782         const struct kbase_pm_ca_policy *current_policy;
1783         const struct kbase_pm_ca_policy *const *policy_list;
1784         int policy_count;
1785         int i;
1786         ssize_t ret = 0;
1787
1788         kbdev = to_kbase_device(dev);
1789
1790         if (!kbdev)
1791                 return -ENODEV;
1792
1793         current_policy = kbase_pm_ca_get_policy(kbdev);
1794
1795         policy_count = kbase_pm_ca_list_policies(&policy_list);
1796
1797         for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
1798                 if (policy_list[i] == current_policy)
1799                         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
1800                 else
1801                         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
1802         }
1803
1804         if (ret < PAGE_SIZE - 1) {
1805                 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
1806         } else {
1807                 buf[PAGE_SIZE - 2] = '\n';
1808                 buf[PAGE_SIZE - 1] = '\0';
1809                 ret = PAGE_SIZE - 1;
1810         }
1811
1812         return ret;
1813 }
1814
1815 /** Store callback for the @c core_availability_policy sysfs file.
1816  *
1817  * This function is called when the @c core_availability_policy sysfs file is
1818  * written to. It matches the requested policy against the available policies
1819  * and, if a matching policy is found, calls @ref kbase_pm_ca_set_policy to
1820  * change the policy.
1821  *
1822  * @param dev   The device this sysfs file is for
1823  * @param attr  The attributes of the sysfs file
1824  * @param buf   The value written to the sysfs file
1825  * @param count The number of bytes written to the sysfs file
1826  *
1827  * @return @c count if the function succeeded. An error code on failure.
1828  */
1829 static ssize_t set_ca_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1830 {
1831         struct kbase_device *kbdev;
1832         const struct kbase_pm_ca_policy *new_policy = NULL;
1833         const struct kbase_pm_ca_policy *const *policy_list;
1834         int policy_count;
1835         int i;
1836
1837         kbdev = to_kbase_device(dev);
1838
1839         if (!kbdev)
1840                 return -ENODEV;
1841
1842         policy_count = kbase_pm_ca_list_policies(&policy_list);
1843
1844         for (i = 0; i < policy_count; i++) {
1845                 if (sysfs_streq(policy_list[i]->name, buf)) {
1846                         new_policy = policy_list[i];
1847                         break;
1848                 }
1849         }
1850
1851         if (!new_policy) {
1852                 dev_err(dev, "core_availability_policy: policy not found\n");
1853                 return -EINVAL;
1854         }
1855
1856         kbase_pm_ca_set_policy(kbdev, new_policy);
1857
1858         return count;
1859 }
1860
1861 /** The sysfs file @c core_availability_policy
1862  *
1863  * This is used for obtaining information about the available policies,
1864  * determining which policy is currently active, and changing the active
1865  * policy.
1866  */
1867 static DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_policy);
1868
1869 /** Show callback for the @c core_mask sysfs file.
1870  *
1871  * This function is called to get the contents of the @c core_mask sysfs
1872  * file.
1873  *
1874  * @param dev   The device this sysfs file is for
1875  * @param attr  The attributes of the sysfs file
1876  * @param buf   The output buffer for the sysfs file contents
1877  *
1878  * @return The number of bytes output to @c buf.
1879  */
1880 static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char * const buf)
1881 {
1882         struct kbase_device *kbdev;
1883         ssize_t ret = 0;
1884
1885         kbdev = to_kbase_device(dev);
1886
1887         if (!kbdev)
1888                 return -ENODEV;
1889
1890         ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1891                         "Current core mask (JS0) : 0x%llX\n",
1892                         kbdev->pm.debug_core_mask[0]);
1893         ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1894                         "Current core mask (JS1) : 0x%llX\n",
1895                         kbdev->pm.debug_core_mask[1]);
1896         ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1897                         "Current core mask (JS2) : 0x%llX\n",
1898                         kbdev->pm.debug_core_mask[2]);
1899         ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1900                         "Available core mask : 0x%llX\n",
1901                         kbdev->gpu_props.props.raw_props.shader_present);
1902
1903         return ret;
1904 }
1905
1906 /** Store callback for the @c core_mask sysfs file.
1907  *
1908  * This function is called when the @c core_mask sysfs file is written to.
1909  *
1910  * @param dev   The device this sysfs file is for
1911  * @param attr  The attributes of the sysfs file
1912  * @param buf   The value written to the sysfs file
1913  * @param count The number of bytes written to the sysfs file
1914  *
1915  * @return @c count if the function succeeded. An error code on failure.
1916  */
1917 static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1918 {
1919         struct kbase_device *kbdev;
1920         u64 new_core_mask[3];
1921         int items;
1922
1923         kbdev = to_kbase_device(dev);
1924
1925         if (!kbdev)
1926                 return -ENODEV;
1927
1928         items = sscanf(buf, "%llx %llx %llx",
1929                         &new_core_mask[0], &new_core_mask[1],
1930                         &new_core_mask[2]);
1931
1932         if (items == 1)
1933                 new_core_mask[1] = new_core_mask[2] = new_core_mask[0];
1934
1935         if (items == 1 || items == 3) {
1936                 u64 shader_present =
1937                                 kbdev->gpu_props.props.raw_props.shader_present;
1938                 u64 group0_core_mask =
1939                                 kbdev->gpu_props.props.coherency_info.group[0].
1940                                 core_mask;
1941
1942                 if ((new_core_mask[0] & shader_present) != new_core_mask[0] ||
1943                                 !(new_core_mask[0] & group0_core_mask) ||
1944                         (new_core_mask[1] & shader_present) !=
1945                                                 new_core_mask[1] ||
1946                                 !(new_core_mask[1] & group0_core_mask) ||
1947                         (new_core_mask[2] & shader_present) !=
1948                                                 new_core_mask[2] ||
1949                                 !(new_core_mask[2] & group0_core_mask)) {
1950                         dev_err(dev, "power_policy: invalid core specification\n");
1951                         return -EINVAL;
1952                 }
1953
1954                 if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] ||
1955                                 kbdev->pm.debug_core_mask[1] !=
1956                                                 new_core_mask[1] ||
1957                                 kbdev->pm.debug_core_mask[2] !=
1958                                                 new_core_mask[2]) {
1959                         unsigned long flags;
1960
1961                         spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
1962
1963                         kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0],
1964                                         new_core_mask[1], new_core_mask[2]);
1965
1966                         spin_unlock_irqrestore(&kbdev->pm.power_change_lock,
1967                                         flags);
1968                 }
1969
1970                 return count;
1971         }
1972
1973         dev_err(kbdev->dev, "Couldn't process set_core_mask write operation.\n"
1974                 "Use format <core_mask>\n"
1975                 "or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
1976         return -EINVAL;
1977 }
1978
1979 /** The sysfs file @c core_mask.
1980  *
1981  * This is used to restrict shader core availability for debugging purposes.
1982  * Reading it will show the current core mask and the mask of cores available.
1983  * Writing to it will set the current core mask.
1984  */
1985 static DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask);
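/*
 * Example writes accepted by set_core_mask() above (the mask values are
 * illustrative; each mask must be a subset of the available core mask
 * reported on read and must intersect core group 0):
 *
 *   $ echo 0xf > core_mask            # one mask applied to JS0, JS1 and JS2
 *   $ echo 0xf 0x3 0xf > core_mask    # separate mask per job slot
 */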
1986
1987 /** Store callback for the @c js_timeouts sysfs file.
1988  *
1989  * This function is called when the @c js_timeouts sysfs file is written
1990  * to. The file contains eight whitespace-separated values: the
1991  * JS_SOFT_STOP_TICKS, JS_SOFT_STOP_TICKS_CL, JS_HARD_STOP_TICKS_SS,
1992  * JS_HARD_STOP_TICKS_CL, JS_HARD_STOP_TICKS_DUMPING, JS_RESET_TICKS_SS,
1993  * JS_RESET_TICKS_CL and JS_RESET_TICKS_DUMPING configuration values (in
1994  * that order), except that the js_timeouts values are in MILLISECONDS.
1995  *
1996  * The js_timeouts sysfs file allows the values currently in use by the
1997  * job scheduler to be overridden. Note that a value needs to be non-zero
1998  * for it to override the current job scheduler value.
1999  *
2000  * @param dev   The device this sysfs file is for
2001  * @param attr  The attributes of the sysfs file
2002  * @param buf   The value written to the sysfs file
2003  * @param count The number of bytes written to the sysfs file
2004  *
2005  * @return @c count if the function succeeded. An error code on failure.
2006  */
2007 static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2008 {
2009         struct kbase_device *kbdev;
2010         int items;
2011         long js_soft_stop_ms;
2012         long js_soft_stop_ms_cl;
2013         long js_hard_stop_ms_ss;
2014         long js_hard_stop_ms_cl;
2015         long js_hard_stop_ms_dumping;
2016         long js_reset_ms_ss;
2017         long js_reset_ms_cl;
2018         long js_reset_ms_dumping;
2019
2020         kbdev = to_kbase_device(dev);
2021         if (!kbdev)
2022                 return -ENODEV;
2023
2024         items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld",
2025                         &js_soft_stop_ms, &js_soft_stop_ms_cl,
2026                         &js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
2027                         &js_hard_stop_ms_dumping, &js_reset_ms_ss,
2028                         &js_reset_ms_cl, &js_reset_ms_dumping);
2029
2030         if (items == 8) {
2031                 u64 ticks;
2032
2033                 if (js_soft_stop_ms >= 0) {
2034                         ticks = js_soft_stop_ms * 1000000ULL;
2035                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2036                         kbdev->js_soft_stop_ticks = ticks;
2037                 } else {
2038                         kbdev->js_soft_stop_ticks = -1;
2039                 }
2040
2041                 if (js_soft_stop_ms_cl >= 0) {
2042                         ticks = js_soft_stop_ms_cl * 1000000ULL;
2043                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2044                         kbdev->js_soft_stop_ticks_cl = ticks;
2045                 } else {
2046                         kbdev->js_soft_stop_ticks_cl = -1;
2047                 }
2048
2049                 if (js_hard_stop_ms_ss >= 0) {
2050                         ticks = js_hard_stop_ms_ss * 1000000ULL;
2051                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2052                         kbdev->js_hard_stop_ticks_ss = ticks;
2053                 } else {
2054                         kbdev->js_hard_stop_ticks_ss = -1;
2055                 }
2056
2057                 if (js_hard_stop_ms_cl >= 0) {
2058                         ticks = js_hard_stop_ms_cl * 1000000ULL;
2059                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2060                         kbdev->js_hard_stop_ticks_cl = ticks;
2061                 } else {
2062                         kbdev->js_hard_stop_ticks_cl = -1;
2063                 }
2064
2065                 if (js_hard_stop_ms_dumping >= 0) {
2066                         ticks = js_hard_stop_ms_dumping * 1000000ULL;
2067                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2068                         kbdev->js_hard_stop_ticks_dumping = ticks;
2069                 } else {
2070                         kbdev->js_hard_stop_ticks_dumping = -1;
2071                 }
2072
2073                 if (js_reset_ms_ss >= 0) {
2074                         ticks = js_reset_ms_ss * 1000000ULL;
2075                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2076                         kbdev->js_reset_ticks_ss = ticks;
2077                 } else {
2078                         kbdev->js_reset_ticks_ss = -1;
2079                 }
2080
2081                 if (js_reset_ms_cl >= 0) {
2082                         ticks = js_reset_ms_cl * 1000000ULL;
2083                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2084                         kbdev->js_reset_ticks_cl = ticks;
2085                 } else {
2086                         kbdev->js_reset_ticks_cl = -1;
2087                 }
2088
2089                 if (js_reset_ms_dumping >= 0) {
2090                         ticks = js_reset_ms_dumping * 1000000ULL;
2091                         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2092                         kbdev->js_reset_ticks_dumping = ticks;
2093                 } else {
2094                         kbdev->js_reset_ticks_dumping = -1;
2095                 }
2096
2097                 kbdev->js_timeouts_updated = true;
2098
2099                 dev_dbg(kbdev->dev, "Overriding JS_SOFT_STOP_TICKS with %lu ticks (%lu ms)\n",
2100                                 (unsigned long)kbdev->js_soft_stop_ticks,
2101                                 js_soft_stop_ms);
2102                 dev_dbg(kbdev->dev, "Overriding JS_SOFT_STOP_TICKS_CL with %lu ticks (%lu ms)\n",
2103                                 (unsigned long)kbdev->js_soft_stop_ticks_cl,
2104                                 js_soft_stop_ms_cl);
2105                 dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_SS with %lu ticks (%lu ms)\n",
2106                                 (unsigned long)kbdev->js_hard_stop_ticks_ss,
2107                                 js_hard_stop_ms_ss);
2108                 dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_CL with %lu ticks (%lu ms)\n",
2109                                 (unsigned long)kbdev->js_hard_stop_ticks_cl,
2110                                 js_hard_stop_ms_cl);
2111                 dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_DUMPING with %lu ticks (%lu ms)\n",
2112                                 (unsigned long)
2113                                         kbdev->js_hard_stop_ticks_dumping,
2114                                 js_hard_stop_ms_dumping);
2115                 dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_SS with %lu ticks (%lu ms)\n",
2116                                 (unsigned long)kbdev->js_reset_ticks_ss,
2117                                 js_reset_ms_ss);
2118                 dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_CL with %lu ticks (%lu ms)\n",
2119                                 (unsigned long)kbdev->js_reset_ticks_cl,
2120                                 js_reset_ms_cl);
2121                 dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_DUMPING with %lu ticks (%lu ms)\n",
2122                                 (unsigned long)kbdev->js_reset_ticks_dumping,
2123                                 js_reset_ms_dumping);
2124
2125                 return count;
2126         }
2127
2128         dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n"
2129                         "Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> <hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> <reset_ms_dumping>\n"
2130                         "Write 0 for no change, -1 to restore default timeout\n");
2131         return -EINVAL;
2132 }
2133
2134 /** Show callback for the @c js_timeouts sysfs file.
2135  *
2136  * This function is called to get the contents of the @c js_timeouts sysfs
2137  * file. It returns the last set values written to the js_timeouts sysfs file.
2138  * If the file has not been written yet, the values shown are the settings
2139  * currently in use.
2140  * @param dev   The device this sysfs file is for
2141  * @param attr  The attributes of the sysfs file
2142  * @param buf   The output buffer for the sysfs file contents
2143  *
2144  * @return The number of bytes output to @c buf.
2145  */
2146 static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *attr, char * const buf)
2147 {
2148         struct kbase_device *kbdev;
2149         ssize_t ret;
2150         u64 ms;
2151         unsigned long js_soft_stop_ms;
2152         unsigned long js_soft_stop_ms_cl;
2153         unsigned long js_hard_stop_ms_ss;
2154         unsigned long js_hard_stop_ms_cl;
2155         unsigned long js_hard_stop_ms_dumping;
2156         unsigned long js_reset_ms_ss;
2157         unsigned long js_reset_ms_cl;
2158         unsigned long js_reset_ms_dumping;
2159         unsigned long ticks;
2160         u32 scheduling_period_ns;
2161
2162         kbdev = to_kbase_device(dev);
2163         if (!kbdev)
2164                 return -ENODEV;
2165
2166         /* If no contexts have been scheduled since js_timeouts was last written
2167          * to, the new timeouts might not have been latched yet. So check if an
2168          * update is pending and use the new values if necessary. */
2169         if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns > 0)
2170                 scheduling_period_ns = kbdev->js_scheduling_period_ns;
2171         else
2172                 scheduling_period_ns = kbdev->js_data.scheduling_period_ns;
2173
2174         if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks > 0)
2175                 ticks = kbdev->js_soft_stop_ticks;
2176         else
2177                 ticks = kbdev->js_data.soft_stop_ticks;
2178         ms = (u64)ticks * scheduling_period_ns;
2179         do_div(ms, 1000000UL);
2180         js_soft_stop_ms = (unsigned long)ms;
2181
2182         if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks_cl > 0)
2183                 ticks = kbdev->js_soft_stop_ticks_cl;
2184         else
2185                 ticks = kbdev->js_data.soft_stop_ticks_cl;
2186         ms = (u64)ticks * scheduling_period_ns;
2187         do_div(ms, 1000000UL);
2188         js_soft_stop_ms_cl = (unsigned long)ms;
2189
2190         if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_ss > 0)
2191                 ticks = kbdev->js_hard_stop_ticks_ss;
2192         else
2193                 ticks = kbdev->js_data.hard_stop_ticks_ss;
2194         ms = (u64)ticks * scheduling_period_ns;
2195         do_div(ms, 1000000UL);
2196         js_hard_stop_ms_ss = (unsigned long)ms;
2197
2198         if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_cl > 0)
2199                 ticks = kbdev->js_hard_stop_ticks_cl;
2200         else
2201                 ticks = kbdev->js_data.hard_stop_ticks_cl;
2202         ms = (u64)ticks * scheduling_period_ns;
2203         do_div(ms, 1000000UL);
2204         js_hard_stop_ms_cl = (unsigned long)ms;
2205
2206         if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_dumping > 0)
2207                 ticks = kbdev->js_hard_stop_ticks_dumping;
2208         else
2209                 ticks = kbdev->js_data.hard_stop_ticks_dumping;
2210         ms = (u64)ticks * scheduling_period_ns;
2211         do_div(ms, 1000000UL);
2212         js_hard_stop_ms_dumping = (unsigned long)ms;
2213
2214         if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_ss > 0)
2215                 ticks = kbdev->js_reset_ticks_ss;
2216         else
2217                 ticks = kbdev->js_data.gpu_reset_ticks_ss;
2218         ms = (u64)ticks * scheduling_period_ns;
2219         do_div(ms, 1000000UL);
2220         js_reset_ms_ss = (unsigned long)ms;
2221
2222         if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_cl > 0)
2223                 ticks = kbdev->js_reset_ticks_cl;
2224         else
2225                 ticks = kbdev->js_data.gpu_reset_ticks_cl;
2226         ms = (u64)ticks * scheduling_period_ns;
2227         do_div(ms, 1000000UL);
2228         js_reset_ms_cl = (unsigned long)ms;
2229
2230         if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_dumping > 0)
2231                 ticks = kbdev->js_reset_ticks_dumping;
2232         else
2233                 ticks = kbdev->js_data.gpu_reset_ticks_dumping;
2234         ms = (u64)ticks * scheduling_period_ns;
2235         do_div(ms, 1000000UL);
2236         js_reset_ms_dumping = (unsigned long)ms;
2237
2238         ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
2239                         js_soft_stop_ms, js_soft_stop_ms_cl,
2240                         js_hard_stop_ms_ss, js_hard_stop_ms_cl,
2241                         js_hard_stop_ms_dumping, js_reset_ms_ss,
2242                         js_reset_ms_cl, js_reset_ms_dumping);
2243
2244         if (ret >= PAGE_SIZE) {
2245                 buf[PAGE_SIZE - 2] = '\n';
2246                 buf[PAGE_SIZE - 1] = '\0';
2247                 ret = PAGE_SIZE - 1;
2248         }
2249
2250         return ret;
2251 }
2252
2253 /** The sysfs file @c js_timeouts.
2254  *
2255  * This is used to override the current job scheduler values for
2256  * JS_SOFT_STOP_TICKS
2257  * JS_SOFT_STOP_TICKS_CL
2258  * JS_HARD_STOP_TICKS_SS
2259  * JS_HARD_STOP_TICKS_CL
2260  * JS_HARD_STOP_TICKS_DUMPING
2261  * JS_RESET_TICKS_SS
2262  * JS_RESET_TICKS_CL
2263  * JS_RESET_TICKS_DUMPING.
2264  */
2265 static DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts);
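/*
 * Example write, matching the eight-value millisecond format parsed by
 * set_js_timeouts() above (the numbers are illustrative):
 *
 *   $ echo 500 500 1000 1000 2000 3000 3000 4000 > js_timeouts
 *
 * Per the handler, 0 leaves a timeout unchanged and -1 restores its default.
 */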
2266
2267 /**
2268  * set_js_scheduling_period - Store callback for the js_scheduling_period sysfs
2269  *                            file
2270  * @dev:   The device the sysfs file is for
2271  * @attr:  The attributes of the sysfs file
2272  * @buf:   The value written to the sysfs file
2273  * @count: The number of bytes written to the sysfs file
2274  *
2275  * This function is called when the js_scheduling_period sysfs file is written
2276  * to. It checks the data written and, if valid, updates the
2277  * js_scheduling_period value.
2278  *
2279  * Return: @c count if the function succeeded. An error code on failure.
2280  */
2281 static ssize_t set_js_scheduling_period(struct device *dev,
2282                 struct device_attribute *attr, const char *buf, size_t count)
2283 {
2284         struct kbase_device *kbdev;
2285         int ret;
2286         unsigned int js_scheduling_period;
2287         u32 new_scheduling_period_ns;
2288         u32 old_period;
2289         u64 ticks;
2290
2291         kbdev = to_kbase_device(dev);
2292         if (!kbdev)
2293                 return -ENODEV;
2294
2295         ret = kstrtouint(buf, 0, &js_scheduling_period);
2296         if (ret || !js_scheduling_period) {
2297                 dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n"
2298                                 "Use format <js_scheduling_period_ms>\n");
2299                 return -EINVAL;
2300         }
2301
2302         new_scheduling_period_ns = js_scheduling_period * 1000000;
2303
2304         /* Update scheduling timeouts */
2305         mutex_lock(&kbdev->js_data.runpool_mutex);
2306
2307         /* If no contexts have been scheduled since js_timeouts was last written
2308          * to, the new timeouts might not have been latched yet. So check if an
2309          * update is pending and use the new values if necessary. */
2310
2311         /* Use previous 'new' scheduling period as a base if present. */
2312         if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns)
2313                 old_period = kbdev->js_scheduling_period_ns;
2314         else
2315                 old_period = kbdev->js_data.scheduling_period_ns;
2316
2317         if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks > 0)
2318                 ticks = (u64)kbdev->js_soft_stop_ticks * old_period;
2319         else
2320                 ticks = (u64)kbdev->js_data.soft_stop_ticks *
2321                                 kbdev->js_data.scheduling_period_ns;
2322         do_div(ticks, new_scheduling_period_ns);
2323         kbdev->js_soft_stop_ticks = ticks ? ticks : 1;
2324
2325         if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks_cl > 0)
2326                 ticks = (u64)kbdev->js_soft_stop_ticks_cl * old_period;
2327         else
2328                 ticks = (u64)kbdev->js_data.soft_stop_ticks_cl *
2329                                 kbdev->js_data.scheduling_period_ns;
2330         do_div(ticks, new_scheduling_period_ns);
2331         kbdev->js_soft_stop_ticks_cl = ticks ? ticks : 1;
2332
2333         if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_ss > 0)
2334                 ticks = (u64)kbdev->js_hard_stop_ticks_ss * old_period;
2335         else
2336                 ticks = (u64)kbdev->js_data.hard_stop_ticks_ss *
2337                                 kbdev->js_data.scheduling_period_ns;
2338         do_div(ticks, new_scheduling_period_ns);
2339         kbdev->js_hard_stop_ticks_ss = ticks ? ticks : 1;
2340
2341         if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_cl > 0)
2342                 ticks = (u64)kbdev->js_hard_stop_ticks_cl * old_period;
2343         else
2344                 ticks = (u64)kbdev->js_data.hard_stop_ticks_cl *
2345                                 kbdev->js_data.scheduling_period_ns;
2346         do_div(ticks, new_scheduling_period_ns);
2347         kbdev->js_hard_stop_ticks_cl = ticks ? ticks : 1;
2348
2349         if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_dumping > 0)
2350                 ticks = (u64)kbdev->js_hard_stop_ticks_dumping * old_period;
2351         else
2352                 ticks = (u64)kbdev->js_data.hard_stop_ticks_dumping *
2353                                 kbdev->js_data.scheduling_period_ns;
2354         do_div(ticks, new_scheduling_period_ns);
2355         kbdev->js_hard_stop_ticks_dumping = ticks ? ticks : 1;
2356
2357         if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_ss > 0)
2358                 ticks = (u64)kbdev->js_reset_ticks_ss * old_period;
2359         else
2360                 ticks = (u64)kbdev->js_data.gpu_reset_ticks_ss *
2361                                 kbdev->js_data.scheduling_period_ns;
2362         do_div(ticks, new_scheduling_period_ns);
2363         kbdev->js_reset_ticks_ss = ticks ? ticks : 1;
2364
2365         if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_cl > 0)
2366                 ticks = (u64)kbdev->js_reset_ticks_cl * old_period;
2367         else
2368                 ticks = (u64)kbdev->js_data.gpu_reset_ticks_cl *
2369                                 kbdev->js_data.scheduling_period_ns;
2370         do_div(ticks, new_scheduling_period_ns);
2371         kbdev->js_reset_ticks_cl = ticks ? ticks : 1;
2372
2373         if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_dumping > 0)
2374                 ticks = (u64)kbdev->js_reset_ticks_dumping * old_period;
2375         else
2376                 ticks = (u64)kbdev->js_data.gpu_reset_ticks_dumping *
2377                                 kbdev->js_data.scheduling_period_ns;
2378         do_div(ticks, new_scheduling_period_ns);
2379         kbdev->js_reset_ticks_dumping = ticks ? ticks : 1;
2380
2381         kbdev->js_scheduling_period_ns = new_scheduling_period_ns;
2382         kbdev->js_timeouts_updated = true;
2383
2384         mutex_unlock(&kbdev->js_data.runpool_mutex);
2385
2386         dev_dbg(kbdev->dev, "JS scheduling period: %dms\n",
2387                         js_scheduling_period);
2388
2389         return count;
2390 }
2391
2392 /**
2393  * show_js_scheduling_period - Show callback for the js_scheduling_period sysfs
2394  *                             entry.
2395  * @dev:  The device this sysfs file is for.
2396  * @attr: The attributes of the sysfs file.
2397  * @buf:  The output buffer to receive the scheduling period.
2398  *
2399  * This function is called to get the current period used for the JS scheduling
2400  * period.
2401  *
2402  * Return: The number of bytes output to buf.
2403  */
2404 static ssize_t show_js_scheduling_period(struct device *dev,
2405                 struct device_attribute *attr, char * const buf)
2406 {
2407         struct kbase_device *kbdev;
2408         u32 period;
2409         ssize_t ret;
2410
2411         kbdev = to_kbase_device(dev);
2412         if (!kbdev)
2413                 return -ENODEV;
2414
2415         if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns > 0)
2416                 period = kbdev->js_scheduling_period_ns;
2417         else
2418                 period = kbdev->js_data.scheduling_period_ns;
2419
2420         ret = scnprintf(buf, PAGE_SIZE, "%u\n",
2421                         period / 1000000);
2422
2423         return ret;
2424 }
2425
2426 static DEVICE_ATTR(js_scheduling_period, S_IRUGO | S_IWUSR,
2427                 show_js_scheduling_period, set_js_scheduling_period);
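/*
 * Example write (value in milliseconds; the number is illustrative). Note
 * that set_js_scheduling_period() rescales all of the tick-based timeouts
 * so their wall-clock durations are preserved under the new period:
 *
 *   $ echo 200 > js_scheduling_period
 */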
2428
2429 #if !MALI_CUSTOMER_RELEASE
2430 /** Store callback for the @c force_replay sysfs file.
2431  *
2432  * @param dev   The device this sysfs file is for
2433  * @param attr  The attributes of the sysfs file
2434  * @param buf   The value written to the sysfs file
2435  * @param count The number of bytes written to the sysfs file
2436  *
2437  * @return @c count if the function succeeded. An error code on failure.
2438  */
2439 static ssize_t set_force_replay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2440 {
2441         struct kbase_device *kbdev;
2442
2443         kbdev = to_kbase_device(dev);
2444         if (!kbdev)
2445                 return -ENODEV;
2446
2447         if (!strncmp("limit=", buf, MIN(6, count))) {
2448                 int force_replay_limit;
2449                 int items = sscanf(buf, "limit=%d", &force_replay_limit);
2450
2451                 if (items == 1) {
2452                         kbdev->force_replay_random = false;
2453                         kbdev->force_replay_limit = force_replay_limit;
2454                         kbdev->force_replay_count = 0;
2455
2456                         return count;
2457                 }
2458         } else if (!strncmp("random_limit", buf, MIN(12, count))) {
2459                 kbdev->force_replay_random = true;
2460                 kbdev->force_replay_count = 0;
2461
2462                 return count;
2463         } else if (!strncmp("norandom_limit", buf, MIN(14, count))) {
2464                 kbdev->force_replay_random = false;
2465                 kbdev->force_replay_limit = KBASEP_FORCE_REPLAY_DISABLED;
2466                 kbdev->force_replay_count = 0;
2467
2468                 return count;
2469         } else if (!strncmp("core_req=", buf, MIN(9, count))) {
2470                 unsigned int core_req;
2471                 int items = sscanf(buf, "core_req=%x", &core_req);
2472
2473                 if (items == 1) {
2474                         kbdev->force_replay_core_req = (base_jd_core_req)core_req;
2475
2476                         return count;
2477                 }
2478         }
2479         dev_err(kbdev->dev, "Couldn't process force_replay write operation.\nPossible settings: limit=<limit>, random_limit, norandom_limit, core_req=<core_req>\n");
2480         return -EINVAL;
2481 }
2482
2483 /** Show callback for the @c force_replay sysfs file.
2484  *
2485  * This function is called to get the contents of the @c force_replay sysfs
2486  * file. It returns the last set value written to the force_replay sysfs file.
2487  * If the file didn't get written yet, the values will be 0.
2488  *
2489  * @param dev   The device this sysfs file is for
2490  * @param attr  The attributes of the sysfs file
2491  * @param buf   The output buffer for the sysfs file contents
2492  *
2493  * @return The number of bytes output to @c buf.
2494  */
2495 static ssize_t show_force_replay(struct device *dev,
2496                 struct device_attribute *attr, char * const buf)
2497 {
2498         struct kbase_device *kbdev;
2499         ssize_t ret;
2500
2501         kbdev = to_kbase_device(dev);
2502         if (!kbdev)
2503                 return -ENODEV;
2504
2505         if (kbdev->force_replay_random)
2506                 ret = scnprintf(buf, PAGE_SIZE,
2507                                 "limit=0\nrandom_limit\ncore_req=%x\n",
2508                                 kbdev->force_replay_core_req);
2509         else
2510                 ret = scnprintf(buf, PAGE_SIZE,
2511                                 "limit=%u\nnorandom_limit\ncore_req=%x\n",
2512                                 kbdev->force_replay_limit,
2513                                 kbdev->force_replay_core_req);
2514
2515         if (ret >= PAGE_SIZE) {
2516                 buf[PAGE_SIZE - 2] = '\n';
2517                 buf[PAGE_SIZE - 1] = '\0';
2518                 ret = PAGE_SIZE - 1;
2519         }
2520
2521         return ret;
2522 }
2523
2524 /** The sysfs file @c force_replay.
2525  *
2526  */
2527 static DEVICE_ATTR(force_replay, S_IRUGO | S_IWUSR, show_force_replay,
2528                 set_force_replay);
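/*
 * Example writes, one per format accepted by set_force_replay() above
 * (the limit and core_req values are illustrative):
 *
 *   $ echo limit=3 > force_replay
 *   $ echo random_limit > force_replay
 *   $ echo norandom_limit > force_replay
 *   $ echo core_req=10 > force_replay
 */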
2529 #endif /* !MALI_CUSTOMER_RELEASE */
2530
2531 #ifdef CONFIG_MALI_DEBUG
2532 static ssize_t set_js_softstop_always(struct device *dev,
2533                 struct device_attribute *attr, const char *buf, size_t count)
2534 {
2535         struct kbase_device *kbdev;
2536         int ret;
2537         int softstop_always;
2538
2539         kbdev = to_kbase_device(dev);
2540         if (!kbdev)
2541                 return -ENODEV;
2542
2543         ret = kstrtoint(buf, 0, &softstop_always);
2544         if (ret || ((softstop_always != 0) && (softstop_always != 1))) {
2545                 dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n"
2546                                 "Use format <soft_stop_always>\n");
2547                 return -EINVAL;
2548         }
2549
2550         kbdev->js_data.softstop_always = (bool) softstop_always;
2551         dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n",
2552                         (kbdev->js_data.softstop_always) ?
2553                         "Enabled" : "Disabled");
2554         return count;
2555 }
2556
2557 static ssize_t show_js_softstop_always(struct device *dev,
2558                 struct device_attribute *attr, char * const buf)
2559 {
2560         struct kbase_device *kbdev;
2561         ssize_t ret;
2562
2563         kbdev = to_kbase_device(dev);
2564         if (!kbdev)
2565                 return -ENODEV;
2566
2567         ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);
2568
2569         if (ret >= PAGE_SIZE) {
2570                 buf[PAGE_SIZE - 2] = '\n';
2571                 buf[PAGE_SIZE - 1] = '\0';
2572                 ret = PAGE_SIZE - 1;
2573         }
2574
2575         return ret;
2576 }
2577
2578 /*
2579  * By default, soft-stops are disabled when only a single context is present.
2580  * Enabling soft-stop for a single context is useful for debug and unit
2581  * testing (see the CL t6xx_stress_1 unit-test, where this feature is used).
2582  */
2583 static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
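/*
 * Example: enable soft-stop even when only a single context is present
 * (write 0 to restore the default behaviour):
 *
 *   $ echo 1 > js_softstop_always
 */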
2584 #endif /* CONFIG_MALI_DEBUG */
2585
2586 #ifdef CONFIG_MALI_DEBUG
2587 typedef void (kbasep_debug_command_func) (struct kbase_device *);
2588
2589 enum kbasep_debug_command_code {
2590         KBASEP_DEBUG_COMMAND_DUMPTRACE,
2591
2592         /* This must be the last enum */
2593         KBASEP_DEBUG_COMMAND_COUNT
2594 };
2595
2596 struct kbasep_debug_command {
2597         char *str;
2598         kbasep_debug_command_func *func;
2599 };
2600
2601 /** Debug commands supported by the driver */
2602 static const struct kbasep_debug_command debug_commands[] = {
2603         {
2604          .str = "dumptrace",
2605          .func = &kbasep_trace_dump,
2606          }
2607 };
2608
2609 /** Show callback for the @c debug_command sysfs file.
2610  *
2611  * This function is called to get the contents of the @c debug_command sysfs
2612  * file. This is a list of the available debug commands, separated by newlines.
2613  *
2614  * @param dev   The device this sysfs file is for
2615  * @param attr  The attributes of the sysfs file
2616  * @param buf   The output buffer for the sysfs file contents
2617  *
2618  * @return The number of bytes output to @c buf.
2619  */
2620 static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char * const buf)
2621 {
2622         struct kbase_device *kbdev;
2623         int i;
2624         ssize_t ret = 0;
2625
2626         kbdev = to_kbase_device(dev);
2627
2628         if (!kbdev)
2629                 return -ENODEV;
2630
2631         for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
2632                 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);
2633
2634         if (ret >= PAGE_SIZE) {
2635                 buf[PAGE_SIZE - 2] = '\n';
2636                 buf[PAGE_SIZE - 1] = '\0';
2637                 ret = PAGE_SIZE - 1;
2638         }
2639
2640         return ret;
2641 }
2642
2643 /** Store callback for the @c debug_command sysfs file.
2644  *
2645  * This function is called when the @c debug_command sysfs file is written to.
2646  * It matches the requested command against the available commands, and if
2647  * a matching command is found calls the associated function from
2648  * @ref debug_commands to issue the command.
2649  *
2650  * @param dev   The device this sysfs file is for
2651  * @param attr  The attributes of the sysfs file
2652  * @param buf   The value written to the sysfs file
2653  * @param count The number of bytes written to the sysfs file
2654  *
2655  * @return @c count if the function succeeded. An error code on failure.
2656  */
2657 static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2658 {
2659         struct kbase_device *kbdev;
2660         int i;
2661
2662         kbdev = to_kbase_device(dev);
2663
2664         if (!kbdev)
2665                 return -ENODEV;
2666
2667         for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
2668                 if (sysfs_streq(debug_commands[i].str, buf)) {
2669                         debug_commands[i].func(kbdev);
2670                         return count;
2671                 }
2672         }
2673
2674         /* Debug Command not found */
2675         dev_err(dev, "debug_command: command not known\n");
2676         return -EINVAL;
2677 }
2678
2679 /** The sysfs file @c debug_command.
2680  *
2681  * This is used to issue general debug commands to the device driver.
2682  * Reading it will produce a list of debug commands, separated by newlines.
2683  * Writing to it with one of those commands will issue said command.
2684  */
2685 static DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug);
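/*
 * Example: list the supported commands, then issue one (only "dumptrace"
 * is defined in debug_commands[] above):
 *
 *   $ cat debug_command
 *   dumptrace
 *   $ echo dumptrace > debug_command
 */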
2686 #endif /* CONFIG_MALI_DEBUG */
2687
2688 /**
2689  * kbase_show_gpuinfo - Show callback for the gpuinfo sysfs entry.
2690  * @dev: The device this sysfs file is for.
2691  * @attr: The attributes of the sysfs file.
2692  * @buf: The output buffer to receive the GPU information.
2693  *
2694  * This function is called to get a description of the present Mali
2695  * GPU via the gpuinfo sysfs entry.  This includes the GPU family, the
2696  * number of cores, the hardware version and the raw product id.  For
2697  * example:
2698  *
2699  *    Mali-T60x MP4 r0p0 0x6956
2700  *
2701  * Return: The number of bytes output to buf.
2702  */
2703 static ssize_t kbase_show_gpuinfo(struct device *dev,
2704                                   struct device_attribute *attr, char *buf)
2705 {
2706         static const struct gpu_product_id_name {
2707                 unsigned id;
2708                 char *name;
2709         } gpu_product_id_names[] = {
2710                 { .id = GPU_ID_PI_T60X, .name = "Mali-T60x" },
2711                 { .id = GPU_ID_PI_T62X, .name = "Mali-T62x" },
2712                 { .id = GPU_ID_PI_T72X, .name = "Mali-T72x" },
2713                 { .id = GPU_ID_PI_T76X, .name = "Mali-T76x" },
2714                 { .id = GPU_ID_PI_T82X, .name = "Mali-T82x" },
2715                 { .id = GPU_ID_PI_T83X, .name = "Mali-T83x" },
2716                 { .id = GPU_ID_PI_T86X, .name = "Mali-T86x" },
2717                 { .id = GPU_ID_PI_TFRX, .name = "Mali-T88x" },
2718         };
2719         const char *product_name = "(Unknown Mali GPU)";
2720         struct kbase_device *kbdev;
2721         u32 gpu_id;
2722         unsigned product_id, product_id_mask;
2723         unsigned i;
2724         bool is_new_format;
2725
2726         kbdev = to_kbase_device(dev);
2727         if (!kbdev)
2728                 return -ENODEV;
2729
2730         gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
2731         product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
2732         is_new_format = GPU_ID_IS_NEW_FORMAT(product_id);
2733         product_id_mask =
2734                 (is_new_format ?
2735                         GPU_ID2_PRODUCT_MODEL :
2736                         GPU_ID_VERSION_PRODUCT_ID) >>
2737                 GPU_ID_VERSION_PRODUCT_ID_SHIFT;
2738
2739         for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
2740                 const struct gpu_product_id_name *p = &gpu_product_id_names[i];
2741
2742                 if ((GPU_ID_IS_NEW_FORMAT(p->id) == is_new_format) &&
2743                     (p->id & product_id_mask) ==
2744                     (product_id & product_id_mask)) {
2745                         product_name = p->name;
2746                         break;
2747                 }
2748         }
2749
2750         return scnprintf(buf, PAGE_SIZE, "%s MP%d r%dp%d 0x%04X\n",
2751                 product_name, kbdev->gpu_props.num_cores,
2752                 (gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT,
2753                 (gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT,
2754                 product_id);
2755 }
2756 static DEVICE_ATTR(gpuinfo, S_IRUGO, kbase_show_gpuinfo, NULL);
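/* Example read (illustrative device path; the actual output depends on the
 * GPU present, 0x0750 being the T76x raw product id):
 *
 *   $ cat /sys/devices/platform/mali.0/gpuinfo
 *   Mali-T76x MP4 r0p0 0x0750
 */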
2757
2758 /**
2759  * set_dvfs_period - Store callback for the dvfs_period sysfs file.
2760  * @dev:   The device this sysfs file is for
2761  * @attr:  The attributes of the sysfs file
2762  * @buf:   The value written to the sysfs file
2763  * @count: The number of bytes written to the sysfs file
2764  *
2765  * This function is called when the dvfs_period sysfs file is written to. It
2766  * checks the data written, and if valid updates the DVFS period variable,
2767  *
2768  * Return: @c count if the function succeeded. An error code on failure.
2769  */
2770 static ssize_t set_dvfs_period(struct device *dev,
2771                 struct device_attribute *attr, const char *buf, size_t count)
2772 {
2773         struct kbase_device *kbdev;
2774         int ret;
2775         int dvfs_period;
2776
2777         kbdev = to_kbase_device(dev);
2778         if (!kbdev)
2779                 return -ENODEV;
2780
2781         ret = kstrtoint(buf, 0, &dvfs_period);
2782         if (ret || dvfs_period <= 0) {
2783                 dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n"
2784                                 "Use format <dvfs_period_ms>\n");
2785                 return -EINVAL;
2786         }
2787
2788         kbdev->pm.dvfs_period = dvfs_period;
2789         dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period);
2790
2791         return count;
2792 }
2793
2794 /**
2795  * show_dvfs_period - Show callback for the dvfs_period sysfs entry.
2796  * @dev:  The device this sysfs file is for.
2797  * @attr: The attributes of the sysfs file.
2798  * @buf:  The output buffer for the current DVFS period.
2799  *
2800  * This function is called to get the current period used for the DVFS sample
2801  * timer.
2802  *
2803  * Return: The number of bytes output to buf.
2804  */
2805 static ssize_t show_dvfs_period(struct device *dev,
2806                 struct device_attribute *attr, char * const buf)
2807 {
2808         struct kbase_device *kbdev;
2809         ssize_t ret;
2810
2811         kbdev = to_kbase_device(dev);
2812         if (!kbdev)
2813                 return -ENODEV;
2814
2815         ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period);
2816
2817         return ret;
2818 }
2819
2820 static DEVICE_ATTR(dvfs_period, S_IRUGO | S_IWUSR, show_dvfs_period,
2821                 set_dvfs_period);
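/* Example: select a 100 ms DVFS sampling period and read it back
 * (illustrative value and device path):
 *
 *   echo 100 > /sys/devices/platform/mali.0/dvfs_period
 *   cat /sys/devices/platform/mali.0/dvfs_period
 */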
2822
2823 /**
2824  * set_pm_poweroff - Store callback for the pm_poweroff sysfs file.
2825  * @dev:   The device this sysfs file is for
2826  * @attr:  The attributes of the sysfs file
2827  * @buf:   The value written to the sysfs file
2828  * @count: The number of bytes written to the sysfs file
2829  *
2830  * This function is called when the pm_poweroff sysfs file is written to.
2831  *
2832  * This file contains three values separated by whitespace. The values
2833  * are gpu_poweroff_time (the period of the poweroff timer, in ns),
2834  * poweroff_shader_ticks (the number of poweroff timer ticks before an idle
2835  * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer
2836  * ticks before the GPU is powered off), in that order.
2837  *
2838  * Return: @c count if the function succeeded. An error code on failure.
2839  */
2840 static ssize_t set_pm_poweroff(struct device *dev,
2841                 struct device_attribute *attr, const char *buf, size_t count)
2842 {
2843         struct kbase_device *kbdev;
2844         int items;
2845         u64 gpu_poweroff_time;
2846         unsigned int poweroff_shader_ticks, poweroff_gpu_ticks;
2847
2848         kbdev = to_kbase_device(dev);
2849         if (!kbdev)
2850                 return -ENODEV;
2851
2852         items = sscanf(buf, "%llu %u %u", &gpu_poweroff_time,
2853                         &poweroff_shader_ticks,
2854                         &poweroff_gpu_ticks);
2855         if (items != 3) {
2856                 dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n"
2857                                 "Use format <gpu_poweroff_time_ns> <poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
2858                 return -EINVAL;
2859         }
2860
2861         kbdev->pm.gpu_poweroff_time = HR_TIMER_DELAY_NSEC(gpu_poweroff_time);
2862         kbdev->pm.poweroff_shader_ticks = poweroff_shader_ticks;
2863         kbdev->pm.poweroff_gpu_ticks = poweroff_gpu_ticks;
2864
2865         return count;
2866 }
2867
2868 /**
2869  * show_pm_poweroff - Show callback for the pm_poweroff sysfs entry.
2870  * @dev:  The device this sysfs file is for.
2871  * @attr: The attributes of the sysfs file.
2872  * @buf:  The output buffer for the current power-off timer settings.
2873  *
2874  * This function is called to get the current poweroff timer period and the
2875  * shader/GPU poweroff tick counts, in the order used by set_pm_poweroff().
2876  *
2877  * Return: The number of bytes output to buf.
2878  */
2879 static ssize_t show_pm_poweroff(struct device *dev,
2880                 struct device_attribute *attr, char * const buf)
2881 {
2882         struct kbase_device *kbdev;
2883         ssize_t ret;
2884
2885         kbdev = to_kbase_device(dev);
2886         if (!kbdev)
2887                 return -ENODEV;
2888
2889         ret = scnprintf(buf, PAGE_SIZE, "%llu %u %u\n",
2890                         ktime_to_ns(kbdev->pm.gpu_poweroff_time),
2891                         kbdev->pm.poweroff_shader_ticks,
2892                         kbdev->pm.poweroff_gpu_ticks);
2893
2894         return ret;
2895 }
2896
2897 static DEVICE_ATTR(pm_poweroff, S_IRUGO | S_IWUSR, show_pm_poweroff,
2898                 set_pm_poweroff);
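/* Example: a 400000 ns (400 us) poweroff timer, shaders powered off after
 * 2 idle ticks and the GPU after 2 ticks (illustrative values and path):
 *
 *   echo "400000 2 2" > /sys/devices/platform/mali.0/pm_poweroff
 */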
2899
2900 /**
2901  * set_reset_timeout - Store callback for the reset_timeout sysfs file.
2902  * @dev:   The device this sysfs file is for
2903  * @attr:  The attributes of the sysfs file
2904  * @buf:   The value written to the sysfs file
2905  * @count: The number of bytes written to the sysfs file
2906  *
2907  * This function is called when the reset_timeout sysfs file is written to. It
2908  * checks the data written, and if valid updates the reset timeout.
2909  *
2910  * Return: @c count if the function succeeded. An error code on failure.
2911  */
2912 static ssize_t set_reset_timeout(struct device *dev,
2913                 struct device_attribute *attr, const char *buf, size_t count)
2914 {
2915         struct kbase_device *kbdev;
2916         int ret;
2917         int reset_timeout;
2918
2919         kbdev = to_kbase_device(dev);
2920         if (!kbdev)
2921                 return -ENODEV;
2922
2923         ret = kstrtoint(buf, 0, &reset_timeout);
2924         if (ret || reset_timeout <= 0) {
2925                 dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n"
2926                                 "Use format <reset_timeout_ms>\n");
2927                 return -EINVAL;
2928         }
2929
2930         kbdev->reset_timeout_ms = reset_timeout;
2931         dev_dbg(kbdev->dev, "Reset timeout: %dms\n", reset_timeout);
2932
2933         return count;
2934 }
2935
2936 /**
2937  * show_reset_timeout - Show callback for the reset_timeout sysfs entry.
2938  * @dev:  The device this sysfs file is for.
2939  * @attr: The attributes of the sysfs file.
2940  * @buf:  The output buffer for the current reset timeout.
2941  *
2942  * This function is called to get the current reset timeout.
2943  *
2944  * Return: The number of bytes output to buf.
2945  */
2946 static ssize_t show_reset_timeout(struct device *dev,
2947                 struct device_attribute *attr, char * const buf)
2948 {
2949         struct kbase_device *kbdev;
2950         ssize_t ret;
2951
2952         kbdev = to_kbase_device(dev);
2953         if (!kbdev)
2954                 return -ENODEV;
2955
2956         ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms);
2957
2958         return ret;
2959 }
2960
2961 static DEVICE_ATTR(reset_timeout, S_IRUGO | S_IWUSR, show_reset_timeout,
2962                 set_reset_timeout);
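/* Example: use a 500 ms reset timeout (illustrative value and path):
 *
 *   echo 500 > /sys/devices/platform/mali.0/reset_timeout
 */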
2963
2964
2965
2966 static ssize_t show_mem_pool_size(struct device *dev,
2967                 struct device_attribute *attr, char * const buf)
2968 {
2969         struct kbase_device *kbdev;
2970         ssize_t ret;
2971
2972         kbdev = to_kbase_device(dev);
2973         if (!kbdev)
2974                 return -ENODEV;
2975
2976         ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
2977                         kbase_mem_pool_size(&kbdev->mem_pool));
2978
2979         return ret;
2980 }
2981
2982 static ssize_t set_mem_pool_size(struct device *dev,
2983                 struct device_attribute *attr, const char *buf, size_t count)
2984 {
2985         struct kbase_device *kbdev;
2986         unsigned long new_size;
2987         int err;
2988
2989         kbdev = to_kbase_device(dev);
2990         if (!kbdev)
2991                 return -ENODEV;
2992
2993         err = kstrtoul(buf, 0, &new_size);
2994         if (err)
2995                 return err;
2996
2997         kbase_mem_pool_trim(&kbdev->mem_pool, new_size);
2998
2999         return count;
3000 }
3001
3002 static DEVICE_ATTR(mem_pool_size, S_IRUGO | S_IWUSR, show_mem_pool_size,
3003                 set_mem_pool_size);
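/* Example: trim the free-page pool down to 512 pages; reading the file back
 * returns the current pool size (illustrative path; the unit is GPU pages):
 *
 *   echo 512 > /sys/devices/platform/mali.0/mem_pool_size
 */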
3004
3005 static ssize_t show_mem_pool_max_size(struct device *dev,
3006                 struct device_attribute *attr, char * const buf)
3007 {
3008         struct kbase_device *kbdev;
3009         ssize_t ret;
3010
3011         kbdev = to_kbase_device(dev);
3012         if (!kbdev)
3013                 return -ENODEV;
3014
3015         ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
3016                         kbase_mem_pool_max_size(&kbdev->mem_pool));
3017
3018         return ret;
3019 }
3020
3021 static ssize_t set_mem_pool_max_size(struct device *dev,
3022                 struct device_attribute *attr, const char *buf, size_t count)
3023 {
3024         struct kbase_device *kbdev;
3025         unsigned long new_max_size;
3026         int err;
3027
3028         kbdev = to_kbase_device(dev);
3029         if (!kbdev)
3030                 return -ENODEV;
3031
3032         err = kstrtoul(buf, 0, &new_max_size);
3033         if (err)
3034                 return err;
3035
3036         kbase_mem_pool_set_max_size(&kbdev->mem_pool, new_max_size);
3037
3038         return count;
3039 }
3040
3041 static DEVICE_ATTR(mem_pool_max_size, S_IRUGO | S_IWUSR, show_mem_pool_max_size,
3042                 set_mem_pool_max_size);
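/* Example: cap the memory pool at 4096 pages (illustrative value and path):
 *
 *   echo 4096 > /sys/devices/platform/mali.0/mem_pool_max_size
 */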
3043
3044
3045
3046 static int kbasep_secure_mode_init(struct kbase_device *kbdev)
3047 {
3048
3049 #ifdef SECURE_CALLBACKS
3050         kbdev->secure_ops = SECURE_CALLBACKS;
3051         kbdev->secure_mode_support = false;
3052
3053         if (kbdev->secure_ops) {
3054                 int err;
3055
3056                 /* Make sure secure mode is disabled on startup */
3057                 err = kbdev->secure_ops->secure_mode_disable(kbdev);
3058
3059                 /* secure_mode_disable() returns -EINVAL if not supported */
3060                 kbdev->secure_mode_support = (err != -EINVAL);
3061         }
3062 #endif
3063
3064         return 0;
3065 }
3066
3067 #ifdef CONFIG_MALI_NO_MALI
3068 static int kbase_common_reg_map(struct kbase_device *kbdev)
3069 {
3070         return 0;
3071 }
3072 static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
3073 {
3074 }
3075 #else /* CONFIG_MALI_NO_MALI */
3076 static int kbase_common_reg_map(struct kbase_device *kbdev)
3077 {
3078         int err = -ENOMEM;
3079
3080         if (!request_mem_region(kbdev->reg_start, kbdev->reg_size, dev_name(kbdev->dev))) {
3081                 dev_err(kbdev->dev, "Register window unavailable\n");
3082                 err = -EIO;
3083                 goto out_region;
3084         }
3085
3086         kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size);
3087         if (!kbdev->reg) {
3088                 dev_err(kbdev->dev, "Can't remap register window\n");
3089                 err = -EINVAL;
3090                 goto out_ioremap;
3091         }
3092
3093         return 0;
3094
3095  out_ioremap:
3096         release_mem_region(kbdev->reg_start, kbdev->reg_size);
3097  out_region:
3098         return err;
3099 }
3100
3101 static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
3102 {
3103         iounmap(kbdev->reg);
3104         release_mem_region(kbdev->reg_start, kbdev->reg_size);
3105 }
3106 #endif /* CONFIG_MALI_NO_MALI */
3107
3108
3109 #ifdef CONFIG_DEBUG_FS
3110
3111 #if KBASE_GPU_RESET_EN
3112 #include <mali_kbase_hwaccess_jm.h>
3113
3114 static void trigger_quirks_reload(struct kbase_device *kbdev)
3115 {
3116         kbase_pm_context_active(kbdev);
3117         if (kbase_prepare_to_reset_gpu(kbdev))
3118                 kbase_reset_gpu(kbdev);
3119         kbase_pm_context_idle(kbdev);
3120 }
3121
3122 #define MAKE_QUIRK_ACCESSORS(type) \
3123 static int type##_quirks_set(void *data, u64 val) \
3124 { \
3125         struct kbase_device *kbdev; \
3126         kbdev = (struct kbase_device *)data; \
3127         kbdev->hw_quirks_##type = (u32)val; \
3128         trigger_quirks_reload(kbdev); \
3129         return 0;\
3130 } \
3131 \
3132 static int type##_quirks_get(void *data, u64 *val) \
3133 { \
3134         struct kbase_device *kbdev;\
3135         kbdev = (struct kbase_device *)data;\
3136         *val = kbdev->hw_quirks_##type;\
3137         return 0;\
3138 } \
3139 DEFINE_SIMPLE_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get,\
3140                 type##_quirks_set, "%llu\n")
3141
3142 MAKE_QUIRK_ACCESSORS(sc);
3143 MAKE_QUIRK_ACCESSORS(tiler);
3144 MAKE_QUIRK_ACCESSORS(mmu);
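/* For reference, MAKE_QUIRK_ACCESSORS(sc) expands to sc_quirks_set() and
 * sc_quirks_get(), which write and read kbdev->hw_quirks_sc (a write also
 * reloads the quirk registers via a GPU reset), and to fops_sc_quirks,
 * the file operations behind the "quirks_sc" debugfs file created in
 * kbase_device_debugfs_init() below.
 */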
3145
3146 #endif /* KBASE_GPU_RESET_EN */
3147
3148 static int kbasep_secure_mode_seq_show(struct seq_file *m, void *p)
3149 {
3150         struct kbase_device *kbdev = m->private;
3151
3152         if (!kbdev->secure_mode_support)
3153                 seq_puts(m, "unsupported\n");
3154         else
3155                 seq_printf(m, "%s\n", kbdev->secure_mode ? "Y" : "N");
3156
3157         return 0;
3158 }
3159
3160 static int kbasep_secure_mode_debugfs_open(struct inode *in, struct file *file)
3161 {
3162         return single_open(file, kbasep_secure_mode_seq_show, in->i_private);
3163 }
3164
3165 static const struct file_operations kbasep_secure_mode_debugfs_fops = {
3166         .open = kbasep_secure_mode_debugfs_open,
3167         .read = seq_read,
3168         .llseek = seq_lseek,
3169         .release = single_release,
3170 };
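/* Reading the "secure_mode" debugfs file created below returns "Y" or "N",
 * or "unsupported" when no secure mode callbacks are available. */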
3171
3172 static int kbase_device_debugfs_init(struct kbase_device *kbdev)
3173 {
3174         struct dentry *debugfs_ctx_defaults_directory;
3175         int err;
3176
3177         kbdev->mali_debugfs_directory = debugfs_create_dir(kbdev->devname,
3178                         NULL);
3179         if (!kbdev->mali_debugfs_directory) {
3180                 dev_err(kbdev->dev, "Couldn't create mali debugfs directory\n");
3181                 err = -ENOMEM;
3182                 goto out;
3183         }
3184
3185         kbdev->debugfs_ctx_directory = debugfs_create_dir("ctx",
3186                         kbdev->mali_debugfs_directory);
3187         if (!kbdev->debugfs_ctx_directory) {
3188                 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx directory\n");
3189                 err = -ENOMEM;
3190                 goto out;
3191         }
3192
3193         debugfs_ctx_defaults_directory = debugfs_create_dir("defaults",
3194                         kbdev->debugfs_ctx_directory);
3195         if (!debugfs_ctx_defaults_directory) {
3196                 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx defaults directory\n");
3197                 err = -ENOMEM;
3198                 goto out;
3199         }
3200
3201 #if !MALI_CUSTOMER_RELEASE
3202         kbasep_regs_dump_debugfs_add(kbdev);
3203 #endif /* !MALI_CUSTOMER_RELEASE */
3204
3205         kbase_debug_job_fault_debugfs_init(kbdev);
3206         kbasep_gpu_memory_debugfs_init(kbdev);
3207 #if KBASE_GPU_RESET_EN
3208         debugfs_create_file("quirks_sc", 0644,
3209                         kbdev->mali_debugfs_directory, kbdev,
3210                         &fops_sc_quirks);
3211         debugfs_create_file("quirks_tiler", 0644,
3212                         kbdev->mali_debugfs_directory, kbdev,
3213                         &fops_tiler_quirks);
3214         debugfs_create_file("quirks_mmu", 0644,
3215                         kbdev->mali_debugfs_directory, kbdev,
3216                         &fops_mmu_quirks);
3217 #endif /* KBASE_GPU_RESET_EN */
3218
3219 #ifndef CONFIG_MALI_COH_USER
3220         debugfs_create_bool("infinite_cache", 0644,
3221                         debugfs_ctx_defaults_directory,
3222                         (bool *)&kbdev->infinite_cache_active_default);
3223 #endif /* CONFIG_MALI_COH_USER */
3224
3225         debugfs_create_size_t("mem_pool_max_size", 0644,
3226                         debugfs_ctx_defaults_directory,
3227                         &kbdev->mem_pool_max_size_default);
3228
3229 #if KBASE_TRACE_ENABLE
3230         kbasep_trace_debugfs_init(kbdev);
3231 #endif /* KBASE_TRACE_ENABLE */
3232
3233 #ifdef CONFIG_MALI_TRACE_TIMELINE
3234         kbasep_trace_timeline_debugfs_init(kbdev);
3235 #endif /* CONFIG_MALI_TRACE_TIMELINE */
3236
3237         debugfs_create_file("secure_mode", S_IRUGO,
3238                         kbdev->mali_debugfs_directory, kbdev,
3239                         &kbasep_secure_mode_debugfs_fops);
3240
3241         return 0;
3242
3243 out:
3244         debugfs_remove_recursive(kbdev->mali_debugfs_directory);
3245         return err;
3246 }
3247
3248 static void kbase_device_debugfs_term(struct kbase_device *kbdev)
3249 {
3250         debugfs_remove_recursive(kbdev->mali_debugfs_directory);
3251 }
3252
3253 #else /* CONFIG_DEBUG_FS */
3254 static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
3255 {
3256         return 0;
3257 }
3258
3259 static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }
3260 #endif /* CONFIG_DEBUG_FS */
3261
3262 static void kbase_device_coherency_init(struct kbase_device *kbdev, u32 gpu_id)
3263 {
3264 #ifdef CONFIG_OF
3265         u32 supported_coherency_bitmap =
3266                 kbdev->gpu_props.props.raw_props.coherency_mode;
3267         const void *coherency_override_dts;
3268         u32 override_coherency;
3269 #endif /* CONFIG_OF */
3270
3271         kbdev->system_coherency = COHERENCY_NONE;
3272
3273         /* device tree may override the coherency */
3274 #ifdef CONFIG_OF
3275         coherency_override_dts = of_get_property(kbdev->dev->of_node,
3276                                                 "system-coherency",
3277                                                 NULL);
3278         if (coherency_override_dts) {
3279
3280                 override_coherency = be32_to_cpup(coherency_override_dts);
3281
3282                 if ((override_coherency <= COHERENCY_NONE) &&
3283                         (supported_coherency_bitmap &
3284                          COHERENCY_FEATURE_BIT(override_coherency))) {
3285
3286                         kbdev->system_coherency = override_coherency;
3287
3288                         dev_info(kbdev->dev,
3289                                 "Using coherency mode %u set from dtb",
3290                                 override_coherency);
3291                 } else
3292                         dev_warn(kbdev->dev,
3293                                 "Ignoring unsupported coherency mode %u set from dtb",
3294                                 override_coherency);
3295         }
3296
3297 #endif /* CONFIG_OF */
3298
3299         kbdev->gpu_props.props.raw_props.coherency_mode =
3300                 kbdev->system_coherency;
3301 }
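/* Example device-tree override (a sketch; the node name and unit address are
 * illustrative, assuming the usual Midgard encoding of 0 = ACE-Lite,
 * 1 = ACE, 31 = none):
 *
 *      gpu@ffa30000 {
 *              ...
 *              system-coherency = <0>;
 *      };
 */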
3302
3303 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3304
3305 /* Callback used by the kbase bus logger client, to initiate a GPU reset
3306  * when the bus log is restarted.  The GPU reset is used as a reference
3307  * point in HW bus log analyses.
3308  */
3309 static void kbase_logging_started_cb(void *data)
3310 {
3311         struct kbase_device *kbdev = (struct kbase_device *)data;
3312
3313         if (kbase_prepare_to_reset_gpu(kbdev))
3314                 kbase_reset_gpu(kbdev);
3315         dev_info(kbdev->dev, "KBASE - Bus logger restarted\n");
3316 }
3317 #endif
3318
3319
3320 static int kbase_common_device_init(struct kbase_device *kbdev)
3321 {
3322         int err;
3323         struct mali_base_gpu_core_props *core_props;
3324         enum {
3325                 inited_mem = (1u << 0),
3326                 inited_js = (1u << 1),
3327                 inited_pm_runtime_init = (1u << 6),
3328 #ifdef CONFIG_MALI_DEVFREQ
3329                 inited_devfreq = (1u << 9),
3330 #endif /* CONFIG_MALI_DEVFREQ */
3331 #ifdef CONFIG_MALI_MIPE_ENABLED
3332                 inited_tlstream = (1u << 10),
3333 #endif /* CONFIG_MALI_MIPE_ENABLED */
3334                 inited_backend_early = (1u << 11),
3335                 inited_backend_late = (1u << 12),
3336                 inited_device = (1u << 13),
3337                 inited_vinstr = (1u << 19),
3338                 inited_ipa = (1u << 20),
3339                 inited_job_fault = (1u << 21)
3340         };
3341
3342         int inited = 0;
3343         u32 gpu_id;
3344 #if defined(CONFIG_MALI_PLATFORM_VEXPRESS)
3345         u32 ve_logic_tile = 0;
3346 #endif /* CONFIG_MALI_PLATFORM_VEXPRESS */
3347
3348         dev_set_drvdata(kbdev->dev, kbdev);
3349
3350         err = kbase_backend_early_init(kbdev);
3351         if (err)
3352                 goto out_partial;
3353         inited |= inited_backend_early;
3354
3355         scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name,
3356                         kbase_dev_nr);
3357
3358         kbase_disjoint_init(kbdev);
3359
3360         /* obtain min/max configured gpu frequencies */
3361         core_props = &(kbdev->gpu_props.props.core_props);
3362
3363         /* For Versatile Express platforms, the min and max GPU frequencies
3364          * depend on the type of the logic tile; these values may not be known
3365          * at build time, so a platform config file with the wrong GPU
3366          * frequency values may be included. To ensure the correct min and max
3367          * GPU frequencies are used, the type of the logic tile is read from
3368          * the corresponding register on the platform and the frequency values
3369          * are assigned accordingly. */
3370 #if defined(CONFIG_MALI_PLATFORM_VEXPRESS)
3371         ve_logic_tile = kbase_get_platform_logic_tile_type();
3372
3373         switch (ve_logic_tile) {
3374         case 0x217:
3375                 /* Virtex 6, HBI0217 */
3376                 core_props->gpu_freq_khz_min = VE_VIRTEX6_GPU_FREQ_MIN;
3377                 core_props->gpu_freq_khz_max = VE_VIRTEX6_GPU_FREQ_MAX;
3378                 break;
3379         case 0x247:
3380                 /* Virtex 7, HBI0247 */
3381                 core_props->gpu_freq_khz_min = VE_VIRTEX7_GPU_FREQ_MIN;
3382                 core_props->gpu_freq_khz_max = VE_VIRTEX7_GPU_FREQ_MAX;
3383                 break;
3384         default:
3385                 /* all other logic tiles, i.e., Virtex 5 HBI0192
3386                  * or unsuccessful reading from the platform -
3387                  * fall back to the config_platform default */
3388                 core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN;
3389                 core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
3390                 break;
3391         }
3392 #else
3393         core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN;
3394         core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
3395 #endif /* CONFIG_MALI_PLATFORM_VEXPRESS */
3396
3397         kbdev->gpu_props.irq_throttle_time_us = DEFAULT_IRQ_THROTTLE_TIME_US;
3398
3399         err = kbase_device_init(kbdev);
3400         if (err) {
3401                 dev_err(kbdev->dev, "Can't initialize device (%d)\n", err);
3402                 goto out_partial;
3403         }
3404
3405         inited |= inited_device;
3406
3407         if (kbdev->pm.callback_power_runtime_init) {
3408                 err = kbdev->pm.callback_power_runtime_init(kbdev);
3409                 if (err)
3410                         goto out_partial;
3411
3412                 inited |= inited_pm_runtime_init;
3413         }
3414
3415         err = kbase_mem_init(kbdev);
3416         if (err)
3417                 goto out_partial;
3418
3419         inited |= inited_mem;
3420
3421         gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
3422         gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
3423         gpu_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
3424
3425         kbase_device_coherency_init(kbdev, gpu_id);
3426
3427         err = kbasep_secure_mode_init(kbdev);
3428         if (err)
3429                 goto out_partial;
3430
3431         err = kbasep_js_devdata_init(kbdev);
3432         if (err)
3433                 goto out_partial;
3434
3435         inited |= inited_js;
3436
3437 #ifdef CONFIG_MALI_MIPE_ENABLED
3438         err = kbase_tlstream_init();
3439         if (err) {
3440                 dev_err(kbdev->dev, "Couldn't initialize timeline stream\n");
3441                 goto out_partial;
3442         }
3443         inited |= inited_tlstream;
3444 #endif /* CONFIG_MALI_MIPE_ENABLED */
3445
3446         err = kbase_backend_late_init(kbdev);
3447         if (err)
3448                 goto out_partial;
3449         inited |= inited_backend_late;
3450
3451 #ifdef CONFIG_MALI_DEVFREQ
3452         err = kbase_devfreq_init(kbdev);
3453         if (err) {
3454                 dev_err(kbdev->dev, "Couldn't initialize devfreq\n");
3455                 goto out_partial;
3456         }
3457         inited |= inited_devfreq;
3458 #endif /* CONFIG_MALI_DEVFREQ */
3459
3460         kbdev->vinstr_ctx = kbase_vinstr_init(kbdev);
3461         if (!kbdev->vinstr_ctx) {
3462                 dev_err(kbdev->dev, "Can't initialize virtual instrumentation core\n");
3463                 goto out_partial;
3464         }
3465
3466         inited |= inited_vinstr;
3467
3468         kbdev->ipa_ctx = kbase_ipa_init(kbdev);
3469         if (!kbdev->ipa_ctx) {
3470                 dev_err(kbdev->dev, "Can't initialize IPA\n");
3471                 goto out_partial;
3472         }
3473
3474         inited |= inited_ipa;
3475
3476         err = kbase_debug_job_fault_dev_init(kbdev);
3477         if (err)
3478                 goto out_partial;
3479
3480         inited |= inited_job_fault;
3481
3482         err = kbase_device_debugfs_init(kbdev);
3483         if (err)
3484                 goto out_partial;
3485
3486         /* initialise the kctx list */
3487         mutex_init(&kbdev->kctx_list_lock);
3488         INIT_LIST_HEAD(&kbdev->kctx_list);
3489
3490         kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
3491         kbdev->mdev.name = kbdev->devname;
3492         kbdev->mdev.fops = &kbase_fops;
3493         kbdev->mdev.parent = get_device(kbdev->dev);
3494
3495         err = misc_register(&kbdev->mdev);
3496         if (err) {
3497                 dev_err(kbdev->dev, "Couldn't register misc dev %s\n", kbdev->devname);
3498                 goto out_misc;
3499         }
3500
3501         {
3502                 const struct list_head *dev_list = kbase_dev_list_get();
3503
3504                 list_add(&kbdev->entry, &kbase_dev_list);
3505                 kbase_dev_list_put(dev_list);
3506         }
3507
3508         dev_info(kbdev->dev, "Probed as %s\n", dev_name(kbdev->mdev.this_device));
3509
3510         kbase_dev_nr++;
3511
3512         return 0;
3513
3514 out_misc:
3515         put_device(kbdev->dev);
3516         kbase_device_debugfs_term(kbdev);
3517 out_partial:
3518         if (inited & inited_job_fault)
3519                 kbase_debug_job_fault_dev_term(kbdev);
3520         if (inited & inited_ipa)
3521                 kbase_ipa_term(kbdev->ipa_ctx);
3522         if (inited & inited_vinstr)
3523                 kbase_vinstr_term(kbdev->vinstr_ctx);
3524 #ifdef CONFIG_MALI_DEVFREQ
3525         if (inited & inited_devfreq)
3526                 kbase_devfreq_term(kbdev);
3527 #endif /* CONFIG_MALI_DEVFREQ */
3528         if (inited & inited_backend_late)
3529                 kbase_backend_late_term(kbdev);
3530 #ifdef CONFIG_MALI_MIPE_ENABLED
3531         if (inited & inited_tlstream)
3532                 kbase_tlstream_term();
3533 #endif /* CONFIG_MALI_MIPE_ENABLED */
3534
3535         if (inited & inited_js)
3536                 kbasep_js_devdata_halt(kbdev);
3537
3538         if (inited & inited_mem)
3539                 kbase_mem_halt(kbdev);
3540
3541         if (inited & inited_js)
3542                 kbasep_js_devdata_term(kbdev);
3543
3544         if (inited & inited_mem)
3545                 kbase_mem_term(kbdev);
3546
3547         if (inited & inited_pm_runtime_init) {
3548                 if (kbdev->pm.callback_power_runtime_term)
3549                         kbdev->pm.callback_power_runtime_term(kbdev);
3550         }
3551
3552         if (inited & inited_device)
3553                 kbase_device_term(kbdev);
3554
3555         if (inited & inited_backend_early)
3556                 kbase_backend_early_term(kbdev);
3557
3558         return err;
3559 }
3560
3561
3562 static struct attribute *kbase_attrs[] = {
3563 #ifdef CONFIG_MALI_DEBUG
3564         &dev_attr_debug_command.attr,
3565         &dev_attr_js_softstop_always.attr,
3566 #endif
3567 #if !MALI_CUSTOMER_RELEASE
3568         &dev_attr_force_replay.attr,
3569 #endif
3570         &dev_attr_js_timeouts.attr,
3571         &dev_attr_gpuinfo.attr,
3572         &dev_attr_dvfs_period.attr,
3573         &dev_attr_pm_poweroff.attr,
3574         &dev_attr_reset_timeout.attr,
3575         &dev_attr_js_scheduling_period.attr,
3576         &dev_attr_power_policy.attr,
3577         &dev_attr_core_availability_policy.attr,
3578         &dev_attr_core_mask.attr,
3579         &dev_attr_mem_pool_size.attr,
3580         &dev_attr_mem_pool_max_size.attr,
3581         NULL
3582 };
3583
3584 static const struct attribute_group kbase_attr_group = {
3585         .attrs = kbase_attrs,
3586 };
3587
3588 static int kbase_common_device_remove(struct kbase_device *kbdev);
3589
3590 static int kbase_platform_device_probe(struct platform_device *pdev)
3591 {
3592         struct kbase_device *kbdev;
3593         struct resource *reg_res;
3594         int err = 0;
3595         int i;
3596
3597 #ifdef CONFIG_OF
3598         err = kbase_platform_early_init();
3599         if (err) {
3600                 dev_err(&pdev->dev, "Early platform initialization failed\n");
3601                 return err;
3602         }
3603 #endif
3604
3605         kbdev = kbase_device_alloc();
3606         if (!kbdev) {
3607                 dev_err(&pdev->dev, "Can't allocate device\n");
3608                 err = -ENOMEM;
3609                 goto out;
3610         }
3611 #ifdef CONFIG_MALI_NO_MALI
3612         err = gpu_device_create(kbdev);
3613         if (err) {
3614                 dev_err(&pdev->dev, "Can't initialize dummy model\n");
3615                 goto out_midg;
3616         }
3617 #endif /* CONFIG_MALI_NO_MALI */
3618
3619         kbdev->dev = &pdev->dev;
3620         /* 3 IRQ resources */
3621         for (i = 0; i < 3; i++) {
3622                 struct resource *irq_res;
3623                 int irqtag;
3624
3625                 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
3626                 if (!irq_res) {
3627                         dev_err(kbdev->dev, "No IRQ resource at index %d\n", i);
3628                         err = -ENOENT;
3629                         goto out_platform_irq;
3630                 }
3631
3632 #ifdef CONFIG_OF
3633                 if (!strcmp(irq_res->name, "JOB")) {
3634                         irqtag = JOB_IRQ_TAG;
3635                 } else if (!strcmp(irq_res->name, "MMU")) {
3636                         irqtag = MMU_IRQ_TAG;
3637                 } else if (!strcmp(irq_res->name, "GPU")) {
3638                         irqtag = GPU_IRQ_TAG;
3639                 } else {
3640                         dev_err(&pdev->dev, "Invalid irq res name: '%s'\n",
3641                                 irq_res->name);
3642                         err = -EINVAL;
3643                         goto out_irq_name;
3644                 }
3645 #else
3646                 irqtag = i;
3647 #endif /* CONFIG_OF */
3648                 kbdev->irqs[irqtag].irq = irq_res->start;
3649                 kbdev->irqs[irqtag].flags = (irq_res->flags & IRQF_TRIGGER_MASK);
3650         }
3651         /* the first memory resource is the physical address of the GPU
3652          * registers */
3653         reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3654         if (!reg_res) {
3655                 dev_err(kbdev->dev, "Invalid register resource\n");
3656                 err = -ENOENT;
3657                 goto out_platform_mem;
3658         }
3659
3660         kbdev->reg_start = reg_res->start;
3661         kbdev->reg_size = resource_size(reg_res);
3662
3663         err = kbase_common_reg_map(kbdev);
3664         if (err)
3665                 goto out_reg_map;
3666
3667 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
3668                         && defined(CONFIG_REGULATOR)
3669         kbdev->regulator = regulator_get_optional(kbdev->dev, "mali");
3670         if (IS_ERR_OR_NULL(kbdev->regulator)) {
3671                 err = PTR_ERR(kbdev->regulator);
3672
3673                 kbdev->regulator = NULL;
3674                 if (err == -EPROBE_DEFER)
3675                         goto out_regulator;
3676                 dev_info(kbdev->dev, "Continuing without Mali regulator control\n");
3677                 /* Allow probe to continue without regulator */
3678         }
3679 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
3680
3681 #ifdef CONFIG_MALI_PLATFORM_DEVICETREE
3682         pm_runtime_enable(kbdev->dev);
3683 #endif
3684
3685         kbdev->clock = clk_get(kbdev->dev, "clk_mali");
3686         if (IS_ERR_OR_NULL(kbdev->clock)) {
3687                 err = PTR_ERR(kbdev->clock);
3688
3689                 kbdev->clock = NULL;
3690                 if (err == -EPROBE_DEFER)
3691                         goto out_clock_prepare;
3692                 dev_info(kbdev->dev, "Continuing without Mali clock control\n");
3693                 /* Allow probe to continue without clock. */
3694         } else {
3695                 err = clk_prepare_enable(kbdev->clock);
3696                 if (err) {
3697                         dev_err(kbdev->dev,
3698                                 "Failed to prepare and enable clock (%d)\n", err);
3699                         goto out_clock_prepare;
3700                 }
3701         }
3702
3703 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) && defined(CONFIG_OF) \
3704                         && defined(CONFIG_PM_OPP)
3705         /* Register the OPPs if they are available in device tree */
3706         if (dev_pm_opp_of_add_table(kbdev->dev) < 0)
3707                 dev_dbg(kbdev->dev, "OPP table not found\n");
3708 #endif
3709
3710
3711         err = kbase_common_device_init(kbdev);
3712         if (err) {
3713                 dev_err(kbdev->dev, "Failed kbase_common_device_init\n");
3714                 goto out_common_init;
3715         }
3716
3717         err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
3718         if (err) {
3719                 dev_err(&pdev->dev, "Failed to create sysfs entries\n");
3720                 goto out_sysfs;
3721         }
3722
3723 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3724         err = bl_core_client_register(kbdev->devname,
3725                                                 kbase_logging_started_cb,
3726                                                 kbdev, &kbdev->buslogger,
3727                                                 THIS_MODULE, NULL);
3728         if (err) {
3729                 dev_err(kbdev->dev, "Couldn't register bus log client\n");
3730                 goto out_bl_core_register;
3731         }
3732
3733         bl_core_set_threshold(kbdev->buslogger, 1024*1024*1024);
3734 #endif
3735         return 0;
3736
3737 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3738 out_bl_core_register:
3739         sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
3740 #endif
3741
3742 out_sysfs:
3743         kbase_common_device_remove(kbdev);
3744 out_common_init:
3745 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
3746         dev_pm_opp_of_remove_table(kbdev->dev);
3747 #endif
3748         clk_disable_unprepare(kbdev->clock);
3749 out_clock_prepare:
3750         clk_put(kbdev->clock);
3751 #ifdef CONFIG_MALI_PLATFORM_DEVICETREE
3752         pm_runtime_disable(kbdev->dev);
3753 #endif
3754 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
3755                         && defined(CONFIG_REGULATOR)
3756 out_regulator:
3757         regulator_put(kbdev->regulator);
3758 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
3759         kbase_common_reg_unmap(kbdev);
3760 out_reg_map:
3761 out_platform_mem:
3762 #ifdef CONFIG_OF
3763 out_irq_name:
3764 #endif
3765 out_platform_irq:
3766 #ifdef CONFIG_MALI_NO_MALI
3767         gpu_device_destroy(kbdev);
3768 out_midg:
3769 #endif /* CONFIG_MALI_NO_MALI */
3770         kbase_device_free(kbdev);
3771 out:
3772         return err;
3773 }
3774
3775 static int kbase_common_device_remove(struct kbase_device *kbdev)
3776 {
3777         kbase_debug_job_fault_dev_term(kbdev);
3778         kbase_ipa_term(kbdev->ipa_ctx);
3779         kbase_vinstr_term(kbdev->vinstr_ctx);
3780         sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
3781
3782 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3783         if (kbdev->buslogger)
3784                 bl_core_client_unregister(kbdev->buslogger);
3785 #endif
3786
3787 #ifdef CONFIG_DEBUG_FS
3788         debugfs_remove_recursive(kbdev->mali_debugfs_directory);
3789 #endif
3790 #ifdef CONFIG_MALI_DEVFREQ
3791         kbase_devfreq_term(kbdev);
3792 #endif
3793
3794         kbase_backend_late_term(kbdev);
3795
3796         if (kbdev->pm.callback_power_runtime_term)
3797                 kbdev->pm.callback_power_runtime_term(kbdev);
3798 #ifdef CONFIG_MALI_PLATFORM_DEVICETREE
3799         pm_runtime_disable(kbdev->dev);
3800 #endif
3801
3802 #ifdef CONFIG_MALI_MIPE_ENABLED
3803         kbase_tlstream_term();
3804 #endif /* CONFIG_MALI_MIPE_ENABLED */
3805
3806         kbasep_js_devdata_halt(kbdev);
3807         kbase_mem_halt(kbdev);
3808
3809         kbasep_js_devdata_term(kbdev);
3810         kbase_mem_term(kbdev);
3811         kbase_backend_early_term(kbdev);
3812
3813         {
3814                 const struct list_head *dev_list = kbase_dev_list_get();
3815
3816                 list_del(&kbdev->entry);
3817                 kbase_dev_list_put(dev_list);
3818         }
3819         misc_deregister(&kbdev->mdev);
3820         put_device(kbdev->dev);
3821         kbase_common_reg_unmap(kbdev);
3822         kbase_device_term(kbdev);
3823         if (kbdev->clock) {
3824                 clk_disable_unprepare(kbdev->clock);
3825                 clk_put(kbdev->clock);
3826                 kbdev->clock = NULL;
3827         }
3828 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
3829                         && defined(CONFIG_REGULATOR)
3830         regulator_put(kbdev->regulator);
3831 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
3832 #ifdef CONFIG_MALI_NO_MALI
3833         gpu_device_destroy(kbdev);
3834 #endif /* CONFIG_MALI_NO_MALI */
3835         kbase_device_free(kbdev);
3836
3837         return 0;
3838 }
3839
3840 static int kbase_platform_device_remove(struct platform_device *pdev)
3841 {
3842         struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
3843
3844         if (!kbdev)
3845                 return -ENODEV;
3846
3847         return kbase_common_device_remove(kbdev);
3848 }
3849
3850 /** Suspend callback from the OS.
3851  *
3852  * This is called by Linux when the device should suspend.
3853  *
3854  * @param dev  The device to suspend
3855  *
3856  * @return A standard Linux error code
3857  */
3858 static int kbase_device_suspend(struct device *dev)
3859 {
3860         struct kbase_device *kbdev = to_kbase_device(dev);
3861
3862         if (!kbdev)
3863                 return -ENODEV;
3864
3865 #if defined(CONFIG_PM_DEVFREQ) && \
3866                 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3867         devfreq_suspend_device(kbdev->devfreq);
3868 #endif
3869
3870         kbase_pm_suspend(kbdev);
3871         return 0;
3872 }
3873
3874 /** Resume callback from the OS.
3875  *
3876  * This is called by Linux when the device should resume from suspension.
3877  *
3878  * @param dev  The device to resume
3879  *
3880  * @return A standard Linux error code
3881  */
3882 static int kbase_device_resume(struct device *dev)
3883 {
3884         struct kbase_device *kbdev = to_kbase_device(dev);
3885
3886         if (!kbdev)
3887                 return -ENODEV;
3888
3889         kbase_pm_resume(kbdev);
3890
3891 #if defined(CONFIG_PM_DEVFREQ) && \
3892                 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3893         devfreq_resume_device(kbdev->devfreq);
3894 #endif
3895         return 0;
3896 }
3897
3898 /** Runtime suspend callback from the OS.
3899  *
3900  * This is called by Linux when the device should prepare for a condition in which it will
3901  * not be able to communicate with the CPU(s) and RAM due to power management.
3902  *
3903  * @param dev  The device to suspend
3904  *
3905  * @return A standard Linux error code
3906  */
3907 #ifdef KBASE_PM_RUNTIME
3908 static int kbase_device_runtime_suspend(struct device *dev)
3909 {
3910         struct kbase_device *kbdev = to_kbase_device(dev);
3911
3912         if (!kbdev)
3913                 return -ENODEV;
3914
3915 #if defined(CONFIG_PM_DEVFREQ) && \
3916                 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3917         devfreq_suspend_device(kbdev->devfreq);
3918 #endif
3919
3920         if (kbdev->pm.backend.callback_power_runtime_off) {
3921                 kbdev->pm.backend.callback_power_runtime_off(kbdev);
3922                 dev_dbg(dev, "runtime suspend\n");
3923         }
3924         return 0;
3925 }
3926 #endif /* KBASE_PM_RUNTIME */
3927
3928 /** Runtime resume callback from the OS.
3929  *
3930  * This is called by Linux when the device should go into a fully active state.
3931  *
3932  * @param dev  The device to resume
3933  *
3934  * @return A standard Linux error code
3935  */
3936
3937 #ifdef KBASE_PM_RUNTIME
3938 static int kbase_device_runtime_resume(struct device *dev)
3939 {
3940         int ret = 0;
3941         struct kbase_device *kbdev = to_kbase_device(dev);
3942
3943         if (!kbdev)
3944                 return -ENODEV;
3945
3946         if (kbdev->pm.backend.callback_power_runtime_on) {
3947                 ret = kbdev->pm.backend.callback_power_runtime_on(kbdev);
3948                 dev_dbg(dev, "runtime resume\n");
3949         }
3950
3951 #if defined(CONFIG_PM_DEVFREQ) && \
3952                 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3953         devfreq_resume_device(kbdev->devfreq);
3954 #endif
3955
3956         return ret;
3957 }
3958 #endif /* KBASE_PM_RUNTIME */
3959
3960
3961 #ifdef KBASE_PM_RUNTIME
3962 /**
3963  * kbase_device_runtime_idle - Runtime idle callback from the OS.
3964  * @dev: The device to suspend
3965  *
3966  * This is called by Linux when the device appears to be inactive and it might
3967  * be placed into a low power state.
3968  *
3969  * Return: 0 if device can be suspended, non-zero to avoid runtime autosuspend,
3970  * otherwise a standard Linux error code
3971  */
3972 static int kbase_device_runtime_idle(struct device *dev)
3973 {
3974         struct kbase_device *kbdev = to_kbase_device(dev);
3975
3976         if (!kbdev)
3977                 return -ENODEV;
3978
3979         /* Use platform specific implementation if it exists. */
3980         if (kbdev->pm.backend.callback_power_runtime_idle)
3981                 return kbdev->pm.backend.callback_power_runtime_idle(kbdev);
3982
3983         return 0;
3984 }
3985 #endif /* KBASE_PM_RUNTIME */
3986
3987 /** The power management operations for the platform driver.
3988  */
3989 static const struct dev_pm_ops kbase_pm_ops = {
3990         .suspend = kbase_device_suspend,
3991         .resume = kbase_device_resume,
3992 #ifdef KBASE_PM_RUNTIME
3993         .runtime_suspend = kbase_device_runtime_suspend,
3994         .runtime_resume = kbase_device_runtime_resume,
3995         .runtime_idle = kbase_device_runtime_idle,
3996 #endif /* KBASE_PM_RUNTIME */
3997 };
3998
3999 #ifdef CONFIG_OF
4000 static const struct of_device_id kbase_dt_ids[] = {
4001         { .compatible = "arm,malit7xx" },
4002         { .compatible = "arm,mali-midgard" },
4003         { /* sentinel */ }
4004 };
4005 MODULE_DEVICE_TABLE(of, kbase_dt_ids);
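/* A GPU node in the device tree binds against one of the compatible strings
 * above; a sketch (unit address and properties are illustrative, the IRQ
 * names match those expected by kbase_platform_device_probe()):
 *
 *      gpu: gpu@ffa30000 {
 *              compatible = "arm,mali-midgard";
 *              interrupt-names = "JOB", "MMU", "GPU";
 *              ...
 *      };
 */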
4006 #endif
4007
4008 static struct platform_driver kbase_platform_driver = {
4009         .probe = kbase_platform_device_probe,
4010         .remove = kbase_platform_device_remove,
4011         .driver = {
4012                    .name = kbase_drv_name,
4013                    .owner = THIS_MODULE,
4014                    .pm = &kbase_pm_ops,
4015                    .of_match_table = of_match_ptr(kbase_dt_ids),
4016         },
4017 };
4018
4019 /*
4020  * The driver will not provide a shortcut to create the Mali platform device
4021  * anymore when using Device Tree.
4022  */
4023 #ifdef CONFIG_OF
4024 module_platform_driver(kbase_platform_driver);
4025 #else
4026
4027 static int __init rockchip_gpu_init_driver(void)
4028 {
4029         return platform_driver_register(&kbase_platform_driver);
4030 }
4031 late_initcall(rockchip_gpu_init_driver);
4032
4033 static int __init kbase_driver_init(void)
4034 {
4035         int ret;
4036
4037         ret = kbase_platform_early_init();
4038         if (ret)
4039                 return ret;
4040
4041 #ifndef CONFIG_MACH_MANTA
4042 #ifdef CONFIG_MALI_PLATFORM_FAKE
4043         ret = kbase_platform_fake_register();
4044         if (ret)
4045                 return ret;
4046 #endif
4047 #endif
4048         ret = platform_driver_register(&kbase_platform_driver);
4049 #ifndef CONFIG_MACH_MANTA
4050 #ifdef CONFIG_MALI_PLATFORM_FAKE
4051         if (ret)
4052                 kbase_platform_fake_unregister();
4053 #endif
4054 #endif
4055         return ret;
4056 }
4057
4058 static void __exit kbase_driver_exit(void)
4059 {
4060         platform_driver_unregister(&kbase_platform_driver);
4061 #ifndef CONFIG_MACH_MANTA
4062 #ifdef CONFIG_MALI_PLATFORM_FAKE
4063         kbase_platform_fake_unregister();
4064 #endif
4065 #endif
4066 }
4067
4068 module_init(kbase_driver_init);
4069 module_exit(kbase_driver_exit);
4070
4071 #endif /* CONFIG_OF */
4072
4073 MODULE_LICENSE("GPL");
4074 MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \
4075                 __stringify(BASE_UK_VERSION_MAJOR) "." \
4076                 __stringify(BASE_UK_VERSION_MINOR) ")");
4077
4078 #if defined(CONFIG_MALI_GATOR_SUPPORT) || defined(CONFIG_MALI_SYSTEM_TRACE)
4079 #define CREATE_TRACE_POINTS
4080 #endif
4081
4082 #ifdef CONFIG_MALI_GATOR_SUPPORT
4083 /* Create the trace points (otherwise we just get code to call a tracepoint) */
4084 #include "mali_linux_trace.h"
4085
4086 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_job_slots_event);
4087 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_status);
4088 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_on);
4089 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_off);
4090 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_page_fault_insert_pages);
4091 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_in_use);
4092 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_released);
4093 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_total_alloc_pages_change);
4094 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_sw_counter);
4095
4096 void kbase_trace_mali_pm_status(u32 event, u64 value)
4097 {
4098         trace_mali_pm_status(event, value);
4099 }
4100
4101 void kbase_trace_mali_pm_power_off(u32 event, u64 value)
4102 {
4103         trace_mali_pm_power_off(event, value);
4104 }
4105
4106 void kbase_trace_mali_pm_power_on(u32 event, u64 value)
4107 {
4108         trace_mali_pm_power_on(event, value);
4109 }
4110
4111 void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id)
4112 {
4113         trace_mali_job_slots_event(event, (kctx != NULL ? kctx->tgid : 0), (kctx != NULL ? kctx->pid : 0), atom_id);
4114 }
4115
4116 void kbase_trace_mali_page_fault_insert_pages(int event, u32 value)
4117 {
4118         trace_mali_page_fault_insert_pages(event, value);
4119 }
4120
4121 void kbase_trace_mali_mmu_as_in_use(int event)
4122 {
4123         trace_mali_mmu_as_in_use(event);
4124 }
4125
4126 void kbase_trace_mali_mmu_as_released(int event)
4127 {
4128         trace_mali_mmu_as_released(event);
4129 }
4130
4131 void kbase_trace_mali_total_alloc_pages_change(long long int event)
4132 {
4133         trace_mali_total_alloc_pages_change(event);
4134 }
4135 #endif /* CONFIG_MALI_GATOR_SUPPORT */
4136 #ifdef CONFIG_MALI_SYSTEM_TRACE
4137 #include "mali_linux_kbase_trace.h"
4138 #endif