/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */

#define ENABLE_DEBUG_LOG
#include "platform/rk/custom_log.h"

#include <mali_kbase.h>
#include <mali_kbase_config_defaults.h>
#include <mali_kbase_uku.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_gator.h>
#include <mali_kbase_mem_linux.h>
#ifdef CONFIG_MALI_DEVFREQ
#include <backend/gpu/mali_kbase_devfreq.h>
#endif /* CONFIG_MALI_DEVFREQ */
#ifdef CONFIG_MALI_NO_MALI
#include "mali_kbase_model_linux.h"
#endif /* CONFIG_MALI_NO_MALI */
#include "mali_kbase_mem_profile_debugfs_buf_size.h"
#include "mali_kbase_debug_mem_view.h"
#include "mali_kbase_mem.h"
#include "mali_kbase_mem_pool_debugfs.h"
#if !MALI_CUSTOMER_RELEASE
#include "mali_kbase_regs_dump_debugfs.h"
#endif /* !MALI_CUSTOMER_RELEASE */
#include <mali_kbase_hwaccess_backend.h>
#include <mali_kbase_hwaccess_jm.h>
#include <backend/gpu/mali_kbase_device_internal.h>

#ifdef CONFIG_KDS
#include <linux/kds.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#endif /* CONFIG_KDS */

#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/list.h>
#include <linux/semaphore.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/compat.h>       /* is_compat_task */
#include <linux/mman.h>
#include <linux/version.h>
#ifdef CONFIG_MALI_PLATFORM_DEVICETREE
#include <linux/pm_runtime.h>
#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */
#include <mali_kbase_hw.h>
#include <platform/mali_kbase_platform_common.h>
#ifdef CONFIG_MALI_PLATFORM_FAKE
#include <platform/mali_kbase_platform_fake.h>
#endif /* CONFIG_MALI_PLATFORM_FAKE */
#ifdef CONFIG_SYNC
#include <mali_kbase_sync.h>
#endif /* CONFIG_SYNC */
#ifdef CONFIG_PM_DEVFREQ
#include <linux/devfreq.h>
#endif /* CONFIG_PM_DEVFREQ */
#include <linux/clk.h>
#include <linux/delay.h>

#include <mali_kbase_config.h>


#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
#include <linux/pm_opp.h>
#else
#include <linux/opp.h>
#endif

#include <mali_kbase_tlstream.h>

#include <mali_kbase_as_fault_debugfs.h>

/* GPU IRQ Tags */
#define JOB_IRQ_TAG     0
#define MMU_IRQ_TAG     1
#define GPU_IRQ_TAG     2

#if MALI_UNIT_TEST
static struct kbase_exported_test_data shared_kernel_test_data;
EXPORT_SYMBOL(shared_kernel_test_data);
#endif /* MALI_UNIT_TEST */

/** rk_ext: version of the RK extension in this mali_kbase module, a.k.a. rk_ko_ver. */
#define ROCKCHIP_VERSION    (13)

static int kbase_dev_nr;

static DEFINE_MUTEX(kbase_dev_list_lock);
static LIST_HEAD(kbase_dev_list);

#define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
static inline void __compile_time_asserts(void)
{
        CSTD_COMPILE_TIME_ASSERT(sizeof(KERNEL_SIDE_DDK_VERSION_STRING) <= KBASE_GET_VERSION_BUFFER_SIZE);
}

#ifdef CONFIG_KDS

struct kbasep_kds_resource_set_file_data {
        struct kds_resource_set *lock;
};

static int kds_resource_release(struct inode *inode, struct file *file);

static const struct file_operations kds_resource_fops = {
        .release = kds_resource_release
};

struct kbase_kds_resource_list_data {
        struct kds_resource **kds_resources;
        unsigned long *kds_access_bitmap;
        int num_elems;
};

static int kds_resource_release(struct inode *inode, struct file *file)
{
        struct kbasep_kds_resource_set_file_data *data;

        data = (struct kbasep_kds_resource_set_file_data *)file->private_data;
        if (data) {
                if (data->lock)
                        kds_resource_set_release(&data->lock);

                kfree(data);
        }
        return 0;
}
#endif /* CONFIG_KDS */

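/*
 * kbase_create_timeline_objects - summarize existing GPU objects for tlstream
 * @kctx: kbase context that acquired the timeline stream
 *
 * Pushes LPU, address space, GPU and per-context objects into the timeline
 * summary stream so that a newly attached client sees the current state,
 * then resets the body streams and flushes everything to user space.
 */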
static void kbase_create_timeline_objects(struct kbase_context *kctx)
{
        struct kbase_device             *kbdev = kctx->kbdev;
        unsigned int                    lpu_id;
        unsigned int                    as_nr;
        struct kbasep_kctx_list_element *element;

        /* Create LPU objects. */
        for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
                u32 *lpu =
                        &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
                kbase_tlstream_tl_summary_new_lpu(lpu, lpu_id, *lpu);
        }

        /* Create Address Space objects. */
        for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
                kbase_tlstream_tl_summary_new_as(&kbdev->as[as_nr], as_nr);

        /* Create GPU object and make it retain all LPUs and address spaces. */
        kbase_tlstream_tl_summary_new_gpu(
                        kbdev,
                        kbdev->gpu_props.props.raw_props.gpu_id,
                        kbdev->gpu_props.num_cores);

        for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
                void *lpu =
                        &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
                kbase_tlstream_tl_summary_lifelink_lpu_gpu(lpu, kbdev);
        }
        for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
                kbase_tlstream_tl_summary_lifelink_as_gpu(
                                &kbdev->as[as_nr],
                                kbdev);

        /* Create object for each known context. */
        mutex_lock(&kbdev->kctx_list_lock);
        list_for_each_entry(element, &kbdev->kctx_list, link) {
                kbase_tlstream_tl_summary_new_ctx(
                                element->kctx,
                                (u32)(element->kctx->id),
                                (u32)(element->kctx->tgid));
        }
        /* Before releasing the lock, reset body stream buffers.
         * This will prevent context creation messages from being directed to
         * both the summary and body streams. */
        kbase_tlstream_reset_body_streams();
        mutex_unlock(&kbdev->kctx_list_lock);
        /* Static objects are placed into the summary packet, which needs to
         * be transmitted first. Flush all streams to make it available to
         * user space. */
        kbase_tlstream_flush_streams();
}

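/*
 * kbase_api_handshake - negotiate the UK interface version with userspace
 * @version: proposed major/minor version from userspace, updated in place
 *
 * For legacy majors that the kernel still supports, the matching legacy
 * version is echoed back; otherwise the kernel's own version is returned
 * and userspace may bail out if it cannot handle it.
 */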
static void kbase_api_handshake(struct uku_version_check_args *version)
{
        switch (version->major) {
#ifdef BASE_LEGACY_UK6_SUPPORT
        case 6:
                /* We are backwards compatible with version 6,
                 * so pretend to be the old version */
                version->major = 6;
                version->minor = 1;
                break;
#endif /* BASE_LEGACY_UK6_SUPPORT */
#ifdef BASE_LEGACY_UK7_SUPPORT
        case 7:
                /* We are backwards compatible with version 7,
                 * so pretend to be the old version */
                version->major = 7;
                version->minor = 1;
                break;
#endif /* BASE_LEGACY_UK7_SUPPORT */
#ifdef BASE_LEGACY_UK8_SUPPORT
        case 8:
                /* We are backwards compatible with version 8,
                 * so pretend to be the old version */
                version->major = 8;
                version->minor = 4;
                break;
#endif /* BASE_LEGACY_UK8_SUPPORT */
#ifdef BASE_LEGACY_UK9_SUPPORT
        case 9:
                /* We are backwards compatible with version 9,
                 * so pretend to be the old version */
                version->major = 9;
                version->minor = 0;
                break;
#endif /* BASE_LEGACY_UK9_SUPPORT */
        case BASE_UK_VERSION_MAJOR:
                /* set minor to be the lowest common */
                version->minor = min_t(int, BASE_UK_VERSION_MINOR,
                                (int)version->minor);
                break;
        default:
                /* We return our actual version regardless of whether it
                 * matches the version requested by userspace; userspace can
                 * bail if it can't handle this version */
                version->major = BASE_UK_VERSION_MAJOR;
                version->minor = BASE_UK_VERSION_MINOR;
                break;
        }
}

/**
 * enum mali_error - Mali error codes shared with userspace
 *
 * This is a subset of the common Mali errors that can be returned to
 * userspace. Values of matching user and kernel space enumerators MUST be
 * the same. MALI_ERROR_NONE is guaranteed to be 0.
 */
enum mali_error {
        MALI_ERROR_NONE = 0,
        MALI_ERROR_OUT_OF_GPU_MEMORY,
        MALI_ERROR_OUT_OF_MEMORY,
        MALI_ERROR_FUNCTION_FAILED,
};

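/* Bitflags recording which initialization stages of a kbase_device have
 * completed, so that a partially initialized device can be torn down again
 * in the probe error path. */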
enum {
        inited_mem = (1u << 0),
        inited_js = (1u << 1),
        inited_pm_runtime_init = (1u << 2),
#ifdef CONFIG_MALI_DEVFREQ
        inited_devfreq = (1u << 3),
#endif /* CONFIG_MALI_DEVFREQ */
        inited_tlstream = (1u << 4),
        inited_backend_early = (1u << 5),
        inited_backend_late = (1u << 6),
        inited_device = (1u << 7),
        inited_vinstr = (1u << 8),
#ifndef CONFIG_MALI_PRFCNT_SET_SECONDARY
        inited_ipa = (1u << 9),
#endif /* CONFIG_MALI_PRFCNT_SET_SECONDARY */
        inited_job_fault = (1u << 10),
        inited_misc_register = (1u << 11),
        inited_get_device = (1u << 12),
        inited_sysfs_group = (1u << 13),
        inited_dev_list = (1u << 14),
        inited_debugfs = (1u << 15),
        inited_gpu_device = (1u << 16),
        inited_registers_map = (1u << 17),
        inited_power_control = (1u << 19),
        inited_buslogger = (1u << 20)
};


#ifdef CONFIG_MALI_DEBUG
#define INACTIVE_WAIT_MS (5000)

void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive)
{
        kbdev->driver_inactive = inactive;
        wake_up(&kbdev->driver_inactive_wait);

        /* Wait for any running IOCTLs to complete */
        if (inactive)
                msleep(INACTIVE_WAIT_MS);
}
KBASE_EXPORT_TEST_API(kbase_set_driver_inactive);
#endif /* CONFIG_MALI_DEBUG */

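/*
 * kbase_dispatch - dispatch a single UK call to its handler
 * @kctx: calling kbase context
 * @args: argument block starting with a union uk_header
 * @args_size: size of the argument block, validated per call
 *
 * The version handshake and the SET_FLAGS setup call are handled first;
 * every other call is rejected until both have completed. Per-call failures
 * are reported through ukh->ret, while -EINVAL is returned for malformed
 * calls.
 */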
static int kbase_dispatch(struct kbase_context *kctx, void * const args, u32 args_size)
{
        struct kbase_device *kbdev;
        union uk_header *ukh = args;
        u32 id;
        int ret = 0;

        KBASE_DEBUG_ASSERT(ukh != NULL);

        kbdev = kctx->kbdev;
        id = ukh->id;
        ukh->ret = MALI_ERROR_NONE; /* Be optimistic */

#ifdef CONFIG_MALI_DEBUG
        wait_event(kbdev->driver_inactive_wait,
                        kbdev->driver_inactive == false);
#endif /* CONFIG_MALI_DEBUG */

        if (id == UKP_FUNC_ID_CHECK_VERSION) {
                struct uku_version_check_args *version_check;

                if (args_size != sizeof(struct uku_version_check_args)) {
                        ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        return 0;
                }
                version_check = (struct uku_version_check_args *)args;
                kbase_api_handshake(version_check);
                /* save the proposed version number for later use */
                kctx->api_version = KBASE_API_VERSION(version_check->major,
                                version_check->minor);
                ukh->ret = MALI_ERROR_NONE;
                return 0;
        }

        /* block calls until version handshake */
        if (kctx->api_version == 0)
                return -EINVAL;

        if (!atomic_read(&kctx->setup_complete)) {
                struct kbase_uk_set_flags *kbase_set_flags;

                /* Setup is pending; try to signal that we'll do the setup.
                 * If setup was already in progress, fail this call.
                 */
                if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0)
                        return -EINVAL;

                /* if this is an unexpected call, the context stays stuck in
                 * setup mode (SET_FLAGS is the only call accepted here)
                 */
                if (id != KBASE_FUNC_SET_FLAGS)
                        return -EINVAL;

                kbase_set_flags = (struct kbase_uk_set_flags *)args;

                /* if not matching the expected call, stay in setup mode */
                if (sizeof(*kbase_set_flags) != args_size)
                        goto bad_size;

                /* if bad flags, will stay stuck in setup mode */
                if (kbase_context_set_create_flags(kctx,
                                kbase_set_flags->create_flags) != 0)
                        ukh->ret = MALI_ERROR_FUNCTION_FAILED;

                atomic_set(&kctx->setup_complete, 1);
                return 0;
        }

        /* setup complete, perform normal operation */
        switch (id) {
        case KBASE_FUNC_MEM_JIT_INIT:
                {
                        struct kbase_uk_mem_jit_init *jit_init = args;

                        if (sizeof(*jit_init) != args_size)
                                goto bad_size;

                        if (kbase_region_tracker_init_jit(kctx,
                                        jit_init->va_pages))
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }
        case KBASE_FUNC_MEM_ALLOC:
                {
                        struct kbase_uk_mem_alloc *mem = args;
                        struct kbase_va_region *reg;

                        if (sizeof(*mem) != args_size)
                                goto bad_size;

#if defined(CONFIG_64BIT)
                        if (!kctx->is_compat) {
                                /* force SAME_VA if a 64-bit client */
                                mem->flags |= BASE_MEM_SAME_VA;
                        }
#endif

                        reg = kbase_mem_alloc(kctx, mem->va_pages,
                                        mem->commit_pages, mem->extent,
                                        &mem->flags, &mem->gpu_va,
                                        &mem->va_alignment);
                        if (!reg)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }
        case KBASE_FUNC_MEM_IMPORT: {
                        struct kbase_uk_mem_import *mem_import = args;
                        void __user *phandle;

                        if (sizeof(*mem_import) != args_size)
                                goto bad_size;
#ifdef CONFIG_COMPAT
                        if (kctx->is_compat)
                                phandle = compat_ptr(mem_import->phandle.compat_value);
                        else
#endif
                                phandle = mem_import->phandle.value;

                        if (mem_import->type == BASE_MEM_IMPORT_TYPE_INVALID) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

                        if (kbase_mem_import(kctx,
                                        (enum base_mem_import_type)
                                        mem_import->type,
                                        phandle,
                                        &mem_import->gpu_va,
                                        &mem_import->va_pages,
                                        &mem_import->flags)) {
                                mem_import->type = BASE_MEM_IMPORT_TYPE_INVALID;
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        }
                        break;
        }
        case KBASE_FUNC_MEM_ALIAS: {
                        struct kbase_uk_mem_alias *alias = args;
                        struct base_mem_aliasing_info __user *user_ai;
                        struct base_mem_aliasing_info *ai;

                        if (sizeof(*alias) != args_size)
                                goto bad_size;

                        if (alias->nents > 2048) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }
                        if (!alias->nents) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

#ifdef CONFIG_COMPAT
                        if (kctx->is_compat)
                                user_ai = compat_ptr(alias->ai.compat_value);
                        else
#endif
                                user_ai = alias->ai.value;

                        ai = vmalloc(sizeof(*ai) * alias->nents);

                        if (!ai) {
                                ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
                                break;
                        }

                        if (copy_from_user(ai, user_ai,
                                           sizeof(*ai) * alias->nents)) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                goto copy_failed;
                        }

                        alias->gpu_va = kbase_mem_alias(kctx, &alias->flags,
                                                        alias->stride,
                                                        alias->nents, ai,
                                                        &alias->va_pages);
                        if (!alias->gpu_va) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                goto no_alias;
                        }
no_alias:
copy_failed:
                        vfree(ai);
                        break;
                }
        case KBASE_FUNC_MEM_COMMIT:
                {
                        struct kbase_uk_mem_commit *commit = args;

                        if (sizeof(*commit) != args_size)
                                goto bad_size;

                        if (commit->gpu_addr & ~PAGE_MASK) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_COMMIT: commit->gpu_addr: passed parameter is invalid");
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

                        if (kbase_mem_commit(kctx, commit->gpu_addr,
                                        commit->pages,
                                        (base_backing_threshold_status *)
                                        &commit->result_subcode) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;

                        break;
                }

        case KBASE_FUNC_MEM_QUERY:
                {
                        struct kbase_uk_mem_query *query = args;

                        if (sizeof(*query) != args_size)
                                goto bad_size;

                        if (query->gpu_addr & ~PAGE_MASK) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->gpu_addr: passed parameter is invalid");
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }
                        if (query->query != KBASE_MEM_QUERY_COMMIT_SIZE &&
                            query->query != KBASE_MEM_QUERY_VA_SIZE &&
                            query->query != KBASE_MEM_QUERY_FLAGS) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->query = %llu unknown", (unsigned long long)query->query);
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

                        if (kbase_mem_query(kctx, query->gpu_addr,
                                        query->query, &query->value) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        else
                                ukh->ret = MALI_ERROR_NONE;
                        break;
                }

        case KBASE_FUNC_MEM_FLAGS_CHANGE:
                {
                        struct kbase_uk_mem_flags_change *fc = args;

                        if (sizeof(*fc) != args_size)
                                goto bad_size;

                        if ((fc->gpu_va & ~PAGE_MASK) && (fc->gpu_va >= PAGE_SIZE)) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FLAGS_CHANGE: fc->gpu_va: passed parameter is invalid");
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

                        if (kbase_mem_flags_change(kctx, fc->gpu_va,
                                        fc->flags, fc->mask) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;

                        break;
                }
        case KBASE_FUNC_MEM_FREE:
                {
                        struct kbase_uk_mem_free *mem = args;

                        if (sizeof(*mem) != args_size)
                                goto bad_size;

                        if ((mem->gpu_addr & ~PAGE_MASK) && (mem->gpu_addr >= PAGE_SIZE)) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FREE: mem->gpu_addr: passed parameter is invalid");
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

                        if (kbase_mem_free(kctx, mem->gpu_addr) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }

        case KBASE_FUNC_JOB_SUBMIT:
                {
                        struct kbase_uk_job_submit *job = args;

                        if (sizeof(*job) != args_size)
                                goto bad_size;

#ifdef BASE_LEGACY_UK6_SUPPORT
                        if (kbase_jd_submit(kctx, job, 0) != 0)
#else
                        if (kbase_jd_submit(kctx, job) != 0)
#endif /* BASE_LEGACY_UK6_SUPPORT */
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }

#ifdef BASE_LEGACY_UK6_SUPPORT
        case KBASE_FUNC_JOB_SUBMIT_UK6:
                {
                        struct kbase_uk_job_submit *job = args;

                        if (sizeof(*job) != args_size)
                                goto bad_size;

                        if (kbase_jd_submit(kctx, job, 1) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }
#endif

        case KBASE_FUNC_SYNC:
                {
                        struct kbase_uk_sync_now *sn = args;

                        if (sizeof(*sn) != args_size)
                                goto bad_size;

                        if (sn->sset.basep_sset.mem_handle.basep.handle & ~PAGE_MASK) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_SYNC: sn->sset.basep_sset.mem_handle: passed parameter is invalid");
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

#ifndef CONFIG_MALI_COH_USER
                        if (kbase_sync_now(kctx, &sn->sset) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
#endif
                        break;
                }

        case KBASE_FUNC_DISJOINT_QUERY:
                {
                        struct kbase_uk_disjoint_query *dquery = args;

                        if (sizeof(*dquery) != args_size)
                                goto bad_size;

                        /* Get the disjointness counter value. */
                        dquery->counter = kbase_disjoint_event_get(kctx->kbdev);
                        break;
                }

        case KBASE_FUNC_POST_TERM:
                {
                        kbase_event_close(kctx);
                        break;
                }

        case KBASE_FUNC_HWCNT_SETUP:
                {
                        struct kbase_uk_hwcnt_setup *setup = args;

                        if (sizeof(*setup) != args_size)
                                goto bad_size;

                        mutex_lock(&kctx->vinstr_cli_lock);
                        if (kbase_vinstr_legacy_hwc_setup(kbdev->vinstr_ctx,
                                        &kctx->vinstr_cli, setup) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        mutex_unlock(&kctx->vinstr_cli_lock);
                        break;
                }

        case KBASE_FUNC_HWCNT_DUMP:
                {
                        /* args ignored */
                        mutex_lock(&kctx->vinstr_cli_lock);
                        if (kbase_vinstr_hwc_dump(kctx->vinstr_cli,
                                        BASE_HWCNT_READER_EVENT_MANUAL) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        mutex_unlock(&kctx->vinstr_cli_lock);
                        break;
                }

        case KBASE_FUNC_HWCNT_CLEAR:
                {
                        /* args ignored */
                        mutex_lock(&kctx->vinstr_cli_lock);
                        if (kbase_vinstr_hwc_clear(kctx->vinstr_cli) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        mutex_unlock(&kctx->vinstr_cli_lock);
                        break;
                }

        case KBASE_FUNC_HWCNT_READER_SETUP:
                {
                        struct kbase_uk_hwcnt_reader_setup *setup = args;

                        if (sizeof(*setup) != args_size)
                                goto bad_size;

                        mutex_lock(&kctx->vinstr_cli_lock);
                        if (kbase_vinstr_hwcnt_reader_setup(kbdev->vinstr_ctx,
                                        setup) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        mutex_unlock(&kctx->vinstr_cli_lock);
                        break;
                }

        case KBASE_FUNC_GPU_PROPS_REG_DUMP:
                {
                        struct kbase_uk_gpuprops *setup = args;

                        if (sizeof(*setup) != args_size)
                                goto bad_size;

                        if (kbase_gpuprops_uk_get_props(kctx, setup) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }
        case KBASE_FUNC_FIND_CPU_OFFSET:
                {
                        struct kbase_uk_find_cpu_offset *find = args;

                        if (sizeof(*find) != args_size)
                                goto bad_size;

                        if (find->gpu_addr & ~PAGE_MASK) {
                                dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_FIND_CPU_OFFSET: find->gpu_addr: passed parameter is invalid");
                                goto out_bad;
                        }

                        if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        } else {
                                int err;

                                err = kbasep_find_enclosing_cpu_mapping_offset(
                                                kctx,
                                                find->gpu_addr,
                                                (uintptr_t) find->cpu_addr,
                                                (size_t) find->size,
                                                &find->offset);

                                if (err)
                                        ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        }
                        break;
                }
        case KBASE_FUNC_GET_VERSION:
                {
                        struct kbase_uk_get_ddk_version *get_version = (struct kbase_uk_get_ddk_version *)args;

                        if (sizeof(*get_version) != args_size)
                                goto bad_size;

                        /* the version buffer size is checked by a compile-time assert */
                        memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
                        get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
                        get_version->rk_version = ROCKCHIP_VERSION;
                        break;
                }

        case KBASE_FUNC_STREAM_CREATE:
                {
#ifdef CONFIG_SYNC
                        struct kbase_uk_stream_create *screate = (struct kbase_uk_stream_create *)args;

                        if (sizeof(*screate) != args_size)
                                goto bad_size;

                        if (strnlen(screate->name, sizeof(screate->name)) >= sizeof(screate->name)) {
                                /* not NULL terminated */
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }

                        if (kbase_stream_create(screate->name, &screate->fd) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        else
                                ukh->ret = MALI_ERROR_NONE;
#else /* CONFIG_SYNC */
                        ukh->ret = MALI_ERROR_FUNCTION_FAILED;
#endif /* CONFIG_SYNC */
                        break;
                }
        case KBASE_FUNC_FENCE_VALIDATE:
                {
#ifdef CONFIG_SYNC
                        struct kbase_uk_fence_validate *fence_validate = (struct kbase_uk_fence_validate *)args;

                        if (sizeof(*fence_validate) != args_size)
                                goto bad_size;

                        if (kbase_fence_validate(fence_validate->fd) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        else
                                ukh->ret = MALI_ERROR_NONE;
#endif /* CONFIG_SYNC */
                        break;
                }

        case KBASE_FUNC_SET_TEST_DATA:
                {
#if MALI_UNIT_TEST
                        struct kbase_uk_set_test_data *set_data = args;

                        shared_kernel_test_data = set_data->test_data;
                        shared_kernel_test_data.kctx.value = (void __user *)kctx;
                        shared_kernel_test_data.mm.value = (void __user *)current->mm;
                        ukh->ret = MALI_ERROR_NONE;
#endif /* MALI_UNIT_TEST */
                        break;
                }

        case KBASE_FUNC_INJECT_ERROR:
                {
#ifdef CONFIG_MALI_ERROR_INJECT
                        unsigned long flags;
                        struct kbase_error_params params = ((struct kbase_uk_error_params *)args)->params;

                        spin_lock_irqsave(&kbdev->reg_op_lock, flags);
                        if (job_atom_inject_error(&params) != 0)
                                ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
                        else
                                ukh->ret = MALI_ERROR_NONE;
                        spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
#endif /* CONFIG_MALI_ERROR_INJECT */
                        break;
                }

        case KBASE_FUNC_MODEL_CONTROL:
                {
#ifdef CONFIG_MALI_NO_MALI
                        unsigned long flags;
                        struct kbase_model_control_params params =
                                        ((struct kbase_uk_model_control_params *)args)->params;

                        spin_lock_irqsave(&kbdev->reg_op_lock, flags);
                        if (gpu_model_control(kbdev->model, &params) != 0)
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        else
                                ukh->ret = MALI_ERROR_NONE;
                        spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
#endif /* CONFIG_MALI_NO_MALI */
                        break;
                }

#ifdef BASE_LEGACY_UK8_SUPPORT
        case KBASE_FUNC_KEEP_GPU_POWERED:
                {
                        dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_KEEP_GPU_POWERED: function is deprecated and disabled\n");
                        ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }
#endif /* BASE_LEGACY_UK8_SUPPORT */

        case KBASE_FUNC_GET_PROFILING_CONTROLS:
                {
                        struct kbase_uk_profiling_controls *controls =
                                        (struct kbase_uk_profiling_controls *)args;
                        u32 i;

                        if (sizeof(*controls) != args_size)
                                goto bad_size;

                        for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
                                controls->profiling_controls[i] = kbase_get_profiling_control(kbdev, i);

                        break;
                }

        /* used only for testing purposes; these controls are to be set by gator through the gator API */
        case KBASE_FUNC_SET_PROFILING_CONTROLS:
                {
                        struct kbase_uk_profiling_controls *controls =
                                        (struct kbase_uk_profiling_controls *)args;
                        u32 i;

                        if (sizeof(*controls) != args_size)
                                goto bad_size;

                        for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
                                _mali_profiling_control(i, controls->profiling_controls[i]);

                        break;
                }

        case KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD:
                {
                        struct kbase_uk_debugfs_mem_profile_add *add_data =
                                        (struct kbase_uk_debugfs_mem_profile_add *)args;
                        char *buf;
                        char __user *user_buf;

                        if (sizeof(*add_data) != args_size)
                                goto bad_size;

                        if (add_data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
                                dev_err(kbdev->dev, "buffer too big\n");
                                goto out_bad;
                        }

#ifdef CONFIG_COMPAT
                        if (kctx->is_compat)
                                user_buf = compat_ptr(add_data->buf.compat_value);
                        else
#endif
                                user_buf = add_data->buf.value;

                        buf = kmalloc(add_data->len, GFP_KERNEL);
                        if (!buf)
                                goto out_bad;

                        if (copy_from_user(buf, user_buf, add_data->len)) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                kfree(buf);
                                goto out_bad;
                        }

                        if (kbasep_mem_profile_debugfs_insert(kctx, buf,
                                                        add_data->len)) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                kfree(buf);
                                goto out_bad;
                        }

                        break;
                }

#ifdef CONFIG_MALI_NO_MALI
        case KBASE_FUNC_SET_PRFCNT_VALUES:
                {
                        struct kbase_uk_prfcnt_values *params =
                                        ((struct kbase_uk_prfcnt_values *)args);

                        gpu_model_set_dummy_prfcnt_sample(params->data,
                                        params->size);

                        break;
                }
#endif /* CONFIG_MALI_NO_MALI */

        case KBASE_FUNC_TLSTREAM_ACQUIRE:
                {
                        struct kbase_uk_tlstream_acquire *tlstream_acquire =
                                args;

                        if (sizeof(*tlstream_acquire) != args_size)
                                goto bad_size;

                        if (kbase_tlstream_acquire(kctx,
                                        &tlstream_acquire->fd) != 0) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        } else if (tlstream_acquire->fd >= 0) {
                                /* Summary stream was cleared during acquire.
                                 * Create static timeline objects that will be
                                 * read by client. */
                                kbase_create_timeline_objects(kctx);
                        }
                        break;
                }
        case KBASE_FUNC_TLSTREAM_FLUSH:
                {
                        struct kbase_uk_tlstream_flush *tlstream_flush =
                                args;

                        if (sizeof(*tlstream_flush) != args_size)
                                goto bad_size;

                        kbase_tlstream_flush_streams();
                        break;
                }
#if MALI_UNIT_TEST
        case KBASE_FUNC_TLSTREAM_TEST:
                {
                        struct kbase_uk_tlstream_test *tlstream_test = args;

                        if (sizeof(*tlstream_test) != args_size)
                                goto bad_size;

                        kbase_tlstream_test(
                                        tlstream_test->tpw_count,
                                        tlstream_test->msg_delay,
                                        tlstream_test->msg_count,
                                        tlstream_test->aux_msg);
                        break;
                }
        case KBASE_FUNC_TLSTREAM_STATS:
                {
                        struct kbase_uk_tlstream_stats *tlstream_stats = args;

                        if (sizeof(*tlstream_stats) != args_size)
                                goto bad_size;

                        kbase_tlstream_stats(
                                        &tlstream_stats->bytes_collected,
                                        &tlstream_stats->bytes_generated);
                        break;
                }
#endif /* MALI_UNIT_TEST */

        case KBASE_FUNC_GET_CONTEXT_ID:
                {
                        struct kbase_uk_context_id *info = args;

                        info->id = kctx->id;
                        break;
                }

        case KBASE_FUNC_SOFT_EVENT_UPDATE:
                {
                        struct kbase_uk_soft_event_update *update = args;

                        if (sizeof(*update) != args_size)
                                goto bad_size;

                        if (((update->new_status != BASE_JD_SOFT_EVENT_SET) &&
                            (update->new_status != BASE_JD_SOFT_EVENT_RESET)) ||
                            (update->flags != 0))
                                goto out_bad;

                        if (kbase_soft_event_update(kctx, update->evt,
                                                update->new_status))
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;

                        break;
                }

        default:
                dev_err(kbdev->dev, "unknown ioctl %u\n", id);
                goto out_bad;
        }

        return ret;

 bad_size:
        dev_err(kbdev->dev, "Wrong syscall size (%u) for %08x\n", args_size, id);
 out_bad:
        return -EINVAL;
}

static struct kbase_device *to_kbase_device(struct device *dev)
{
        return dev_get_drvdata(dev);
}

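/*
 * assign_irqs - collect the JOB, MMU and GPU interrupt lines
 * @pdev: platform device carrying the three IRQ resources
 *
 * With device tree the resources are matched by name; otherwise they are
 * taken in JOB, MMU, GPU order. IRQ numbers and trigger flags are stored
 * in kbdev->irqs[] under the corresponding *_IRQ_TAG index.
 */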
static int assign_irqs(struct platform_device *pdev)
{
        struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
        int i;

        if (!kbdev)
                return -ENODEV;

        /* 3 IRQ resources */
        for (i = 0; i < 3; i++) {
                struct resource *irq_res;
                int irqtag;

                irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
                if (!irq_res) {
                        dev_err(kbdev->dev, "No IRQ resource at index %d\n", i);
                        return -ENOENT;
                }

#ifdef CONFIG_OF
                if (!strcmp(irq_res->name, "JOB")) {
                        irqtag = JOB_IRQ_TAG;
                } else if (!strcmp(irq_res->name, "MMU")) {
                        irqtag = MMU_IRQ_TAG;
                } else if (!strcmp(irq_res->name, "GPU")) {
                        irqtag = GPU_IRQ_TAG;
                } else {
                        dev_err(&pdev->dev, "Invalid irq res name: '%s'\n",
                                irq_res->name);
                        return -EINVAL;
                }
#else
                irqtag = i;
#endif /* CONFIG_OF */
                kbdev->irqs[irqtag].irq = irq_res->start;
                kbdev->irqs[irqtag].flags = irq_res->flags & IRQF_TRIGGER_MASK;
        }

        return 0;
}

/*
 * API to acquire the device list mutex and
 * return a pointer to the device list head
 */
const struct list_head *kbase_dev_list_get(void)
{
        mutex_lock(&kbase_dev_list_lock);
        return &kbase_dev_list;
}
KBASE_EXPORT_TEST_API(kbase_dev_list_get);

/* API to release the device list mutex */
void kbase_dev_list_put(const struct list_head *dev_list)
{
        mutex_unlock(&kbase_dev_list_lock);
}
KBASE_EXPORT_TEST_API(kbase_dev_list_put);

/* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */
struct kbase_device *kbase_find_device(int minor)
{
        struct kbase_device *kbdev = NULL;
        struct list_head *entry;
        const struct list_head *dev_list = kbase_dev_list_get();

        list_for_each(entry, dev_list) {
                struct kbase_device *tmp;

                tmp = list_entry(entry, struct kbase_device, entry);
                if (tmp->mdev.minor == minor || minor == -1) {
                        kbdev = tmp;
                        get_device(kbdev->dev);
                        break;
                }
        }
        kbase_dev_list_put(dev_list);

        return kbdev;
}
EXPORT_SYMBOL(kbase_find_device);

void kbase_release_device(struct kbase_device *kbdev)
{
        put_device(kbdev->dev);
}
EXPORT_SYMBOL(kbase_release_device);

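/*
 * kbase_open - open() handler for the kbase device file
 *
 * Looks up the device by minor number, creates a new kbase_context for the
 * calling process, wires it to the file, registers its debugfs entries and
 * adds it to the device's context list.
 */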
static int kbase_open(struct inode *inode, struct file *filp)
{
        struct kbase_device *kbdev = NULL;
        struct kbase_context *kctx;
        int ret = 0;
#ifdef CONFIG_DEBUG_FS
        char kctx_name[64];
#endif

        kbdev = kbase_find_device(iminor(inode));

        if (!kbdev)
                return -ENODEV;

        kctx = kbase_create_context(kbdev, is_compat_task());
        if (!kctx) {
                ret = -ENOMEM;
                goto out;
        }

        init_waitqueue_head(&kctx->event_queue);
        filp->private_data = kctx;
        kctx->filp = filp;

        kctx->infinite_cache_active = kbdev->infinite_cache_active_default;

#ifdef CONFIG_DEBUG_FS
        snprintf(kctx_name, sizeof(kctx_name), "%d_%u", kctx->tgid, kctx->id);

        kctx->kctx_dentry = debugfs_create_dir(kctx_name,
                        kbdev->debugfs_ctx_directory);

        if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
                ret = -ENOMEM;
                goto out;
        }

#ifdef CONFIG_MALI_COH_USER
        /* if the cache is completely coherent at the hardware level, then
         * remove the infinite cache control support from debugfs.
         */
#else
        debugfs_create_bool("infinite_cache", 0644, kctx->kctx_dentry,
                        (bool *)&(kctx->infinite_cache_active));
#endif /* CONFIG_MALI_COH_USER */

        mutex_init(&kctx->mem_profile_lock);

        kbasep_jd_debugfs_ctx_add(kctx);
        kbase_debug_mem_view_init(filp);

        kbase_debug_job_fault_context_init(kctx);

        kbase_mem_pool_debugfs_add(kctx->kctx_dentry, &kctx->mem_pool);

        kbase_jit_debugfs_add(kctx);
#endif /* CONFIG_DEBUG_FS */

        dev_dbg(kbdev->dev, "created base context\n");

        {
                struct kbasep_kctx_list_element *element;

                element = kzalloc(sizeof(*element), GFP_KERNEL);
                if (element) {
                        mutex_lock(&kbdev->kctx_list_lock);
                        element->kctx = kctx;
                        list_add(&element->link, &kbdev->kctx_list);
                        kbase_tlstream_tl_new_ctx(
                                        element->kctx,
                                        (u32)(element->kctx->id),
                                        (u32)(element->kctx->tgid));
                        mutex_unlock(&kbdev->kctx_list_lock);
                } else {
                        /* we don't treat this as a fail - just warn about it */
                        dev_warn(kbdev->dev, "couldn't add kctx to kctx_list\n");
                }
        }
        return 0;

 out:
        kbase_release_device(kbdev);
        return ret;
}

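/*
 * kbase_release - release() handler for the kbase device file
 *
 * Tears down the context created in kbase_open(): removes it from the
 * device's context list and from debugfs, detaches any leftover vinstr
 * client, then destroys the context and drops the device reference.
 */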
static int kbase_release(struct inode *inode, struct file *filp)
{
        struct kbase_context *kctx = filp->private_data;
        struct kbase_device *kbdev = kctx->kbdev;
        struct kbasep_kctx_list_element *element, *tmp;
        bool found_element = false;

        kbase_tlstream_tl_del_ctx(kctx);

#ifdef CONFIG_DEBUG_FS
        debugfs_remove_recursive(kctx->kctx_dentry);
        kbasep_mem_profile_debugfs_remove(kctx);
        kbase_debug_job_fault_context_term(kctx);
#endif

        mutex_lock(&kbdev->kctx_list_lock);
        list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
                if (element->kctx == kctx) {
                        list_del(&element->link);
                        kfree(element);
                        found_element = true;
                }
        }
        mutex_unlock(&kbdev->kctx_list_lock);
        if (!found_element)
                dev_warn(kbdev->dev, "kctx not in kctx_list\n");

        filp->private_data = NULL;

        mutex_lock(&kctx->vinstr_cli_lock);
        /* If this client was performing hwcnt dumping and did not explicitly
         * detach itself, remove it from the vinstr core now */
        if (kctx->vinstr_cli) {
                struct kbase_uk_hwcnt_setup setup;

                setup.dump_buffer = 0llu;
                kbase_vinstr_legacy_hwc_setup(
                                kbdev->vinstr_ctx, &kctx->vinstr_cli, &setup);
        }
        mutex_unlock(&kctx->vinstr_cli_lock);

        kbase_destroy_context(kctx);

        dev_dbg(kbdev->dev, "deleted base context\n");
        kbase_release_device(kbdev);
        return 0;
}

#define CALL_MAX_SIZE 536

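/*
 * kbase_ioctl - unlocked_ioctl handler marshalling a UK call through
 * kbase_dispatch()
 *
 * The argument block is bounce-buffered in both directions: copied from
 * userspace into a u64-aligned buffer, dispatched, then copied back so the
 * handler's results (including ukh->ret) reach the caller.
 */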
static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        u64 msg[(CALL_MAX_SIZE + 7) >> 3] = { 0xdeadbeefdeadbeefull };  /* alignment fixup */
        u32 size = _IOC_SIZE(cmd);
        struct kbase_context *kctx = filp->private_data;

        if (size > CALL_MAX_SIZE)
                return -ENOTTY;

        if (copy_from_user(&msg, (void __user *)arg, size)) {
                dev_err(kctx->kbdev->dev, "failed to copy ioctl argument into kernel space\n");
                return -EFAULT;
        }

        if (kbase_dispatch(kctx, &msg, size) != 0)
                return -EFAULT;

        if (copy_to_user((void __user *)arg, &msg, size)) {
                dev_err(kctx->kbdev->dev, "failed to copy results of UK call back to user space\n");
                return -EFAULT;
        }
        return 0;
}

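/*
 * kbase_read - read() handler delivering completed job events
 *
 * Dequeues as many struct base_jd_event_v2 records as fit in the buffer.
 * Blocks (unless O_NONBLOCK) while no event is pending, and returns -EPIPE
 * when the driver signals termination before any event was copied out.
 */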
static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
        struct kbase_context *kctx = filp->private_data;
        struct base_jd_event_v2 uevent;
        int out_count = 0;

        if (count < sizeof(uevent))
                return -ENOBUFS;

        do {
                while (kbase_event_dequeue(kctx, &uevent)) {
                        if (out_count > 0)
                                goto out;

                        if (filp->f_flags & O_NONBLOCK)
                                return -EAGAIN;

                        if (wait_event_interruptible(kctx->event_queue,
                                        kbase_event_pending(kctx)) != 0)
                                return -ERESTARTSYS;
                }
                if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
                        if (out_count == 0)
                                return -EPIPE;
                        goto out;
                }

                if (copy_to_user(buf, &uevent, sizeof(uevent)) != 0)
                        return -EFAULT;

                buf += sizeof(uevent);
                out_count++;
                count -= sizeof(uevent);
        } while (count >= sizeof(uevent));

 out:
        return out_count * sizeof(uevent);
}

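/*
 * kbase_poll - poll() handler; reports POLLIN when a job event is queued
 */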
static unsigned int kbase_poll(struct file *filp, poll_table *wait)
{
        struct kbase_context *kctx = filp->private_data;

        poll_wait(filp, &kctx->event_queue, wait);
        if (kbase_event_pending(kctx))
                return POLLIN | POLLRDNORM;

        return 0;
}

void kbase_event_wakeup(struct kbase_context *kctx)
{
        KBASE_DEBUG_ASSERT(kctx);

        wake_up_interruptible(&kctx->event_queue);
}

KBASE_EXPORT_TEST_API(kbase_event_wakeup);

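/*
 * kbase_check_flags - validate open() flags for the kbase device file
 *
 * O_CLOEXEC is mandatory so that a forked child cannot inherit the GPU
 * context file descriptor across execve().
 */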
1355 static int kbase_check_flags(int flags)
1356 {
1357         /* Enforce that the driver keeps the O_CLOEXEC flag so that execve() always
1358          * closes the file descriptor in a child process.
1359          */
1360         if (0 == (flags & O_CLOEXEC))
1361                 return -EINVAL;
1362
1363         return 0;
1364 }
1365
1366 #ifdef CONFIG_64BIT
1367 /* The following function is taken from the kernel and just
1368  * renamed. As it's not exported to modules we must copy-paste it here.
1369  */
1370
1371 static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info
1372                 *info)
1373 {
1374         struct mm_struct *mm = current->mm;
1375         struct vm_area_struct *vma;
1376         unsigned long length, low_limit, high_limit, gap_start, gap_end;
1377
1378         /* Adjust search length to account for worst case alignment overhead */
1379         length = info->length + info->align_mask;
1380         if (length < info->length)
1381                 return -ENOMEM;
1382
1383         /*
1384          * Adjust search limits by the desired length.
1385          * See implementation comment at top of unmapped_area().
1386          */
1387         gap_end = info->high_limit;
1388         if (gap_end < length)
1389                 return -ENOMEM;
1390         high_limit = gap_end - length;
1391
1392         if (info->low_limit > high_limit)
1393                 return -ENOMEM;
1394         low_limit = info->low_limit + length;
1395
1396         /* Check highest gap, which does not precede any rbtree node */
1397         gap_start = mm->highest_vm_end;
1398         if (gap_start <= high_limit)
1399                 goto found_highest;
1400
1401         /* Check if rbtree root looks promising */
1402         if (RB_EMPTY_ROOT(&mm->mm_rb))
1403                 return -ENOMEM;
1404         vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1405         if (vma->rb_subtree_gap < length)
1406                 return -ENOMEM;
1407
1408         while (true) {
1409                 /* Visit right subtree if it looks promising */
1410                 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
1411                 if (gap_start <= high_limit && vma->vm_rb.rb_right) {
1412                         struct vm_area_struct *right =
1413                                 rb_entry(vma->vm_rb.rb_right,
1414                                          struct vm_area_struct, vm_rb);
1415                         if (right->rb_subtree_gap >= length) {
1416                                 vma = right;
1417                                 continue;
1418                         }
1419                 }
1420
1421 check_current:
1422                 /* Check if current node has a suitable gap */
1423                 gap_end = vma->vm_start;
1424                 if (gap_end < low_limit)
1425                         return -ENOMEM;
1426                 if (gap_start <= high_limit && gap_end - gap_start >= length)
1427                         goto found;
1428
1429                 /* Visit left subtree if it looks promising */
1430                 if (vma->vm_rb.rb_left) {
1431                         struct vm_area_struct *left =
1432                                 rb_entry(vma->vm_rb.rb_left,
1433                                          struct vm_area_struct, vm_rb);
1434                         if (left->rb_subtree_gap >= length) {
1435                                 vma = left;
1436                                 continue;
1437                         }
1438                 }
1439
1440                 /* Go back up the rbtree to find next candidate node */
1441                 while (true) {
1442                         struct rb_node *prev = &vma->vm_rb;
1443                         if (!rb_parent(prev))
1444                                 return -ENOMEM;
1445                         vma = rb_entry(rb_parent(prev),
1446                                        struct vm_area_struct, vm_rb);
1447                         if (prev == vma->vm_rb.rb_right) {
1448                                 gap_start = vma->vm_prev ?
1449                                         vma->vm_prev->vm_end : 0;
1450                                 goto check_current;
1451                         }
1452                 }
1453         }
1454
1455 found:
1456         /* We found a suitable gap. Clip it with the original high_limit. */
1457         if (gap_end > info->high_limit)
1458                 gap_end = info->high_limit;
1459
1460 found_highest:
1461         /* Compute highest gap address at the desired alignment */
1462         gap_end -= info->length;
1463         gap_end -= (gap_end - info->align_offset) & info->align_mask;
1464
1465         VM_BUG_ON(gap_end < info->low_limit);
1466         VM_BUG_ON(gap_end < gap_start);
1467         return gap_end;
1468 }
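
/*
 * Worked example for the alignment step above, using the SZ_2M alignment
 * that kbase_get_unmapped_area() below requests for large mappings
 * (numbers are illustrative): with info->length = 0x300000,
 * info->align_offset = SZ_2M and info->align_mask = SZ_2M - 1, a gap
 * ending at gap_end = 0x12345000 becomes 0x12045000 once the length is
 * subtracted, and "(gap_end - info->align_offset) & info->align_mask"
 * == 0x45000 is then subtracted to give 0x12000000: the highest
 * 2MB-aligned address at which the mapping still fits inside the gap.
 */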
1469
1470
1471 static unsigned long kbase_get_unmapped_area(struct file *filp,
1472                 const unsigned long addr, const unsigned long len,
1473                 const unsigned long pgoff, const unsigned long flags)
1474 {
1475         /* Based on get_unmapped_area(), but simplified slightly because
1476          * some values are known in advance. */
1477         struct kbase_context *kctx = filp->private_data;
1478         struct mm_struct *mm = current->mm;
1479         struct vm_unmapped_area_info info;
1480
1481         /* err on fixed address */
1482         if ((flags & MAP_FIXED) || addr)
1483                 return -EINVAL;
1484
1485         /* too big? */
1486         if (len > TASK_SIZE - SZ_2M)
1487                 return -ENOMEM;
1488
1489         if (kctx->is_compat)
1490                 return current->mm->get_unmapped_area(filp, addr, len, pgoff,
1491                                 flags);
1492
1493         if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA)) {
1494                 info.high_limit = kctx->same_va_end << PAGE_SHIFT;
1495                 info.align_mask = 0;
1496                 info.align_offset = 0;
1497         } else {
1498                 info.high_limit = min_t(unsigned long, mm->mmap_base,
1499                                         (kctx->same_va_end << PAGE_SHIFT));
1500                 if (len >= SZ_2M) {
1501                         info.align_offset = SZ_2M;
1502                         info.align_mask = SZ_2M - 1;
1503                 } else {
1504                         info.align_mask = 0;
1505                         info.align_offset = 0;
1506                 }
1507         }
1508
1509         info.flags = 0;
1510         info.length = len;
1511         info.low_limit = SZ_2M;
1512         return kbase_unmapped_area_topdown(&info);
1513 }
1514 #endif
1515
1516 static const struct file_operations kbase_fops = {
1517         .owner = THIS_MODULE,
1518         .open = kbase_open,
1519         .release = kbase_release,
1520         .read = kbase_read,
1521         .poll = kbase_poll,
1522         .unlocked_ioctl = kbase_ioctl,
1523         .compat_ioctl = kbase_ioctl,
1524         .mmap = kbase_mmap,
1525         .check_flags = kbase_check_flags,
1526 #ifdef CONFIG_64BIT
1527         .get_unmapped_area = kbase_get_unmapped_area,
1528 #endif
1529 };
1530
1531 #ifndef CONFIG_MALI_NO_MALI
1532 void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value)
1533 {
1534         writel(value, kbdev->reg + offset);
1535 }
1536
1537 u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset)
1538 {
1539         return readl(kbdev->reg + offset);
1540 }
1541 #endif /* !CONFIG_MALI_NO_MALI */
1542
1543 /** Show callback for the @c power_policy sysfs file.
1544  *
1545  * This function is called to get the contents of the @c power_policy sysfs
1546  * file. This is a list of the available policies with the currently active one
1547  * surrounded by square brackets.
1548  *
1549  * @param dev   The device this sysfs file is for
1550  * @param attr  The attributes of the sysfs file
1551  * @param buf   The output buffer for the sysfs file contents
1552  *
1553  * @return The number of bytes output to @c buf.
1554  */
1555 static ssize_t show_policy(struct device *dev, struct device_attribute *attr, char *const buf)
1556 {
1557         struct kbase_device *kbdev;
1558         const struct kbase_pm_policy *current_policy;
1559         const struct kbase_pm_policy *const *policy_list;
1560         int policy_count;
1561         int i;
1562         ssize_t ret = 0;
1563
1564         kbdev = to_kbase_device(dev);
1565
1566         if (!kbdev)
1567                 return -ENODEV;
1568
1569         current_policy = kbase_pm_get_policy(kbdev);
1570
1571         policy_count = kbase_pm_list_policies(&policy_list);
1572
1573         for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
1574                 if (policy_list[i] == current_policy)
1575                         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
1576                 else
1577                         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
1578         }
1579
1580         if (ret < PAGE_SIZE - 1) {
1581                 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
1582         } else {
1583                 buf[PAGE_SIZE - 2] = '\n';
1584                 buf[PAGE_SIZE - 1] = '\0';
1585                 ret = PAGE_SIZE - 1;
1586         }
1587
1588         return ret;
1589 }
1590
1591 /** Store callback for the @c power_policy sysfs file.
1592  *
1593  * This function is called when the @c power_policy sysfs file is written to.
1594  * It matches the requested policy against the available policies and if a
1595  * matching policy is found calls @ref kbase_pm_set_policy to change the
1596  * policy.
1597  *
1598  * @param dev   The device this sysfs file is for
1599  * @param attr  The attributes of the sysfs file
1600  * @param buf   The value written to the sysfs file
1601  * @param count The number of bytes written to the sysfs file
1602  *
1603  * @return @c count if the function succeeded. An error code on failure.
1604  */
1605 static ssize_t set_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1606 {
1607         struct kbase_device *kbdev;
1608         const struct kbase_pm_policy *new_policy = NULL;
1609         const struct kbase_pm_policy *const *policy_list;
1610         int policy_count;
1611         int i;
1612
1613         kbdev = to_kbase_device(dev);
1614
1615         if (!kbdev)
1616                 return -ENODEV;
1617
1618         policy_count = kbase_pm_list_policies(&policy_list);
1619
1620         for (i = 0; i < policy_count; i++) {
1621                 if (sysfs_streq(policy_list[i]->name, buf)) {
1622                         new_policy = policy_list[i];
1623                         break;
1624                 }
1625         }
1626
1627         if (!new_policy) {
1628                 dev_err(dev, "power_policy: policy not found\n");
1629                 return -EINVAL;
1630         }
1631
1632         kbase_pm_set_policy(kbdev, new_policy);
1633
1634         return count;
1635 }
1636
1637 /** The sysfs file @c power_policy.
1638  *
1639  * This is used for obtaining information about the available policies,
1640  * determining which policy is currently active, and changing the active
1641  * policy.
1642  */
1643 static DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
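
/*
 * Example usage (sketch; the exact policy names depend on which policies
 * are compiled in, "demand", "coarse_demand" and "always_on" being
 * typical, and the sysfs path depends on the platform):
 *
 *   # cat /sys/devices/.../power_policy
 *   [demand] coarse_demand always_on
 *   # echo always_on > /sys/devices/.../power_policy
 */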
1644
1645 /** Show callback for the @c core_availability_policy sysfs file.
1646  *
1647  * This function is called to get the contents of the @c core_availability_policy
1648  * sysfs file. This is a list of the available policies with the currently
1649  * active one surrounded by square brackets.
1650  *
1651  * @param dev   The device this sysfs file is for
1652  * @param attr  The attributes of the sysfs file
1653  * @param buf   The output buffer for the sysfs file contents
1654  *
1655  * @return The number of bytes output to @c buf.
1656  */
1657 static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char * const buf)
1658 {
1659         struct kbase_device *kbdev;
1660         const struct kbase_pm_ca_policy *current_policy;
1661         const struct kbase_pm_ca_policy *const *policy_list;
1662         int policy_count;
1663         int i;
1664         ssize_t ret = 0;
1665
1666         kbdev = to_kbase_device(dev);
1667
1668         if (!kbdev)
1669                 return -ENODEV;
1670
1671         current_policy = kbase_pm_ca_get_policy(kbdev);
1672
1673         policy_count = kbase_pm_ca_list_policies(&policy_list);
1674
1675         for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
1676                 if (policy_list[i] == current_policy)
1677                         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
1678                 else
1679                         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
1680         }
1681
1682         if (ret < PAGE_SIZE - 1) {
1683                 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
1684         } else {
1685                 buf[PAGE_SIZE - 2] = '\n';
1686                 buf[PAGE_SIZE - 1] = '\0';
1687                 ret = PAGE_SIZE - 1;
1688         }
1689
1690         return ret;
1691 }
1692
1693 /** Store callback for the @c core_availability_policy sysfs file.
1694  *
1695  * This function is called when the @c core_availability_policy sysfs file is
1696  * written to. It matches the requested policy against the available policies
1697  * and if a matching policy is found calls @ref kbase_pm_ca_set_policy to change
1698  * the policy.
1699  *
1700  * @param dev   The device this sysfs file is for
1701  * @param attr  The attributes of the sysfs file
1702  * @param buf   The value written to the sysfs file
1703  * @param count The number of bytes written to the sysfs file
1704  *
1705  * @return @c count if the function succeeded. An error code on failure.
1706  */
1707 static ssize_t set_ca_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1708 {
1709         struct kbase_device *kbdev;
1710         const struct kbase_pm_ca_policy *new_policy = NULL;
1711         const struct kbase_pm_ca_policy *const *policy_list;
1712         int policy_count;
1713         int i;
1714
1715         kbdev = to_kbase_device(dev);
1716
1717         if (!kbdev)
1718                 return -ENODEV;
1719
1720         policy_count = kbase_pm_ca_list_policies(&policy_list);
1721
1722         for (i = 0; i < policy_count; i++) {
1723                 if (sysfs_streq(policy_list[i]->name, buf)) {
1724                         new_policy = policy_list[i];
1725                         break;
1726                 }
1727         }
1728
1729         if (!new_policy) {
1730                 dev_err(dev, "core_availability_policy: policy not found\n");
1731                 return -EINVAL;
1732         }
1733
1734         kbase_pm_ca_set_policy(kbdev, new_policy);
1735
1736         return count;
1737 }
1738
1739 /** The sysfs file @c core_availability_policy
1740  *
1741  * This is used for obtaining information about the available policies,
1742  * determining which policy is currently active, and changing the active
1743  * policy.
1744  */
1745 static DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_policy);
1746
1747 /** Show callback for the @c core_mask sysfs file.
1748  *
1749  * This function is called to get the contents of the @c core_mask sysfs
1750  * file.
1751  *
1752  * @param dev   The device this sysfs file is for
1753  * @param attr  The attributes of the sysfs file
1754  * @param buf   The output buffer for the sysfs file contents
1755  *
1756  * @return The number of bytes output to @c buf.
1757  */
1758 static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char * const buf)
1759 {
1760         struct kbase_device *kbdev;
1761         ssize_t ret = 0;
1762
1763         kbdev = to_kbase_device(dev);
1764
1765         if (!kbdev)
1766                 return -ENODEV;
1767
1768         ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1769                         "Current core mask (JS0) : 0x%llX\n",
1770                         kbdev->pm.debug_core_mask[0]);
1771         ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1772                         "Current core mask (JS1) : 0x%llX\n",
1773                         kbdev->pm.debug_core_mask[1]);
1774         ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1775                         "Current core mask (JS2) : 0x%llX\n",
1776                         kbdev->pm.debug_core_mask[2]);
1777         ret += scnprintf(buf + ret, PAGE_SIZE - ret,
1778                         "Available core mask : 0x%llX\n",
1779                         kbdev->gpu_props.props.raw_props.shader_present);
1780
1781         return ret;
1782 }
1783
1784 /** Store callback for the @c core_mask sysfs file.
1785  *
1786  * This function is called when the @c core_mask sysfs file is written to.
1787  *
1788  * @param dev   The device this sysfs file is for
1789  * @param attr  The attributes of the sysfs file
1790  * @param buf   The value written to the sysfs file
1791  * @param count The number of bytes written to the sysfs file
1792  *
1793  * @return @c count if the function succeeded. An error code on failure.
1794  */
1795 static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1796 {
1797         struct kbase_device *kbdev;
1798         u64 new_core_mask[3];
1799         int items;
1800
1801         kbdev = to_kbase_device(dev);
1802
1803         if (!kbdev)
1804                 return -ENODEV;
1805
1806         items = sscanf(buf, "%llx %llx %llx",
1807                         &new_core_mask[0], &new_core_mask[1],
1808                         &new_core_mask[2]);
1809
1810         if (items == 1)
1811                 new_core_mask[1] = new_core_mask[2] = new_core_mask[0];
1812
1813         if (items == 1 || items == 3) {
1814                 u64 shader_present =
1815                                 kbdev->gpu_props.props.raw_props.shader_present;
1816                 u64 group0_core_mask =
1817                                 kbdev->gpu_props.props.coherency_info.group[0].
1818                                 core_mask;
1819
1820                 if ((new_core_mask[0] & shader_present) != new_core_mask[0] ||
1821                                 !(new_core_mask[0] & group0_core_mask) ||
1822                         (new_core_mask[1] & shader_present) !=
1823                                                 new_core_mask[1] ||
1824                                 !(new_core_mask[1] & group0_core_mask) ||
1825                         (new_core_mask[2] & shader_present) !=
1826                                                 new_core_mask[2] ||
1827                                 !(new_core_mask[2] & group0_core_mask)) {
1828                         dev_err(dev, "core_mask: invalid core specification\n");
1829                         return -EINVAL;
1830                 }
1831
1832                 if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] ||
1833                                 kbdev->pm.debug_core_mask[1] !=
1834                                                 new_core_mask[1] ||
1835                                 kbdev->pm.debug_core_mask[2] !=
1836                                                 new_core_mask[2]) {
1837                         unsigned long flags;
1838
1839                         spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
1840
1841                         kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0],
1842                                         new_core_mask[1], new_core_mask[2]);
1843
1844                         spin_unlock_irqrestore(&kbdev->pm.power_change_lock,
1845                                         flags);
1846                 }
1847
1848                 return count;
1849         }
1850
1851         dev_err(kbdev->dev, "Couldn't process set_core_mask write operation.\n"
1852                 "Use format <core_mask>\n"
1853                 "or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
1854         return -EINVAL;
1855 }
1856
1857 /** The sysfs file @c core_mask.
1858  *
1859  * This is used to restrict shader core availability for debugging purposes.
1860  * Reading it will show the current core mask and the mask of cores available.
1861  * Writing to it will set the current core mask.
1862  */
1863 static DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask);
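
/*
 * Example usage (sketch): writing a single mask applies it to all three
 * job slots, while three masks set them individually. Each mask must be
 * a subset of the available cores and must overlap core group 0:
 *
 *   # echo 0xf > /sys/devices/.../core_mask
 *   # echo 0xf 0xf 0x3 > /sys/devices/.../core_mask
 */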
1864
1865 /**
1866  * set_soft_job_timeout() - Store callback for the soft_job_timeout sysfs
1867  * file.
1868  *
1869  * @dev: The device this sysfs file is for.
1870  * @attr: The attributes of the sysfs file.
1871  * @buf: The value written to the sysfs file.
1872  * @count: The number of bytes written to the sysfs file.
1873  *
1874  * This allows setting the timeout for software jobs. Soft event wait jobs
1875  * still waiting when this period expires are cancelled, while soft fence
1876  * wait jobs print debug information if the fence debug feature is enabled.
1877  *
1878  * This is expressed in milliseconds.
1879  *
1880  * Return: count if the function succeeded. An error code on failure.
1881  */
1882 static ssize_t set_soft_job_timeout(struct device *dev,
1883                                       struct device_attribute *attr,
1884                                       const char *buf, size_t count)
1885 {
1886         struct kbase_device *kbdev;
1887         int soft_job_timeout_ms;
1888
1889         kbdev = to_kbase_device(dev);
1890         if (!kbdev)
1891                 return -ENODEV;
1892
1893         if ((kstrtoint(buf, 0, &soft_job_timeout_ms) != 0) ||
1894             (soft_job_timeout_ms <= 0))
1895                 return -EINVAL;
1896
1897         atomic_set(&kbdev->js_data.soft_job_timeout_ms,
1898                    soft_job_timeout_ms);
1899
1900         return count;
1901 }
1902
1903 /**
1904  * show_soft_job_timeout() - Show callback for the soft_job_timeout sysfs
1905  * file.
1906  *
1907  * This will return the timeout for the software jobs.
1908  *
1909  * @dev: The device this sysfs file is for.
1910  * @attr: The attributes of the sysfs file.
1911  * @buf: The output buffer for the sysfs file contents.
1912  *
1913  * Return: The number of bytes output to buf.
1914  */
1915 static ssize_t show_soft_job_timeout(struct device *dev,
1916                                        struct device_attribute *attr,
1917                                        char * const buf)
1918 {
1919         struct kbase_device *kbdev;
1920
1921         kbdev = to_kbase_device(dev);
1922         if (!kbdev)
1923                 return -ENODEV;
1924
1925         return scnprintf(buf, PAGE_SIZE, "%i\n",
1926                          atomic_read(&kbdev->js_data.soft_job_timeout_ms));
1927 }
1928
1929 static DEVICE_ATTR(soft_job_timeout, S_IRUGO | S_IWUSR,
1930                    show_soft_job_timeout, set_soft_job_timeout);
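
/*
 * Example usage (sketch): the value is a positive integer number of
 * milliseconds, e.g. a 3 second soft job timeout:
 *
 *   # echo 3000 > /sys/devices/.../soft_job_timeout
 */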
1931
1932 static u32 timeout_ms_to_ticks(struct kbase_device *kbdev, long timeout_ms,
1933                                 int default_ticks, u32 old_ticks)
1934 {
1935         if (timeout_ms > 0) {
1936                 u64 ticks = timeout_ms * 1000000ULL;
1937                 do_div(ticks, kbdev->js_data.scheduling_period_ns);
1938                 if (!ticks)
1939                         return 1;
1940                 return ticks;
1941         } else if (timeout_ms < 0) {
1942                 return default_ticks;
1943         } else {
1944                 return old_ticks;
1945         }
1946 }
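
/*
 * Worked example for timeout_ms_to_ticks(): assuming the scheduling
 * period is at its usual default of 100ms (scheduling_period_ns =
 * 100000000), timeout_ms = 3000 yields 3000 * 1000000 / 100000000 = 30
 * ticks. A negative timeout_ms selects default_ticks, and 0 keeps
 * old_ticks unchanged.
 */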
1947
1948 /** Store callback for the @c js_timeouts sysfs file.
1949  *
1950  * This function is called when the @c js_timeouts sysfs file is written to.
1951  * The file contains eight values separated by whitespace, corresponding to
1952  * the JS_SOFT_STOP_TICKS, JS_SOFT_STOP_TICKS_CL, JS_HARD_STOP_TICKS_SS,
1953  * JS_HARD_STOP_TICKS_CL, JS_HARD_STOP_TICKS_DUMPING, JS_RESET_TICKS_SS,
1954  * JS_RESET_TICKS_CL and JS_RESET_TICKS_DUMPING configuration values (in
1955  * that order), with the difference that the js_timeouts values are
1956  * expressed in MILLISECONDS.
1957  *
1958  * The js_timeouts sysfs file allows the values currently in use by the
1959  * job scheduler to be overridden; a value must be non-zero to override.
1960  *
1961  * @param dev   The device this sysfs file is for
1962  * @param attr  The attributes of the sysfs file
1963  * @param buf   The value written to the sysfs file
1964  * @param count The number of bytes written to the sysfs file
1965  *
1966  * @return @c count if the function succeeded. An error code on failure.
1967  */
1968 static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1969 {
1970         struct kbase_device *kbdev;
1971         int items;
1972         long js_soft_stop_ms;
1973         long js_soft_stop_ms_cl;
1974         long js_hard_stop_ms_ss;
1975         long js_hard_stop_ms_cl;
1976         long js_hard_stop_ms_dumping;
1977         long js_reset_ms_ss;
1978         long js_reset_ms_cl;
1979         long js_reset_ms_dumping;
1980
1981         kbdev = to_kbase_device(dev);
1982         if (!kbdev)
1983                 return -ENODEV;
1984
1985         items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld",
1986                         &js_soft_stop_ms, &js_soft_stop_ms_cl,
1987                         &js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
1988                         &js_hard_stop_ms_dumping, &js_reset_ms_ss,
1989                         &js_reset_ms_cl, &js_reset_ms_dumping);
1990
1991         if (items == 8) {
1992                 struct kbasep_js_device_data *js_data = &kbdev->js_data;
1993                 unsigned long flags;
1994
1995                 spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
1996
1997 #define UPDATE_TIMEOUT(ticks_name, ms_name, default) do {\
1998         js_data->ticks_name = timeout_ms_to_ticks(kbdev, ms_name, \
1999                         default, js_data->ticks_name); \
2000         dev_dbg(kbdev->dev, "Overriding " #ticks_name \
2001                         " with %lu ticks (%lu ms)\n", \
2002                         (unsigned long)js_data->ticks_name, \
2003                         ms_name); \
2004         } while (0)
2005
2006                 UPDATE_TIMEOUT(soft_stop_ticks, js_soft_stop_ms,
2007                                 DEFAULT_JS_SOFT_STOP_TICKS);
2008                 UPDATE_TIMEOUT(soft_stop_ticks_cl, js_soft_stop_ms_cl,
2009                                 DEFAULT_JS_SOFT_STOP_TICKS_CL);
2010                 UPDATE_TIMEOUT(hard_stop_ticks_ss, js_hard_stop_ms_ss,
2011                                 kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408) ?
2012                                 DEFAULT_JS_HARD_STOP_TICKS_SS_8408 :
2013                                 DEFAULT_JS_HARD_STOP_TICKS_SS);
2014                 UPDATE_TIMEOUT(hard_stop_ticks_cl, js_hard_stop_ms_cl,
2015                                 DEFAULT_JS_HARD_STOP_TICKS_CL);
2016                 UPDATE_TIMEOUT(hard_stop_ticks_dumping,
2017                                 js_hard_stop_ms_dumping,
2018                                 DEFAULT_JS_HARD_STOP_TICKS_DUMPING);
2019                 UPDATE_TIMEOUT(gpu_reset_ticks_ss, js_reset_ms_ss,
2020                                 kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408) ?
2021                                 DEFAULT_JS_RESET_TICKS_SS_8408 :
2022                                 DEFAULT_JS_RESET_TICKS_SS);
2023                 UPDATE_TIMEOUT(gpu_reset_ticks_cl, js_reset_ms_cl,
2024                                 DEFAULT_JS_RESET_TICKS_CL);
2025                 UPDATE_TIMEOUT(gpu_reset_ticks_dumping, js_reset_ms_dumping,
2026                                 DEFAULT_JS_RESET_TICKS_DUMPING);
2027
2028                 kbase_js_set_timeouts(kbdev);
2029
2030                 spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
2031
2032                 return count;
2033         }
2034
2035         dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n"
2036                         "Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> <hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> <reset_ms_dumping>\n"
2037                         "Write 0 for no change, -1 to restore default timeout\n");
2038         return -EINVAL;
2039 }
2040
2041 static unsigned long get_js_timeout_in_ms(
2042                 u32 scheduling_period_ns,
2043                 u32 ticks)
2044 {
2045         u64 ms = (u64)ticks * scheduling_period_ns;
2046
2047         do_div(ms, 1000000UL);
2048         return ms;
2049 }
2050
2051 /** Show callback for the @c js_timeouts sysfs file.
2052  *
2053  * This function is called to get the contents of the @c js_timeouts sysfs
2054  * file. It returns the values last written to the js_timeouts sysfs file.
2055  * If the file has not been written yet, the values returned are the
2056  * settings currently in use.
2057  * @param dev   The device this sysfs file is for
2058  * @param attr  The attributes of the sysfs file
2059  * @param buf   The output buffer for the sysfs file contents
2060  *
2061  * @return The number of bytes output to @c buf.
2062  */
2063 static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *attr, char * const buf)
2064 {
2065         struct kbase_device *kbdev;
2066         ssize_t ret;
2067         unsigned long js_soft_stop_ms;
2068         unsigned long js_soft_stop_ms_cl;
2069         unsigned long js_hard_stop_ms_ss;
2070         unsigned long js_hard_stop_ms_cl;
2071         unsigned long js_hard_stop_ms_dumping;
2072         unsigned long js_reset_ms_ss;
2073         unsigned long js_reset_ms_cl;
2074         unsigned long js_reset_ms_dumping;
2075         u32 scheduling_period_ns;
2076
2077         kbdev = to_kbase_device(dev);
2078         if (!kbdev)
2079                 return -ENODEV;
2080
2081         scheduling_period_ns = kbdev->js_data.scheduling_period_ns;
2082
2083 #define GET_TIMEOUT(name) get_js_timeout_in_ms(\
2084                 scheduling_period_ns, \
2085                 kbdev->js_data.name)
2086
2087         js_soft_stop_ms = GET_TIMEOUT(soft_stop_ticks);
2088         js_soft_stop_ms_cl = GET_TIMEOUT(soft_stop_ticks_cl);
2089         js_hard_stop_ms_ss = GET_TIMEOUT(hard_stop_ticks_ss);
2090         js_hard_stop_ms_cl = GET_TIMEOUT(hard_stop_ticks_cl);
2091         js_hard_stop_ms_dumping = GET_TIMEOUT(hard_stop_ticks_dumping);
2092         js_reset_ms_ss = GET_TIMEOUT(gpu_reset_ticks_ss);
2093         js_reset_ms_cl = GET_TIMEOUT(gpu_reset_ticks_cl);
2094         js_reset_ms_dumping = GET_TIMEOUT(gpu_reset_ticks_dumping);
2095
2096 #undef GET_TIMEOUT
2097
2098         ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
2099                         js_soft_stop_ms, js_soft_stop_ms_cl,
2100                         js_hard_stop_ms_ss, js_hard_stop_ms_cl,
2101                         js_hard_stop_ms_dumping, js_reset_ms_ss,
2102                         js_reset_ms_cl, js_reset_ms_dumping);
2103
2104         if (ret >= PAGE_SIZE) {
2105                 buf[PAGE_SIZE - 2] = '\n';
2106                 buf[PAGE_SIZE - 1] = '\0';
2107                 ret = PAGE_SIZE - 1;
2108         }
2109
2110         return ret;
2111 }
2112
2113 /** The sysfs file @c js_timeouts.
2114  *
2115  * This is used to override the current job scheduler values for
2116  * JS_SOFT_STOP_TICKS
2117  * JS_SOFT_STOP_TICKS_CL
2118  * JS_HARD_STOP_TICKS_SS
2119  * JS_HARD_STOP_TICKS_CL
2120  * JS_HARD_STOP_TICKS_DUMPING
2121  * JS_RESET_TICKS_SS
2122  * JS_RESET_TICKS_CL
2123  * JS_RESET_TICKS_DUMPING.
2124  */
2125 static DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts);
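
/*
 * Example usage (sketch): eight millisecond values in the order listed
 * above; 0 leaves a timeout unchanged and -1 restores its default, so
 * the following overrides only the two soft-stop timeouts:
 *
 *   # echo 500 500 0 0 0 0 0 0 > /sys/devices/.../js_timeouts
 */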
2126
2127 static u32 get_new_js_timeout(
2128                 u32 old_period,
2129                 u32 old_ticks,
2130                 u32 new_scheduling_period_ns)
2131 {
2132         u64 ticks = (u64)old_period * (u64)old_ticks;
2133         do_div(ticks, new_scheduling_period_ns);
2134         return ticks ? ticks : 1;
2135 }
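
/*
 * Worked example for get_new_js_timeout(): the rescaling keeps the
 * wall-clock timeout roughly constant across a period change, e.g.
 * old_ticks = 30 at old_period = 100000000 (100ms) becomes
 * 100000000 * 30 / 50000000 = 60 ticks when the period is halved to 50ms.
 */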
2136
2137 /**
2138  * set_js_scheduling_period - Store callback for the js_scheduling_period sysfs
2139  *                            file
2140  * @dev:   The device the sysfs file is for
2141  * @attr:  The attributes of the sysfs file
2142  * @buf:   The value written to the sysfs file
2143  * @count: The number of bytes written to the sysfs file
2144  *
2145  * This function is called when the js_scheduling_period sysfs file is written
2146  * to. It checks the data written, and if valid updates the js_scheduling_period
2147  * value.
2148  *
2149  * Return: @c count if the function succeeded. An error code on failure.
2150  */
2151 static ssize_t set_js_scheduling_period(struct device *dev,
2152                 struct device_attribute *attr, const char *buf, size_t count)
2153 {
2154         struct kbase_device *kbdev;
2155         int ret;
2156         unsigned int js_scheduling_period;
2157         u32 new_scheduling_period_ns;
2158         u32 old_period;
2159         struct kbasep_js_device_data *js_data;
2160         unsigned long flags;
2161
2162         kbdev = to_kbase_device(dev);
2163         if (!kbdev)
2164                 return -ENODEV;
2165
2166         js_data = &kbdev->js_data;
2167
2168         ret = kstrtouint(buf, 0, &js_scheduling_period);
2169         if (ret || !js_scheduling_period) {
2170                 dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n"
2171                                 "Use format <js_scheduling_period_ms>\n");
2172                 return -EINVAL;
2173         }
2174
2175         new_scheduling_period_ns = js_scheduling_period * 1000000;
2176
2177         /* Update scheduling timeouts */
2178         mutex_lock(&js_data->runpool_mutex);
2179         spin_lock_irqsave(&js_data->runpool_irq.lock, flags);
2180
2181         /* If no contexts have been scheduled since js_timeouts was last written
2182          * to, the new timeouts might not have been latched yet. So check if an
2183          * update is pending and use the new values if necessary. */
2184
2185         /* Use previous 'new' scheduling period as a base if present. */
2186         old_period = js_data->scheduling_period_ns;
2187
2188 #define SET_TIMEOUT(name) \
2189                 (js_data->name = get_new_js_timeout(\
2190                                 old_period, \
2191                                 kbdev->js_data.name, \
2192                                 new_scheduling_period_ns))
2193
2194         SET_TIMEOUT(soft_stop_ticks);
2195         SET_TIMEOUT(soft_stop_ticks_cl);
2196         SET_TIMEOUT(hard_stop_ticks_ss);
2197         SET_TIMEOUT(hard_stop_ticks_cl);
2198         SET_TIMEOUT(hard_stop_ticks_dumping);
2199         SET_TIMEOUT(gpu_reset_ticks_ss);
2200         SET_TIMEOUT(gpu_reset_ticks_cl);
2201         SET_TIMEOUT(gpu_reset_ticks_dumping);
2202
2203 #undef SET_TIMEOUT
2204
2205         js_data->scheduling_period_ns = new_scheduling_period_ns;
2206
2207         kbase_js_set_timeouts(kbdev);
2208
2209         spin_unlock_irqrestore(&js_data->runpool_irq.lock, flags);
2210         mutex_unlock(&js_data->runpool_mutex);
2211
2212         dev_dbg(kbdev->dev, "JS scheduling period: %dms\n",
2213                         js_scheduling_period);
2214
2215         return count;
2216 }
2217
2218 /**
2219  * show_js_scheduling_period - Show callback for the js_scheduling_period sysfs
2220  *                             entry.
2221  * @dev:  The device this sysfs file is for.
2222  * @attr: The attributes of the sysfs file.
2223  * @buf:  The output buffer for the sysfs file contents.
2224  *
2225  * This function is called to get the current period used for the JS scheduling
2226  * period.
2227  *
2228  * Return: The number of bytes output to buf.
2229  */
2230 static ssize_t show_js_scheduling_period(struct device *dev,
2231                 struct device_attribute *attr, char * const buf)
2232 {
2233         struct kbase_device *kbdev;
2234         u32 period;
2235         ssize_t ret;
2236
2237         kbdev = to_kbase_device(dev);
2238         if (!kbdev)
2239                 return -ENODEV;
2240
2241         period = kbdev->js_data.scheduling_period_ns;
2242
2243         ret = scnprintf(buf, PAGE_SIZE, "%d\n",
2244                         period / 1000000);
2245
2246         return ret;
2247 }
2248
2249 static DEVICE_ATTR(js_scheduling_period, S_IRUGO | S_IWUSR,
2250                 show_js_scheduling_period, set_js_scheduling_period);
2251
2252 #if !MALI_CUSTOMER_RELEASE
2253 /** Store callback for the @c force_replay sysfs file.
2254  *
2255  * @param dev   The device this sysfs file is for
2256  * @param attr  The attributes of the sysfs file
2257  * @param buf   The value written to the sysfs file
2258  * @param count The number of bytes written to the sysfs file
2259  *
2260  * @return @c count if the function succeeded. An error code on failure.
2261  */
2262 static ssize_t set_force_replay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2263 {
2264         struct kbase_device *kbdev;
2265
2266         kbdev = to_kbase_device(dev);
2267         if (!kbdev)
2268                 return -ENODEV;
2269
2270         if (!strncmp("limit=", buf, MIN(6, count))) {
2271                 unsigned int force_replay_limit;
2272                 int items = sscanf(buf, "limit=%u", &force_replay_limit);
2273
2274                 if (items == 1) {
2275                         kbdev->force_replay_random = false;
2276                         kbdev->force_replay_limit = force_replay_limit;
2277                         kbdev->force_replay_count = 0;
2278
2279                         return count;
2280                 }
2281         } else if (!strncmp("random_limit", buf, MIN(12, count))) {
2282                 kbdev->force_replay_random = true;
2283                 kbdev->force_replay_count = 0;
2284
2285                 return count;
2286         } else if (!strncmp("norandom_limit", buf, MIN(14, count))) {
2287                 kbdev->force_replay_random = false;
2288                 kbdev->force_replay_limit = KBASEP_FORCE_REPLAY_DISABLED;
2289                 kbdev->force_replay_count = 0;
2290
2291                 return count;
2292         } else if (!strncmp("core_req=", buf, MIN(9, count))) {
2293                 unsigned int core_req;
2294                 int items = sscanf(buf, "core_req=%x", &core_req);
2295
2296                 if (items == 1) {
2297                         kbdev->force_replay_core_req = (base_jd_core_req)core_req;
2298
2299                         return count;
2300                 }
2301         }
2302         dev_err(kbdev->dev, "Couldn't process force_replay write operation.\nPossible settings: limit=<limit>, random_limit, norandom_limit, core_req=<core_req>\n");
2303         return -EINVAL;
2304 }
2305
2306 /** Show callback for the @c force_replay sysfs file.
2307  *
2308  * This function is called to get the contents of the @c force_replay sysfs
2309  * file. It returns the value last written to the force_replay sysfs file.
2310  * If the file has not been written yet, the values will be 0.
2311  *
2312  * @param dev   The device this sysfs file is for
2313  * @param attr  The attributes of the sysfs file
2314  * @param buf   The output buffer for the sysfs file contents
2315  *
2316  * @return The number of bytes output to @c buf.
2317  */
2318 static ssize_t show_force_replay(struct device *dev,
2319                 struct device_attribute *attr, char * const buf)
2320 {
2321         struct kbase_device *kbdev;
2322         ssize_t ret;
2323
2324         kbdev = to_kbase_device(dev);
2325         if (!kbdev)
2326                 return -ENODEV;
2327
2328         if (kbdev->force_replay_random)
2329                 ret = scnprintf(buf, PAGE_SIZE,
2330                                 "limit=0\nrandom_limit\ncore_req=%x\n",
2331                                 kbdev->force_replay_core_req);
2332         else
2333                 ret = scnprintf(buf, PAGE_SIZE,
2334                                 "limit=%u\nnorandom_limit\ncore_req=%x\n",
2335                                 kbdev->force_replay_limit,
2336                                 kbdev->force_replay_core_req);
2337
2338         if (ret >= PAGE_SIZE) {
2339                 buf[PAGE_SIZE - 2] = '\n';
2340                 buf[PAGE_SIZE - 1] = '\0';
2341                 ret = PAGE_SIZE - 1;
2342         }
2343
2344         return ret;
2345 }
2346
2347 /** The sysfs file @c force_replay.
2348  *
2349  */
2350 static DEVICE_ATTR(force_replay, S_IRUGO | S_IWUSR, show_force_replay,
2351                 set_force_replay);
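
/*
 * Example usage (sketch), matching the settings parsed in
 * set_force_replay() above (core_req is parsed as hex):
 *
 *   # echo limit=2 > /sys/devices/.../force_replay
 *   # echo random_limit > /sys/devices/.../force_replay
 *   # echo norandom_limit > /sys/devices/.../force_replay
 *   # echo core_req=10 > /sys/devices/.../force_replay
 */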
2352 #endif /* !MALI_CUSTOMER_RELEASE */
2353
2354 #ifdef CONFIG_MALI_DEBUG
2355 static ssize_t set_js_softstop_always(struct device *dev,
2356                 struct device_attribute *attr, const char *buf, size_t count)
2357 {
2358         struct kbase_device *kbdev;
2359         int ret;
2360         int softstop_always;
2361
2362         kbdev = to_kbase_device(dev);
2363         if (!kbdev)
2364                 return -ENODEV;
2365
2366         ret = kstrtoint(buf, 0, &softstop_always);
2367         if (ret || ((softstop_always != 0) && (softstop_always != 1))) {
2368                 dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n"
2369                                 "Use format <soft_stop_always>\n");
2370                 return -EINVAL;
2371         }
2372
2373         kbdev->js_data.softstop_always = (bool) softstop_always;
2374         dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n",
2375                         (kbdev->js_data.softstop_always) ?
2376                         "Enabled" : "Disabled");
2377         return count;
2378 }
2379
2380 static ssize_t show_js_softstop_always(struct device *dev,
2381                 struct device_attribute *attr, char * const buf)
2382 {
2383         struct kbase_device *kbdev;
2384         ssize_t ret;
2385
2386         kbdev = to_kbase_device(dev);
2387         if (!kbdev)
2388                 return -ENODEV;
2389
2390         ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);
2391
2392         if (ret >= PAGE_SIZE) {
2393                 buf[PAGE_SIZE - 2] = '\n';
2394                 buf[PAGE_SIZE - 1] = '\0';
2395                 ret = PAGE_SIZE - 1;
2396         }
2397
2398         return ret;
2399 }
2400
2401 /*
2402  * By default, soft-stops are disabled when only a single context is present.
2403  * Enabling soft-stop for a single context is useful for debug and
2404  * unit-testing purposes (see the CL t6xx_stress_1 unit test for an example).
2405  */
2406 static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
2407 #endif /* CONFIG_MALI_DEBUG */
2408
2409 #ifdef CONFIG_MALI_DEBUG
2410 typedef void (kbasep_debug_command_func) (struct kbase_device *);
2411
2412 enum kbasep_debug_command_code {
2413         KBASEP_DEBUG_COMMAND_DUMPTRACE,
2414
2415         /* This must be the last enum */
2416         KBASEP_DEBUG_COMMAND_COUNT
2417 };
2418
2419 struct kbasep_debug_command {
2420         char *str;
2421         kbasep_debug_command_func *func;
2422 };
2423
2424 /** Debug commands supported by the driver */
2425 static const struct kbasep_debug_command debug_commands[] = {
2426         {
2427          .str = "dumptrace",
2428          .func = &kbasep_trace_dump,
2429          }
2430 };
2431
2432 /** Show callback for the @c debug_command sysfs file.
2433  *
2434  * This function is called to get the contents of the @c debug_command sysfs
2435  * file. This is a list of the available debug commands, separated by newlines.
2436  *
2437  * @param dev   The device this sysfs file is for
2438  * @param attr  The attributes of the sysfs file
2439  * @param buf   The output buffer for the sysfs file contents
2440  *
2441  * @return The number of bytes output to @c buf.
2442  */
2443 static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char * const buf)
2444 {
2445         struct kbase_device *kbdev;
2446         int i;
2447         ssize_t ret = 0;
2448
2449         kbdev = to_kbase_device(dev);
2450
2451         if (!kbdev)
2452                 return -ENODEV;
2453
2454         for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
2455                 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);
2456
2457         if (ret >= PAGE_SIZE) {
2458                 buf[PAGE_SIZE - 2] = '\n';
2459                 buf[PAGE_SIZE - 1] = '\0';
2460                 ret = PAGE_SIZE - 1;
2461         }
2462
2463         return ret;
2464 }
2465
2466 /** Store callback for the @c debug_command sysfs file.
2467  *
2468  * This function is called when the @c debug_command sysfs file is written to.
2469  * It matches the requested command against the available commands, and if
2470  * a matching command is found calls the associated function from
2471  * @ref debug_commands to issue the command.
2472  *
2473  * @param dev   The device this sysfs file is for
2474  * @param attr  The attributes of the sysfs file
2475  * @param buf   The value written to the sysfs file
2476  * @param count The number of bytes written to the sysfs file
2477  *
2478  * @return @c count if the function succeeded. An error code on failure.
2479  */
2480 static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2481 {
2482         struct kbase_device *kbdev;
2483         int i;
2484
2485         kbdev = to_kbase_device(dev);
2486
2487         if (!kbdev)
2488                 return -ENODEV;
2489
2490         for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
2491                 if (sysfs_streq(debug_commands[i].str, buf)) {
2492                         debug_commands[i].func(kbdev);
2493                         return count;
2494                 }
2495         }
2496
2497         /* Debug Command not found */
2498         dev_err(dev, "debug_command: command not known\n");
2499         return -EINVAL;
2500 }
2501
2502 /** The sysfs file @c debug_command.
2503  *
2504  * This is used to issue general debug commands to the device driver.
2505  * Reading it will produce a list of debug commands, separated by newlines.
2506  * Writing to it with one of those commands will issue said command.
2507  */
2508 static DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug);
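
/*
 * Example usage (sketch): read the file to list the known commands, then
 * write one back to issue it:
 *
 *   # cat /sys/devices/.../debug_command
 *   dumptrace
 *   # echo dumptrace > /sys/devices/.../debug_command
 */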
2509 #endif /* CONFIG_MALI_DEBUG */
2510
2511 /**
2512  * kbase_show_gpuinfo - Show callback for the gpuinfo sysfs entry.
2513  * @dev: The device this sysfs file is for.
2514  * @attr: The attributes of the sysfs file.
2515  * @buf: The output buffer to receive the GPU information.
2516  *
2517  * This function is called to get a description of the present Mali
2518  * GPU via the gpuinfo sysfs entry.  This includes the GPU family, the
2519  * number of cores, the hardware version and the raw product id.  For
2520  * example:
2521  *
2522  *    Mali-T60x MP4 r0p0 0x6956
2523  *
2524  * Return: The number of bytes output to buf.
2525  */
2526 static ssize_t kbase_show_gpuinfo(struct device *dev,
2527                                   struct device_attribute *attr, char *buf)
2528 {
2529         static const struct gpu_product_id_name {
2530                 unsigned id;
2531                 char *name;
2532         } gpu_product_id_names[] = {
2533                 { .id = GPU_ID_PI_T60X, .name = "Mali-T60x" },
2534                 { .id = GPU_ID_PI_T62X, .name = "Mali-T62x" },
2535                 { .id = GPU_ID_PI_T72X, .name = "Mali-T72x" },
2536                 { .id = GPU_ID_PI_T76X, .name = "Mali-T76x" },
2537                 { .id = GPU_ID_PI_T82X, .name = "Mali-T82x" },
2538                 { .id = GPU_ID_PI_T83X, .name = "Mali-T83x" },
2539                 { .id = GPU_ID_PI_T86X, .name = "Mali-T86x" },
2540                 { .id = GPU_ID_PI_TFRX, .name = "Mali-T88x" },
2541                 { .id = GPU_ID2_PRODUCT_TMIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
2542                   .name = "Mali-G71" },
2543         };
2544         const char *product_name = "(Unknown Mali GPU)";
2545         struct kbase_device *kbdev;
2546         u32 gpu_id;
2547         unsigned product_id, product_id_mask;
2548         unsigned i;
2549         bool is_new_format;
2550
2551         kbdev = to_kbase_device(dev);
2552         if (!kbdev)
2553                 return -ENODEV;
2554
2555         gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
2556         product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
2557         is_new_format = GPU_ID_IS_NEW_FORMAT(product_id);
2558         product_id_mask =
2559                 (is_new_format ?
2560                         GPU_ID2_PRODUCT_MODEL :
2561                         GPU_ID_VERSION_PRODUCT_ID) >>
2562                 GPU_ID_VERSION_PRODUCT_ID_SHIFT;
2563
2564         for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
2565                 const struct gpu_product_id_name *p = &gpu_product_id_names[i];
2566
2567                 if ((GPU_ID_IS_NEW_FORMAT(p->id) == is_new_format) &&
2568                     (p->id & product_id_mask) ==
2569                     (product_id & product_id_mask)) {
2570                         product_name = p->name;
2571                         break;
2572                 }
2573         }
2574
2575         return scnprintf(buf, PAGE_SIZE, "%s MP%d r%dp%d 0x%04X\n",
2576                 product_name, kbdev->gpu_props.num_cores,
2577                 (gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT,
2578                 (gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT,
2579                 product_id);
2580 }
2581 static DEVICE_ATTR(gpuinfo, S_IRUGO, kbase_show_gpuinfo, NULL);
2582
2583 /**
2584  * set_dvfs_period - Store callback for the dvfs_period sysfs file.
2585  * @dev:   The device this sysfs file is for
2586  * @attr:  The attributes of the sysfs file
2587  * @buf:   The value written to the sysfs file
2588  * @count: The number of bytes written to the sysfs file
2589  *
2590  * This function is called when the dvfs_period sysfs file is written to. It
2591  * checks the data written, and if valid updates the DVFS period variable.
2592  *
2593  * Return: @c count if the function succeeded. An error code on failure.
2594  */
2595 static ssize_t set_dvfs_period(struct device *dev,
2596                 struct device_attribute *attr, const char *buf, size_t count)
2597 {
2598         struct kbase_device *kbdev;
2599         int ret;
2600         int dvfs_period;
2601
2602         kbdev = to_kbase_device(dev);
2603         if (!kbdev)
2604                 return -ENODEV;
2605
2606         ret = kstrtoint(buf, 0, &dvfs_period);
2607         if (ret || dvfs_period <= 0) {
2608                 dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n"
2609                                 "Use format <dvfs_period_ms>\n");
2610                 return -EINVAL;
2611         }
2612
2613         kbdev->pm.dvfs_period = dvfs_period;
2614         dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period);
2615
2616         return count;
2617 }
2618
2619 /**
2620  * show_dvfs_period - Show callback for the dvfs_period sysfs entry.
2621  * @dev:  The device this sysfs file is for.
2622  * @attr: The attributes of the sysfs file.
2623  * @buf:  The output buffer for the sysfs file contents.
2624  *
2625  * This function is called to get the current period used for the DVFS sample
2626  * timer.
2627  *
2628  * Return: The number of bytes output to buf.
2629  */
2630 static ssize_t show_dvfs_period(struct device *dev,
2631                 struct device_attribute *attr, char * const buf)
2632 {
2633         struct kbase_device *kbdev;
2634         ssize_t ret;
2635
2636         kbdev = to_kbase_device(dev);
2637         if (!kbdev)
2638                 return -ENODEV;
2639
2640         ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period);
2641
2642         return ret;
2643 }
2644
2645 static DEVICE_ATTR(dvfs_period, S_IRUGO | S_IWUSR, show_dvfs_period,
2646                 set_dvfs_period);
2647
2648 /**
2649  * set_pm_poweroff - Store callback for the pm_poweroff sysfs file.
2650  * @dev:   The device this sysfs file is for
2651  * @attr:  The attributes of the sysfs file
2652  * @buf:   The value written to the sysfs file
2653  * @count: The number of bytes written to the sysfs file
2654  *
2655  * This function is called when the pm_poweroff sysfs file is written to.
2656  *
2657  * This file contains three values separated by whitespace. The values
2658  * are gpu_poweroff_time (the period of the poweroff timer, in ns),
2659  * poweroff_shader_ticks (the number of poweroff timer ticks before an idle
2660  * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer
2661  * ticks before the GPU is powered off), in that order.
2662  *
2663  * Return: @c count if the function succeeded. An error code on failure.
2664  */
2665 static ssize_t set_pm_poweroff(struct device *dev,
2666                 struct device_attribute *attr, const char *buf, size_t count)
2667 {
2668         struct kbase_device *kbdev;
2669         int items;
2670         u64 gpu_poweroff_time;
2671         unsigned int poweroff_shader_ticks, poweroff_gpu_ticks;
2672
2673         kbdev = to_kbase_device(dev);
2674         if (!kbdev)
2675                 return -ENODEV;
2676
2677         items = sscanf(buf, "%llu %u %u", &gpu_poweroff_time,
2678                         &poweroff_shader_ticks,
2679                         &poweroff_gpu_ticks);
2680         if (items != 3) {
2681                 dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n"
2682                                 "Use format <gpu_poweroff_time_ns> <poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
2683                 return -EINVAL;
2684         }
2685
2686         kbdev->pm.gpu_poweroff_time = HR_TIMER_DELAY_NSEC(gpu_poweroff_time);
2687         kbdev->pm.poweroff_shader_ticks = poweroff_shader_ticks;
2688         kbdev->pm.poweroff_gpu_ticks = poweroff_gpu_ticks;
2689
2690         return count;
2691 }
2692
2693 /**
2694  * show_pm_poweroff - Show callback for the pm_poweroff sysfs entry.
2695  * @dev:  The device this sysfs file is for.
2696  * @attr: The attributes of the sysfs file.
2697  * @buf:  The output buffer for the sysfs file contents.
2698  *
2699  * This function is called to get the current poweroff timer period and the
2700  * shader and GPU poweroff tick counts.
2701  *
2702  * Return: The number of bytes output to buf.
2703  */
2704 static ssize_t show_pm_poweroff(struct device *dev,
2705                 struct device_attribute *attr, char * const buf)
2706 {
2707         struct kbase_device *kbdev;
2708         ssize_t ret;
2709
2710         kbdev = to_kbase_device(dev);
2711         if (!kbdev)
2712                 return -ENODEV;
2713
2714         ret = scnprintf(buf, PAGE_SIZE, "%llu %u %u\n",
2715                         ktime_to_ns(kbdev->pm.gpu_poweroff_time),
2716                         kbdev->pm.poweroff_shader_ticks,
2717                         kbdev->pm.poweroff_gpu_ticks);
2718
2719         return ret;
2720 }
2721
2722 static DEVICE_ATTR(pm_poweroff, S_IRUGO | S_IWUSR, show_pm_poweroff,
2723                 set_pm_poweroff);
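
/*
 * Example usage (sketch): a 100us poweroff timer, with shaders powered
 * off after 2 idle ticks and the GPU after 10:
 *
 *   # echo 100000 2 10 > /sys/devices/.../pm_poweroff
 */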
2724
2725 /**
2726  * set_reset_timeout - Store callback for the reset_timeout sysfs file.
2727  * @dev:   The device this sysfs file is for
2728  * @attr:  The attributes of the sysfs file
2729  * @buf:   The value written to the sysfs file
2730  * @count: The number of bytes written to the sysfs file
2731  *
2732  * This function is called when the reset_timeout sysfs file is written to. It
2733  * checks the data written, and if valid updates the reset timeout.
2734  *
2735  * Return: @c count if the function succeeded. An error code on failure.
2736  */
2737 static ssize_t set_reset_timeout(struct device *dev,
2738                 struct device_attribute *attr, const char *buf, size_t count)
2739 {
2740         struct kbase_device *kbdev;
2741         int ret;
2742         int reset_timeout;
2743
2744         kbdev = to_kbase_device(dev);
2745         if (!kbdev)
2746                 return -ENODEV;
2747
2748         ret = kstrtoint(buf, 0, &reset_timeout);
2749         if (ret || reset_timeout <= 0) {
2750                 dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n"
2751                                 "Use format <reset_timeout_ms>\n");
2752                 return -EINVAL;
2753         }
2754
2755         kbdev->reset_timeout_ms = reset_timeout;
2756         dev_dbg(kbdev->dev, "Reset timeout: %dms\n", reset_timeout);
2757
2758         return count;
2759 }
2760
2761 /**
2762  * show_reset_timeout - Show callback for the reset_timeout sysfs entry.
2763  * @dev:  The device this sysfs file is for.
2764  * @attr: The attributes of the sysfs file.
2765  * @buf:  The output buffer for the sysfs file contents.
2766  *
2767  * This function is called to get the current reset timeout.
2768  *
2769  * Return: The number of bytes output to buf.
2770  */
2771 static ssize_t show_reset_timeout(struct device *dev,
2772                 struct device_attribute *attr, char * const buf)
2773 {
2774         struct kbase_device *kbdev;
2775         ssize_t ret;
2776
2777         kbdev = to_kbase_device(dev);
2778         if (!kbdev)
2779                 return -ENODEV;
2780
2781         ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms);
2782
2783         return ret;
2784 }
2785
2786 static DEVICE_ATTR(reset_timeout, S_IRUGO | S_IWUSR, show_reset_timeout,
2787                 set_reset_timeout);
2788
2789
2790
2791 static ssize_t show_mem_pool_size(struct device *dev,
2792                 struct device_attribute *attr, char * const buf)
2793 {
2794         struct kbase_device *kbdev;
2795         ssize_t ret;
2796
2797         kbdev = to_kbase_device(dev);
2798         if (!kbdev)
2799                 return -ENODEV;
2800
2801         ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
2802                         kbase_mem_pool_size(&kbdev->mem_pool));
2803
2804         return ret;
2805 }
2806
2807 static ssize_t set_mem_pool_size(struct device *dev,
2808                 struct device_attribute *attr, const char *buf, size_t count)
2809 {
2810         struct kbase_device *kbdev;
2811         unsigned long new_size;
2812         int err;
2813
2814         kbdev = to_kbase_device(dev);
2815         if (!kbdev)
2816                 return -ENODEV;
2817
2818         err = kstrtoul(buf, 0, &new_size);
2819         if (err)
2820                 return err;
2821
2822         kbase_mem_pool_trim(&kbdev->mem_pool, new_size);
2823
2824         return count;
2825 }
2826
2827 static DEVICE_ATTR(mem_pool_size, S_IRUGO | S_IWUSR, show_mem_pool_size,
2828                 set_mem_pool_size);
2829
2830 static ssize_t show_mem_pool_max_size(struct device *dev,
2831                 struct device_attribute *attr, char * const buf)
2832 {
2833         struct kbase_device *kbdev;
2834         ssize_t ret;
2835
2836         kbdev = to_kbase_device(dev);
2837         if (!kbdev)
2838                 return -ENODEV;
2839
2840         ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
2841                         kbase_mem_pool_max_size(&kbdev->mem_pool));
2842
2843         return ret;
2844 }
2845
2846 static ssize_t set_mem_pool_max_size(struct device *dev,
2847                 struct device_attribute *attr, const char *buf, size_t count)
2848 {
2849         struct kbase_device *kbdev;
2850         unsigned long new_max_size;
2851         int err;
2852
2853         kbdev = to_kbase_device(dev);
2854         if (!kbdev)
2855                 return -ENODEV;
2856
2857         err = kstrtoul(buf, 0, &new_max_size);
2858         if (err)
2859                 return err;
2860
2861         kbase_mem_pool_set_max_size(&kbdev->mem_pool, new_max_size);
2862
2863         return count;
2864 }
2865
2866 static DEVICE_ATTR(mem_pool_max_size, S_IRUGO | S_IWUSR, show_mem_pool_max_size,
2867                 set_mem_pool_max_size);
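
/*
 * Illustrative usage of the two pool attributes above (a sketch; both
 * values are expressed in pages):
 *
 *   echo 0 > /sys/class/misc/mali0/device/mem_pool_size        # drain pool
 *   echo 4096 > /sys/class/misc/mali0/device/mem_pool_max_size # 16 MiB cap
 *   cat /sys/class/misc/mali0/device/mem_pool_size             # current size
 */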
2868
2869
2870 static int kbasep_protected_mode_enter(struct kbase_device *kbdev)
2871 {
2872         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
2873                 GPU_COMMAND_SET_PROTECTED_MODE, NULL);
2874         return 0;
2875 }
2876
2877 static bool kbasep_protected_mode_supported(struct kbase_device *kbdev)
2878 {
2879         return true;
2880 }
2881
2882 static struct kbase_protected_ops kbasep_protected_ops = {
2883         .protected_mode_enter = kbasep_protected_mode_enter,
2884         .protected_mode_reset = NULL,
2885         .protected_mode_supported = kbasep_protected_mode_supported,
2886 };
2887
2888 static void kbasep_protected_mode_init(struct kbase_device *kbdev)
2889 {
2890         kbdev->protected_ops = NULL;
2891
2892         if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
2893                 /* Use native protected ops */
2894                 kbdev->protected_ops = &kbasep_protected_ops;
2895         }
2896 #ifdef PROTECTED_CALLBACKS
2897         else
2898                 kbdev->protected_ops = PROTECTED_CALLBACKS;
2899 #endif
2900
2901         if (kbdev->protected_ops)
2902                 kbdev->protected_mode_support =
2903                                 kbdev->protected_ops->protected_mode_supported(kbdev);
2904         else
2905                 kbdev->protected_mode_support = false;
2906 }
2907
2908 #ifdef CONFIG_MALI_NO_MALI
2909 static int kbase_common_reg_map(struct kbase_device *kbdev)
2910 {
2911         return 0;
2912 }
2913 static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
2914 {
2915 }
2916 #else /* CONFIG_MALI_NO_MALI */
2917 static int kbase_common_reg_map(struct kbase_device *kbdev)
2918 {
2919         int err = -ENOMEM;
2920
2921         if (!request_mem_region(kbdev->reg_start, kbdev->reg_size, dev_name(kbdev->dev))) {
2922                 dev_err(kbdev->dev, "Register window unavailable\n");
2923                 err = -EIO;
2924                 goto out_region;
2925         }
2926
2927         kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size);
2928         if (!kbdev->reg) {
2929                 dev_err(kbdev->dev, "Can't remap register window\n");
2930                 err = -EINVAL;
2931                 goto out_ioremap;
2932         }
2933
2934         return 0;
2935
2936  out_ioremap:
2937         release_mem_region(kbdev->reg_start, kbdev->reg_size);
2938  out_region:
2939         return err;
2940 }
2941
2942 static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
2943 {
2944         if (kbdev->reg) {
2945                 iounmap(kbdev->reg);
2946                 release_mem_region(kbdev->reg_start, kbdev->reg_size);
2947                 kbdev->reg = NULL;
2948                 kbdev->reg_start = 0;
2949                 kbdev->reg_size = 0;
2950         }
2951 }
2952 #endif /* CONFIG_MALI_NO_MALI */
2953
2954 static int registers_map(struct kbase_device * const kbdev)
2955 {
2956         struct platform_device *pdev = to_platform_device(kbdev->dev);
2957         struct resource *reg_res;
2958         int err;
2959
2960         /* The first memory resource is the physical address of the GPU
2961          * registers.
2962          */
2963         reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2964         if (!reg_res) {
2965                 dev_err(kbdev->dev, "Invalid register resource\n");
2966                 return -ENOENT;
2967         }
2968
2969         kbdev->reg_start = reg_res->start;
2970         kbdev->reg_size = resource_size(reg_res);
2971
2972         err = kbase_common_reg_map(kbdev);
2973         if (err) {
2974                 dev_err(kbdev->dev, "Failed to map registers\n");
2975                 return err;
2976         }
2977
2978         return 0;
2979 }
2980
2981 static void registers_unmap(struct kbase_device *kbdev)
2982 {
2983         kbase_common_reg_unmap(kbdev);
2984 }
2985
2986 static int power_control_init(struct platform_device *pdev)
2987 {
2988         struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
2989         int err = 0;
2990
2991         if (!kbdev)
2992                 return -ENODEV;
2993
2994 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
2995                         && defined(CONFIG_REGULATOR)
2996         kbdev->regulator = regulator_get_optional(kbdev->dev, "mali");
2997         if (IS_ERR_OR_NULL(kbdev->regulator)) {
2998                 err = PTR_ERR(kbdev->regulator);
2999                 kbdev->regulator = NULL;
3000                 if (err == -EPROBE_DEFER) {
3001                         dev_err(&pdev->dev, "Failed to get regulator\n");
3002                         return err;
3003                 }
3004                 dev_info(kbdev->dev,
3005                         "Continuing without Mali regulator control\n");
3006                 /* Allow probe to continue without regulator */
3007         }
3008 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
3009
3010         kbdev->clock = clk_get(kbdev->dev, "clk_mali");
3011         if (IS_ERR_OR_NULL(kbdev->clock)) {
3012                 err = PTR_ERR(kbdev->clock);
3013                 kbdev->clock = NULL;
3014                 if (err == -EPROBE_DEFER) {
3015                         dev_err(&pdev->dev, "Failed to get clock\n");
3016                         goto fail;
3017                 }
3018                 dev_info(kbdev->dev, "Continuing without Mali clock control\n");
3019                 /* Allow probe to continue without clock. */
3020         } else {
3021                 err = clk_prepare(kbdev->clock);
3022                 if (err) {
3023                         dev_err(kbdev->dev,
3024                                 "Failed to prepare clock (%d)\n",
3025                                 err);
3026                         goto fail;
3027                 }
3028         }
3029
3030 #if defined(CONFIG_OF) && defined(CONFIG_PM_OPP)
3031         /* Register the OPPs if they are available in device tree */
3032 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
3033         err = dev_pm_opp_of_add_table(kbdev->dev);
3034 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
3035         err = of_init_opp_table(kbdev->dev);
3036 #else
3037         err = 0;
3038 #endif /* LINUX_VERSION_CODE */
3039         if (err)
3040                 dev_dbg(kbdev->dev, "OPP table not found\n");
3041 #endif /* CONFIG_OF && CONFIG_PM_OPP */
3042
3043         return 0;
3044
3045 fail:
3046
3047         if (kbdev->clock != NULL) {
3048                 clk_put(kbdev->clock);
3049                 kbdev->clock = NULL;
3050         }
3051
3052 #ifdef CONFIG_REGULATOR
3053         if (kbdev->regulator != NULL) {
3054                 regulator_put(kbdev->regulator);
3055                 kbdev->regulator = NULL;
3056         }
3057 #endif
3058
3059         return err;
3060 }
3061
3062 static void power_control_term(struct kbase_device *kbdev)
3063 {
3064 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
3065         dev_pm_opp_of_remove_table(kbdev->dev);
3066 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
3067         of_free_opp_table(kbdev->dev);
3068 #endif
3069
3070         if (kbdev->clock) {
3071                 clk_unprepare(kbdev->clock);
3072                 clk_put(kbdev->clock);
3073                 kbdev->clock = NULL;
3074         }
3075
3076 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
3077                         && defined(CONFIG_REGULATOR)
3078         if (kbdev->regulator) {
3079                 regulator_put(kbdev->regulator);
3080                 kbdev->regulator = NULL;
3081         }
3082 #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
3083 }
3084
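/*
 * Illustrative device-tree fragment for the resources consumed by
 * power_control_init() above (a sketch; the node and phandle names are
 * hypothetical, but the lookup keys match the code: a "mali" supply and
 * a "clk_mali" clock, plus an optional OPP table):
 *
 *   gpu@ffa30000 {
 *           compatible = "arm,mali-midgard";
 *           clocks = <&cru ACLK_GPU>;
 *           clock-names = "clk_mali";
 *           mali-supply = <&vdd_gpu>;
 *   };
 */
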
3085 #ifdef CONFIG_DEBUG_FS
3086
3087 #if KBASE_GPU_RESET_EN
3088 #include <mali_kbase_hwaccess_jm.h>
3089
3090 static void trigger_quirks_reload(struct kbase_device *kbdev)
3091 {
3092         kbase_pm_context_active(kbdev);
3093         if (kbase_prepare_to_reset_gpu(kbdev))
3094                 kbase_reset_gpu(kbdev);
3095         kbase_pm_context_idle(kbdev);
3096 }
3097
3098 #define MAKE_QUIRK_ACCESSORS(type) \
3099 static int type##_quirks_set(void *data, u64 val) \
3100 { \
3101         struct kbase_device *kbdev; \
3102         kbdev = (struct kbase_device *)data; \
3103         kbdev->hw_quirks_##type = (u32)val; \
3104         trigger_quirks_reload(kbdev); \
3105         return 0;\
3106 } \
3107 \
3108 static int type##_quirks_get(void *data, u64 *val) \
3109 { \
3110         struct kbase_device *kbdev;\
3111         kbdev = (struct kbase_device *)data;\
3112         *val = kbdev->hw_quirks_##type;\
3113         return 0;\
3114 } \
3115 DEFINE_SIMPLE_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get,\
3116                 type##_quirks_set, "%llu\n")
3117
3118 MAKE_QUIRK_ACCESSORS(sc);
3119 MAKE_QUIRK_ACCESSORS(tiler);
3120 MAKE_QUIRK_ACCESSORS(mmu);
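
/*
 * For reference, MAKE_QUIRK_ACCESSORS(sc) above expands (roughly) to a pair
 * of accessors plus a debugfs fops:
 *
 *   static int sc_quirks_set(void *data, u64 val)
 *   {
 *           struct kbase_device *kbdev = (struct kbase_device *)data;
 *
 *           kbdev->hw_quirks_sc = (u32)val;
 *           trigger_quirks_reload(kbdev);
 *           return 0;
 *   }
 *
 *   static int sc_quirks_get(void *data, u64 *val);  (symmetric read-back)
 *   DEFINE_SIMPLE_ATTRIBUTE(fops_sc_quirks, sc_quirks_get, sc_quirks_set,
 *                   "%llu\n");
 */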
3121
3122 #endif /* KBASE_GPU_RESET_EN */
3123
3124 static int kbase_device_debugfs_init(struct kbase_device *kbdev)
3125 {
3126         struct dentry *debugfs_ctx_defaults_directory;
3127         int err;
3128
3129         kbdev->mali_debugfs_directory = debugfs_create_dir(kbdev->devname,
3130                         NULL);
3131         if (!kbdev->mali_debugfs_directory) {
3132                 dev_err(kbdev->dev, "Couldn't create mali debugfs directory\n");
3133                 err = -ENOMEM;
3134                 goto out;
3135         }
3136
3137         kbdev->debugfs_ctx_directory = debugfs_create_dir("ctx",
3138                         kbdev->mali_debugfs_directory);
3139         if (!kbdev->debugfs_ctx_directory) {
3140                 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx directory\n");
3141                 err = -ENOMEM;
3142                 goto out;
3143         }
3144
3145         debugfs_ctx_defaults_directory = debugfs_create_dir("defaults",
3146                         kbdev->debugfs_ctx_directory);
3147         if (!debugfs_ctx_defaults_directory) {
3148                 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx defaults directory\n");
3149                 err = -ENOMEM;
3150                 goto out;
3151         }
3152
3153 #if !MALI_CUSTOMER_RELEASE
3154         kbasep_regs_dump_debugfs_add(kbdev);
3155 #endif /* !MALI_CUSTOMER_RELEASE */
3156
3157         kbase_debug_job_fault_debugfs_init(kbdev);
3158         kbasep_gpu_memory_debugfs_init(kbdev);
3159         kbase_as_fault_debugfs_init(kbdev);
3160 #if KBASE_GPU_RESET_EN
3161         debugfs_create_file("quirks_sc", 0644,
3162                         kbdev->mali_debugfs_directory, kbdev,
3163                         &fops_sc_quirks);
3164         debugfs_create_file("quirks_tiler", 0644,
3165                         kbdev->mali_debugfs_directory, kbdev,
3166                         &fops_tiler_quirks);
3167         debugfs_create_file("quirks_mmu", 0644,
3168                         kbdev->mali_debugfs_directory, kbdev,
3169                         &fops_mmu_quirks);
3170 #endif /* KBASE_GPU_RESET_EN */
3171
3172 #ifndef CONFIG_MALI_COH_USER
3173         debugfs_create_bool("infinite_cache", 0644,
3174                         debugfs_ctx_defaults_directory,
3175                         (bool *)&kbdev->infinite_cache_active_default);
3176 #endif /* CONFIG_MALI_COH_USER */
3177
3178         debugfs_create_size_t("mem_pool_max_size", 0644,
3179                         debugfs_ctx_defaults_directory,
3180                         &kbdev->mem_pool_max_size_default);
3181
3182 #if KBASE_TRACE_ENABLE
3183         kbasep_trace_debugfs_init(kbdev);
3184 #endif /* KBASE_TRACE_ENABLE */
3185
3186 #ifdef CONFIG_MALI_TRACE_TIMELINE
3187         kbasep_trace_timeline_debugfs_init(kbdev);
3188 #endif /* CONFIG_MALI_TRACE_TIMELINE */
3189
3190         return 0;
3191
3192 out:
3193         debugfs_remove_recursive(kbdev->mali_debugfs_directory);
3194         return err;
3195 }
3196
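/*
 * Illustrative debugfs layout created above (a sketch, assuming debugfs is
 * mounted at /sys/kernel/debug and the device name is "mali0"):
 *
 *   /sys/kernel/debug/mali0/
 *           quirks_sc, quirks_tiler, quirks_mmu
 *           ctx/defaults/infinite_cache
 *           ctx/defaults/mem_pool_max_size
 */
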
3197 static void kbase_device_debugfs_term(struct kbase_device *kbdev)
3198 {
3199         debugfs_remove_recursive(kbdev->mali_debugfs_directory);
3200 }
3201
3202 #else /* CONFIG_DEBUG_FS */
3203 static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
3204 {
3205         return 0;
3206 }
3207
3208 static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }
3209 #endif /* CONFIG_DEBUG_FS */
3210
3211 static void kbase_device_coherency_init(struct kbase_device *kbdev, u32 gpu_id)
3212 {
3213 #ifdef CONFIG_OF
3214         u32 supported_coherency_bitmap =
3215                 kbdev->gpu_props.props.raw_props.coherency_mode;
3216         const void *coherency_override_dts;
3217         u32 override_coherency;
3218 #endif /* CONFIG_OF */
3219
3220         kbdev->system_coherency = COHERENCY_NONE;
3221
3222         /* device tree may override the coherency */
3223 #ifdef CONFIG_OF
3224         coherency_override_dts = of_get_property(kbdev->dev->of_node,
3225                                                 "system-coherency",
3226                                                 NULL);
3227         if (coherency_override_dts) {
3228
3229                 override_coherency = be32_to_cpup(coherency_override_dts);
3230
3231                 if ((override_coherency <= COHERENCY_NONE) &&
3232                         (supported_coherency_bitmap &
3233                          COHERENCY_FEATURE_BIT(override_coherency))) {
3234
3235                         kbdev->system_coherency = override_coherency;
3236
3237                         dev_info(kbdev->dev,
3238                                 "Using coherency mode %u set from dtb",
3239                                 override_coherency);
3240                 } else
3241                         dev_warn(kbdev->dev,
3242                                 "Ignoring unsupported coherency mode %u set from dtb",
3243                                 override_coherency);
3244         }
3245
3246 #endif /* CONFIG_OF */
3247
3248         kbdev->gpu_props.props.raw_props.coherency_mode =
3249                 kbdev->system_coherency;
3250 }
3251
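/*
 * Illustrative device-tree override for the "system-coherency" property read
 * above (a sketch; the numeric values follow the Midgard coherency encoding,
 * e.g. 31 for COHERENCY_NONE, 1 for COHERENCY_ACE, 0 for COHERENCY_ACE_LITE):
 *
 *   system-coherency = <31>;
 */
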
3252 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3253
3254 /* Callback used by the kbase bus logger client to initiate a GPU reset
3255  * when the bus log is restarted. A GPU reset is used as the reference
3256  * point in HW bus log analyses.
3257  */
3258 static void kbase_logging_started_cb(void *data)
3259 {
3260         struct kbase_device *kbdev = (struct kbase_device *)data;
3261
3262         if (kbase_prepare_to_reset_gpu(kbdev))
3263                 kbase_reset_gpu(kbdev);
3264         dev_info(kbdev->dev, "KBASE - Bus logger restarted\n");
3265 }
3266 #endif
3267
3268 static struct attribute *kbase_attrs[] = {
3269 #ifdef CONFIG_MALI_DEBUG
3270         &dev_attr_debug_command.attr,
3271         &dev_attr_js_softstop_always.attr,
3272 #endif
3273 #if !MALI_CUSTOMER_RELEASE
3274         &dev_attr_force_replay.attr,
3275 #endif
3276         &dev_attr_js_timeouts.attr,
3277         &dev_attr_soft_job_timeout.attr,
3278         &dev_attr_gpuinfo.attr,
3279         &dev_attr_dvfs_period.attr,
3280         &dev_attr_pm_poweroff.attr,
3281         &dev_attr_reset_timeout.attr,
3282         &dev_attr_js_scheduling_period.attr,
3283         &dev_attr_power_policy.attr,
3284         &dev_attr_core_availability_policy.attr,
3285         &dev_attr_core_mask.attr,
3286         &dev_attr_mem_pool_size.attr,
3287         &dev_attr_mem_pool_max_size.attr,
3288         NULL
3289 };
3290
3291 static const struct attribute_group kbase_attr_group = {
3292         .attrs = kbase_attrs,
3293 };
3294
3295 static int kbase_platform_device_remove(struct platform_device *pdev)
3296 {
3297         struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
3298         const struct list_head *dev_list;
3299
3300         if (!kbdev)
3301                 return -ENODEV;
3302
3303 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3304         if (kbdev->inited_subsys & inited_buslogger) {
3305                 bl_core_client_unregister(kbdev->buslogger);
3306                 kbdev->inited_subsys &= ~inited_buslogger;
3307         }
3308 #endif
3309
3310         if (kbdev->inited_subsys & inited_sysfs_group) {
3311                 sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
3312                 kbdev->inited_subsys &= ~inited_sysfs_group;
3313         }
3314
3315         if (kbdev->inited_subsys & inited_dev_list) {
3316                 dev_list = kbase_dev_list_get();
3317                 list_del(&kbdev->entry);
3318                 kbase_dev_list_put(dev_list);
3319                 kbdev->inited_subsys &= ~inited_dev_list;
3320         }
3321
3322         if (kbdev->inited_subsys & inited_misc_register) {
3323                 misc_deregister(&kbdev->mdev);
3324                 kbdev->inited_subsys &= ~inited_misc_register;
3325         }
3326
3327         if (kbdev->inited_subsys & inited_get_device) {
3328                 put_device(kbdev->dev);
3329                 kbdev->inited_subsys &= ~inited_get_device;
3330         }
3331
3332         if (kbdev->inited_subsys & inited_debugfs) {
3333                 kbase_device_debugfs_term(kbdev);
3334                 kbdev->inited_subsys &= ~inited_debugfs;
3335         }
3336
3337         if (kbdev->inited_subsys & inited_job_fault) {
3338                 kbase_debug_job_fault_dev_term(kbdev);
3339                 kbdev->inited_subsys &= ~inited_job_fault;
3340         }
3341
3342 #ifndef CONFIG_MALI_PRFCNT_SET_SECONDARY
3343         if (kbdev->inited_subsys & inited_ipa) {
3344                 kbase_ipa_term(kbdev->ipa_ctx);
3345                 kbdev->inited_subsys &= ~inited_ipa;
3346         }
3347 #endif /* CONFIG_MALI_PRFCNT_SET_SECONDARY */
3348
3349         if (kbdev->inited_subsys & inited_vinstr) {
3350                 kbase_vinstr_term(kbdev->vinstr_ctx);
3351                 kbdev->inited_subsys &= ~inited_vinstr;
3352         }
3353
3354 #ifdef CONFIG_MALI_DEVFREQ
3355         if (kbdev->inited_subsys & inited_devfreq) {
3356                 kbase_devfreq_term(kbdev);
3357                 kbdev->inited_subsys &= ~inited_devfreq;
3358         }
3359 #endif
3360
3361         if (kbdev->inited_subsys & inited_backend_late) {
3362                 kbase_backend_late_term(kbdev);
3363                 kbdev->inited_subsys &= ~inited_backend_late;
3364         }
3365
3366         if (kbdev->inited_subsys & inited_tlstream) {
3367                 kbase_tlstream_term();
3368                 kbdev->inited_subsys &= ~inited_tlstream;
3369         }
3370
3371         /* Bring job and mem sys to a halt before we continue termination */
3372
3373         if (kbdev->inited_subsys & inited_js)
3374                 kbasep_js_devdata_halt(kbdev);
3375
3376         if (kbdev->inited_subsys & inited_mem)
3377                 kbase_mem_halt(kbdev);
3378
3379         if (kbdev->inited_subsys & inited_js) {
3380                 kbasep_js_devdata_term(kbdev);
3381                 kbdev->inited_subsys &= ~inited_js;
3382         }
3383
3384         if (kbdev->inited_subsys & inited_mem) {
3385                 kbase_mem_term(kbdev);
3386                 kbdev->inited_subsys &= ~inited_mem;
3387         }
3388
3389         if (kbdev->inited_subsys & inited_pm_runtime_init) {
3390                 kbdev->pm.callback_power_runtime_term(kbdev);
3391                 kbdev->inited_subsys &= ~inited_pm_runtime_init;
3392         }
3393
3394         if (kbdev->inited_subsys & inited_device) {
3395                 kbase_device_term(kbdev);
3396                 kbdev->inited_subsys &= ~inited_device;
3397         }
3398
3399         if (kbdev->inited_subsys & inited_backend_early) {
3400                 kbase_backend_early_term(kbdev);
3401                 kbdev->inited_subsys &= ~inited_backend_early;
3402         }
3403
3404         if (kbdev->inited_subsys & inited_power_control) {
3405                 power_control_term(kbdev);
3406                 kbdev->inited_subsys &= ~inited_power_control;
3407         }
3408
3409         if (kbdev->inited_subsys & inited_registers_map) {
3410                 registers_unmap(kbdev);
3411                 kbdev->inited_subsys &= ~inited_registers_map;
3412         }
3413
3414 #ifdef CONFIG_MALI_NO_MALI
3415         if (kbdev->inited_subsys & inited_gpu_device) {
3416                 gpu_device_destroy(kbdev);
3417                 kbdev->inited_subsys &= ~inited_gpu_device;
3418         }
3419 #endif /* CONFIG_MALI_NO_MALI */
3420
3421         if (kbdev->inited_subsys != 0)
3422                 dev_err(kbdev->dev, "Missing sub system termination\n");
3423
3424         kbase_device_free(kbdev);
3425
3426         return 0;
3427 }
3428
3429 extern void kbase_platform_rk_shutdown(struct kbase_device *kbdev);
3430 static void kbase_platform_device_shutdown(struct platform_device *pdev)
3431 {
3432         struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
3433
3434         kbase_platform_rk_shutdown(kbdev);
3435 }
3436
3437 static int kbase_platform_device_probe(struct platform_device *pdev)
3438 {
3439         struct kbase_device *kbdev;
3440         struct mali_base_gpu_core_props *core_props;
3441         u32 gpu_id;
3442         const struct list_head *dev_list;
3443         int err = 0;
3444
3445 #ifdef CONFIG_OF
3446         err = kbase_platform_early_init();
3447         if (err) {
3448                 dev_err(&pdev->dev, "Early platform initialization failed\n");
3449                 kbase_platform_device_remove(pdev);
3450                 return err;
3451         }
3452 #endif
3453
3454         kbdev = kbase_device_alloc();
3455         if (!kbdev) {
3456                 dev_err(&pdev->dev, "Allocate device failed\n");
3457                 kbase_platform_device_remove(pdev);
3458                 return -ENOMEM;
3459         }
3460
3461         kbdev->dev = &pdev->dev;
3462         dev_set_drvdata(kbdev->dev, kbdev);
3463
3464 #ifdef CONFIG_MALI_NO_MALI
3465         err = gpu_device_create(kbdev);
3466         if (err) {
3467                 dev_err(&pdev->dev, "Dummy model initialization failed\n");
3468                 kbase_platform_device_remove(pdev);
3469                 return err;
3470         }
3471         kbdev->inited_subsys |= inited_gpu_device;
3472 #endif /* CONFIG_MALI_NO_MALI */
3473
3474         err = assign_irqs(pdev);
3475         if (err) {
3476                 dev_err(&pdev->dev, "IRQ search failed\n");
3477                 kbase_platform_device_remove(pdev);
3478                 return err;
3479         }
3480
3481         err = registers_map(kbdev);
3482         if (err) {
3483                 dev_err(&pdev->dev, "Register map failed\n");
3484                 kbase_platform_device_remove(pdev);
3485                 return err;
3486         }
3487         kbdev->inited_subsys |= inited_registers_map;
3488
3489         err = power_control_init(pdev);
3490         if (err) {
3491                 dev_err(&pdev->dev, "Power control initialization failed\n");
3492                 kbase_platform_device_remove(pdev);
3493                 return err;
3494         }
3495         kbdev->inited_subsys |= inited_power_control;
3496
3497         err = kbase_backend_early_init(kbdev);
3498         if (err) {
3499                 dev_err(kbdev->dev, "Early backend initialization failed\n");
3500                 kbase_platform_device_remove(pdev);
3501                 return err;
3502         }
3503         kbdev->inited_subsys |= inited_backend_early;
3504
3505         scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name,
3506                         kbase_dev_nr);
3507
3508         kbase_disjoint_init(kbdev);
3509
3510         /* obtain min/max configured gpu frequencies */
3511         core_props = &(kbdev->gpu_props.props.core_props);
3512         core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN;
3513         core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
3514
3515         kbdev->gpu_props.irq_throttle_time_us = DEFAULT_IRQ_THROTTLE_TIME_US;
3516
3517         err = kbase_device_init(kbdev);
3518         if (err) {
3519                 dev_err(kbdev->dev, "Device initialization failed (%d)\n", err);
3520                 kbase_platform_device_remove(pdev);
3521                 return err;
3522         }
3523         kbdev->inited_subsys |= inited_device;
3524
3525         if (kbdev->pm.callback_power_runtime_init) {
3526                 err = kbdev->pm.callback_power_runtime_init(kbdev);
3527                 if (err) {
3528                         dev_err(kbdev->dev,
3529                                 "Runtime PM initialization failed\n");
3530                         kbase_platform_device_remove(pdev);
3531                         return err;
3532                 }
3533                 kbdev->inited_subsys |= inited_pm_runtime_init;
3534         }
3535
3536         err = kbase_mem_init(kbdev);
3537         if (err) {
3538                 dev_err(kbdev->dev, "Memory subsystem initialization failed\n");
3539                 kbase_platform_device_remove(pdev);
3540                 return err;
3541         }
3542         kbdev->inited_subsys |= inited_mem;
3543
3544         gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
3545         gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
3546         gpu_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
3547
3548         kbase_device_coherency_init(kbdev, gpu_id);
3549
3550         kbasep_protected_mode_init(kbdev);
3551
3552         err = kbasep_js_devdata_init(kbdev);
3553         if (err) {
3554                 dev_err(kbdev->dev, "Job JS devdata initialization failed\n");
3555                 kbase_platform_device_remove(pdev);
3556                 return err;
3557         }
3558         kbdev->inited_subsys |= inited_js;
3559
3560         err = kbase_tlstream_init();
3561         if (err) {
3562                 dev_err(kbdev->dev, "Timeline stream initialization failed\n");
3563                 kbase_platform_device_remove(pdev);
3564                 return err;
3565         }
3566         kbdev->inited_subsys |= inited_tlstream;
3567
3568         err = kbase_backend_late_init(kbdev);
3569         if (err) {
3570                 dev_err(kbdev->dev, "Late backend initialization failed\n");
3571                 kbase_platform_device_remove(pdev);
3572                 return err;
3573         }
3574         kbdev->inited_subsys |= inited_backend_late;
3575
3576 #ifdef CONFIG_MALI_DEVFREQ
3577         err = kbase_devfreq_init(kbdev);
3578         if (err) {
3579                 dev_err(kbdev->dev, "Devfreq initialization failed\n");
3580                 kbase_platform_device_remove(pdev);
3581                 return err;
3582         }
3583         kbdev->inited_subsys |= inited_devfreq;
3584 #endif /* CONFIG_MALI_DEVFREQ */
3585
3586         kbdev->vinstr_ctx = kbase_vinstr_init(kbdev);
3587         if (!kbdev->vinstr_ctx) {
3588                 dev_err(kbdev->dev,
3589                         "Virtual instrumentation initialization failed\n");
3590                 kbase_platform_device_remove(pdev);
3591                 return -EINVAL;
3592         }
3593         kbdev->inited_subsys |= inited_vinstr;
3594
3595 #ifndef CONFIG_MALI_PRFCNT_SET_SECONDARY
3596         kbdev->ipa_ctx = kbase_ipa_init(kbdev);
3597         if (!kbdev->ipa_ctx) {
3598                 dev_err(kbdev->dev, "IPA initialization failed\n");
3599                 kbase_platform_device_remove(pdev);
3600                 return -EINVAL;
3601         }
3602
3603         kbdev->inited_subsys |= inited_ipa;
3604 #endif  /* CONFIG_MALI_PRFCNT_SET_SECONDARY */
3605
3606         err = kbase_debug_job_fault_dev_init(kbdev);
3607         if (err) {
3608                 dev_err(kbdev->dev, "Job fault debug initialization failed\n");
3609                 kbase_platform_device_remove(pdev);
3610                 return err;
3611         }
3612         kbdev->inited_subsys |= inited_job_fault;
3613
3614         err = kbase_device_debugfs_init(kbdev);
3615         if (err) {
3616                 dev_err(kbdev->dev, "DebugFS initialization failed\n");
3617                 kbase_platform_device_remove(pdev);
3618                 return err;
3619         }
3620         kbdev->inited_subsys |= inited_debugfs;
3621
3622         /* initialize the kctx list */
3623         mutex_init(&kbdev->kctx_list_lock);
3624         INIT_LIST_HEAD(&kbdev->kctx_list);
3625
3626         kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
3627         kbdev->mdev.name = kbdev->devname;
3628         kbdev->mdev.fops = &kbase_fops;
3629         kbdev->mdev.parent = get_device(kbdev->dev);
3630         kbdev->inited_subsys |= inited_get_device;
3631
3632         err = misc_register(&kbdev->mdev);
3633         if (err) {
3634                 dev_err(kbdev->dev, "Misc device registration failed for %s\n",
3635                         kbdev->devname);
3636                 kbase_platform_device_remove(pdev);
3637                 return err;
3638         }
3639         kbdev->inited_subsys |= inited_misc_register;
3640
3641         dev_list = kbase_dev_list_get();
3642         list_add(&kbdev->entry, &kbase_dev_list);
3643         kbase_dev_list_put(dev_list);
3644         kbdev->inited_subsys |= inited_dev_list;
3645
3646         err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
3647         if (err) {
3648                 dev_err(&pdev->dev, "SysFS group creation failed\n");
3649                 kbase_platform_device_remove(pdev);
3650                 return err;
3651         }
3652         kbdev->inited_subsys |= inited_sysfs_group;
3653
3654 #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
3655         err = bl_core_client_register(kbdev->devname,
3656                                                 kbase_logging_started_cb,
3657                                                 kbdev, &kbdev->buslogger,
3658                                                 THIS_MODULE, NULL);
3659         if (err == 0) {
3660                 kbdev->inited_subsys |= inited_buslogger;
3661                 bl_core_set_threshold(kbdev->buslogger, 1024*1024*1024);
3662         } else {
3663                 dev_warn(kbdev->dev, "Bus log client registration failed\n");
3664                 err = 0;
3665         }
3666 #endif
3667
3668         dev_info(kbdev->dev,
3669                         "Probed as %s\n", dev_name(kbdev->mdev.this_device));
3670
3671         kbase_dev_nr++;
3672
3673         return err;
3674 }
3675
3676 /**
3677  * kbase_device_suspend - Suspend callback from the OS.
3678  * @dev: The device to suspend
3679  *
3680  * This is called by Linux when the device should suspend.
3681  *
3682  * Return: A standard Linux error code
3683  */
3684 static int kbase_device_suspend(struct device *dev)
3685 {
3686         struct kbase_device *kbdev = to_kbase_device(dev);
3687
3688         if (!kbdev)
3689                 return -ENODEV;
3690
3691 #if defined(CONFIG_PM_DEVFREQ) && \
3692                 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3693         devfreq_suspend_device(kbdev->devfreq);
3694 #endif
3695
3696         kbase_pm_suspend(kbdev);
3697         return 0;
3698 }
3699
3700 /**
3701  * kbase_device_resume - Resume callback from the OS.
3702  * @dev: The device to resume
3703  *
3704  * This is called by Linux when the device should resume from suspension.
3705  *
3706  * Return: A standard Linux error code
3707  */
3708 static int kbase_device_resume(struct device *dev)
3709 {
3710         struct kbase_device *kbdev = to_kbase_device(dev);
3711
3712         if (!kbdev)
3713                 return -ENODEV;
3714
3715         kbase_pm_resume(kbdev);
3716
3717 #if defined(CONFIG_PM_DEVFREQ) && \
3718                 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3719         devfreq_resume_device(kbdev->devfreq);
3720 #endif
3721         return 0;
3722 }
3723
3724 /**
3725  * kbase_device_runtime_suspend - Runtime suspend callback from the OS.
3726  * @dev: The device to suspend
3727  *
3728  * This is called by Linux when the device should prepare for a condition in
3729  * which it cannot communicate with the CPU(s) and RAM due to power management.
3730  *
3731  * Return: A standard Linux error code
3732  */
3733 #ifdef KBASE_PM_RUNTIME
3734 static int kbase_device_runtime_suspend(struct device *dev)
3735 {
3736         struct kbase_device *kbdev = to_kbase_device(dev);
3737
3738         if (!kbdev)
3739                 return -ENODEV;
3740
3741 #if defined(CONFIG_PM_DEVFREQ) && \
3742                 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3743         devfreq_suspend_device(kbdev->devfreq);
3744 #endif
3745
3746         if (kbdev->pm.backend.callback_power_runtime_off) {
3747                 kbdev->pm.backend.callback_power_runtime_off(kbdev);
3748                 dev_dbg(dev, "runtime suspend\n");
3749         }
3750         return 0;
3751 }
3752 #endif /* KBASE_PM_RUNTIME */
3753
3754 /**
3755  * kbase_device_runtime_resume - Runtime resume callback from the OS.
3756  * @dev: The device to resume
3757  *
3758  * This is called by Linux when the device should go into a fully active
3759  * state.
3760  *
3761  * Return: A standard Linux error code
3762  */
3763 #ifdef KBASE_PM_RUNTIME
3764 static int kbase_device_runtime_resume(struct device *dev)
3765 {
3766         int ret = 0;
3767         struct kbase_device *kbdev = to_kbase_device(dev);
3768
3769         if (!kbdev)
3770                 return -ENODEV;
3771
3772         if (kbdev->pm.backend.callback_power_runtime_on) {
3773                 ret = kbdev->pm.backend.callback_power_runtime_on(kbdev);
3774                 dev_dbg(dev, "runtime resume\n");
3775         }
3776
3777 #if defined(CONFIG_PM_DEVFREQ) && \
3778                 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3779         devfreq_resume_device(kbdev->devfreq);
3780 #endif
3781
3782         return ret;
3783 }
3784 #endif /* KBASE_PM_RUNTIME */
3785
3786
3787 #ifdef KBASE_PM_RUNTIME
3788 /**
3789  * kbase_device_runtime_idle - Runtime idle callback from the OS.
3790  * @dev: The device to suspend
3791  *
3792  * This is called by Linux when the device appears to be inactive and it might
3793  * be placed into a low power state.
3794  *
3795  * Return: 0 if device can be suspended, non-zero to avoid runtime autosuspend,
3796  * otherwise a standard Linux error code
3797  */
3798 static int kbase_device_runtime_idle(struct device *dev)
3799 {
3800         struct kbase_device *kbdev = to_kbase_device(dev);
3801
3802         if (!kbdev)
3803                 return -ENODEV;
3804
3805         /* Use platform specific implementation if it exists. */
3806         if (kbdev->pm.backend.callback_power_runtime_idle)
3807                 return kbdev->pm.backend.callback_power_runtime_idle(kbdev);
3808
3809         return 0;
3810 }
3811 #endif /* KBASE_PM_RUNTIME */
3812
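/*
 * A minimal sketch of how a platform backend might arm the runtime-PM
 * callbacks above (hypothetical wiring; the real calls live in the
 * platform-specific power-management code):
 *
 *   pm_runtime_set_autosuspend_delay(kbdev->dev, 50);
 *   pm_runtime_use_autosuspend(kbdev->dev);
 *   pm_runtime_enable(kbdev->dev);
 */
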
3813 /** The power management operations for the platform driver.
3814  */
3815 static const struct dev_pm_ops kbase_pm_ops = {
3816         .suspend = kbase_device_suspend,
3817         .resume = kbase_device_resume,
3818 #ifdef KBASE_PM_RUNTIME
3819         .runtime_suspend = kbase_device_runtime_suspend,
3820         .runtime_resume = kbase_device_runtime_resume,
3821         .runtime_idle = kbase_device_runtime_idle,
3822 #endif /* KBASE_PM_RUNTIME */
3823 };
3824
3825 #ifdef CONFIG_OF
3826 static const struct of_device_id kbase_dt_ids[] = {
3827         { .compatible = "arm,malit7xx" },
3828         { .compatible = "arm,mali-midgard" },
3829         { /* sentinel */ }
3830 };
3831 MODULE_DEVICE_TABLE(of, kbase_dt_ids);
3832 #endif
3833
3834 static struct platform_driver kbase_platform_driver = {
3835         .probe = kbase_platform_device_probe,
3836         .remove = kbase_platform_device_remove,
3837         .shutdown = kbase_platform_device_shutdown,
3838         .driver = {
3839                    .name = kbase_drv_name,
3840                    .owner = THIS_MODULE,
3841                    .pm = &kbase_pm_ops,
3842                    .of_match_table = of_match_ptr(kbase_dt_ids),
3843         },
3844 };
3845
3846 /*
3847  * When using Device Tree, the driver no longer provides a shortcut to
3848  * create the Mali platform device.
3849  */
3850 #ifdef CONFIG_OF
3851 module_platform_driver(kbase_platform_driver);
3852 #else
3853
3854 static int __init rockchip_gpu_init_driver(void)
3855 {
3856         return platform_driver_register(&kbase_platform_driver);
3857 }
3858 late_initcall(rockchip_gpu_init_driver);
3859
3860 static int __init kbase_driver_init(void)
3861 {
3862         int ret;
3863
3864         ret = kbase_platform_early_init();
3865         if (ret)
3866                 return ret;
3867
3868 #ifdef CONFIG_MALI_PLATFORM_FAKE
3869         ret = kbase_platform_fake_register();
3870         if (ret)
3871                 return ret;
3872 #endif
3873         ret = platform_driver_register(&kbase_platform_driver);
3874 #ifdef CONFIG_MALI_PLATFORM_FAKE
3875         if (ret)
3876                 kbase_platform_fake_unregister();
3877 #endif
3878         return ret;
3879 }
3880
3881 static void __exit kbase_driver_exit(void)
3882 {
3883         platform_driver_unregister(&kbase_platform_driver);
3884 #ifdef CONFIG_MALI_PLATFORM_FAKE
3885         kbase_platform_fake_unregister();
3886 #endif
3887 }
3888
3889 module_init(kbase_driver_init);
3890 module_exit(kbase_driver_exit);
3891
3892 #endif /* CONFIG_OF */
3893
3894 MODULE_LICENSE("GPL");
3895 MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \
3896                 __stringify(BASE_UK_VERSION_MAJOR) "." \
3897                 __stringify(BASE_UK_VERSION_MINOR) ")");
3898
3899 #if defined(CONFIG_MALI_GATOR_SUPPORT) || defined(CONFIG_MALI_SYSTEM_TRACE)
3900 #define CREATE_TRACE_POINTS
3901 #endif
3902
3903 #ifdef CONFIG_MALI_GATOR_SUPPORT
3904 /* Create the trace points (otherwise we just get code to call a tracepoint) */
3905 #include "mali_linux_trace.h"
3906
3907 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_job_slots_event);
3908 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_status);
3909 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_on);
3910 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_off);
3911 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_page_fault_insert_pages);
3912 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_in_use);
3913 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_released);
3914 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_total_alloc_pages_change);
3915
3916 void kbase_trace_mali_pm_status(u32 event, u64 value)
3917 {
3918         trace_mali_pm_status(event, value);
3919 }
3920
3921 void kbase_trace_mali_pm_power_off(u32 event, u64 value)
3922 {
3923         trace_mali_pm_power_off(event, value);
3924 }
3925
3926 void kbase_trace_mali_pm_power_on(u32 event, u64 value)
3927 {
3928         trace_mali_pm_power_on(event, value);
3929 }
3930
3931 void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id)
3932 {
3933         trace_mali_job_slots_event(event, (kctx != NULL ? kctx->tgid : 0), (kctx != NULL ? kctx->pid : 0), atom_id);
3934 }
3935
3936 void kbase_trace_mali_page_fault_insert_pages(int event, u32 value)
3937 {
3938         trace_mali_page_fault_insert_pages(event, value);
3939 }
3940
3941 void kbase_trace_mali_mmu_as_in_use(int event)
3942 {
3943         trace_mali_mmu_as_in_use(event);
3944 }
3945
3946 void kbase_trace_mali_mmu_as_released(int event)
3947 {
3948         trace_mali_mmu_as_released(event);
3949 }
3950
3951 void kbase_trace_mali_total_alloc_pages_change(long long int event)
3952 {
3953         trace_mali_total_alloc_pages_change(event);
3954 }
3955 #endif /* CONFIG_MALI_GATOR_SUPPORT */
3956 #ifdef CONFIG_MALI_SYSTEM_TRACE
3957 #include "mali_linux_kbase_trace.h"
3958 #endif