/*
 *
 * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */

#include <mali_kbase.h>

#include <linux/dma-mapping.h>
#ifdef CONFIG_SYNC
#include "sync.h"
#include <linux/syscalls.h>
#include "mali_kbase_sync.h"
#endif
#include <mali_kbase_hwaccess_time.h>
#include <linux/version.h>

/* Mask to check cache alignment of data structures */
#define KBASE_CACHE_ALIGNMENT_MASK              ((1<<L1_CACHE_SHIFT)-1)

/**
 * @file mali_kbase_softjobs.c
 *
 * This file implements the logic behind software only jobs that are
 * executed within the driver rather than being handed over to the GPU.
 */

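/* Dump the current CPU timestamp, GPU system timer and cycle counter into the
 * user buffer addressed by katom->jc.
 *
 * If the device is suspended the atom is queued on the list of suspended soft
 * jobs so it can be retried on resume, and the PM error code is returned.
 *
 * @param katom     The BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME atom to process
 * @return 0 if the atom completed (its event_code records success or failure),
 *         non-zero if the device is suspended and the atom was queued
 */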
static int kbase_dump_cpu_gpu_time(struct kbase_jd_atom *katom)
{
        struct kbase_va_region *reg;
        phys_addr_t addr = 0;
        u64 pfn;
        u32 offset;
        char *page;
        struct timespec ts;
        struct base_dump_cpu_gpu_counters data;
        u64 system_time;
        u64 cycle_counter;
        u64 jc = katom->jc;
        struct kbase_context *kctx = katom->kctx;
        int pm_active_err;

        memset(&data, 0, sizeof(data));

        /* Take the PM active reference as late as possible - otherwise, it could
         * delay suspend until we process the atom (which may be at the end of a
         * long chain of dependencies) */
        pm_active_err = kbase_pm_context_active_handle_suspend(kctx->kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE);
        if (pm_active_err) {
                struct kbasep_js_device_data *js_devdata = &kctx->kbdev->js_data;

                /* We're suspended - queue this on the list of suspended jobs
                 * Use dep_item[1], because dep_item[0] is in use for 'waiting_soft_jobs' */
                mutex_lock(&js_devdata->runpool_mutex);
                list_add_tail(&katom->dep_item[1], &js_devdata->suspended_soft_jobs_list);
                mutex_unlock(&js_devdata->runpool_mutex);

                return pm_active_err;
        }

        kbase_backend_get_gpu_time(kctx->kbdev, &cycle_counter, &system_time,
                                                                        &ts);

        kbase_pm_context_idle(kctx->kbdev);

        data.sec = ts.tv_sec;
        data.usec = ts.tv_nsec / 1000;
        data.system_time = system_time;
        data.cycle_counter = cycle_counter;

        pfn = jc >> PAGE_SHIFT;
        offset = jc & ~PAGE_MASK;

        /* Assume this atom will be cancelled until we know otherwise */
        katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
        if (offset > 0x1000 - sizeof(data)) {
                /* Wouldn't fit in the page */
                return 0;
        }

        kbase_gpu_vm_lock(kctx);
        reg = kbase_region_tracker_find_region_enclosing_address(kctx, jc);
        if (reg &&
            (reg->flags & KBASE_REG_GPU_WR) &&
            reg->cpu_alloc && reg->cpu_alloc->pages)
                addr = reg->cpu_alloc->pages[pfn - reg->start_pfn];

        kbase_gpu_vm_unlock(kctx);
        if (!addr)
                return 0;

        page = kmap(pfn_to_page(PFN_DOWN(addr)));
        if (!page)
                return 0;

        kbase_sync_single_for_cpu(katom->kctx->kbdev,
                        kbase_dma_addr(pfn_to_page(PFN_DOWN(addr))) +
                        offset, sizeof(data),
                        DMA_BIDIRECTIONAL);

        memcpy(page + offset, &data, sizeof(data));

        kbase_sync_single_for_device(katom->kctx->kbdev,
                        kbase_dma_addr(pfn_to_page(PFN_DOWN(addr))) +
                        offset, sizeof(data),
                        DMA_BIDIRECTIONAL);

        kunmap(pfn_to_page(PFN_DOWN(addr)));

        /* Atom was fine - mark it as done */
        katom->event_code = BASE_JD_EVENT_DONE;

        return 0;
}

#ifdef CONFIG_SYNC

/* Complete an atom that has returned '1' from kbase_process_soft_job (i.e. has waited)
 *
 * @param katom     The atom to complete
 */
static void complete_soft_job(struct kbase_jd_atom *katom)
{
        struct kbase_context *kctx = katom->kctx;

        mutex_lock(&kctx->jctx.lock);
        list_del(&katom->dep_item[0]);
        kbase_finish_soft_job(katom);
        if (jd_done_nolock(katom, NULL))
                kbase_js_sched_all(kctx->kbdev);
        mutex_unlock(&kctx->jctx.lock);
}

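/* Signal the sync point backing a fence-trigger atom.
 *
 * The fence must contain exactly one sync_pt and that sync_pt must belong to
 * one of our own timelines, otherwise the atom is treated as cancelled.
 *
 * @param katom     The BASE_JD_REQ_SOFT_FENCE_TRIGGER atom
 * @param result    0 to signal success, negative to signal an error
 * @return BASE_JD_EVENT_DONE on success, BASE_JD_EVENT_JOB_CANCELLED otherwise
 */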
static enum base_jd_event_code kbase_fence_trigger(struct kbase_jd_atom *katom, int result)
{
        struct sync_pt *pt;
        struct sync_timeline *timeline;

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
        if (!list_is_singular(&katom->fence->pt_list_head)) {
#else
        if (katom->fence->num_fences != 1) {
#endif
                /* Not exactly one item in the list - so it didn't (directly) come from us */
                return BASE_JD_EVENT_JOB_CANCELLED;
        }

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
        pt = list_first_entry(&katom->fence->pt_list_head, struct sync_pt, pt_list);
#else
        pt = container_of(katom->fence->cbs[0].sync_pt, struct sync_pt, base);
#endif
        timeline = sync_pt_parent(pt);

        if (!kbase_sync_timeline_is_ours(timeline)) {
                /* Fence has a sync_pt which isn't ours! */
                return BASE_JD_EVENT_JOB_CANCELLED;
        }

        kbase_sync_signal_pt(pt, result);

        sync_timeline_signal(timeline);

        return (result < 0) ? BASE_JD_EVENT_JOB_CANCELLED : BASE_JD_EVENT_DONE;
}

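/* Workqueue function that completes a fence-wait atom once its fence has been
 * signalled (or the wait has failed).
 *
 * @param data      The work_struct embedded in the waiting atom
 */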
static void kbase_fence_wait_worker(struct work_struct *data)
{
        struct kbase_jd_atom *katom;
        struct kbase_context *kctx;

        katom = container_of(data, struct kbase_jd_atom, work);
        kctx = katom->kctx;

        complete_soft_job(katom);
}

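/* Callback invoked by the sync framework when the fence a soft job is waiting
 * on is signalled.
 *
 * A negative fence status cancels the atom. Completion is deferred to the
 * context's job_done_wq because this callback can run synchronously from
 * sync_timeline_signal while kctx->jctx.lock is held.
 */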
static void kbase_fence_wait_callback(struct sync_fence *fence, struct sync_fence_waiter *waiter)
{
        struct kbase_jd_atom *katom = container_of(waiter, struct kbase_jd_atom, sync_waiter);
        struct kbase_context *kctx;

        KBASE_DEBUG_ASSERT(NULL != katom);

        kctx = katom->kctx;

        KBASE_DEBUG_ASSERT(NULL != kctx);

        /* Propagate the fence status to the atom.
         * If negative then cancel this atom and its dependencies.
         */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
        if (fence->status < 0)
#else
        if (atomic_read(&fence->status) < 0)
#endif
                katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;

        /* To prevent a potential deadlock we schedule the work onto the job_done_wq workqueue
         *
         * The issue is that we may signal the timeline while holding kctx->jctx.lock and
         * the callbacks are run synchronously from sync_timeline_signal. So we simply defer the work.
         */

        KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
        INIT_WORK(&katom->work, kbase_fence_wait_worker);
        queue_work(kctx->jctx.job_done_wq, &katom->work);
}

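/* Start an asynchronous wait on the fence attached to a fence-wait atom.
 *
 * @param katom     The BASE_JD_REQ_SOFT_FENCE_WAIT atom
 * @return 0 if the fence was already signalled and the atom is complete,
 *         1 if the atom must wait (it is completed later from the fence
 *         callback, or from the deferred worker if the wait failed to start)
 */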
static int kbase_fence_wait(struct kbase_jd_atom *katom)
{
        int ret;

        KBASE_DEBUG_ASSERT(NULL != katom);
        KBASE_DEBUG_ASSERT(NULL != katom->kctx);

        sync_fence_waiter_init(&katom->sync_waiter, kbase_fence_wait_callback);

        ret = sync_fence_wait_async(katom->fence, &katom->sync_waiter);

        if (ret == 1) {
                /* Already signalled */
                return 0;
        } else if (ret < 0) {
                goto cancel_atom;
        }
        return 1;

 cancel_atom:
        katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
        /* We should cause the dependent jobs in the bag to be failed;
         * to do this we schedule the work queue to complete this job */
        KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
        INIT_WORK(&katom->work, kbase_fence_wait_worker);
        queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
        return 1;
}

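/* Cancel an in-flight fence wait for an atom that is being zapped.
 *
 * If the asynchronous wait can no longer be cancelled the cleanup is left to
 * kbase_fence_wait_callback; otherwise the atom is marked as cancelled and
 * finished here.
 *
 * @param katom     The BASE_JD_REQ_SOFT_FENCE_WAIT atom to cancel
 */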
static void kbase_fence_cancel_wait(struct kbase_jd_atom *katom)
{
        if (!katom) {
                pr_err("katom is NULL, nothing to cancel\n");
                return;
        }
        if (!katom->fence) {
                pr_info("katom->fence is NULL, it may have been released out of order; continuing with the remaining cleanup\n");
                /* If we returned here we might loop forever: dep_item[0] would
                 * still need to be removed from kctx->waiting_soft_jobs, and
                 * jd_done_nolock() is what moves it onto the completed-jobs
                 * list. So fall through and finish the soft job anyway. */
                goto finish_softjob;
        }

        if (sync_fence_cancel_async(katom->fence, &katom->sync_waiter) != 0) {
                /* The wait wasn't cancelled - leave the cleanup for kbase_fence_wait_callback */
                return;
        }

        /* Wait was cancelled - zap the atoms */
finish_softjob:
        katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;

        kbase_finish_soft_job(katom);

        if (jd_done_nolock(katom, NULL))
                kbase_js_sched_all(katom->kctx->kbdev);
}
#endif /* CONFIG_SYNC */

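/* Execute a soft job.
 *
 * @param katom     The soft job atom to process
 * @return 0 if the atom has completed, non-zero if it has not yet completed
 *         and will be finished later (e.g. a fence wait that is still pending)
 */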
int kbase_process_soft_job(struct kbase_jd_atom *katom)
{
        switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
        case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
                return kbase_dump_cpu_gpu_time(katom);
#ifdef CONFIG_SYNC
        case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
                KBASE_DEBUG_ASSERT(katom->fence != NULL);
                katom->event_code = kbase_fence_trigger(katom, katom->event_code == BASE_JD_EVENT_DONE ? 0 : -EFAULT);
                /* Release the reference as we don't need it any more */
                sync_fence_put(katom->fence);
                katom->fence = NULL;
                break;
        case BASE_JD_REQ_SOFT_FENCE_WAIT:
                return kbase_fence_wait(katom);
#endif                          /* CONFIG_SYNC */
        case BASE_JD_REQ_SOFT_REPLAY:
                return kbase_replay_process(katom);
        }

        /* Atom is complete */
        return 0;
}

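/* Cancel a soft job that is currently waiting.
 *
 * Only fence-wait atoms support cancellation; any other atom type trips a
 * debug assertion.
 *
 * @param katom     The soft job atom to cancel
 */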
void kbase_cancel_soft_job(struct kbase_jd_atom *katom)
{
        switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
#ifdef CONFIG_SYNC
        case BASE_JD_REQ_SOFT_FENCE_WAIT:
                kbase_fence_cancel_wait(katom);
                break;
#endif
        default:
                /* This soft-job doesn't support cancellation! */
                KBASE_DEBUG_ASSERT(0);
        }
}

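/* Validate a soft job and acquire the resources it needs before it is queued.
 *
 * For fence-trigger atoms a new fence is created and its fd is copied back to
 * user space; for fence-wait atoms a reference to the user-supplied fence is
 * taken; for dump-time atoms the target address is checked for cache-line
 * alignment.
 *
 * @param katom     The soft job atom to prepare
 * @return 0 on success, -EINVAL on failure
 */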
int kbase_prepare_soft_job(struct kbase_jd_atom *katom)
{
        switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
        case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
                {
                        if (0 != (katom->jc & KBASE_CACHE_ALIGNMENT_MASK))
                                return -EINVAL;
                }
                break;
#ifdef CONFIG_SYNC
        case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
                {
                        struct base_fence fence;
                        int fd;

                        if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
                                return -EINVAL;

                        fd = kbase_stream_create_fence(fence.basep.stream_fd);
                        if (fd < 0)
                                return -EINVAL;

                        katom->fence = sync_fence_fdget(fd);

                        if (katom->fence == NULL) {
                                /* The only way the fence can be NULL is if userspace closed it for us.
                                 * So we don't need to clear it up */
                                return -EINVAL;
                        }
                        fence.basep.fd = fd;
                        if (0 != copy_to_user((__user void *)(uintptr_t) katom->jc, &fence, sizeof(fence))) {
                                katom->fence = NULL;
                                sys_close(fd);
                                return -EINVAL;
                        }
                }
                break;
        case BASE_JD_REQ_SOFT_FENCE_WAIT:
                {
                        struct base_fence fence;

                        if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
                                return -EINVAL;

                        /* Get a reference to the fence object */
                        katom->fence = sync_fence_fdget(fence.basep.fd);
                        if (katom->fence == NULL)
                                return -EINVAL;
                }
                break;
#endif                          /* CONFIG_SYNC */
        case BASE_JD_REQ_SOFT_REPLAY:
                break;
        default:
                /* Unsupported soft-job */
                return -EINVAL;
        }
        return 0;
}

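/* Release any resources still held by a soft job when it is retired.
 *
 * A fence-trigger atom whose fence was never signalled is signalled here
 * before the fence reference is dropped; a fence-wait atom simply drops its
 * fence reference.
 *
 * @param katom     The soft job atom being finished
 */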
void kbase_finish_soft_job(struct kbase_jd_atom *katom)
{
        switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
        case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
                /* Nothing to do */
                break;
#ifdef CONFIG_SYNC
        case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
                /* If fence has not yet been signalled, do it now */
                if (katom->fence) {
                        kbase_fence_trigger(katom, katom->event_code ==
                                        BASE_JD_EVENT_DONE ? 0 : -EFAULT);
                        sync_fence_put(katom->fence);
                        katom->fence = NULL;
                }
                break;
        case BASE_JD_REQ_SOFT_FENCE_WAIT:
                /* Release the reference to the fence object */
                if (katom->fence) {
                        sync_fence_put(katom->fence);
                        katom->fence = NULL;
                }
                break;
#endif                          /* CONFIG_SYNC */
        }
}

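/* Re-run the soft jobs that were queued while the device was suspended.
 *
 * The suspended list is spliced onto a local list under the runpool mutex,
 * then each atom is processed again under its own context lock; atoms that
 * still cannot complete are put back on their context's waiting_soft_jobs
 * list.
 *
 * @param kbdev     The kbase device being resumed
 */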
void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev)
{
        LIST_HEAD(local_suspended_soft_jobs);
        struct kbase_jd_atom *tmp_iter;
        struct kbase_jd_atom *katom_iter;
        struct kbasep_js_device_data *js_devdata;
        bool resched = false;

        KBASE_DEBUG_ASSERT(kbdev);

        js_devdata = &kbdev->js_data;

        /* Move out the entire list */
        mutex_lock(&js_devdata->runpool_mutex);
        list_splice_init(&js_devdata->suspended_soft_jobs_list,
                        &local_suspended_soft_jobs);
        mutex_unlock(&js_devdata->runpool_mutex);

        /*
         * Each atom must be detached from the list and run separately -
         * it could be re-added to the old list, but this is unlikely
         */
        list_for_each_entry_safe(katom_iter, tmp_iter,
                        &local_suspended_soft_jobs, dep_item[1]) {
                struct kbase_context *kctx = katom_iter->kctx;

                mutex_lock(&kctx->jctx.lock);

                /* Remove from the global list */
                list_del(&katom_iter->dep_item[1]);
                /* Remove from the context's list of waiting soft jobs */
                list_del(&katom_iter->dep_item[0]);

                if (kbase_process_soft_job(katom_iter) == 0) {
                        kbase_finish_soft_job(katom_iter);
                        resched |= jd_done_nolock(katom_iter, NULL);
                } else {
                        /* The job has not completed */
                        KBASE_DEBUG_ASSERT((katom_iter->core_req &
                                        BASEP_JD_REQ_ATOM_TYPE)
                                        != BASE_JD_REQ_SOFT_REPLAY);
                        list_add_tail(&katom_iter->dep_item[0],
                                        &kctx->waiting_soft_jobs);
                }

                mutex_unlock(&kctx->jctx.lock);
        }

        if (resched)
                kbase_js_sched_all(kbdev);
}