mali_760_driver : rk_ext on arm_release_ver, from r5p0-02dev0.
firefly-linux-kernel-4.4.55.git: drivers/gpu/arm/midgard/mali_kbase_softjobs.c
/*
 *
 * (C) COPYRIGHT ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */

#include <mali_kbase.h>

#include <linux/dma-mapping.h>
#ifdef CONFIG_SYNC
#include "sync.h"
#include <linux/syscalls.h>
#include "mali_kbase_sync.h"
#endif

/* Mask to check cache alignment of data structures */
#define KBASE_CACHE_ALIGNMENT_MASK              ((1<<L1_CACHE_SHIFT)-1)

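/* For example, with L1_CACHE_SHIFT == 6 (64-byte cache lines) the mask is
 * 0x3f, so (addr & KBASE_CACHE_ALIGNMENT_MASK) != 0 exactly when addr is
 * not cache-line aligned. */
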
/**
 * @file mali_kbase_softjobs.c
 *
 * This file implements the logic behind software only jobs that are
 * executed within the driver rather than being handed over to the GPU.
 */

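/* Sample the GPU cycle counter and timestamp alongside the CPU's raw
 * monotonic clock, and write the resulting base_dump_cpu_gpu_counters
 * structure into the GPU-mapped page addressed by katom->jc.
 *
 * @param katom     The atom to process
 * @return 0 if the atom was handled (success or cancellation is reported
 *         via katom->event_code); non-zero if the device is suspending,
 *         in which case the atom is queued on suspended_soft_jobs_list
 *         and re-run on resume.
 */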
static int kbase_dump_cpu_gpu_time(struct kbase_jd_atom *katom)
{
        struct kbase_va_region *reg;
        phys_addr_t addr = 0;
        u64 pfn;
        u32 offset;
        char *page;
        struct timespec ts;
        struct base_dump_cpu_gpu_counters data;
        u64 system_time;
        u64 cycle_counter;
        mali_addr64 jc = katom->jc;
        struct kbase_context *kctx = katom->kctx;
        int pm_active_err;

        u32 hi1, hi2;

        memset(&data, 0, sizeof(data));

        /* Take the PM active reference as late as possible - otherwise, it could
         * delay suspend until we process the atom (which may be at the end of a
         * long chain of dependencies) */
        pm_active_err = kbase_pm_context_active_handle_suspend(kctx->kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE);
        if (pm_active_err) {
                struct kbasep_js_device_data *js_devdata = &kctx->kbdev->js_data;

                /* We're suspended - queue this on the list of suspended jobs.
                 * Use dep_item[1], because dep_item[0] is in use for 'waiting_soft_jobs' */
                mutex_lock(&js_devdata->runpool_mutex);
                list_add_tail(&katom->dep_item[1], &js_devdata->suspended_soft_jobs_list);
                mutex_unlock(&js_devdata->runpool_mutex);

                return pm_active_err;
        }

        kbase_pm_request_gpu_cycle_counter(kctx->kbdev);

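        /* CYCLE_COUNT and TIMESTAMP are 64-bit values exposed as LO/HI
         * register pairs, so a torn read is possible if the low word carries
         * into the high word between the two accesses. Re-reading HI and
         * retrying until it is stable yields a consistent 64-bit value. */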
        /* Read hi, lo, hi to ensure that overflow from lo to hi is handled correctly */
        do {
                hi1 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI), NULL);
                cycle_counter = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL);
                hi2 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI), NULL);
                cycle_counter |= (((u64) hi1) << 32);
        } while (hi1 != hi2);

        /* Read hi, lo, hi to ensure that overflow from lo to hi is handled correctly */
        do {
                hi1 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(TIMESTAMP_HI), NULL);
                system_time = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(TIMESTAMP_LO), NULL);
                hi2 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(TIMESTAMP_HI), NULL);
                system_time |= (((u64) hi1) << 32);
        } while (hi1 != hi2);

        /* Record the CPU's idea of current time */
        getrawmonotonic(&ts);

        kbase_pm_release_gpu_cycle_counter(kctx->kbdev);

        kbase_pm_context_idle(kctx->kbdev);

        data.sec = ts.tv_sec;
        data.usec = ts.tv_nsec / 1000;
        data.system_time = system_time;
        data.cycle_counter = cycle_counter;

        pfn = jc >> PAGE_SHIFT;
        offset = jc & ~PAGE_MASK;

        /* Assume this atom will be cancelled until we know otherwise */
        katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
        if (offset > PAGE_SIZE - sizeof(data)) {
                /* Wouldn't fit in the page */
                return 0;
        }

        kbase_gpu_vm_lock(kctx);
        reg = kbase_region_tracker_find_region_enclosing_address(kctx, jc);
        if (reg &&
            (reg->flags & KBASE_REG_GPU_WR) &&
            reg->alloc && reg->alloc->pages)
                addr = reg->alloc->pages[pfn - reg->start_pfn];

        kbase_gpu_vm_unlock(kctx);
        if (!addr)
                return 0;

        page = kmap(pfn_to_page(PFN_DOWN(addr)));
        if (!page)
                return 0;

        dma_sync_single_for_cpu(katom->kctx->kbdev->dev,
                        kbase_dma_addr(pfn_to_page(PFN_DOWN(addr))) +
                        offset, sizeof(data),
                        DMA_BIDIRECTIONAL);
        memcpy(page + offset, &data, sizeof(data));
        dma_sync_single_for_device(katom->kctx->kbdev->dev,
                        kbase_dma_addr(pfn_to_page(PFN_DOWN(addr))) +
                        offset, sizeof(data),
                        DMA_BIDIRECTIONAL);
        kunmap(pfn_to_page(PFN_DOWN(addr)));

        /* Atom was fine - mark it as done */
        katom->event_code = BASE_JD_EVENT_DONE;

        return 0;
}

#ifdef CONFIG_SYNC

/* Complete an atom that has returned '1' from kbase_process_soft_job (i.e. has waited)
 *
 * @param katom     The atom to complete
 */
static void complete_soft_job(struct kbase_jd_atom *katom)
{
        struct kbase_context *kctx = katom->kctx;

        mutex_lock(&kctx->jctx.lock);
        list_del(&katom->dep_item[0]);
        kbase_finish_soft_job(katom);
        if (jd_done_nolock(katom))
                kbasep_js_try_schedule_head_ctx(kctx->kbdev);
        mutex_unlock(&kctx->jctx.lock);
}

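/* Signal the sync_pt backing a fence-trigger atom.
 *
 * The fence is only signalled if it contains exactly one sync_pt and that
 * sync_pt belongs to one of our own timelines; a fence built from foreign
 * sync points cannot have been created by this driver, so the atom is
 * cancelled instead.
 */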
static enum base_jd_event_code kbase_fence_trigger(struct kbase_jd_atom *katom, int result)
{
        struct sync_pt *pt;
        struct sync_timeline *timeline;

        if (!list_is_singular(&katom->fence->pt_list_head)) {
                /* Not exactly one item in the list - so it didn't (directly) come from us */
                return BASE_JD_EVENT_JOB_CANCELLED;
        }

        pt = list_first_entry(&katom->fence->pt_list_head, struct sync_pt, pt_list);
        timeline = pt->parent;

        if (!kbase_sync_timeline_is_ours(timeline)) {
                /* Fence has a sync_pt which isn't ours! */
                return BASE_JD_EVENT_JOB_CANCELLED;
        }

        kbase_sync_signal_pt(pt, result);

        sync_timeline_signal(timeline);

        return (result < 0) ? BASE_JD_EVENT_JOB_CANCELLED : BASE_JD_EVENT_DONE;
}

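/* Worker used to complete a fence-wait atom on the context's job_done_wq.
 * Completion is deferred to this worker rather than done directly in
 * kbase_fence_wait_callback; see the comment in that function.
 */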
static void kbase_fence_wait_worker(struct work_struct *data)
{
        struct kbase_jd_atom *katom;
        struct kbase_context *kctx;

        katom = container_of(data, struct kbase_jd_atom, work);
        kctx = katom->kctx;

        complete_soft_job(katom);
}

static void kbase_fence_wait_callback(struct sync_fence *fence, struct sync_fence_waiter *waiter)
{
        struct kbase_jd_atom *katom = container_of(waiter, struct kbase_jd_atom, sync_waiter);
        struct kbase_context *kctx;

        KBASE_DEBUG_ASSERT(NULL != katom);

        kctx = katom->kctx;

        KBASE_DEBUG_ASSERT(NULL != kctx);

        /* Propagate the fence status to the atom.
         * If negative then cancel this atom and its dependencies.
         */
        if (fence->status < 0)
                katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;

        /* To prevent a potential deadlock we schedule the work onto the job_done_wq workqueue.
         *
         * The issue is that we may signal the timeline while holding kctx->jctx.lock and
         * the callbacks are run synchronously from sync_timeline_signal. So we simply defer the work.
         */

        KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
        INIT_WORK(&katom->work, kbase_fence_wait_worker);
        queue_work(kctx->jctx.job_done_wq, &katom->work);
}

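/* Start an asynchronous wait on the atom's fence.
 *
 * @param katom     The fence-wait atom
 * @return 0 if the fence was already signalled and the atom is complete;
 *         1 if the atom must wait - completion then happens either from
 *         kbase_fence_wait_callback or, on error, from the worker queued
 *         here with the atom already marked as cancelled.
 */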
static int kbase_fence_wait(struct kbase_jd_atom *katom)
{
        int ret;

        KBASE_DEBUG_ASSERT(NULL != katom);
        KBASE_DEBUG_ASSERT(NULL != katom->kctx);

        sync_fence_waiter_init(&katom->sync_waiter, kbase_fence_wait_callback);

        ret = sync_fence_wait_async(katom->fence, &katom->sync_waiter);

        if (ret == 1) {
                /* Already signalled */
                return 0;
        } else if (ret < 0) {
                goto cancel_atom;
        }
        return 1;

 cancel_atom:
        katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
        /* We should cause the dependent jobs in the bag to be failed;
         * to do this we schedule the work queue to complete this job */
        KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
        INIT_WORK(&katom->work, kbase_fence_wait_worker);
        queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
        return 1;
}

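/* Cancel an in-flight fence wait.
 *
 * If the asynchronous wait cannot be cancelled (the callback is already
 * running or has run), cleanup is left to kbase_fence_wait_callback.
 * Otherwise the atom is marked as cancelled and finished here. The NULL
 * checks (apparently part of the rk_ext changes) guard against the fence
 * being released out of order; in that case the soft job is still
 * finished so the atom is not left on the waiting list forever.
 */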
static void kbase_fence_cancel_wait(struct kbase_jd_atom *katom)
{
        if (!katom) {
                pr_err("katom is NULL - nothing to cancel\n");
                return;
        }
        if (!katom->fence) {
                pr_info("katom->fence is NULL - it may have been released out of order, so continue with the remaining steps\n");
                /* If we returned here we might loop forever, since
                 * dep_item[0] would still be on kctx->waiting_soft_jobs;
                 * jd_done_nolock() is expected to move the atom to the
                 * completed job list and delete it from there. */
                goto finish_softjob;
        }

        if (sync_fence_cancel_async(katom->fence, &katom->sync_waiter) != 0) {
                /* The wait wasn't cancelled - leave the cleanup for kbase_fence_wait_callback */
                return;
        }

        /* Wait was cancelled - zap the atom */
finish_softjob:
        katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;

        kbase_finish_soft_job(katom);

        if (jd_done_nolock(katom))
                kbasep_js_try_schedule_head_ctx(katom->kctx->kbdev);
}
#endif /* CONFIG_SYNC */

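/* Execute a soft job.
 *
 * @param katom     The atom to process
 * @return 0 if the atom has completed (success or failure is reported via
 *         katom->event_code); non-zero if the job has not yet completed
 *         and will be completed later, e.g. a fence wait that has not yet
 *         signalled.
 */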
int kbase_process_soft_job(struct kbase_jd_atom *katom)
{
        switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
        case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
                return kbase_dump_cpu_gpu_time(katom);
#ifdef CONFIG_SYNC
        case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
                KBASE_DEBUG_ASSERT(katom->fence != NULL);
                katom->event_code = kbase_fence_trigger(katom, katom->event_code == BASE_JD_EVENT_DONE ? 0 : -EFAULT);
                /* Release the reference as we don't need it any more */
                sync_fence_put(katom->fence);
                katom->fence = NULL;
                break;
        case BASE_JD_REQ_SOFT_FENCE_WAIT:
                return kbase_fence_wait(katom);
#endif                          /* CONFIG_SYNC */
        case BASE_JD_REQ_SOFT_REPLAY:
                return kbase_replay_process(katom);
        }

        /* Atom is complete */
        return 0;
}

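/* Cancel a soft job that is still in progress.
 *
 * Only fence waits can remain in flight long enough to need cancelling;
 * any other soft job type reaching this function is a bug.
 */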
void kbase_cancel_soft_job(struct kbase_jd_atom *katom)
{
        switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
#ifdef CONFIG_SYNC
        case BASE_JD_REQ_SOFT_FENCE_WAIT:
                kbase_fence_cancel_wait(katom);
                break;
#endif
        default:
                /* This soft-job doesn't support cancellation! */
                KBASE_DEBUG_ASSERT(0);
        }
}

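/* Validate and set up a soft job at submission time.
 *
 * For a CPU/GPU time dump the target address must be cache-line aligned.
 * For a fence trigger a new fence is created on the stream supplied by
 * userspace and its fd is copied back to the structure at katom->jc. For
 * a fence wait a reference is taken on the fence fd supplied by userspace.
 */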
mali_error kbase_prepare_soft_job(struct kbase_jd_atom *katom)
{
        switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
        case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
                {
                        if (0 != (katom->jc & KBASE_CACHE_ALIGNMENT_MASK))
                                return MALI_ERROR_FUNCTION_FAILED;
                }
                break;
#ifdef CONFIG_SYNC
        case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
                {
                        struct base_fence fence;
                        int fd;

                        if (0 != copy_from_user(&fence, (void __user *)(uintptr_t) katom->jc, sizeof(fence)))
                                return MALI_ERROR_FUNCTION_FAILED;

                        fd = kbase_stream_create_fence(fence.basep.stream_fd);
                        if (fd < 0)
                                return MALI_ERROR_FUNCTION_FAILED;

                        katom->fence = sync_fence_fdget(fd);

                        if (katom->fence == NULL) {
                                /* The only way the fence can be NULL is if userspace closed it for us,
                                 * so we don't need to clean it up */
                                return MALI_ERROR_FUNCTION_FAILED;
                        }
                        fence.basep.fd = fd;
                        if (0 != copy_to_user((void __user *)(uintptr_t) katom->jc, &fence, sizeof(fence))) {
                                katom->fence = NULL;
                                sys_close(fd);
                                return MALI_ERROR_FUNCTION_FAILED;
                        }
                }
                break;
        case BASE_JD_REQ_SOFT_FENCE_WAIT:
                {
                        struct base_fence fence;

                        if (0 != copy_from_user(&fence, (void __user *)(uintptr_t) katom->jc, sizeof(fence)))
                                return MALI_ERROR_FUNCTION_FAILED;

                        /* Get a reference to the fence object */
                        katom->fence = sync_fence_fdget(fence.basep.fd);
                        if (katom->fence == NULL)
                                return MALI_ERROR_FUNCTION_FAILED;
                }
                break;
#endif                          /* CONFIG_SYNC */
        case BASE_JD_REQ_SOFT_REPLAY:
                break;
        default:
                /* Unsupported soft-job */
                return MALI_ERROR_FUNCTION_FAILED;
        }
        return MALI_ERROR_NONE;
}

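/* Release any resources still held by a soft job once it is done.
 *
 * A trigger fence that was never signalled (e.g. because the atom was
 * cancelled) is signalled here before its reference is dropped, so that
 * waiters are not left hanging.
 */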
void kbase_finish_soft_job(struct kbase_jd_atom *katom)
{
        switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
        case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
                /* Nothing to do */
                break;
#ifdef CONFIG_SYNC
        case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
                if (katom->fence) {
                        /* The fence has not yet been signalled, so we do it now */
                        kbase_fence_trigger(katom, katom->event_code == BASE_JD_EVENT_DONE ? 0 : -EFAULT);
                        sync_fence_put(katom->fence);
                        katom->fence = NULL;
                }
                break;
        case BASE_JD_REQ_SOFT_FENCE_WAIT:
                /* Release the reference to the fence object */
                if (katom->fence) {
                        sync_fence_put(katom->fence);
                        katom->fence = NULL;
                }
                break;
#endif                          /* CONFIG_SYNC */
        }
}

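/* Re-run soft jobs that were queued while the device was suspended.
 *
 * The global list is spliced out under runpool_mutex, then each atom is
 * detached and processed under its own context's jctx.lock. Atoms that
 * complete are retired via jd_done_nolock(); atoms that must wait again
 * go back onto their context's waiting_soft_jobs list.
 */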
void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev)
{
        LIST_HEAD(local_suspended_soft_jobs);
        struct kbase_jd_atom *tmp_iter;
        struct kbase_jd_atom *katom_iter;
        struct kbasep_js_device_data *js_devdata;
        mali_bool resched = MALI_FALSE;

        KBASE_DEBUG_ASSERT(kbdev);

        js_devdata = &kbdev->js_data;

        /* Move out the entire list */
        mutex_lock(&js_devdata->runpool_mutex);
        list_splice_init(&js_devdata->suspended_soft_jobs_list, &local_suspended_soft_jobs);
        mutex_unlock(&js_devdata->runpool_mutex);

        /* Each atom must be detached from the list and run separately - it could
         * be re-added to the old list, but this is unlikely */
        list_for_each_entry_safe(katom_iter, tmp_iter, &local_suspended_soft_jobs, dep_item[1]) {
                struct kbase_context *kctx = katom_iter->kctx;

                mutex_lock(&kctx->jctx.lock);

                /* Remove from the global list */
                list_del(&katom_iter->dep_item[1]);
                /* Remove from the context's list of waiting soft jobs */
                list_del(&katom_iter->dep_item[0]);

                if (kbase_process_soft_job(katom_iter) == 0) {
                        kbase_finish_soft_job(katom_iter);
                        resched |= jd_done_nolock(katom_iter);
                } else {
                        /* The job has not completed */
                        KBASE_DEBUG_ASSERT((katom_iter->core_req & BASEP_JD_REQ_ATOM_TYPE)
                                        != BASE_JD_REQ_SOFT_REPLAY);
                        list_add_tail(&katom_iter->dep_item[0], &kctx->waiting_soft_jobs);
                }

                mutex_unlock(&kctx->jctx.lock);
        }

        if (resched)
                kbasep_js_try_schedule_head_ctx(kbdev);
}