/*
 *
 * (C) COPYRIGHT ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */

#include <mali_kbase.h>

#include <linux/dma-mapping.h>
#ifdef CONFIG_SYNC
#include "sync.h"
#include <linux/syscalls.h>
#include "mali_kbase_sync.h"
#endif

/* Mask to check cache alignment of data structures */
#define KBASE_CACHE_ALIGNMENT_MASK              ((1<<L1_CACHE_SHIFT)-1)

/**
 * @file mali_kbase_softjobs.c
 *
 * This file implements the logic behind software-only jobs that are
 * executed within the driver rather than being handed over to the GPU.
 */

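/* Sample the GPU cycle counter, the GPU timestamp and the CPU monotonic
 * clock together, then write the result into the user-visible buffer that
 * katom->jc points at.
 *
 * Returns 0 once the atom has been processed (katom->event_code carries the
 * outcome), or a non-zero error code if the device is suspending and the
 * atom has been queued for processing on resume instead.
 */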
static int kbase_dump_cpu_gpu_time(struct kbase_jd_atom *katom)
{
        struct kbase_va_region *reg;
        phys_addr_t addr = 0;
        u64 pfn;
        u32 offset;
        char *page;
        struct timespec ts;
        struct base_dump_cpu_gpu_counters data;
        u64 system_time;
        u64 cycle_counter;
        mali_addr64 jc = katom->jc;
        struct kbase_context *kctx = katom->kctx;
        int pm_active_err;

        u32 hi1, hi2;

        memset(&data, 0, sizeof(data));

        /* Take the PM active reference as late as possible - otherwise, it
         * could delay suspend until we process the atom (which may be at the
         * end of a long chain of dependencies).
         */
        pm_active_err = kbase_pm_context_active_handle_suspend(kctx->kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE);
        if (pm_active_err) {
                struct kbasep_js_device_data *js_devdata = &kctx->kbdev->js_data;

                /* We're suspended - queue this on the list of suspended jobs.
                 * Use dep_item[1], because dep_item[0] is in use for
                 * 'waiting_soft_jobs'.
                 */
                mutex_lock(&js_devdata->runpool_mutex);
                list_add_tail(&katom->dep_item[1], &js_devdata->suspended_soft_jobs_list);
                mutex_unlock(&js_devdata->runpool_mutex);

                return pm_active_err;
        }

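        /* The cycle counter only counts while at least one requester holds
         * it (the request enables it, the release may disable it again), so
         * hold a request across both register samples below. */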
        kbase_pm_request_gpu_cycle_counter(kctx->kbdev);

        /* Read hi, lo, hi to ensure that overflow from lo to hi is handled
         * correctly. */
        do {
                hi1 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI), NULL);
                cycle_counter = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL);
                hi2 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI), NULL);
                cycle_counter |= (((u64) hi1) << 32);
        } while (hi1 != hi2);

        /* Read hi, lo, hi to ensure that overflow from lo to hi is handled
         * correctly. */
        do {
                hi1 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(TIMESTAMP_HI), NULL);
                system_time = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(TIMESTAMP_LO), NULL);
                hi2 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(TIMESTAMP_HI), NULL);
                system_time |= (((u64) hi1) << 32);
        } while (hi1 != hi2);

        /* Record the CPU's idea of current time */
        getrawmonotonic(&ts);

        kbase_pm_release_gpu_cycle_counter(kctx->kbdev);

        kbase_pm_context_idle(kctx->kbdev);

        data.sec = ts.tv_sec;
        data.usec = ts.tv_nsec / 1000;
        data.system_time = system_time;
        data.cycle_counter = cycle_counter;

        pfn = jc >> PAGE_SHIFT;
        offset = jc & ~PAGE_MASK;

        /* Assume this atom will be cancelled until we know otherwise */
        katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
        if (offset > PAGE_SIZE - sizeof(data)) {
                /* The structure wouldn't fit within the page */
                return 0;
        }

        kbase_gpu_vm_lock(kctx);
        reg = kbase_region_tracker_find_region_enclosing_address(kctx, jc);
        if (reg &&
            (reg->flags & KBASE_REG_GPU_WR) &&
            reg->alloc && reg->alloc->pages)
                addr = reg->alloc->pages[pfn - reg->start_pfn];

        kbase_gpu_vm_unlock(kctx);
        if (!addr)
                return 0;

        page = kmap(pfn_to_page(PFN_DOWN(addr)));
        if (!page)
                return 0;

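        /* Bracket the CPU write with cache maintenance so the GPU's (DMA)
         * view of the page stays coherent with the CPU's copy. */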
        dma_sync_single_for_cpu(katom->kctx->kbdev->dev,
                        kbase_dma_addr(pfn_to_page(PFN_DOWN(addr))) +
                        offset, sizeof(data),
                        DMA_BIDIRECTIONAL);
        memcpy(page + offset, &data, sizeof(data));
        dma_sync_single_for_device(katom->kctx->kbdev->dev,
                        kbase_dma_addr(pfn_to_page(PFN_DOWN(addr))) +
                        offset, sizeof(data),
                        DMA_BIDIRECTIONAL);
        kunmap(pfn_to_page(PFN_DOWN(addr)));

        /* Atom was fine - mark it as done */
        katom->event_code = BASE_JD_EVENT_DONE;

        return 0;
}

#ifdef CONFIG_SYNC

/* Complete an atom that has returned '1' from kbase_process_soft_job
 * (i.e. has waited)
 *
 * @param katom     The atom to complete
 */
static void complete_soft_job(struct kbase_jd_atom *katom)
{
        struct kbase_context *kctx = katom->kctx;

        mutex_lock(&kctx->jctx.lock);
        list_del(&katom->dep_item[0]);
        kbase_finish_soft_job(katom);
        if (jd_done_nolock(katom))
                kbasep_js_try_schedule_head_ctx(kctx->kbdev);
        mutex_unlock(&kctx->jctx.lock);
}

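/* Signal the sync_pt backing katom->fence with 'result' and return the event
 * code the atom should complete with. Only fences created by this driver
 * (exactly one sync_pt, on one of our timelines) may be triggered. */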
static enum base_jd_event_code kbase_fence_trigger(struct kbase_jd_atom *katom, int result)
{
        struct sync_pt *pt;
        struct sync_timeline *timeline;

        if (!list_is_singular(&katom->fence->pt_list_head)) {
                /* Not exactly one item in the list - so it didn't (directly) come from us */
                return BASE_JD_EVENT_JOB_CANCELLED;
        }

        pt = list_first_entry(&katom->fence->pt_list_head, struct sync_pt, pt_list);
        timeline = pt->parent;

        if (!kbase_sync_timeline_is_ours(timeline)) {
                /* Fence has a sync_pt which isn't ours! */
                return BASE_JD_EVENT_JOB_CANCELLED;
        }

        kbase_sync_signal_pt(pt, result);

        sync_timeline_signal(timeline);

        return (result < 0) ? BASE_JD_EVENT_JOB_CANCELLED : BASE_JD_EVENT_DONE;
}

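/* Workqueue callback: complete a fence-wait atom in process context. See
 * kbase_fence_wait_callback for why completion is deferred to here. */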
static void kbase_fence_wait_worker(struct work_struct *data)
{
        struct kbase_jd_atom *katom;
        struct kbase_context *kctx;

        katom = container_of(data, struct kbase_jd_atom, work);
        kctx = katom->kctx;

        complete_soft_job(katom);
}

static void kbase_fence_wait_callback(struct sync_fence *fence, struct sync_fence_waiter *waiter)
{
        struct kbase_jd_atom *katom = container_of(waiter, struct kbase_jd_atom, sync_waiter);
        struct kbase_context *kctx;

        KBASE_DEBUG_ASSERT(NULL != katom);

        kctx = katom->kctx;

        KBASE_DEBUG_ASSERT(NULL != kctx);

        /* Propagate the fence status to the atom.
         * If negative then cancel this atom and its dependencies.
         */
        if (fence->status < 0)
                katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;

        /* To prevent a potential deadlock we schedule the work onto the
         * job_done_wq workqueue.
         *
         * The issue is that we may signal the timeline while holding
         * kctx->jctx.lock and the callbacks are run synchronously from
         * sync_timeline_signal. So we simply defer the work.
         */

        KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
        INIT_WORK(&katom->work, kbase_fence_wait_worker);
        queue_work(kctx->jctx.job_done_wq, &katom->work);
}

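/* Arm an asynchronous wait on katom->fence.
 *
 * Returns 0 if the fence was already signalled (the atom is complete), or 1
 * if the atom must wait - completion then happens from the fence callback,
 * or from the worker if the wait could not be set up. */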
static int kbase_fence_wait(struct kbase_jd_atom *katom)
{
        int ret;

        KBASE_DEBUG_ASSERT(NULL != katom);
        KBASE_DEBUG_ASSERT(NULL != katom->kctx);

        sync_fence_waiter_init(&katom->sync_waiter, kbase_fence_wait_callback);

        ret = sync_fence_wait_async(katom->fence, &katom->sync_waiter);

        if (ret == 1) {
                /* Already signalled */
                return 0;
        } else if (ret < 0) {
                goto cancel_atom;
        }
        return 1;

 cancel_atom:
        katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
        /* We should cause the dependent jobs in the bag to be failed; to do
         * this we schedule the work queue to complete this job. */
        KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
        INIT_WORK(&katom->work, kbase_fence_wait_worker);
        queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
        return 1;
}

static void kbase_fence_cancel_wait(struct kbase_jd_atom *katom)
{
        if (sync_fence_cancel_async(katom->fence, &katom->sync_waiter) != 0) {
                /* The wait wasn't cancelled - leave the cleanup for kbase_fence_wait_callback */
                return;
        }

        /* Wait was cancelled - zap the atoms */
        katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;

        kbase_finish_soft_job(katom);

        if (jd_done_nolock(katom))
                kbasep_js_try_schedule_head_ctx(katom->kctx->kbdev);
}
#endif /* CONFIG_SYNC */

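/* Execute a soft job. Returns 0 if the atom is complete (or was cancelled),
 * or non-zero if it has not finished yet - e.g. it is waiting on a fence, or
 * was queued for resume while the device was suspending. */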
int kbase_process_soft_job(struct kbase_jd_atom *katom)
{
        switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
        case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
                return kbase_dump_cpu_gpu_time(katom);
#ifdef CONFIG_SYNC
        case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
                KBASE_DEBUG_ASSERT(katom->fence != NULL);
                katom->event_code = kbase_fence_trigger(katom, katom->event_code == BASE_JD_EVENT_DONE ? 0 : -EFAULT);
                /* Release the reference as we don't need it any more */
                sync_fence_put(katom->fence);
                katom->fence = NULL;
                break;
        case BASE_JD_REQ_SOFT_FENCE_WAIT:
                return kbase_fence_wait(katom);
#endif /* CONFIG_SYNC */
        case BASE_JD_REQ_SOFT_REPLAY:
                return kbase_replay_process(katom);
        }

        /* Atom is complete */
        return 0;
}

void kbase_cancel_soft_job(struct kbase_jd_atom *katom)
{
        switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
#ifdef CONFIG_SYNC
        case BASE_JD_REQ_SOFT_FENCE_WAIT:
                kbase_fence_cancel_wait(katom);
                break;
#endif
        default:
                /* This soft-job doesn't support cancellation! */
                KBASE_DEBUG_ASSERT(0);
        }
}

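/* Validate a soft job and acquire any resources it needs (such as fence
 * references) before it is queued. This runs in the context of the
 * submitting process - it uses copy_from_user/copy_to_user on katom->jc. */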
mali_error kbase_prepare_soft_job(struct kbase_jd_atom *katom)
{
        switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
        case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
                if (0 != (katom->jc & KBASE_CACHE_ALIGNMENT_MASK))
                        return MALI_ERROR_FUNCTION_FAILED;
                break;
#ifdef CONFIG_SYNC
        case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
                {
                        struct base_fence fence;
                        int fd;

                        if (0 != copy_from_user(&fence, (void __user *)(uintptr_t) katom->jc, sizeof(fence)))
                                return MALI_ERROR_FUNCTION_FAILED;

                        fd = kbase_stream_create_fence(fence.basep.stream_fd);
                        if (fd < 0)
                                return MALI_ERROR_FUNCTION_FAILED;

                        katom->fence = sync_fence_fdget(fd);

                        if (katom->fence == NULL) {
                                /* The only way the fence can be NULL is if
                                 * userspace closed it for us, so we don't
                                 * need to clean it up. */
                                return MALI_ERROR_FUNCTION_FAILED;
                        }
                        fence.basep.fd = fd;
                        if (0 != copy_to_user((void __user *)(uintptr_t) katom->jc, &fence, sizeof(fence))) {
                                katom->fence = NULL;
                                sys_close(fd);
                                return MALI_ERROR_FUNCTION_FAILED;
                        }
                }
                break;
        case BASE_JD_REQ_SOFT_FENCE_WAIT:
                {
                        struct base_fence fence;

                        if (0 != copy_from_user(&fence, (void __user *)(uintptr_t) katom->jc, sizeof(fence)))
                                return MALI_ERROR_FUNCTION_FAILED;

                        /* Get a reference to the fence object */
                        katom->fence = sync_fence_fdget(fence.basep.fd);
                        if (katom->fence == NULL)
                                return MALI_ERROR_FUNCTION_FAILED;
                }
                break;
#endif /* CONFIG_SYNC */
        case BASE_JD_REQ_SOFT_REPLAY:
                break;
        default:
                /* Unsupported soft-job */
                return MALI_ERROR_FUNCTION_FAILED;
        }
        return MALI_ERROR_NONE;
}

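/* Release any resources still held by a soft job once it has completed or
 * been cancelled - e.g. drop a fence reference that was never consumed. */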
void kbase_finish_soft_job(struct kbase_jd_atom *katom)
{
        switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
        case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
                /* Nothing to do */
                break;
#ifdef CONFIG_SYNC
        case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
                if (katom->fence) {
                        /* The fence has not yet been signalled, so we do it now */
                        kbase_fence_trigger(katom, katom->event_code == BASE_JD_EVENT_DONE ? 0 : -EFAULT);
                        sync_fence_put(katom->fence);
                        katom->fence = NULL;
                }
                break;
        case BASE_JD_REQ_SOFT_FENCE_WAIT:
                /* Release the reference to the fence object */
                sync_fence_put(katom->fence);
                katom->fence = NULL;
                break;
#endif /* CONFIG_SYNC */
        }
}

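/* On resume, re-run every soft job that was parked on
 * suspended_soft_jobs_list while the device was suspending. */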
void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev)
{
        LIST_HEAD(local_suspended_soft_jobs);
        struct kbase_jd_atom *tmp_iter;
        struct kbase_jd_atom *katom_iter;
        struct kbasep_js_device_data *js_devdata;
        mali_bool resched = MALI_FALSE;

        KBASE_DEBUG_ASSERT(kbdev);

        js_devdata = &kbdev->js_data;

        /* Move out the entire list */
        mutex_lock(&js_devdata->runpool_mutex);
        list_splice_init(&js_devdata->suspended_soft_jobs_list, &local_suspended_soft_jobs);
        mutex_unlock(&js_devdata->runpool_mutex);

        /* Each atom must be detached from the list and run separately - it
         * could be re-added to the old list, but this is unlikely. */
        list_for_each_entry_safe(katom_iter, tmp_iter, &local_suspended_soft_jobs, dep_item[1]) {
                struct kbase_context *kctx = katom_iter->kctx;

                mutex_lock(&kctx->jctx.lock);

                /* Remove from the global list */
                list_del(&katom_iter->dep_item[1]);
                /* Remove from the context's list of waiting soft jobs */
                list_del(&katom_iter->dep_item[0]);

                if (kbase_process_soft_job(katom_iter) == 0) {
                        kbase_finish_soft_job(katom_iter);
                        resched |= jd_done_nolock(katom_iter);
                } else {
                        /* The job has not completed */
                        KBASE_DEBUG_ASSERT((katom_iter->core_req & BASEP_JD_REQ_ATOM_TYPE)
                                        != BASE_JD_REQ_SOFT_REPLAY);
                        list_add_tail(&katom_iter->dep_item[0], &kctx->waiting_soft_jobs);
                }

                mutex_unlock(&kctx->jctx.lock);
        }

        if (resched)
                kbasep_js_try_schedule_head_ctx(kbdev);
}