MALI: rockchip: upgrade midgard DDK to r14p0-01rel0
firefly-linux-kernel-4.4.55.git: drivers/gpu/arm/midgard/mali_kbase_dma_fence.c
/*
 *
 * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */


/* Include mali_kbase_dma_fence.h before checking for CONFIG_MALI_DMA_FENCE as
 * it will be set there.
 */
#include "mali_kbase_dma_fence.h"

#include <linux/atomic.h>
#include <linux/fence.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/reservation.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/ww_mutex.h>

#include <mali_kbase.h>

/* Spin lock protecting all Mali fences as fence->lock. */
static DEFINE_SPINLOCK(kbase_dma_fence_lock);

static void
kbase_dma_fence_work(struct work_struct *pwork);

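/**
 * kbase_dma_fence_waiters_add() - Add katom to the list of dma-fence waiting atoms
 * @katom: Pointer to katom to add to the context's list of waiters
 *
 * The atom is added to kctx->dma_fence.waiting_resource, the per-context list
 * of atoms that are still waiting on dma-fences attached to their reservation
 * objects.
 */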
static void
kbase_dma_fence_waiters_add(struct kbase_jd_atom *katom)
{
        struct kbase_context *kctx = katom->kctx;

        list_add_tail(&katom->queue, &kctx->dma_fence.waiting_resource);
}

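/**
 * kbase_dma_fence_waiters_remove() - Remove katom from the list of dma-fence waiting atoms
 * @katom: Pointer to katom to remove from the list
 */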
void
kbase_dma_fence_waiters_remove(struct kbase_jd_atom *katom)
{
        list_del(&katom->queue);
}

static const char *
kbase_dma_fence_get_driver_name(struct fence *fence)
{
        return kbase_drv_name;
}

static const char *
kbase_dma_fence_get_timeline_name(struct fence *fence)
{
        return kbase_timeline_name;
}

static bool
kbase_dma_fence_enable_signaling(struct fence *fence)
{
        /* If in the future we need to add code here, remember to take a
         * reference to the fence and release it when signaling, as stated
         * in fence.h.
         */
        return true;
}

static void
kbase_dma_fence_fence_value_str(struct fence *fence, char *str, int size)
{
        snprintf(str, size, "%u", fence->seqno);
}

static const struct fence_ops kbase_dma_fence_ops = {
        .get_driver_name = kbase_dma_fence_get_driver_name,
        .get_timeline_name = kbase_dma_fence_get_timeline_name,
        .enable_signaling = kbase_dma_fence_enable_signaling,
        /* Use the default wait */
        .wait = fence_default_wait,
        .fence_value_str = kbase_dma_fence_fence_value_str,
};

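/**
 * kbase_dma_fence_new() - Allocate and initialize a new Mali fence
 * @context: Fence context this fence belongs to
 * @seqno:   Sequence number of the fence within its context
 *
 * Return: Pointer to a fence initialized with kbase_dma_fence_ops and the
 * shared kbase_dma_fence_lock, or NULL if the allocation fails.
 */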
static struct fence *
kbase_dma_fence_new(unsigned int context, unsigned int seqno)
{
        struct fence *fence;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return NULL;

        fence_init(fence,
                   &kbase_dma_fence_ops,
                   &kbase_dma_fence_lock,
                   context,
                   seqno);

        return fence;
}

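/**
 * kbase_dma_fence_lock_reservations() - Lock all reservation objects in @info
 * @info: Pointer to struct holding the reservation objects to lock
 * @ctx:  W/W acquire context used for the locking sequence
 *
 * Takes the ww_mutex of every reservation object in @info->resv_objs. On
 * -EDEADLK the already-held locks are dropped, the contended lock is taken
 * with ww_mutex_lock_slow() and the whole sequence is retried.
 *
 * Return: 0 on success, or a negative error code if locking fails.
 */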
static int
kbase_dma_fence_lock_reservations(struct kbase_dma_fence_resv_info *info,
                                  struct ww_acquire_ctx *ctx)
{
        struct reservation_object *content_res = NULL;
        unsigned int content_res_idx = 0;
        unsigned int r;
        int err = 0;

        ww_acquire_init(ctx, &reservation_ww_class);

retry:
        for (r = 0; r < info->dma_fence_resv_count; r++) {
                if (info->resv_objs[r] == content_res) {
                        content_res = NULL;
                        continue;
                }

                err = ww_mutex_lock(&info->resv_objs[r]->lock, ctx);
                if (err)
                        goto error;
        }

        ww_acquire_done(ctx);
        return err;

error:
        content_res_idx = r;

        /* Unlock the ones we have already locked */
        while (r--)
                ww_mutex_unlock(&info->resv_objs[r]->lock);

        if (content_res)
                ww_mutex_unlock(&content_res->lock);

        /* If we deadlocked, take the contended lock with lock_slow and retry */
        if (err == -EDEADLK) {
                content_res = info->resv_objs[content_res_idx];
                ww_mutex_lock_slow(&content_res->lock, ctx);
                goto retry;
        }

        /* Reaching this point means the function failed */
        ww_acquire_fini(ctx);
        return err;
}

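/**
 * kbase_dma_fence_unlock_reservations() - Unlock all reservation objects in @info
 * @info: Pointer to struct holding the reservation objects to unlock
 * @ctx:  W/W acquire context used when the objects were locked
 */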
static void
kbase_dma_fence_unlock_reservations(struct kbase_dma_fence_resv_info *info,
                                    struct ww_acquire_ctx *ctx)
{
        unsigned int r;

        for (r = 0; r < info->dma_fence_resv_count; r++)
                ww_mutex_unlock(&info->resv_objs[r]->lock);
        ww_acquire_fini(ctx);
}

/**
 * kbase_dma_fence_queue_work() - Queue work to handle @katom
 * @katom: Pointer to atom for which to queue work
 *
 * Queue kbase_dma_fence_work() for @katom to clean up the fence callbacks and
 * submit the atom.
 */
static void
kbase_dma_fence_queue_work(struct kbase_jd_atom *katom)
{
        struct kbase_context *kctx = katom->kctx;
        bool ret;

        INIT_WORK(&katom->work, kbase_dma_fence_work);
        ret = queue_work(kctx->dma_fence.wq, &katom->work);
        /* Warn if work was already queued; that should not happen. */
        WARN_ON(!ret);
}

/**
 * kbase_dma_fence_free_callbacks - Free dma-fence callbacks on a katom
 * @katom: Pointer to katom
 * @queue_worker: Boolean indicating if fence worker is to be queued when
 *                dep_count reaches 0.
 *
 * This function will free all fence callbacks on the katom's list of
 * callbacks. Callbacks that have not yet been called, because their fence
 * hasn't yet signaled, will first be removed from the fence.
 *
 * Locking: jctx.lock must be held when accessing katom->dma_fence.callbacks.
 */
static void
kbase_dma_fence_free_callbacks(struct kbase_jd_atom *katom, bool queue_worker)
{
        struct kbase_dma_fence_cb *cb, *tmp;

        lockdep_assert_held(&katom->kctx->jctx.lock);

        /* Clean up and free callbacks. */
        list_for_each_entry_safe(cb, tmp, &katom->dma_fence.callbacks, node) {
                bool ret;

                /* Cancel callbacks that haven't been called yet. */
                ret = fence_remove_callback(cb->fence, &cb->fence_cb);
                if (ret) {
                        int ret;

                        /* Fence had not signaled; clean up after
                         * canceling.
                         */
                        ret = atomic_dec_return(&katom->dma_fence.dep_count);

                        if (unlikely(queue_worker && ret == 0)) {
                                /*
                                 * dep_count went to zero and queue_worker is
                                 * true. Queue the worker to handle the
                                 * completion of the katom.
                                 */
                                kbase_dma_fence_queue_work(katom);
                        }
                }

                /*
                 * Release the reference taken in
                 * kbase_dma_fence_add_callback().
                 */
                fence_put(cb->fence);
                list_del(&cb->node);
                kfree(cb);
        }
}

/**
 * kbase_dma_fence_cancel_atom() - Cancels waiting on an atom
 * @katom:      Katom to cancel
 *
 * Locking: jctx.lock must be held when accessing katom->dma_fence.callbacks.
 */
static void
kbase_dma_fence_cancel_atom(struct kbase_jd_atom *katom)
{
        lockdep_assert_held(&katom->kctx->jctx.lock);

        /* Cancel callbacks and clean up. */
        kbase_dma_fence_free_callbacks(katom, false);

        KBASE_DEBUG_ASSERT(atomic_read(&katom->dma_fence.dep_count) == 0);

        /* Mark the atom as handled in case all fences signaled just before
         * canceling the callbacks and the worker was queued.
         */
        atomic_set(&katom->dma_fence.dep_count, -1);

        /* Prevent jd_done_nolock() from being called twice on an atom when
         * there is a race between job completion and cancellation.
         */

        if (katom->status == KBASE_JD_ATOM_STATE_QUEUED) {
                /* Wait was cancelled - zap the atom */
                katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
                if (jd_done_nolock(katom, NULL))
                        kbase_js_sched_all(katom->kctx->kbdev);
        }
}

/**
 * kbase_dma_fence_work() - Work function called when a fence is signaled
 * @pwork:      work_struct containing a pointer to a katom
 *
 * This function will clean up the fence callbacks and mark all dependencies
 * as satisfied.
 */
static void
kbase_dma_fence_work(struct work_struct *pwork)
{
        struct kbase_jd_atom *katom;
        struct kbase_jd_context *ctx;

        katom = container_of(pwork, struct kbase_jd_atom, work);
        ctx = &katom->kctx->jctx;

        mutex_lock(&ctx->lock);
        if (atomic_read(&katom->dma_fence.dep_count) != 0)
                goto out;

        atomic_set(&katom->dma_fence.dep_count, -1);

        /* Remove atom from list of dma-fence waiting atoms. */
        kbase_dma_fence_waiters_remove(katom);
        /* Cleanup callbacks. */
        kbase_dma_fence_free_callbacks(katom, false);
        /*
         * Queue atom on GPU, unless it has already completed due to a failing
         * dependency. Run jd_done_nolock() on the katom if it is completed.
         */
        if (unlikely(katom->status == KBASE_JD_ATOM_STATE_COMPLETED))
                jd_done_nolock(katom, NULL);
        else
                kbase_jd_dep_clear_locked(katom);

out:
        mutex_unlock(&ctx->lock);
}

/**
 * kbase_dma_fence_add_callback() - Add callback on @fence to block @katom
 * @katom: Pointer to katom that will be blocked by @fence
 * @fence: Pointer to fence on which to set up the callback
 * @callback: Pointer to function to be called when fence is signaled
 *
 * Caller needs to hold a reference to @fence when calling this function, and
 * the caller is responsible for releasing that reference. An additional
 * reference to @fence is taken when the callback is successfully set up; in
 * that case @fence must be kept valid until the callback has been called and
 * cleaned up.
 *
 * Return: 0 on success: the fence had either already signaled, or the
 * callback was set up. A negative error code is returned on failure.
 */
static int
kbase_dma_fence_add_callback(struct kbase_jd_atom *katom,
                             struct fence *fence,
                             fence_func_t callback)
{
        int err = 0;
        struct kbase_dma_fence_cb *kbase_fence_cb;

        kbase_fence_cb = kmalloc(sizeof(*kbase_fence_cb), GFP_KERNEL);
        if (!kbase_fence_cb)
                return -ENOMEM;

        kbase_fence_cb->fence = fence;
        kbase_fence_cb->katom = katom;
        INIT_LIST_HEAD(&kbase_fence_cb->node);

        err = fence_add_callback(fence, &kbase_fence_cb->fence_cb, callback);
        if (err == -ENOENT) {
                /* Fence already signaled; clear the error and return */
                err = 0;
                kbase_fence_cb->fence = NULL;
                kfree(kbase_fence_cb);
        } else if (err) {
                kfree(kbase_fence_cb);
        } else {
                /*
                 * Get reference to fence that will be kept until callback gets
                 * cleaned up in kbase_dma_fence_free_callbacks().
                 */
                fence_get(fence);
                atomic_inc(&katom->dma_fence.dep_count);
                /* Add callback to katom's list of callbacks */
                list_add(&kbase_fence_cb->node, &katom->dma_fence.callbacks);
        }

        return err;
}

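/**
 * kbase_dma_fence_cb() - Callback run when a dependency fence is signaled
 * @fence: Fence that has signaled
 * @cb:    Fence callback embedded in a struct kbase_dma_fence_cb
 *
 * Decrements the atom's dep_count and queues kbase_dma_fence_work() when the
 * count reaches zero, i.e. when all fences the atom was waiting on have
 * signaled.
 */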
static void
kbase_dma_fence_cb(struct fence *fence, struct fence_cb *cb)
{
        struct kbase_dma_fence_cb *kcb = container_of(cb,
                                struct kbase_dma_fence_cb,
                                fence_cb);
        struct kbase_jd_atom *katom = kcb->katom;

        /* If the atom is zapped, dep_count will be forced to a negative
         * number, preventing this callback from ever scheduling work (which
         * would in turn reschedule the atom).
         */
        if (atomic_dec_and_test(&katom->dma_fence.dep_count))
                kbase_dma_fence_queue_work(katom);
}

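/**
 * kbase_dma_fence_add_reservation_callback() - Add callbacks for the fences in @resv
 * @katom:     Pointer to katom that will be blocked by the fences
 * @resv:      Reservation object to look up fences in
 * @exclusive: Whether @katom needs exclusive access to @resv
 *
 * Registers kbase_dma_fence_cb() on the exclusive fence of @resv and, when
 * exclusive access is requested, on all shared fences as well. On error all
 * callbacks that were set up are freed again.
 *
 * Return: 0 on success, or a negative error code on failure.
 */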
static int
kbase_dma_fence_add_reservation_callback(struct kbase_jd_atom *katom,
                                         struct reservation_object *resv,
                                         bool exclusive)
{
        struct fence *excl_fence = NULL;
        struct fence **shared_fences = NULL;
        unsigned int shared_count = 0;
        int err, i;

        err = reservation_object_get_fences_rcu(resv,
                                                &excl_fence,
                                                &shared_count,
                                                &shared_fences);
        if (err)
                return err;

        if (excl_fence) {
                err = kbase_dma_fence_add_callback(katom,
                                                   excl_fence,
                                                   kbase_dma_fence_cb);

                /* Release our reference, taken by reservation_object_get_fences_rcu(),
                 * to the fence. We have set up our callback (if that was possible),
                 * and it is the fence owner's responsibility to signal the fence
                 * before allowing it to disappear.
                 */
                fence_put(excl_fence);

                if (err)
                        goto out;
        }

        if (exclusive) {
                for (i = 0; i < shared_count; i++) {
                        err = kbase_dma_fence_add_callback(katom,
                                                           shared_fences[i],
                                                           kbase_dma_fence_cb);
                        if (err)
                                goto out;
                }
        }

        /* Release all our references to the shared fences, taken by
         * reservation_object_get_fences_rcu(). We have set up our callbacks (if
         * that was possible), and it is the fence owner's responsibility to
         * signal the fences before allowing them to disappear.
         */
out:
        for (i = 0; i < shared_count; i++)
                fence_put(shared_fences[i]);
        kfree(shared_fences);

        if (err) {
                /*
                 * On error, cancel and clean up all callbacks that were set up
                 * before the error.
                 */
                kbase_dma_fence_free_callbacks(katom, false);
        }

        return err;
}

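/**
 * kbase_dma_fence_add_reservation() - Add a reservation object to @info
 * @resv:      Reservation object to add
 * @info:      Pointer to struct collecting the reservation objects for an atom
 * @exclusive: Whether the atom needs exclusive access to @resv
 *
 * Duplicate reservation objects are ignored. When @exclusive is true the
 * corresponding bit in info->dma_fence_excl_bitmap is set.
 */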
void kbase_dma_fence_add_reservation(struct reservation_object *resv,
                                     struct kbase_dma_fence_resv_info *info,
                                     bool exclusive)
{
        unsigned int i;

        for (i = 0; i < info->dma_fence_resv_count; i++) {
                /* Duplicate resource, ignore */
                if (info->resv_objs[i] == resv)
                        return;
        }

        info->resv_objs[info->dma_fence_resv_count] = resv;
        if (exclusive)
                set_bit(info->dma_fence_resv_count,
                        info->dma_fence_excl_bitmap);
        (info->dma_fence_resv_count)++;
}

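/**
 * kbase_dma_fence_wait() - Set up dma-fence waiting for @katom
 * @katom: Pointer to katom that will wait on the collected fences
 * @info:  Pointer to struct holding the reservation objects the atom accesses
 *
 * Creates a new fence for the atom, locks all reservation objects in @info,
 * adds callbacks on the fences already attached to them, and attaches the
 * atom's own fence as a shared or exclusive fence on each object. If all
 * dependencies have already signaled, the callbacks are freed again;
 * otherwise the atom is added to the context's list of dma-fence waiters.
 *
 * Return: 0 on success, or a negative error code on failure.
 */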
int kbase_dma_fence_wait(struct kbase_jd_atom *katom,
                         struct kbase_dma_fence_resv_info *info)
{
        int err, i;
        struct fence *fence;
        struct ww_acquire_ctx ww_ctx;

        lockdep_assert_held(&katom->kctx->jctx.lock);

        fence = kbase_dma_fence_new(katom->dma_fence.context,
                                    atomic_inc_return(&katom->dma_fence.seqno));
        if (!fence) {
                err = -ENOMEM;
                dev_err(katom->kctx->kbdev->dev,
                        "Error %d creating fence.\n", err);
                return err;
        }

        katom->dma_fence.fence = fence;
        atomic_set(&katom->dma_fence.dep_count, 1);

        err = kbase_dma_fence_lock_reservations(info, &ww_ctx);
        if (err) {
                dev_err(katom->kctx->kbdev->dev,
                        "Error %d locking reservations.\n", err);
                atomic_set(&katom->dma_fence.dep_count, -1);
                fence_put(fence);
                return err;
        }

        for (i = 0; i < info->dma_fence_resv_count; i++) {
                struct reservation_object *obj = info->resv_objs[i];

                if (!test_bit(i, info->dma_fence_excl_bitmap)) {
                        err = reservation_object_reserve_shared(obj);
                        if (err) {
                                dev_err(katom->kctx->kbdev->dev,
                                        "Error %d reserving space for shared fence.\n", err);
                                goto end;
                        }

                        err = kbase_dma_fence_add_reservation_callback(katom, obj, false);
                        if (err) {
                                dev_err(katom->kctx->kbdev->dev,
                                        "Error %d adding reservation to callback.\n", err);
                                goto end;
                        }

                        reservation_object_add_shared_fence(obj, katom->dma_fence.fence);
                } else {
                        err = kbase_dma_fence_add_reservation_callback(katom, obj, true);
                        if (err) {
                                dev_err(katom->kctx->kbdev->dev,
                                        "Error %d adding reservation to callback.\n", err);
                                goto end;
                        }

                        reservation_object_add_excl_fence(obj, katom->dma_fence.fence);
                }
        }

end:
        kbase_dma_fence_unlock_reservations(info, &ww_ctx);

        if (likely(!err)) {
                /* Test if the callbacks have already been triggered */
                if (atomic_dec_and_test(&katom->dma_fence.dep_count)) {
                        atomic_set(&katom->dma_fence.dep_count, -1);
                        kbase_dma_fence_free_callbacks(katom, false);
                } else {
                        /* Add katom to the list of dma-buf fence waiting atoms
                         * only if it is still waiting.
                         */
                        kbase_dma_fence_waiters_add(katom);
                }
        } else {
                /* There was an error; cancel the callbacks, set dep_count to
                 * -1 to indicate that the atom has been handled (the caller
                 * will kill it for us), signal the fence, and free the
                 * callbacks and the fence.
                 */
                kbase_dma_fence_free_callbacks(katom, false);
                atomic_set(&katom->dma_fence.dep_count, -1);
                kbase_dma_fence_signal(katom);
        }

        return err;
}

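/**
 * kbase_dma_fence_cancel_all_atoms() - Cancel dma-fence waits for all atoms in @kctx
 * @kctx: Pointer to kbase context
 *
 * Removes every atom from the context's list of dma-fence waiters and cancels
 * its outstanding fence callbacks.
 */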
void kbase_dma_fence_cancel_all_atoms(struct kbase_context *kctx)
{
        struct list_head *list = &kctx->dma_fence.waiting_resource;

        while (!list_empty(list)) {
                struct kbase_jd_atom *katom;

                katom = list_first_entry(list, struct kbase_jd_atom, queue);
                kbase_dma_fence_waiters_remove(katom);
                kbase_dma_fence_cancel_atom(katom);
        }
}

void kbase_dma_fence_cancel_callbacks(struct kbase_jd_atom *katom)
{
        /* Cancel callbacks and clean up. */
        kbase_dma_fence_free_callbacks(katom, true);
}

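/**
 * kbase_dma_fence_signal() - Signal and release the atom's own fence
 * @katom: Pointer to katom whose fence should be signaled
 *
 * Signals the fence created for @katom in kbase_dma_fence_wait(), drops the
 * reference to it and frees any remaining fence callbacks. Does nothing if
 * the atom has no fence.
 */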
void kbase_dma_fence_signal(struct kbase_jd_atom *katom)
{
        if (!katom->dma_fence.fence)
                return;

        KBASE_DEBUG_ASSERT(atomic_read(&katom->dma_fence.dep_count) == -1);

        /* Signal the atom's fence. */
        fence_signal(katom->dma_fence.fence);
        fence_put(katom->dma_fence.fence);
        katom->dma_fence.fence = NULL;

        kbase_dma_fence_free_callbacks(katom, false);
}

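/**
 * kbase_dma_fence_term() - Terminate dma-fence support for @kctx
 * @kctx: Pointer to kbase context
 *
 * Destroys the context's dma-fence workqueue.
 */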
void kbase_dma_fence_term(struct kbase_context *kctx)
{
        destroy_workqueue(kctx->dma_fence.wq);
        kctx->dma_fence.wq = NULL;
}

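/**
 * kbase_dma_fence_init() - Initialize dma-fence support for @kctx
 * @kctx: Pointer to kbase context
 *
 * Sets up the list of dma-fence waiting atoms and allocates the unbound
 * workqueue (limited to one active work item) used to complete atoms once
 * their fences have signaled.
 *
 * Return: 0 on success, or -ENOMEM if the workqueue cannot be allocated.
 */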
int kbase_dma_fence_init(struct kbase_context *kctx)
{
        INIT_LIST_HEAD(&kctx->dma_fence.waiting_resource);

        kctx->dma_fence.wq = alloc_workqueue("mali-fence-%d",
                                             WQ_UNBOUND, 1, kctx->pid);
        if (!kctx->dma_fence.wq)
                return -ENOMEM;

        return 0;
}