Revert "MALI: rockchip: upgrade midgard DDK to r14p0-01rel0"
[firefly-linux-kernel-4.4.55.git] drivers/gpu/arm/midgard/mali_kbase_dma_fence.c
/*
 *
 * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */

/* Include mali_kbase_dma_fence.h before checking for CONFIG_MALI_DMA_FENCE as
 * it will be set there.
 */
#include "mali_kbase_dma_fence.h"

#include <linux/atomic.h>
#include <linux/fence.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/reservation.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/ww_mutex.h>

#include <mali_kbase.h>

/* Spin lock protecting all Mali fences as fence->lock. */
static DEFINE_SPINLOCK(kbase_dma_fence_lock);

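/**
 * kbase_dma_fence_waiters_add() - Add katom to the list of dma-fence waiting atoms
 * @katom: Pointer to katom to add to the list
 *
 * Adds @katom to the kctx list of atoms that are blocked waiting on dma-fences
 * attached to their external resources.
 */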
static void
kbase_dma_fence_waiters_add(struct kbase_jd_atom *katom)
{
        struct kbase_context *kctx = katom->kctx;

        list_add_tail(&katom->queue, &kctx->dma_fence.waiting_resource);
}

void
kbase_dma_fence_waiters_remove(struct kbase_jd_atom *katom)
{
        list_del(&katom->queue);
}

static const char *
kbase_dma_fence_get_driver_name(struct fence *fence)
{
        return kbase_drv_name;
}

static const char *
kbase_dma_fence_get_timeline_name(struct fence *fence)
{
        return kbase_timeline_name;
}

static bool
kbase_dma_fence_enable_signaling(struct fence *fence)
{
        /* If in the future we need to add code here, remember to
         * get a reference to the fence and release it when signaling,
         * as stated in fence.h
         */
        return true;
}

static void
kbase_dma_fence_fence_value_str(struct fence *fence, char *str, int size)
{
        snprintf(str, size, "%u", fence->seqno);
}

static const struct fence_ops kbase_dma_fence_ops = {
        .get_driver_name = kbase_dma_fence_get_driver_name,
        .get_timeline_name = kbase_dma_fence_get_timeline_name,
        .enable_signaling = kbase_dma_fence_enable_signaling,
        /* Use the default wait */
        .wait = fence_default_wait,
        .fence_value_str = kbase_dma_fence_fence_value_str,
};

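/**
 * kbase_dma_fence_new() - Create a new dma-fence
 * @context: Fence context number
 * @seqno:   Sequence number within @context
 *
 * Return: A newly allocated fence initialized with kbase_dma_fence_ops and
 * protected by the global kbase_dma_fence_lock, or NULL on allocation failure.
 */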
static struct fence *
kbase_dma_fence_new(unsigned int context, unsigned int seqno)
{
        struct fence *fence;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return NULL;

        fence_init(fence,
                   &kbase_dma_fence_ops,
                   &kbase_dma_fence_lock,
                   context,
                   seqno);

        return fence;
}

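/**
 * kbase_dma_fence_lock_reservations() - Lock all reservation objects in @info
 * @info: Pointer to struct holding the reservation objects to lock
 * @ctx:  Acquire context used for the ww_mutex locking sequence
 *
 * Locks the ww_mutex of every reservation object in @info using the standard
 * wound/wait backoff scheme: on -EDEADLK all locks taken so far are released,
 * the contended lock is re-acquired with ww_mutex_lock_slow() and the whole
 * sequence is retried.
 *
 * Return: 0 on success, or a negative error code if locking failed.
 */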
static int
kbase_dma_fence_lock_reservations(struct kbase_dma_fence_resv_info *info,
                                  struct ww_acquire_ctx *ctx)
{
        struct reservation_object *content_res = NULL;
        unsigned int content_res_idx = 0;
        unsigned int r;
        int err = 0;

        ww_acquire_init(ctx, &reservation_ww_class);

retry:
        for (r = 0; r < info->dma_fence_resv_count; r++) {
                if (info->resv_objs[r] == content_res) {
                        content_res = NULL;
                        continue;
                }

                err = ww_mutex_lock(&info->resv_objs[r]->lock, ctx);
                if (err)
                        goto error;
        }

        ww_acquire_done(ctx);
        return err;

error:
        content_res_idx = r;

        /* Unlock the reservation objects locked so far */
        while (r--)
                ww_mutex_unlock(&info->resv_objs[r]->lock);

        if (content_res)
                ww_mutex_unlock(&content_res->lock);

        /* On deadlock, take the contended lock with ww_mutex_lock_slow() and
         * retry
         */
        if (err == -EDEADLK) {
                content_res = info->resv_objs[content_res_idx];
                ww_mutex_lock_slow(&content_res->lock, ctx);
                goto retry;
        }

        /* If we get here, the function failed */
        ww_acquire_fini(ctx);
        return err;
}

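/**
 * kbase_dma_fence_unlock_reservations() - Unlock all reservation objects in @info
 * @info: Pointer to struct holding the reservation objects to unlock
 * @ctx:  Acquire context previously passed to kbase_dma_fence_lock_reservations()
 *
 * Releases the ww_mutex of every reservation object in @info and ends the
 * acquire context.
 */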
static void
kbase_dma_fence_unlock_reservations(struct kbase_dma_fence_resv_info *info,
                                    struct ww_acquire_ctx *ctx)
{
        unsigned int r;

        for (r = 0; r < info->dma_fence_resv_count; r++)
                ww_mutex_unlock(&info->resv_objs[r]->lock);
        ww_acquire_fini(ctx);
}

/**
 * kbase_dma_fence_free_callbacks - Free dma-fence callbacks on a katom
 * @katom: Pointer to katom
 *
 * This function will free all fence callbacks on the katom's list of
 * callbacks. Callbacks that have not yet been called, because their fence
 * hasn't yet signaled, will first be removed from the fence.
 *
 * Locking: katom->dma_fence.callbacks list assumes jctx.lock is held.
 */
static void
kbase_dma_fence_free_callbacks(struct kbase_jd_atom *katom)
{
        struct kbase_dma_fence_cb *cb, *tmp;

        lockdep_assert_held(&katom->kctx->jctx.lock);

        /* Clean up and free callbacks. */
        list_for_each_entry_safe(cb, tmp, &katom->dma_fence.callbacks, node) {
                bool ret;

                /* Cancel callbacks that haven't been called yet. */
                ret = fence_remove_callback(cb->fence, &cb->fence_cb);
                if (ret) {
                        /* Fence had not signaled, clean up after
                         * canceling.
                         */
                        atomic_dec(&katom->dma_fence.dep_count);
                }

                /*
                 * Release the reference taken in
                 * kbase_dma_fence_add_callback().
                 */
                fence_put(cb->fence);
                list_del(&cb->node);
                kfree(cb);
        }
}

/**
 * kbase_dma_fence_cancel_atom() - Cancels waiting on an atom
 * @katom:      Katom to cancel
 *
 * Locking: katom->dma_fence.callbacks list assumes jctx.lock is held.
 */
static void
kbase_dma_fence_cancel_atom(struct kbase_jd_atom *katom)
{
        lockdep_assert_held(&katom->kctx->jctx.lock);

        /* Cancel callbacks and clean up. */
        kbase_dma_fence_free_callbacks(katom);

        KBASE_DEBUG_ASSERT(atomic_read(&katom->dma_fence.dep_count) == 0);

        /* Mark the atom as handled in case all fences signaled just before
         * canceling the callbacks and the worker was queued.
         */
        atomic_set(&katom->dma_fence.dep_count, -1);

        /* Prevent jd_done_nolock() from being called twice on an atom when
         * there is a race between job completion and cancellation.
         */

        if (katom->status == KBASE_JD_ATOM_STATE_QUEUED) {
                /* Wait was cancelled - zap the atom */
                katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
                if (jd_done_nolock(katom, NULL))
                        kbase_js_sched_all(katom->kctx->kbdev);
        }
}

/**
 * kbase_dma_fence_work() - Worker thread called when a fence is signaled
 * @pwork:      work_struct containing a pointer to a katom
 *
 * This function will clean up the fence callbacks and mark all dependencies
 * as satisfied.
 */
static void
kbase_dma_fence_work(struct work_struct *pwork)
{
        struct kbase_jd_atom *katom;
        struct kbase_jd_context *ctx;

        katom = container_of(pwork, struct kbase_jd_atom, work);
        ctx = &katom->kctx->jctx;

        mutex_lock(&ctx->lock);
        if (atomic_read(&katom->dma_fence.dep_count) != 0)
                goto out;

        atomic_set(&katom->dma_fence.dep_count, -1);

        /* Remove atom from list of dma-fence waiting atoms. */
        kbase_dma_fence_waiters_remove(katom);
        /* Clean up callbacks. */
        kbase_dma_fence_free_callbacks(katom);
        /* Queue atom on GPU. */
        kbase_jd_dep_clear_locked(katom);

out:
        mutex_unlock(&ctx->lock);
}

/**
 * kbase_dma_fence_add_callback() - Add callback on @fence to block @katom
 * @katom: Pointer to katom that will be blocked by @fence
 * @fence: Pointer to fence on which to set up the callback
 * @callback: Pointer to function to be called when fence is signaled
 *
 * Caller needs to hold a reference to @fence when calling this function, and
 * the caller is responsible for releasing that reference. An additional
 * reference to @fence will be taken when the callback is successfully set up,
 * and @fence then needs to be kept valid until the callback has been called
 * and cleanup has been done.
 *
 * Return: 0 on success: the fence was either already signalled, or the
 * callback was set up. A negative error code is returned on error.
 */
static int
kbase_dma_fence_add_callback(struct kbase_jd_atom *katom,
                             struct fence *fence,
                             fence_func_t callback)
{
        int err = 0;
        struct kbase_dma_fence_cb *kbase_fence_cb;

        kbase_fence_cb = kmalloc(sizeof(*kbase_fence_cb), GFP_KERNEL);
        if (!kbase_fence_cb)
                return -ENOMEM;

        kbase_fence_cb->fence = fence;
        kbase_fence_cb->katom = katom;
        INIT_LIST_HEAD(&kbase_fence_cb->node);

        err = fence_add_callback(fence, &kbase_fence_cb->fence_cb, callback);
        if (err == -ENOENT) {
                /* Fence signaled, clear the error and return */
                err = 0;
                kbase_fence_cb->fence = NULL;
                kfree(kbase_fence_cb);
        } else if (err) {
                kfree(kbase_fence_cb);
        } else {
                /*
                 * Get reference to fence that will be kept until callback gets
                 * cleaned up in kbase_dma_fence_free_callbacks().
                 */
                fence_get(fence);
                atomic_inc(&katom->dma_fence.dep_count);
                /* Add callback to katom's list of callbacks */
                list_add(&kbase_fence_cb->node, &katom->dma_fence.callbacks);
        }

        return err;
}

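/**
 * kbase_dma_fence_cb() - Callback run when a dependency fence is signaled
 * @fence: Fence that has been signaled
 * @cb:    Fence callback embedded in a struct kbase_dma_fence_cb
 *
 * Decrements the katom's dep_count and, when it reaches zero, queues
 * kbase_dma_fence_work() on the context's dma-fence workqueue to resume the
 * atom. A negative dep_count (zapped atom) means the work is never queued.
 */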
static void
kbase_dma_fence_cb(struct fence *fence, struct fence_cb *cb)
{
        struct kbase_dma_fence_cb *kcb = container_of(cb,
                                struct kbase_dma_fence_cb,
                                fence_cb);
        struct kbase_jd_atom *katom = kcb->katom;
        struct kbase_context *kctx = katom->kctx;

        /* If the atom is zapped, dep_count will be forced to a negative
         * number, preventing this callback from ever scheduling the work
         * that would otherwise reschedule the atom.
         */
        if (atomic_dec_and_test(&katom->dma_fence.dep_count)) {
                bool ret;

                INIT_WORK(&katom->work, kbase_dma_fence_work);
                ret = queue_work(kctx->dma_fence.wq, &katom->work);
                /* Warn if the work was already queued; that should not happen. */
                WARN_ON(!ret);
        }
}

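/**
 * kbase_dma_fence_add_reservation_callback() - Wait on the fences of a reservation object
 * @katom:     Katom that should be blocked until the fences are signaled
 * @resv:      Reservation object whose fences to wait on
 * @exclusive: Whether @katom will access the resource exclusively
 *
 * Adds kbase_dma_fence_cb() as a callback on the exclusive fence of @resv and,
 * for exclusive access, on all of its shared fences as well. On error, all
 * callbacks added to @katom so far are cancelled and freed again.
 *
 * Return: 0 on success, or a negative error code.
 */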
static int
kbase_dma_fence_add_reservation_callback(struct kbase_jd_atom *katom,
                                         struct reservation_object *resv,
                                         bool exclusive)
{
        struct fence *excl_fence = NULL;
        struct fence **shared_fences = NULL;
        unsigned int shared_count = 0;
        int err, i;

        err = reservation_object_get_fences_rcu(resv,
                                                &excl_fence,
                                                &shared_count,
                                                &shared_fences);
        if (err)
                return err;

        if (excl_fence) {
                err = kbase_dma_fence_add_callback(katom,
                                                   excl_fence,
                                                   kbase_dma_fence_cb);

                /* Release our reference to the fence, taken by
                 * reservation_object_get_fences_rcu(). We have set up our
                 * callback (if that was possible), and it is the fence
                 * owner's responsibility to signal the fence before allowing
                 * it to disappear.
                 */
                fence_put(excl_fence);

                if (err)
                        goto out;
        }

        if (exclusive) {
                for (i = 0; i < shared_count; i++) {
                        err = kbase_dma_fence_add_callback(katom,
                                                           shared_fences[i],
                                                           kbase_dma_fence_cb);
                        if (err)
                                goto out;
                }
        }

        /* Release all our references to the shared fences, taken by
         * reservation_object_get_fences_rcu(). We have set up our callbacks
         * (if that was possible), and it is the fence owner's responsibility
         * to signal the fences before allowing them to disappear.
         */
out:
        for (i = 0; i < shared_count; i++)
                fence_put(shared_fences[i]);
        kfree(shared_fences);

        if (err) {
                /*
                 * On error, cancel and clean up all callbacks that were set up
                 * before the error.
                 */
                kbase_dma_fence_free_callbacks(katom);
        }

        return err;
}

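/**
 * kbase_dma_fence_add_reservation() - Add a reservation object to @info
 * @resv:      Reservation object to add
 * @info:      Pointer to struct collecting the reservation objects for an atom
 * @exclusive: Whether the atom needs exclusive access to the resource
 *
 * Adds @resv to @info unless it is already present. If @exclusive is true, the
 * corresponding bit in @info->dma_fence_excl_bitmap is set so that
 * kbase_dma_fence_wait() later attaches the atom's fence as an exclusive
 * fence on this reservation object.
 *
 * A typical caller collects the reservation objects for all of an atom's
 * external resources and then calls kbase_dma_fence_wait(), roughly (sketch
 * only, assuming @info has been zero-initialised and sized by the caller):
 *
 *      kbase_dma_fence_add_reservation(resv, &info, exclusive);
 *      ...
 *      err = kbase_dma_fence_wait(katom, &info);
 */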
void kbase_dma_fence_add_reservation(struct reservation_object *resv,
                                     struct kbase_dma_fence_resv_info *info,
                                     bool exclusive)
{
        unsigned int i;

        for (i = 0; i < info->dma_fence_resv_count; i++) {
                /* Duplicate resource, ignore */
                if (info->resv_objs[i] == resv)
                        return;
        }

        info->resv_objs[info->dma_fence_resv_count] = resv;
        if (exclusive)
                set_bit(info->dma_fence_resv_count,
                        info->dma_fence_excl_bitmap);
        (info->dma_fence_resv_count)++;
}

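/**
 * kbase_dma_fence_wait() - Create a fence for @katom and wait on existing fences
 * @katom: Katom with external resources to wait for
 * @info:  Reservation objects collected with kbase_dma_fence_add_reservation()
 *
 * Creates a new fence for @katom, locks all reservation objects in @info,
 * registers callbacks on the fences already attached to them, and attaches the
 * new fence to each object as a shared or exclusive fence. If no callback ends
 * up blocking the atom, the callbacks are freed again; otherwise the atom is
 * added to the context's list of waiting atoms.
 *
 * Return: 0 on success, or a negative error code. On error the atom's fence is
 * signaled and the caller is expected to kill the atom.
 */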
int kbase_dma_fence_wait(struct kbase_jd_atom *katom,
                         struct kbase_dma_fence_resv_info *info)
{
        int err, i;
        struct fence *fence;
        struct ww_acquire_ctx ww_ctx;

        lockdep_assert_held(&katom->kctx->jctx.lock);

        fence = kbase_dma_fence_new(katom->dma_fence.context,
                                    atomic_inc_return(&katom->dma_fence.seqno));
        if (!fence) {
                err = -ENOMEM;
                dev_err(katom->kctx->kbdev->dev,
                        "Error %d creating fence.\n", err);
                return err;
        }

        katom->dma_fence.fence = fence;
        atomic_set(&katom->dma_fence.dep_count, 1);

        err = kbase_dma_fence_lock_reservations(info, &ww_ctx);
        if (err) {
                dev_err(katom->kctx->kbdev->dev,
                        "Error %d locking reservations.\n", err);
                atomic_set(&katom->dma_fence.dep_count, -1);
                fence_put(fence);
                return err;
        }

        for (i = 0; i < info->dma_fence_resv_count; i++) {
                struct reservation_object *obj = info->resv_objs[i];

                if (!test_bit(i, info->dma_fence_excl_bitmap)) {
                        err = reservation_object_reserve_shared(obj);
                        if (err) {
                                dev_err(katom->kctx->kbdev->dev,
                                        "Error %d reserving space for shared fence.\n", err);
                                goto end;
                        }

                        err = kbase_dma_fence_add_reservation_callback(katom, obj, false);
                        if (err) {
                                dev_err(katom->kctx->kbdev->dev,
                                        "Error %d adding reservation to callback.\n", err);
                                goto end;
                        }

                        reservation_object_add_shared_fence(obj, katom->dma_fence.fence);
                } else {
                        err = kbase_dma_fence_add_reservation_callback(katom, obj, true);
                        if (err) {
                                dev_err(katom->kctx->kbdev->dev,
                                        "Error %d adding reservation to callback.\n", err);
                                goto end;
                        }

                        reservation_object_add_excl_fence(obj, katom->dma_fence.fence);
                }
        }

end:
        kbase_dma_fence_unlock_reservations(info, &ww_ctx);

        if (likely(!err)) {
                /* Test if the callbacks are already triggered */
                if (atomic_dec_and_test(&katom->dma_fence.dep_count)) {
                        atomic_set(&katom->dma_fence.dep_count, -1);
                        kbase_dma_fence_free_callbacks(katom);
                } else {
                        /* Add katom to the list of dma-buf fence waiting atoms
                         * only if it is still waiting.
                         */
                        kbase_dma_fence_waiters_add(katom);
                }
        } else {
                /* There was an error, cancel callbacks, set dep_count to -1 to
                 * indicate that the atom has been handled (the caller will
                 * kill it for us), signal the fence, free callbacks and the
                 * fence.
                 */
                kbase_dma_fence_free_callbacks(katom);
                atomic_set(&katom->dma_fence.dep_count, -1);
                kbase_dma_fence_signal(katom);
        }

        return err;
}

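/**
 * kbase_dma_fence_cancel_all_atoms() - Cancel dma-fence waits for all waiting atoms
 * @kctx: Context whose waiting atoms should be cancelled
 *
 * Removes every atom from the context's dma-fence waiting list and cancels
 * its outstanding fence callbacks.
 */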
void kbase_dma_fence_cancel_all_atoms(struct kbase_context *kctx)
{
        struct kbase_jd_atom *katom, *katom_tmp;

        list_for_each_entry_safe(katom, katom_tmp,
                                 &kctx->dma_fence.waiting_resource, queue) {
                kbase_dma_fence_waiters_remove(katom);
                kbase_dma_fence_cancel_atom(katom);
        }
}

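/**
 * kbase_dma_fence_cancel_callbacks() - Cancel the dma-fence callbacks of an atom
 * @katom: Pointer to katom whose callbacks should be cancelled and freed
 */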
void kbase_dma_fence_cancel_callbacks(struct kbase_jd_atom *katom)
{
        /* Cancel callbacks and clean up. */
        kbase_dma_fence_free_callbacks(katom);
}

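/**
 * kbase_dma_fence_signal() - Signal and release the fence owned by @katom
 * @katom: Pointer to katom that has completed or been cancelled
 *
 * Signals the atom's own fence so that other waiters can make progress, drops
 * the reference to it and frees any remaining callbacks. Does nothing if the
 * atom has no fence.
 */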
void kbase_dma_fence_signal(struct kbase_jd_atom *katom)
{
        if (!katom->dma_fence.fence)
                return;

        KBASE_DEBUG_ASSERT(atomic_read(&katom->dma_fence.dep_count) == -1);

        /* Signal the atom's fence. */
        fence_signal(katom->dma_fence.fence);
        fence_put(katom->dma_fence.fence);
        katom->dma_fence.fence = NULL;

        kbase_dma_fence_free_callbacks(katom);
}

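/**
 * kbase_dma_fence_term() - Terminate the dma-fence machinery of a context
 * @kctx: Context to terminate
 *
 * Destroys the context's dma-fence workqueue.
 */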
void kbase_dma_fence_term(struct kbase_context *kctx)
{
        destroy_workqueue(kctx->dma_fence.wq);
        kctx->dma_fence.wq = NULL;
}

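/**
 * kbase_dma_fence_init() - Initialize the dma-fence machinery of a context
 * @kctx: Context to initialize
 *
 * Sets up the list of atoms waiting on external resources and allocates an
 * unbound workqueue (one work item at a time) used to run
 * kbase_dma_fence_work() when fences are signaled.
 *
 * Return: 0 on success, or -ENOMEM if the workqueue could not be allocated.
 */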
int kbase_dma_fence_init(struct kbase_context *kctx)
{
        INIT_LIST_HEAD(&kctx->dma_fence.waiting_resource);

        kctx->dma_fence.wq = alloc_workqueue("mali-fence-%d",
                                             WQ_UNBOUND, 1, kctx->pid);
        if (!kctx->dma_fence.wq)
                return -ENOMEM;

        return 0;
}