/*
 *
 * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

/* Include mali_kbase_dma_fence.h before checking for CONFIG_MALI_DMA_FENCE as
 * it will be set there.
 */
#include "mali_kbase_dma_fence.h"

#include <linux/atomic.h>
#include <linux/fence.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/reservation.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/ww_mutex.h>

#include <mali_kbase.h>

/* Spin lock protecting all Mali fences as fence->lock. */
static DEFINE_SPINLOCK(kbase_dma_fence_lock);

static void
kbase_dma_fence_work(struct work_struct *pwork);

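/*
 * Lifecycle of katom->dma_fence.dep_count: it starts at 1 in
 * kbase_dma_fence_wait(), is incremented once per callback installed on a
 * fence, and is decremented as those fences signal. When it reaches 0 the
 * atom is ready and the worker is queued; -1 marks an atom that has already
 * been handled or cancelled, so no further work may be queued for it.
 */
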
static void
kbase_dma_fence_waiters_add(struct kbase_jd_atom *katom)
{
        struct kbase_context *kctx = katom->kctx;

        list_add_tail(&katom->queue, &kctx->dma_fence.waiting_resource);
}

void
kbase_dma_fence_waiters_remove(struct kbase_jd_atom *katom)
{
        list_del(&katom->queue);
}

static const char *
kbase_dma_fence_get_driver_name(struct fence *fence)
{
        return kbase_drv_name;
}

static const char *
kbase_dma_fence_get_timeline_name(struct fence *fence)
{
        return kbase_timeline_name;
}

static bool
kbase_dma_fence_enable_signaling(struct fence *fence)
{
        /* If in the future we need to add code here remember to
         * get a reference to the fence and release it when signaling
         * as stated in fence.h
         */
        return true;
}

static void
kbase_dma_fence_fence_value_str(struct fence *fence, char *str, int size)
{
        snprintf(str, size, "%u", fence->seqno);
}

static const struct fence_ops kbase_dma_fence_ops = {
        .get_driver_name = kbase_dma_fence_get_driver_name,
        .get_timeline_name = kbase_dma_fence_get_timeline_name,
        .enable_signaling = kbase_dma_fence_enable_signaling,
        /* Use the default wait */
        .wait = fence_default_wait,
        .fence_value_str = kbase_dma_fence_fence_value_str,
};

static struct fence *
kbase_dma_fence_new(unsigned int context, unsigned int seqno)
{
        struct fence *fence;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return NULL;

        fence_init(fence,
                   &kbase_dma_fence_ops,
                   &kbase_dma_fence_lock,
                   context,
                   seqno);

        return fence;
}

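/*
 * The helper below takes all reservation objects in @info under a single
 * ww_acquire context. If ww_mutex_lock() reports -EDEADLK, every lock taken
 * so far is dropped, the contended lock is re-acquired with
 * ww_mutex_lock_slow(), and the whole loop is retried; this is the standard
 * wait/wound mutex back-off pattern.
 */
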
static int
kbase_dma_fence_lock_reservations(struct kbase_dma_fence_resv_info *info,
                                  struct ww_acquire_ctx *ctx)
{
        struct reservation_object *content_res = NULL;
        unsigned int content_res_idx = 0;
        unsigned int r;
        int err = 0;

        ww_acquire_init(ctx, &reservation_ww_class);

retry:
        for (r = 0; r < info->dma_fence_resv_count; r++) {
                if (info->resv_objs[r] == content_res) {
                        content_res = NULL;
                        continue;
                }

                err = ww_mutex_lock(&info->resv_objs[r]->lock, ctx);
                if (err)
                        goto error;
        }

        ww_acquire_done(ctx);
        return err;

error:
        content_res_idx = r;

        /* Unlock the locked ones */
        while (r--)
                ww_mutex_unlock(&info->resv_objs[r]->lock);

        if (content_res)
                ww_mutex_unlock(&content_res->lock);

        /* If we deadlock try with lock_slow and retry */
        if (err == -EDEADLK) {
                content_res = info->resv_objs[content_res_idx];
                ww_mutex_lock_slow(&content_res->lock, ctx);
                goto retry;
        }

        /* If we are here the function failed */
        ww_acquire_fini(ctx);
        return err;
}

static void
kbase_dma_fence_unlock_reservations(struct kbase_dma_fence_resv_info *info,
                                    struct ww_acquire_ctx *ctx)
{
        unsigned int r;

        for (r = 0; r < info->dma_fence_resv_count; r++)
                ww_mutex_unlock(&info->resv_objs[r]->lock);
        ww_acquire_fini(ctx);
}

/**
 * kbase_dma_fence_queue_work() - Queue work to handle @katom
 * @katom: Pointer to atom for which to queue work
 *
 * Queue kbase_dma_fence_work() for @katom to clean up the fence callbacks and
 * handle the completion of the atom.
 */
static void
kbase_dma_fence_queue_work(struct kbase_jd_atom *katom)
{
        struct kbase_context *kctx = katom->kctx;
        bool ret;

        INIT_WORK(&katom->work, kbase_dma_fence_work);
        ret = queue_work(kctx->dma_fence.wq, &katom->work);
        /* Warn if work was already queued; that should not happen. */
        WARN_ON(!ret);
}

/**
 * kbase_dma_fence_free_callbacks - Free dma-fence callbacks on a katom
 * @katom: Pointer to katom
 * @queue_worker: Boolean indicating if fence worker is to be queued when
 *                dep_count reaches 0.
 *
 * This function will free all fence callbacks on the katom's list of
 * callbacks. Callbacks that have not yet been called, because their fence
 * hasn't yet signaled, will first be removed from the fence.
 *
 * Locking: katom->dma_fence.callbacks list assumes jctx.lock is held.
 */
static void
kbase_dma_fence_free_callbacks(struct kbase_jd_atom *katom, bool queue_worker)
{
        struct kbase_dma_fence_cb *cb, *tmp;

        lockdep_assert_held(&katom->kctx->jctx.lock);

        /* Clean up and free callbacks. */
        list_for_each_entry_safe(cb, tmp, &katom->dma_fence.callbacks, node) {
                bool ret;

                /* Cancel callbacks that haven't been called yet. */
                ret = fence_remove_callback(cb->fence, &cb->fence_cb);
                if (ret) {
                        int ret;

                        /* Fence had not signaled, clean up after
                         * canceling.
                         */
                        ret = atomic_dec_return(&katom->dma_fence.dep_count);

                        if (unlikely(queue_worker && ret == 0)) {
                                /*
                                 * dep_count went to zero and queue_worker is
                                 * true. Queue the worker to handle the
                                 * completion of the katom.
                                 */
                                kbase_dma_fence_queue_work(katom);
                        }
                }

                /*
                 * Release the reference taken in
                 * kbase_dma_fence_add_callback().
                 */
                fence_put(cb->fence);
                list_del(&cb->node);
                kfree(cb);
        }
}

/**
 * kbase_dma_fence_cancel_atom() - Cancels waiting on an atom
 * @katom: Katom to cancel
 *
 * Locking: katom->dma_fence.callbacks list assumes jctx.lock is held.
 */
static void
kbase_dma_fence_cancel_atom(struct kbase_jd_atom *katom)
{
        lockdep_assert_held(&katom->kctx->jctx.lock);

        /* Cancel callbacks and clean up. */
        kbase_dma_fence_free_callbacks(katom, false);

        KBASE_DEBUG_ASSERT(atomic_read(&katom->dma_fence.dep_count) == 0);

        /* Mark the atom as handled in case all fences signaled just before
         * canceling the callbacks and the worker was queued.
         */
        atomic_set(&katom->dma_fence.dep_count, -1);

        /* Prevent jd_done_nolock() from being called twice on an atom when
         * there is a race between job completion and cancellation.
         */
        if (katom->status == KBASE_JD_ATOM_STATE_QUEUED) {
                /* Wait was cancelled - zap the atom */
                katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
                if (jd_done_nolock(katom, NULL))
                        kbase_js_sched_all(katom->kctx->kbdev);
        }
}

/**
 * kbase_dma_fence_work() - Worker thread called when a fence is signaled
 * @pwork: work_struct containing a pointer to a katom
 *
 * This function will clean up the fence callbacks and mark all dependencies
 * as satisfied.
 */
static void
kbase_dma_fence_work(struct work_struct *pwork)
{
        struct kbase_jd_atom *katom;
        struct kbase_jd_context *ctx;

        katom = container_of(pwork, struct kbase_jd_atom, work);
        ctx = &katom->kctx->jctx;

        mutex_lock(&ctx->lock);
        if (atomic_read(&katom->dma_fence.dep_count) != 0)
                goto out;

        atomic_set(&katom->dma_fence.dep_count, -1);

        /* Remove atom from list of dma-fence waiting atoms. */
        kbase_dma_fence_waiters_remove(katom);
        /* Cleanup callbacks. */
        kbase_dma_fence_free_callbacks(katom, false);
        /*
         * Queue atom on GPU, unless it has already completed due to a failing
         * dependency. Run jd_done_nolock() on the katom if it is completed.
         */
        if (unlikely(katom->status == KBASE_JD_ATOM_STATE_COMPLETED))
                jd_done_nolock(katom, NULL);
        else
                kbase_jd_dep_clear_locked(katom);

out:
        mutex_unlock(&ctx->lock);
}

/**
 * kbase_dma_fence_add_callback() - Add callback on @fence to block @katom
 * @katom: Pointer to katom that will be blocked by @fence
 * @fence: Pointer to fence on which to set up the callback
 * @callback: Pointer to function to be called when fence is signaled
 *
 * Caller needs to hold a reference to @fence when calling this function, and
 * the caller is responsible for releasing that reference. An additional
 * reference to @fence is taken when the callback is successfully set up; in
 * that case @fence must be kept valid until the callback has been called and
 * cleanup has been done.
 *
 * Return: 0 on success: fence was either already signaled, or the callback
 * was set up. Negative error code is returned on error.
 */
static int
kbase_dma_fence_add_callback(struct kbase_jd_atom *katom,
                             struct fence *fence,
                             fence_func_t callback)
{
        int err = 0;
        struct kbase_dma_fence_cb *kbase_fence_cb;

        kbase_fence_cb = kmalloc(sizeof(*kbase_fence_cb), GFP_KERNEL);
        if (!kbase_fence_cb)
                return -ENOMEM;

        kbase_fence_cb->fence = fence;
        kbase_fence_cb->katom = katom;
        INIT_LIST_HEAD(&kbase_fence_cb->node);

        err = fence_add_callback(fence, &kbase_fence_cb->fence_cb, callback);
        if (err == -ENOENT) {
                /* Fence signaled, clear the error and return */
                err = 0;
                kbase_fence_cb->fence = NULL;
                kfree(kbase_fence_cb);
        } else if (err) {
                kfree(kbase_fence_cb);
        } else {
                /*
                 * Get a reference to the fence that will be kept until the
                 * callback gets cleaned up in
                 * kbase_dma_fence_free_callbacks().
                 */
                fence_get(fence);
                atomic_inc(&katom->dma_fence.dep_count);
                /* Add callback to katom's list of callbacks */
                list_add(&kbase_fence_cb->node, &katom->dma_fence.callbacks);
        }

        return err;
}

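/*
 * kbase_dma_fence_cb() is the fence_func_t installed by
 * kbase_dma_fence_add_callback(). It runs in the signaling context of the
 * fence, so it only decrements dep_count and defers the actual handling of
 * the atom to the context workqueue via kbase_dma_fence_queue_work().
 */
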
static void
kbase_dma_fence_cb(struct fence *fence, struct fence_cb *cb)
{
        struct kbase_dma_fence_cb *kcb = container_of(cb,
                                struct kbase_dma_fence_cb,
                                fence_cb);
        struct kbase_jd_atom *katom = kcb->katom;

        /* If the atom is zapped dep_count will be forced to a negative number
         * preventing this callback from ever scheduling work (which would, in
         * turn, reschedule the atom).
         */
        if (atomic_dec_and_test(&katom->dma_fence.dep_count))
                kbase_dma_fence_queue_work(katom);
}

static int
kbase_dma_fence_add_reservation_callback(struct kbase_jd_atom *katom,
                                         struct reservation_object *resv,
                                         bool exclusive)
{
        struct fence *excl_fence = NULL;
        struct fence **shared_fences = NULL;
        unsigned int shared_count = 0;
        unsigned int i;
        int err;

        err = reservation_object_get_fences_rcu(resv, &excl_fence,
                                                &shared_count, &shared_fences);
        if (err)
                return err;

        if (excl_fence) {
                err = kbase_dma_fence_add_callback(katom, excl_fence,
                                                   kbase_dma_fence_cb);
                /* Release our reference to the exclusive fence, taken by
                 * reservation_object_get_fences_rcu(). We have set up our
                 * callback (if that was possible), and it is the fence
                 * owner's responsibility to signal the fence before allowing
                 * it to disappear.
                 */
                fence_put(excl_fence);
                if (err)
                        goto out;
        }

        if (exclusive) {
                for (i = 0; i < shared_count; i++) {
                        err = kbase_dma_fence_add_callback(katom,
                                                           shared_fences[i],
                                                           kbase_dma_fence_cb);
                        if (err)
                                goto out;
                }
        }

        /* Release all our references to the shared fences, taken by
         * reservation_object_get_fences_rcu(). We have set up our callbacks
         * (where that was possible), and it is the fence owners'
         * responsibility to signal the fences before allowing them to
         * disappear.
         */
out:
        for (i = 0; i < shared_count; i++)
                fence_put(shared_fences[i]);
        kfree(shared_fences);

        if (err) {
                /* On error, cancel and clean up all callbacks that were set
                 * up before the error.
                 */
                kbase_dma_fence_free_callbacks(katom, false);
        }

        return err;
}

void kbase_dma_fence_add_reservation(struct reservation_object *resv,
                                     struct kbase_dma_fence_resv_info *info,
                                     bool exclusive)
{
        unsigned int i;

        for (i = 0; i < info->dma_fence_resv_count; i++) {
                /* Duplicate resource, ignore */
                if (info->resv_objs[i] == resv)
                        return;
        }

        info->resv_objs[info->dma_fence_resv_count] = resv;
        if (exclusive)
                set_bit(info->dma_fence_resv_count,
                        info->dma_fence_excl_bitmap);
        (info->dma_fence_resv_count)++;
}

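/*
 * The reservation objects collected by kbase_dma_fence_add_reservation() are
 * consumed by kbase_dma_fence_wait() below: each object is locked, a new
 * fence for the atom is attached to it (as an exclusive fence if the
 * corresponding bit is set in dma_fence_excl_bitmap, otherwise as a shared
 * fence), and callbacks are installed on the fences already present so the
 * atom is only run once they have all signaled.
 */
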
int kbase_dma_fence_wait(struct kbase_jd_atom *katom,
                         struct kbase_dma_fence_resv_info *info)
{
        int err;
        unsigned int i;
        struct fence *fence;
        struct ww_acquire_ctx ww_ctx;

        lockdep_assert_held(&katom->kctx->jctx.lock);

        fence = kbase_dma_fence_new(katom->dma_fence.context,
                                    atomic_inc_return(&katom->dma_fence.seqno));
        if (!fence) {
                err = -ENOMEM;
                dev_err(katom->kctx->kbdev->dev,
                        "Error %d creating fence.\n", err);
                return err;
        }

        katom->dma_fence.fence = fence;
        atomic_set(&katom->dma_fence.dep_count, 1);

        err = kbase_dma_fence_lock_reservations(info, &ww_ctx);
        if (err) {
                dev_err(katom->kctx->kbdev->dev,
                        "Error %d locking reservations.\n", err);
                atomic_set(&katom->dma_fence.dep_count, -1);
                fence_put(fence);
                return err;
        }

        for (i = 0; i < info->dma_fence_resv_count; i++) {
                struct reservation_object *obj = info->resv_objs[i];

                if (!test_bit(i, info->dma_fence_excl_bitmap)) {
                        err = reservation_object_reserve_shared(obj);
                        if (err) {
                                dev_err(katom->kctx->kbdev->dev,
                                        "Error %d reserving space for shared fence.\n", err);
                                goto end;
                        }

                        err = kbase_dma_fence_add_reservation_callback(katom, obj, false);
                        if (err) {
                                dev_err(katom->kctx->kbdev->dev,
                                        "Error %d adding reservation to callback.\n", err);
                                goto end;
                        }

                        reservation_object_add_shared_fence(obj, katom->dma_fence.fence);
                } else {
                        err = kbase_dma_fence_add_reservation_callback(katom, obj, true);
                        if (err) {
                                dev_err(katom->kctx->kbdev->dev,
                                        "Error %d adding reservation to callback.\n", err);
                                goto end;
                        }

                        reservation_object_add_excl_fence(obj, katom->dma_fence.fence);
                }
        }

end:
        kbase_dma_fence_unlock_reservations(info, &ww_ctx);

        if (likely(!err)) {
                /* Test if the callbacks are already triggered */
                if (atomic_dec_and_test(&katom->dma_fence.dep_count)) {
                        atomic_set(&katom->dma_fence.dep_count, -1);
                        kbase_dma_fence_free_callbacks(katom, false);
                } else {
                        /* Add katom to the list of dma-buf fence waiting atoms
                         * only if it is still waiting.
                         */
                        kbase_dma_fence_waiters_add(katom);
                }
        } else {
                /* There was an error, cancel callbacks, set dep_count to -1 to
                 * indicate that the atom has been handled (the caller will
                 * kill it for us), signal the fence, free callbacks and the
                 * fence.
                 */
                kbase_dma_fence_free_callbacks(katom, false);
                atomic_set(&katom->dma_fence.dep_count, -1);
                kbase_dma_fence_signal(katom);
        }

        return err;
}

void kbase_dma_fence_cancel_all_atoms(struct kbase_context *kctx)
{
        struct list_head *list = &kctx->dma_fence.waiting_resource;

        while (!list_empty(list)) {
                struct kbase_jd_atom *katom;

                katom = list_first_entry(list, struct kbase_jd_atom, queue);
                kbase_dma_fence_waiters_remove(katom);
                kbase_dma_fence_cancel_atom(katom);
        }
}

void kbase_dma_fence_cancel_callbacks(struct kbase_jd_atom *katom)
{
        /* Cancel callbacks and clean up. */
        kbase_dma_fence_free_callbacks(katom, true);
}

void kbase_dma_fence_signal(struct kbase_jd_atom *katom)
{
        if (!katom->dma_fence.fence)
                return;

        KBASE_DEBUG_ASSERT(atomic_read(&katom->dma_fence.dep_count) == -1);

        /* Signal the atom's fence. */
        fence_signal(katom->dma_fence.fence);
        fence_put(katom->dma_fence.fence);
        katom->dma_fence.fence = NULL;

        kbase_dma_fence_free_callbacks(katom, false);
}

void kbase_dma_fence_term(struct kbase_context *kctx)
{
        destroy_workqueue(kctx->dma_fence.wq);
        kctx->dma_fence.wq = NULL;
}

int kbase_dma_fence_init(struct kbase_context *kctx)
{
        INIT_LIST_HEAD(&kctx->dma_fence.waiting_resource);

        kctx->dma_fence.wq = alloc_workqueue("mali-fence-%d",
                                             WQ_UNBOUND, 1, kctx->pid);
        if (!kctx->dma_fence.wq)
                return -ENOMEM;

        return 0;
}