/*
 *
 * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

/* Include mali_kbase_dma_fence.h before checking for CONFIG_MALI_DMA_FENCE as
 * it will be set there.
 */
#include "mali_kbase_dma_fence.h"

#include <linux/atomic.h>
#include <linux/fence.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/reservation.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/ww_mutex.h>

#include <mali_kbase.h>

/* Spin lock protecting all Mali fences as fence->lock. */
static DEFINE_SPINLOCK(kbase_dma_fence_lock);

static void
kbase_dma_fence_waiters_add(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;

	list_add_tail(&katom->queue, &kctx->dma_fence.waiting_resource);
}

void
kbase_dma_fence_waiters_remove(struct kbase_jd_atom *katom)
{
	list_del(&katom->queue);
}

static const char *
kbase_dma_fence_get_driver_name(struct fence *fence)
{
	return kbase_drv_name;
}

static const char *
kbase_dma_fence_get_timeline_name(struct fence *fence)
{
	return kbase_timeline_name;
}

static bool
kbase_dma_fence_enable_signaling(struct fence *fence)
{
	/* If in the future we need to add code here, remember to get a
	 * reference to the fence and release it when signaling, as stated
	 * in fence.h.
	 */
	return true;
}

static void
kbase_dma_fence_fence_value_str(struct fence *fence, char *str, int size)
{
	snprintf(str, size, "%u", fence->seqno);
}

static const struct fence_ops kbase_dma_fence_ops = {
	.get_driver_name = kbase_dma_fence_get_driver_name,
	.get_timeline_name = kbase_dma_fence_get_timeline_name,
	.enable_signaling = kbase_dma_fence_enable_signaling,
	/* Use the default wait */
	.wait = fence_default_wait,
	.fence_value_str = kbase_dma_fence_fence_value_str,
};

static struct fence *
kbase_dma_fence_new(unsigned int context, unsigned int seqno)
{
	struct fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	fence_init(fence,
		   &kbase_dma_fence_ops,
		   &kbase_dma_fence_lock,
		   context,
		   seqno);

	return fence;
}

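/**
 * kbase_dma_fence_lock_reservations() - Lock all reservation objects in @info
 * @info: Collection of reservation objects to lock
 * @ctx:  Acquire context used for the wound/wait mutex transaction
 *
 * Locks every reservation object in @info following the wound/wait mutex
 * protocol: if ww_mutex_lock() returns -EDEADLK, all locks taken so far are
 * dropped, the contended lock is taken with ww_mutex_lock_slow(), and the
 * whole sequence is retried.
 *
 * A sketch of the intended pairing, mirroring its use in
 * kbase_dma_fence_wait():
 *
 *	err = kbase_dma_fence_lock_reservations(info, &ww_ctx);
 *	if (err)
 *		return err;
 *	... add fences and callbacks to the reservation objects ...
 *	kbase_dma_fence_unlock_reservations(info, &ww_ctx);
 *
 * Return: 0 on success, or a negative error code on failure.
 */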
static int
kbase_dma_fence_lock_reservations(struct kbase_dma_fence_resv_info *info,
				  struct ww_acquire_ctx *ctx)
{
	struct reservation_object *content_res = NULL;
	unsigned int content_res_idx = 0;
	unsigned int r;
	int err = 0;

	ww_acquire_init(ctx, &reservation_ww_class);

retry:
	for (r = 0; r < info->dma_fence_resv_count; r++) {
		if (info->resv_objs[r] == content_res) {
			content_res = NULL;
			continue;
		}

		err = ww_mutex_lock(&info->resv_objs[r]->lock, ctx);
		if (err)
			goto error;
	}

	ww_acquire_done(ctx);
	return err;

error:
	content_res_idx = r;

	/* Unlock the locked ones */
	while (r--)
		ww_mutex_unlock(&info->resv_objs[r]->lock);

	if (content_res)
		ww_mutex_unlock(&content_res->lock);

	/* If we deadlocked, take the contended lock with lock_slow and retry */
	if (err == -EDEADLK) {
		content_res = info->resv_objs[content_res_idx];
		ww_mutex_lock_slow(&content_res->lock, ctx);
		goto retry;
	}

	/* If we are here the function failed */
	ww_acquire_fini(ctx);
	return err;
}

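/**
 * kbase_dma_fence_unlock_reservations() - Unlock all reservation objects
 * @info: Collection of reservation objects to unlock
 * @ctx:  Acquire context the objects were locked with
 *
 * Releases every lock taken by kbase_dma_fence_lock_reservations() and ends
 * the ww_mutex transaction.
 */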
static void
kbase_dma_fence_unlock_reservations(struct kbase_dma_fence_resv_info *info,
				    struct ww_acquire_ctx *ctx)
{
	unsigned int r;

	for (r = 0; r < info->dma_fence_resv_count; r++)
		ww_mutex_unlock(&info->resv_objs[r]->lock);
	ww_acquire_fini(ctx);
}

/**
 * kbase_dma_fence_free_callbacks - Free dma-fence callbacks on a katom
 * @katom: Pointer to katom
 *
 * This function will free all fence callbacks on the katom's list of
 * callbacks. Callbacks that have not yet been called, because their fence
 * hasn't yet signaled, will first be removed from the fence.
 *
 * Locking: katom->dma_fence.callbacks list assumes jctx.lock is held.
 */
static void
kbase_dma_fence_free_callbacks(struct kbase_jd_atom *katom)
{
	struct kbase_dma_fence_cb *cb, *tmp;

	lockdep_assert_held(&katom->kctx->jctx.lock);

	/* Clean up and free callbacks. */
	list_for_each_entry_safe(cb, tmp, &katom->dma_fence.callbacks, node) {
		bool ret;

		/* Cancel callbacks that haven't been called yet. */
		ret = fence_remove_callback(cb->fence, &cb->fence_cb);
		if (ret) {
			/* Fence had not signaled, clean up after
			 * canceling.
			 */
			atomic_dec(&katom->dma_fence.dep_count);
		}

		/*
		 * Release the reference taken in
		 * kbase_dma_fence_add_callback().
		 */
		fence_put(cb->fence);
		list_del(&cb->node);
		kfree(cb);
	}
}

/**
 * kbase_dma_fence_cancel_atom() - Cancels waiting on an atom
 * @katom: Katom to cancel
 *
 * Locking: katom->dma_fence.callbacks list assumes jctx.lock is held.
 */
static void
kbase_dma_fence_cancel_atom(struct kbase_jd_atom *katom)
{
	lockdep_assert_held(&katom->kctx->jctx.lock);

	/* Cancel callbacks and clean up. */
	kbase_dma_fence_free_callbacks(katom);

	KBASE_DEBUG_ASSERT(atomic_read(&katom->dma_fence.dep_count) == 0);

	/* Mark the atom as handled in case all fences signaled just before
	 * canceling the callbacks and the worker was queued.
	 */
	atomic_set(&katom->dma_fence.dep_count, -1);

	/* Prevent job_done_nolock from being called twice on an atom when
	 * there is a race between job completion and cancellation.
	 */
	if (katom->status == KBASE_JD_ATOM_STATE_QUEUED) {
		/* Wait was cancelled - zap the atom */
		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
		if (jd_done_nolock(katom, NULL))
			kbase_js_sched_all(katom->kctx->kbdev);
	}
}

/**
 * kbase_dma_fence_work() - Worker thread called when a fence is signaled
 * @pwork: work_struct containing a pointer to a katom
 *
 * This function cleans up the atom's fence callbacks and marks all of its
 * dma-fence dependencies as satisfied, so the atom can be queued on the GPU.
 */
static void
kbase_dma_fence_work(struct work_struct *pwork)
{
	struct kbase_jd_atom *katom;
	struct kbase_jd_context *ctx;

	katom = container_of(pwork, struct kbase_jd_atom, work);
	ctx = &katom->kctx->jctx;

	mutex_lock(&ctx->lock);
	if (atomic_read(&katom->dma_fence.dep_count) != 0)
		goto out;

	atomic_set(&katom->dma_fence.dep_count, -1);

	/* Remove atom from list of dma-fence waiting atoms. */
	kbase_dma_fence_waiters_remove(katom);
	/* Cleanup callbacks. */
	kbase_dma_fence_free_callbacks(katom);
	/* Queue atom on GPU. */
	kbase_jd_dep_clear_locked(katom);

out:
	mutex_unlock(&ctx->lock);
}

/**
 * kbase_dma_fence_add_callback() - Add callback on @fence to block @katom
 * @katom: Pointer to katom that will be blocked by @fence
 * @fence: Pointer to fence on which to set up the callback
 * @callback: Pointer to function to be called when fence is signaled
 *
 * Caller needs to hold a reference to @fence when calling this function, and
 * the caller is responsible for releasing that reference. An additional
 * reference to @fence is taken when the callback is successfully set up, and
 * @fence must then be kept valid until the callback has been called and
 * cleanup has been done.
 *
 * Return: 0 on success: fence was either already signaled, or the callback
 * was set up. A negative error code is returned on error.
 */
static int
kbase_dma_fence_add_callback(struct kbase_jd_atom *katom,
			     struct fence *fence,
			     fence_func_t callback)
{
	int err = 0;
	struct kbase_dma_fence_cb *kbase_fence_cb;

	kbase_fence_cb = kmalloc(sizeof(*kbase_fence_cb), GFP_KERNEL);
	if (!kbase_fence_cb)
		return -ENOMEM;

	kbase_fence_cb->fence = fence;
	kbase_fence_cb->katom = katom;
	INIT_LIST_HEAD(&kbase_fence_cb->node);

	err = fence_add_callback(fence, &kbase_fence_cb->fence_cb, callback);
	if (err == -ENOENT) {
		/* Fence already signaled, clear the error and return */
		err = 0;
		kbase_fence_cb->fence = NULL;
		kfree(kbase_fence_cb);
	} else if (err) {
		kbase_fence_cb->fence = NULL;
		kfree(kbase_fence_cb);
	} else {
		/*
		 * Get reference to fence that will be kept until callback gets
		 * cleaned up in kbase_dma_fence_free_callbacks().
		 */
		fence_get(fence);
		atomic_inc(&katom->dma_fence.dep_count);
		/* Add callback to katom's list of callbacks */
		list_add(&kbase_fence_cb->node, &katom->dma_fence.callbacks);
	}

	return err;
}

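/**
 * kbase_dma_fence_cb() - Fence callback invoked when a dependency signals
 * @fence: The fence that has been signaled
 * @cb:    Fence callback embedded in a struct kbase_dma_fence_cb
 *
 * Called from the fence signaling path, potentially in atomic context, so
 * the actual completion work is deferred to kbase_dma_fence_work() on the
 * context's dma-fence workqueue.
 */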
static void
kbase_dma_fence_cb(struct fence *fence, struct fence_cb *cb)
{
	struct kbase_dma_fence_cb *kcb = container_of(cb,
				struct kbase_dma_fence_cb,
				fence_cb);
	struct kbase_jd_atom *katom = kcb->katom;
	struct kbase_context *kctx = katom->kctx;

	/* If the atom is zapped, dep_count will be forced to a negative
	 * number, preventing this callback from ever scheduling the worker
	 * that would otherwise re-queue the atom.
	 */
	if (atomic_dec_and_test(&katom->dma_fence.dep_count)) {
		bool ret;

		INIT_WORK(&katom->work, kbase_dma_fence_work);
		ret = queue_work(kctx->dma_fence.wq, &katom->work);
		/* Warn if work was already queued, that should not happen. */
		WARN_ON(!ret);
	}
}

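/**
 * kbase_dma_fence_add_reservation_callback() - Set up dma-fence callbacks on
 *                                              the fences in a reservation
 *                                              object
 * @katom:     Pointer to katom that will be blocked by the fences
 * @resv:      Reservation object to add callbacks on
 * @exclusive: True if the atom needs exclusive (write) access, in which case
 *             callbacks are also added on all shared fences
 *
 * Return: 0 on success, or a negative error code on failure. On error, any
 * callbacks already set up on the atom are freed again.
 */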
static int
kbase_dma_fence_add_reservation_callback(struct kbase_jd_atom *katom,
					 struct reservation_object *resv,
					 bool exclusive)
{
	struct fence *excl_fence = NULL;
	struct fence **shared_fences = NULL;
	unsigned int shared_count = 0;
	unsigned int i;
	int err;

	err = reservation_object_get_fences_rcu(resv,
						&excl_fence,
						&shared_count,
						&shared_fences);
	if (err)
		return err;

	if (excl_fence) {
		err = kbase_dma_fence_add_callback(katom,
						   excl_fence,
						   kbase_dma_fence_cb);

		/* Release our reference, taken by
		 * reservation_object_get_fences_rcu(), to the fence. We have
		 * set up our callback (if that was possible), and it is the
		 * fence owner's responsibility to signal the fence before
		 * allowing it to disappear.
		 */
		fence_put(excl_fence);

		if (err)
			goto out;
	}

	if (exclusive) {
		for (i = 0; i < shared_count; i++) {
			err = kbase_dma_fence_add_callback(katom,
							   shared_fences[i],
							   kbase_dma_fence_cb);
			if (err)
				goto out;
		}
	}

	/* Release all our references to the shared fences, taken by
	 * reservation_object_get_fences_rcu(). We have set up our callback
	 * (if that was possible), and it is the fence owner's responsibility
	 * to signal the fence before allowing it to disappear.
	 */
out:
	for (i = 0; i < shared_count; i++)
		fence_put(shared_fences[i]);
	kfree(shared_fences);

	if (err) {
		/*
		 * On error, cancel and clean up all callbacks that were set
		 * up before the error.
		 */
		kbase_dma_fence_free_callbacks(katom);
	}

	return err;
}

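/**
 * kbase_dma_fence_add_reservation() - Add a reservation object to the
 *                                     collection in @info
 * @resv:      Reservation object to add
 * @info:      Collection the reservation object is added to
 * @exclusive: True if the atom needs exclusive (write) access to @resv
 *
 * Duplicate reservation objects are ignored; exclusive access is recorded in
 * info->dma_fence_excl_bitmap. Intended to be called once per external
 * resource while building up @info, before handing the collection to
 * kbase_dma_fence_wait().
 */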
void kbase_dma_fence_add_reservation(struct reservation_object *resv,
				     struct kbase_dma_fence_resv_info *info,
				     bool exclusive)
{
	unsigned int i;

	for (i = 0; i < info->dma_fence_resv_count; i++) {
		/* Duplicate resource, ignore */
		if (info->resv_objs[i] == resv)
			return;
	}

	info->resv_objs[info->dma_fence_resv_count] = resv;
	if (exclusive)
		set_bit(info->dma_fence_resv_count,
			info->dma_fence_excl_bitmap);
	(info->dma_fence_resv_count)++;
}

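/**
 * kbase_dma_fence_wait() - Make @katom wait on all fences in @info
 * @katom: Katom with external dma-buf dependencies
 * @info:  Collection of reservation objects built up with
 *         kbase_dma_fence_add_reservation()
 *
 * Creates a new fence for the atom, locks all reservation objects, registers
 * callbacks on the fences already attached to them, and then adds the atom's
 * own fence to each object (shared or exclusive, as recorded in the bitmap).
 *
 * Locking: jctx.lock must be held when calling this function.
 *
 * Return: 0 on success, or a negative error code on failure.
 */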
int kbase_dma_fence_wait(struct kbase_jd_atom *katom,
			 struct kbase_dma_fence_resv_info *info)
{
	int err;
	unsigned int i;
	struct fence *fence;
	struct ww_acquire_ctx ww_ctx;

	lockdep_assert_held(&katom->kctx->jctx.lock);

	fence = kbase_dma_fence_new(katom->dma_fence.context,
				    atomic_inc_return(&katom->dma_fence.seqno));
	if (!fence) {
		err = -ENOMEM;
		dev_err(katom->kctx->kbdev->dev,
			"Error %d creating fence.\n", err);
		return err;
	}

	katom->dma_fence.fence = fence;
	atomic_set(&katom->dma_fence.dep_count, 1);

	err = kbase_dma_fence_lock_reservations(info, &ww_ctx);
	if (err) {
		dev_err(katom->kctx->kbdev->dev,
			"Error %d locking reservations.\n", err);
		atomic_set(&katom->dma_fence.dep_count, -1);
		kbase_dma_fence_signal(katom);
		return err;
	}

	for (i = 0; i < info->dma_fence_resv_count; i++) {
		struct reservation_object *obj = info->resv_objs[i];

		if (!test_bit(i, info->dma_fence_excl_bitmap)) {
			err = reservation_object_reserve_shared(obj);
			if (err) {
				dev_err(katom->kctx->kbdev->dev,
					"Error %d reserving space for shared fence.\n", err);
				goto end;
			}

			err = kbase_dma_fence_add_reservation_callback(katom, obj, false);
			if (err) {
				dev_err(katom->kctx->kbdev->dev,
					"Error %d adding reservation to callback.\n", err);
				goto end;
			}

			reservation_object_add_shared_fence(obj, katom->dma_fence.fence);
		} else {
			err = kbase_dma_fence_add_reservation_callback(katom, obj, true);
			if (err) {
				dev_err(katom->kctx->kbdev->dev,
					"Error %d adding reservation to callback.\n", err);
				goto end;
			}

			reservation_object_add_excl_fence(obj, katom->dma_fence.fence);
		}
	}

end:
	kbase_dma_fence_unlock_reservations(info, &ww_ctx);

	if (likely(!err)) {
		/* Test if the callbacks are already triggered */
		if (atomic_dec_and_test(&katom->dma_fence.dep_count)) {
			atomic_set(&katom->dma_fence.dep_count, -1);
			kbase_dma_fence_free_callbacks(katom);
		} else {
			/* Add katom to the list of dma-buf fence waiting atoms
			 * only if it is still waiting.
			 */
			kbase_dma_fence_waiters_add(katom);
		}
	} else {
		/* There was an error, so cancel callbacks and set dep_count
		 * to -1 to indicate that the atom has been handled (the
		 * caller will kill it for us), then signal the fence and
		 * free the callbacks and the fence.
		 */
		kbase_dma_fence_free_callbacks(katom);
		atomic_set(&katom->dma_fence.dep_count, -1);
		kbase_dma_fence_signal(katom);
	}

	return err;
}

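/**
 * kbase_dma_fence_cancel_all_atoms() - Cancel all atoms in the context that
 *                                      are waiting on dma-fences
 * @kctx: Pointer to kbase context
 *
 * Locking: jctx.lock must be held when calling this function, as required by
 * kbase_dma_fence_cancel_atom().
 */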
void kbase_dma_fence_cancel_all_atoms(struct kbase_context *kctx)
{
	struct kbase_jd_atom *katom, *katom_tmp;

	list_for_each_entry_safe(katom, katom_tmp,
				 &kctx->dma_fence.waiting_resource, queue) {
		kbase_dma_fence_waiters_remove(katom);
		kbase_dma_fence_cancel_atom(katom);
	}
}

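/**
 * kbase_dma_fence_cancel_callbacks() - Cancel the dma-fence callbacks on an
 *                                      atom without cancelling the atom
 * @katom: Pointer to katom whose callbacks are to be canceled
 */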
void kbase_dma_fence_cancel_callbacks(struct kbase_jd_atom *katom)
{
	/* Cancel callbacks and clean up. */
	kbase_dma_fence_free_callbacks(katom);
}

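/**
 * kbase_dma_fence_signal() - Signal and release the atom's own fence
 * @katom: Pointer to katom whose fence is to be signaled
 *
 * Signals the fence created in kbase_dma_fence_wait(), drops the atom's
 * reference to it, and frees any callbacks still attached to the atom.
 */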
void kbase_dma_fence_signal(struct kbase_jd_atom *katom)
{
	if (!katom->dma_fence.fence)
		return;

	KBASE_DEBUG_ASSERT(atomic_read(&katom->dma_fence.dep_count) == -1);

	/* Signal the atom's fence. */
	fence_signal(katom->dma_fence.fence);
	fence_put(katom->dma_fence.fence);
	katom->dma_fence.fence = NULL;

	kbase_dma_fence_free_callbacks(katom);
}

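/**
 * kbase_dma_fence_term() - Terminate dma-fence support for a kbase context
 * @kctx: Pointer to kbase context being torn down
 *
 * Destroys the context's dma-fence workqueue, draining any work still queued
 * on it.
 */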
void kbase_dma_fence_term(struct kbase_context *kctx)
{
	destroy_workqueue(kctx->dma_fence.wq);
	kctx->dma_fence.wq = NULL;
}

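/**
 * kbase_dma_fence_init() - Initialize dma-fence support for a kbase context
 * @kctx: Pointer to kbase context to initialize
 *
 * Sets up the list of atoms waiting on external resources and allocates the
 * workqueue used to complete atoms once their fences signal.
 *
 * Return: 0 on success, or -ENOMEM if the workqueue could not be allocated.
 */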
int kbase_dma_fence_init(struct kbase_context *kctx)
{
	INIT_LIST_HEAD(&kctx->dma_fence.waiting_resource);

	kctx->dma_fence.wq = alloc_workqueue("mali-fence-%d",
					     WQ_UNBOUND, 1, kctx->pid);
	if (!kctx->dma_fence.wq)
		return -ENOMEM;

	return 0;
}