/*
 * drm_sync_helper.c: software fence and helper functions for fences and
 * reservations used for dma buffer access synchronization between drivers.
 *
 * Copyright 2014 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
17 #include <linux/module.h>
18 #include <drm/drm_sync_helper.h>
19 #include <linux/slab.h>
20 #include <linux/reservation.h>
/*
 * Lock shared by all software fences; NOTE(review): presumably handed to
 * fence_init() in drm_sw_fence_new() below -- confirm against full file.
 */
static DEFINE_SPINLOCK(sw_fence_lock);
24 void drm_add_reservation(struct reservation_object *resv,
25 struct reservation_object **resvs,
26 unsigned long *excl_resvs_bitmap,
27 unsigned int *num_resvs, bool exclusive)
31 for (r = 0; r < *num_resvs; r++) {
35 resvs[*num_resvs] = resv;
37 set_bit(*num_resvs, excl_resvs_bitmap);
40 EXPORT_SYMBOL(drm_add_reservation);
42 int drm_lock_reservations(struct reservation_object **resvs,
43 unsigned int num_resvs, struct ww_acquire_ctx *ctx)
46 struct reservation_object *slow_res = NULL;
48 ww_acquire_init(ctx, &reservation_ww_class);
51 for (r = 0; r < num_resvs; r++) {
53 /* skip the resv we locked with slow lock */
54 if (resvs[r] == slow_res) {
58 ret = ww_mutex_lock(&resvs[r]->lock, ctx);
60 unsigned int slow_r = r;
62 * undo all the locks we already done,
67 ww_mutex_unlock(&resvs[r]->lock);
70 ww_mutex_unlock(&slow_res->lock);
71 if (ret == -EDEADLK) {
72 slow_res = resvs[slow_r];
73 ww_mutex_lock_slow(&slow_res->lock, ctx);
84 EXPORT_SYMBOL(drm_lock_reservations);
86 void drm_unlock_reservations(struct reservation_object **resvs,
87 unsigned int num_resvs,
88 struct ww_acquire_ctx *ctx)
92 for (r = 0; r < num_resvs; r++)
93 ww_mutex_unlock(&resvs[r]->lock);
97 EXPORT_SYMBOL(drm_unlock_reservations);
99 static void reservation_cb_fence_cb(struct fence *fence, struct fence_cb *cb)
101 struct drm_reservation_fence_cb *rfcb =
102 container_of(cb, struct drm_reservation_fence_cb, base);
103 struct drm_reservation_cb *rcb = rfcb->parent;
105 if (atomic_dec_and_test(&rcb->count))
106 schedule_work(&rcb->work);
110 reservation_cb_cleanup(struct drm_reservation_cb *rcb)
114 for (cb = 0; cb < rcb->num_fence_cbs; cb++) {
115 if (rcb->fence_cbs[cb]) {
116 fence_remove_callback(rcb->fence_cbs[cb]->fence,
117 &rcb->fence_cbs[cb]->base);
118 fence_put(rcb->fence_cbs[cb]->fence);
119 kfree(rcb->fence_cbs[cb]);
120 rcb->fence_cbs[cb] = NULL;
123 kfree(rcb->fence_cbs);
124 rcb->fence_cbs = NULL;
125 rcb->num_fence_cbs = 0;
128 static void reservation_cb_work(struct work_struct *pwork)
130 struct drm_reservation_cb *rcb =
131 container_of(pwork, struct drm_reservation_cb, work);
133 * clean up everything before calling the callback, because the callback
134 * may free structure containing rcb and work_struct
136 reservation_cb_cleanup(rcb);
137 rcb->func(rcb, rcb->context);
141 reservation_cb_add_fence_cb(struct drm_reservation_cb *rcb, struct fence *fence)
144 struct drm_reservation_fence_cb *fence_cb;
145 struct drm_reservation_fence_cb **new_fence_cbs;
147 new_fence_cbs = krealloc(rcb->fence_cbs,
148 (rcb->num_fence_cbs + 1)
149 * sizeof(struct drm_reservation_fence_cb *),
153 rcb->fence_cbs = new_fence_cbs;
155 fence_cb = kzalloc(sizeof(struct drm_reservation_fence_cb), GFP_KERNEL);
160 * do not want for fence to disappear on us while we are waiting for
161 * callback and we need it in case we want to remove callbacks
164 fence_cb->fence = fence;
165 fence_cb->parent = rcb;
166 rcb->fence_cbs[rcb->num_fence_cbs] = fence_cb;
167 atomic_inc(&rcb->count);
168 ret = fence_add_callback(fence, &fence_cb->base,
169 reservation_cb_fence_cb);
170 if (ret == -ENOENT) {
171 /* already signaled */
172 atomic_dec(&rcb->count);
173 fence_put(fence_cb->fence);
176 } else if (ret < 0) {
177 atomic_dec(&rcb->count);
178 fence_put(fence_cb->fence);
182 rcb->num_fence_cbs++;
188 drm_reservation_cb_init(struct drm_reservation_cb *rcb,
189 drm_reservation_cb_func_t func, void *context)
191 INIT_WORK(&rcb->work, reservation_cb_work);
192 atomic_set(&rcb->count, 1);
193 rcb->num_fence_cbs = 0;
194 rcb->fence_cbs = NULL;
196 rcb->context = context;
198 EXPORT_SYMBOL(drm_reservation_cb_init);
201 drm_reservation_cb_add(struct drm_reservation_cb *rcb,
202 struct reservation_object *resv, bool exclusive)
206 unsigned shared_count = 0, f;
207 struct fence **shared_fences = NULL;
209 /* enum all the fences in the reservation and add callbacks */
210 ret = reservation_object_get_fences_rcu(resv, &fence,
211 &shared_count, &shared_fences);
216 ret = reservation_cb_add_fence_cb(rcb, fence);
218 reservation_cb_cleanup(rcb);
224 for (f = 0; f < shared_count; f++) {
225 ret = reservation_cb_add_fence_cb(rcb,
228 reservation_cb_cleanup(rcb);
238 for (f = 0; f < shared_count; f++)
239 fence_put(shared_fences[f]);
240 kfree(shared_fences);
244 EXPORT_SYMBOL(drm_reservation_cb_add);
247 drm_reservation_cb_done(struct drm_reservation_cb *rcb)
250 * we need to decrement from initial 1
251 * and trigger the callback in case all the
252 * fences were already triggered
254 if (atomic_dec_and_test(&rcb->count)) {
256 * we could call the callback here directly but in case
257 * the callback function needs to lock the same mutex
258 * as our caller it could cause a deadlock, so it is
259 * safer to call it from a worker
261 schedule_work(&rcb->work);
264 EXPORT_SYMBOL(drm_reservation_cb_done);
267 drm_reservation_cb_fini(struct drm_reservation_cb *rcb)
269 /* make sure no work will be triggered */
270 atomic_set(&rcb->count, 0);
271 cancel_work_sync(&rcb->work);
272 reservation_cb_cleanup(rcb);
274 EXPORT_SYMBOL(drm_reservation_cb_fini);
/* Software fences are signaled explicitly, so signaling is always enabled. */
static bool sw_fence_enable_signaling(struct fence *f)
{
	return true;
}
/* fence_ops.get_driver_name hook for software fences. */
static const char *sw_fence_get_get_driver_name(struct fence *fence)
{
	return "drm_sync_helper";
}
/* fence_ops.get_timeline_name hook for software fences. */
static const char *sw_fence_get_timeline_name(struct fence *f)
{
	return "drm_sync.sw";
}
291 static const struct fence_ops sw_fence_ops = {
292 .get_driver_name = sw_fence_get_get_driver_name,
293 .get_timeline_name = sw_fence_get_timeline_name,
294 .enable_signaling = sw_fence_enable_signaling,
296 .wait = fence_default_wait,
300 struct fence *drm_sw_fence_new(unsigned int context, unsigned seqno)
304 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
306 return ERR_PTR(-ENOMEM);
314 EXPORT_SYMBOL(drm_sw_fence_new);