1 /*************************************************************************/ /*!
3 @Title Kernel driver for Android's sync mechanism
4 @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
5 @License Dual MIT/GPLv2
7 The contents of this file are subject to the MIT license as set out below.
9 Permission is hereby granted, free of charge, to any person obtaining a copy
10 of this software and associated documentation files (the "Software"), to deal
11 in the Software without restriction, including without limitation the rights
12 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
13 copies of the Software, and to permit persons to whom the Software is
14 furnished to do so, subject to the following conditions:
16 The above copyright notice and this permission notice shall be included in
17 all copies or substantial portions of the Software.
19 Alternatively, the contents of this file may be used under the terms of
20 the GNU General Public License Version 2 ("GPL") in which case the provisions
21 of GPL are applicable instead of those above.
23 If you wish to allow use of your version of this file only under the terms of
24 GPL, and not to allow others to use your version of this file under the terms
25 of the MIT license, indicate your decision by deleting the provisions above
26 and replace them with the notice and other provisions required by GPL as set
27 out in the file called "GPL-COPYING" included in this distribution. If you do
28 not delete the provisions above, a recipient may use your version of this file
29 under the terms of either the MIT license or GPL.
31 This License is also included in this distribution in the file called
34 EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
35 PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
36 BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
37 PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
38 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
39 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
40 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
41 */ /**************************************************************************/
45 #include "pvr_fd_sync_kernel.h"
46 #include "services_kernel_client.h"
48 #include <linux/slab.h>
49 #include <linux/file.h>
50 #include <linux/module.h>
51 #include <linux/uaccess.h>
52 #include <linux/version.h>
53 #include <linux/syscalls.h>
54 #include <linux/miscdevice.h>
55 #include <linux/anon_inodes.h>
57 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
58 #include <linux/sync.h>
59 #ifndef CONFIG_SW_SYNC_USER
60 #include <linux/sw_sync.h>
63 #include <../drivers/staging/android/sync.h>
64 #ifndef CONFIG_SW_SYNC_USER
65 #include <../drivers/staging/android/sw_sync.h>
69 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
/* Compatibility shim: kref_put_mutex() first appeared in Linux 3.6.
 * Drops a reference; on the final put it acquires the caller's mutex before
 * invoking @release. NOTE(review): this listing elides several lines (the
 * `struct mutex *` parameter, the lock/unlock and return paths) — confirm
 * against the upstream 3.6 implementation before editing.
 */
71 static inline int kref_put_mutex(struct kref *kref,
72 void (*release)(struct kref *kref),
75 WARN_ON(release == NULL);
76 if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
78 if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
88 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) */
90 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
92 static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
97 static inline int sync_pt_get_status(struct sync_pt *pt)
102 #define for_each_sync_pt(s, f, c) \
104 list_for_each_entry((s), &(f)->pt_list_head, pt_list)
106 #else /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) */
108 static inline int sync_pt_get_status(struct sync_pt *pt)
110 /* No error state for raw dma-buf fences */
111 return fence_is_signaled(&pt->base) ? 1 : 0;
/* Iterate over every sync_pt attached to fence (f), with (s) as the cursor
 * and (c) as the running index (kernels >= 3.18, where the fence holds a
 * flat cbs[] array instead of a pt_list).
 *
 * Both array loads are guarded against out-of-bounds access: the unguarded
 * form read (f)->cbs[num_fences] in the increment clause on the final
 * iteration (and (f)->cbs[0] for an empty fence) *before* the loop
 * condition could terminate the loop. The index is also parenthesized
 * everywhere for macro hygiene.
 */
#define for_each_sync_pt(s, f, c) \
	for ((c) = 0, \
	     (s) = (f)->num_fences == 0 ? NULL : \
		   (struct sync_pt *)(f)->cbs[0].sync_pt; \
	     (c) < (f)->num_fences; \
	     (c)++, \
	     (s) = (c) < (f)->num_fences ? \
		   (struct sync_pt *)(f)->cbs[(c)].sync_pt : NULL)
119 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) */
121 /* #define DEBUG_OUTPUT 1 */
124 #define DPF(fmt, ...) pr_err("pvr_sync: " fmt "\n", __VA_ARGS__)
126 #define DPF(fmt, ...) do {} while (0)
129 #define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, fmt, ...) \
131 if (pfnDumpDebugPrintf) { \
132 pfnDumpDebugPrintf(fmt, __VA_ARGS__); \
134 pr_info("pvr_sync: " fmt, __VA_ARGS__); \
138 #define SYNC_MAX_POOL_SIZE 10
142 SYNC_PT_FENCE_TYPE = 1,
143 SYNC_PT_CLEANUP_TYPE = 2,
144 SYNC_PT_FOREIGN_FENCE_TYPE = 3,
145 SYNC_PT_FOREIGN_CLEANUP_TYPE = 4,
148 struct pvr_sync_append_data {
150 PRGXFWIF_UFO_ADDR *update_ufo_addresses;
153 PRGXFWIF_UFO_ADDR *check_ufo_addresses;
156 /* The cleanup list is needed for rollback (as that's the only op
159 struct pvr_sync_native_sync_prim **cleanup_syncs;
161 /* Keep the sync points around for fput and if rollback is needed */
162 struct pvr_sync_alloc_data *update_sync_data;
164 struct sync_fence *fences[];
168 /* Services client sync prim wrapper. This is used to hold debug information
169 * and make it possible to cache unused syncs. */
170 struct pvr_sync_native_sync_prim {
171 /* List for the sync pool support. */
172 struct list_head list;
174 /* Base services sync prim structure */
175 struct PVRSRV_CLIENT_SYNC_PRIM *client_sync;
177 /* The next queued value which should be used */
180 /* Every sync data will get some unique id */
183 /* FWAddr used by the client sync */
186 /* The type this sync is used for in our driver. Used in
187 * pvr_sync_debug_request. */
190 /* A debug class name also printed in pvr_sync_debug_request */
194 /* This is the actual timeline metadata. We might keep this around after the
195 * base sync driver has destroyed the pvr_sync_timeline_wrapper object.
197 struct pvr_sync_timeline {
198 /* Back reference to the sync_timeline. Not always valid */
199 struct sync_timeline *obj;
201 /* Global timeline list support */
202 struct list_head list;
205 struct pvr_sync_kernel_pair *kernel;
207 /* Should we do timeline idle detection when creating a new fence? */
208 bool fencing_enabled;
210 /* Reference count for this object */
213 /* Used only by pvr_sync_update_all_timelines(). False if the timeline
214 * has been detected as racing with pvr_sync_destroy_timeline().
219 /* This is the IMG extension of a sync_timeline */
220 struct pvr_sync_timeline_wrapper {
221 /* Original timeline struct. Needs to come first. */
222 struct sync_timeline obj;
224 /* Pointer to extra timeline data. Separated life-cycle. */
225 struct pvr_sync_timeline *timeline;
228 struct pvr_sync_kernel_pair {
229 /* Binary sync point representing the android native sync in hw. */
230 struct pvr_sync_native_sync_prim *fence_sync;
232 /* Cleanup sync structure.
233 * If the base sync prim is used for "checking" only within a gl stream,
234 * there is no way of knowing when this has happened. So use a second
235 * sync prim which just gets updated and check the update count when
236 * freeing this struct. */
237 struct pvr_sync_native_sync_prim *cleanup_sync;
239 /* Sync points can go away when there are deferred hardware operations
240 * still outstanding. We must not free the SERVER_SYNC_PRIMITIVE until
241 * the hardware is finished, so we add it to a defer list which is
242 * processed periodically ("defer-free").
244 * Note that the defer-free list is global, not per-timeline.
246 struct list_head list;
249 struct pvr_sync_data {
250 /* Every sync point has a services sync object. This object is used
251 * by the hardware to enforce ordering -- it is attached as a source
252 * dependency to various commands.
254 struct pvr_sync_kernel_pair *kernel;
256 /* The timeline update value for this sync point. */
257 u32 timeline_update_value;
259 /* This refcount is incremented at create and dup time, and decremented
260 * at free time. It ensures the object doesn't start the defer-free
261 * process until it is no longer referenced.
266 struct pvr_sync_alloc_data {
267 struct pvr_sync_timeline *timeline;
268 struct pvr_sync_data *sync_data;
272 /* This is the IMG extension of a sync_pt */
274 /* Original sync_pt structure. Needs to come first. */
277 /* Private shared data */
278 struct pvr_sync_data *sync_data;
281 /* This is the IMG extension of a sync_fence */
282 struct pvr_sync_fence {
283 /* Original sync_fence structure. Needs to come first. */
284 struct sync_fence *fence;
286 /* To ensure callbacks are always received for fences / sync_pts, even
287 * after the fence has been 'put' (freed), we must take a reference to
288 * the fence. We still need to 'put' the fence ourselves, but this might
289 * happen in irq context, where fput() is not allowed (in kernels <3.6).
290 * We must add the fence to a list which is processed in WQ context.
292 struct list_head list;
295 /* Any sync point from a foreign (non-PVR) timeline needs to have a "shadow"
296 * sync prim. This is modelled as a software operation. The foreign driver
297 * completes the operation by calling a callback we registered with it. */
298 struct pvr_sync_fence_waiter {
299 /* Base sync driver waiter structure */
300 struct sync_fence_waiter waiter;
302 /* "Shadow" sync prim backing the foreign driver's sync_pt */
303 struct pvr_sync_kernel_pair *kernel;
305 /* Optimizes lookup of fence for defer-put operation */
306 struct pvr_sync_fence *sync_fence;
309 /* Global data for the sync driver */
311 /* Services connection */
314 /* Complete notify handle */
315 void *command_complete_handle;
317 /* defer_free workqueue. Syncs may still be in use by the HW when freed,
318 * so we have to keep them around until the HW is done with them at
319 * some later time. This workqueue iterates over the list of free'd
320 * syncs, checks if they are in use, and frees the sync device memory
322 struct workqueue_struct *defer_free_wq;
323 struct work_struct defer_free_work;
325 /* check_status workqueue: When a foreign point is completed, a SW
326 * operation marks the sync as completed to allow the operations to
327 * continue. This completion may require the hardware to be notified,
328 * which may be expensive/take locks, so we push that to a workqueue
330 struct workqueue_struct *check_status_wq;
331 struct work_struct check_status_work;
333 /* Context used to create client sync prims. */
334 struct SYNC_PRIM_CONTEXT *sync_prim_context;
336 /* Debug notify handle */
337 void *debug_notify_handle;
339 /* Unique id counter for the sync prims */
342 /* The global event object (used to wait between checks for deferred-
343 * free sync status) */
344 void *event_object_handle;
347 /* List of timelines created by this driver */
348 static LIST_HEAD(timeline_list);
349 static DEFINE_MUTEX(timeline_list_mutex);
351 /* Sync pool support */
352 static LIST_HEAD(sync_pool_free_list);
353 static LIST_HEAD(sync_pool_active_list);
354 static DEFINE_MUTEX(sync_pool_mutex);
355 static s32 sync_pool_size;
356 static u32 sync_pool_created;
357 static u32 sync_pool_reused;
359 /* The "defer-free" object list. Driver global. */
360 static LIST_HEAD(sync_prim_free_list);
361 static DEFINE_SPINLOCK(sync_prim_free_list_spinlock);
363 /* The "defer-put" object list. Driver global. */
364 static LIST_HEAD(sync_fence_put_list);
365 static DEFINE_SPINLOCK(sync_fence_put_list_spinlock);
367 static inline void set_sync_value(struct pvr_sync_native_sync_prim *sync,
370 *(sync->client_sync->pui32LinAddr) = value;
373 static inline u32 get_sync_value(struct pvr_sync_native_sync_prim *sync)
375 return *(sync->client_sync->pui32LinAddr);
378 static inline void complete_sync(struct pvr_sync_native_sync_prim *sync)
380 *(sync->client_sync->pui32LinAddr) = sync->next_value;
383 static inline int is_sync_met(struct pvr_sync_native_sync_prim *sync)
385 return *(sync->client_sync->pui32LinAddr) == sync->next_value;
388 static inline struct pvr_sync_timeline *get_timeline(struct sync_timeline *obj)
390 return ((struct pvr_sync_timeline_wrapper *)obj)->timeline;
/* As get_timeline(), but starting from a sync_pt on that timeline. */
static inline struct pvr_sync_timeline *get_timeline_pt(struct sync_pt *pt)
{
	return get_timeline(sync_pt_parent(pt));
}
/* Returns non-zero once this point's HW fence sync is met.
 * NOTE(review): lines are elided in this listing; per the comment below the
 * missing branch presumably returns "signalled" for a NULL (idle) kernel
 * pair — confirm before relying on this.
 */
399 pvr_sync_has_kernel_signaled(struct pvr_sync_kernel_pair *kernel)
401 /* Idle syncs are always signaled */
405 return is_sync_met(kernel->fence_sync);
408 static struct pvr_sync_alloc_data *pvr_sync_alloc_fence_fdget(int fd);
/* Format a one-line human-readable summary of @timeline for DPF output.
 * Uses a static buffer, so the result is only valid until the next call
 * and the function is not re-entrant — debug use only.
 */
412 static char *debug_info_timeline(struct pvr_sync_timeline *timeline)
414 static char info[256];
416 snprintf(info, sizeof(info),
417 "n='%s' id=%u fw=0x%x tl_curr=%u tl_next=%u",
418 timeline->obj ? timeline->obj->name : "?",
419 timeline->kernel->fence_sync->id,
420 timeline->kernel->fence_sync->vaddr,
421 get_sync_value(timeline->kernel->fence_sync),
422 timeline->kernel->fence_sync->next_value);
/* Format a one-line summary of a sync point (and its cleanup sync, if any)
 * for DPF output. Like debug_info_timeline(), this writes into static
 * buffers and is not re-entrant — debug use only.
 */
427 static char *debug_info_sync_pt(struct sync_pt *pt)
429 struct pvr_sync_timeline *timeline = get_timeline_pt(pt);
430 struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)pt;
431 struct pvr_sync_kernel_pair *kernel = pvr_pt->sync_data->kernel;
432 static char info[256], info1[256];
435 struct pvr_sync_native_sync_prim *cleanup_sync =
436 kernel->cleanup_sync;
/* Optional " # cleanup: ..." suffix, only when a cleanup sync exists. */
439 snprintf(info1, sizeof(info1),
440 " # cleanup: id=%u fw=0x%x curr=%u next=%u",
443 get_sync_value(cleanup_sync),
444 cleanup_sync->next_value);
449 snprintf(info, sizeof(info),
450 "status=%d tl_taken=%u ref=%d # sync: id=%u fw=0x%x curr=%u next=%u%s # tl: %s",
451 pvr_sync_has_kernel_signaled(kernel),
452 pvr_pt->sync_data->timeline_update_value,
453 atomic_read(&pvr_pt->sync_data->kref.refcount),
454 kernel->fence_sync->id,
455 kernel->fence_sync->vaddr,
456 get_sync_value(kernel->fence_sync),
457 kernel->fence_sync->next_value,
458 info1, debug_info_timeline(timeline));
/* Idle path (no kernel fence sync): report "idle" instead of sync state. */
460 snprintf(info, sizeof(info),
461 "status=%d tl_taken=%u ref=%d # sync: idle # tl: %s",
462 pvr_sync_has_kernel_signaled(kernel),
463 pvr_pt->sync_data->timeline_update_value,
464 atomic_read(&pvr_pt->sync_data->kref.refcount),
465 debug_info_timeline(timeline));
471 #endif /* DEBUG_OUTPUT */
/* Hand out a native sync prim, reusing one from the free pool when possible,
 * otherwise allocating a fresh one via SyncPrimAlloc(). The sync is moved to
 * the active list, given a fresh unique id, tagged with @type/@class_name,
 * and reset to 0 before being returned through @_sync.
 * Caller context: takes sync_pool_mutex internally.
 */
473 static enum PVRSRV_ERROR
474 sync_pool_get(struct pvr_sync_native_sync_prim **_sync,
475 const char *class_name, u8 type)
477 struct pvr_sync_native_sync_prim *sync;
478 enum PVRSRV_ERROR error = PVRSRV_OK;
480 mutex_lock(&sync_pool_mutex);
482 if (list_empty(&sync_pool_free_list)) {
483 /* If there is nothing in the pool, create a new sync prim. */
484 sync = kmalloc(sizeof(struct pvr_sync_native_sync_prim),
487 pr_err("pvr_sync: %s: Failed to allocate sync data",
489 error = PVRSRV_ERROR_OUT_OF_MEMORY;
493 error = SyncPrimAlloc(pvr_sync_data.sync_prim_context,
494 &sync->client_sync, class_name);
495 if (error != PVRSRV_OK) {
496 pr_err("pvr_sync: %s: Failed to allocate sync prim (%s)",
497 __func__, PVRSRVGetErrorStringKM(error));
501 sync->vaddr = SyncPrimGetFirmwareAddr(sync->client_sync);
503 list_add_tail(&sync->list, &sync_pool_active_list);
/* Pool hit: recycle the oldest free sync onto the active list. */
506 sync = list_first_entry(&sync_pool_free_list,
507 struct pvr_sync_native_sync_prim, list);
508 list_move_tail(&sync->list, &sync_pool_active_list);
513 sync->id = atomic_inc_return(&pvr_sync_data.sync_id);
/* NOTE(review): strncpy() does not guarantee NUL-termination when
 * class_name fills the buffer — consider snprintf "%s" instead. */
516 strncpy(sync->class, class_name, sizeof(sync->class));
517 /* Its crucial to reset the sync to zero */
518 set_sync_value(sync, 0);
519 sync->next_value = 0;
523 mutex_unlock(&sync_pool_mutex);
/* Return a sync prim to the pool. While the pool has room the sync is
 * poisoned with 0xffffffff ("unused") and parked on the free list for
 * reuse; otherwise it is poisoned with 0xdeadbeef ("invalid"), unlinked
 * and freed immediately via SyncPrimFree().
 */
531 static void sync_pool_put(struct pvr_sync_native_sync_prim *sync)
533 mutex_lock(&sync_pool_mutex);
535 if (sync_pool_size < SYNC_MAX_POOL_SIZE) {
536 /* Mark it as unused */
537 set_sync_value(sync, 0xffffffff)
539 list_move(&sync->list, &sync_pool_free_list);
542 /* Mark it as invalid */
543 set_sync_value(sync, 0xdeadbeef);
545 list_del(&sync->list);
546 SyncPrimFree(sync->client_sync);
550 mutex_unlock(&sync_pool_mutex);
/* Drain the free pool entirely: poison, unlink and free every cached sync.
 * Active syncs are untouched. Called at teardown (or pool invalidation).
 */
553 static void sync_pool_clear(void)
555 struct pvr_sync_native_sync_prim *sync, *n;
557 mutex_lock(&sync_pool_mutex);
559 list_for_each_entry_safe(sync, n, &sync_pool_free_list, list) {
560 /* Mark it as invalid */
561 set_sync_value(sync, 0xdeadbeef);
563 list_del(&sync->list);
564 SyncPrimFree(sync->client_sync);
569 mutex_unlock(&sync_pool_mutex);
/* Services debug-request callback: at high verbosity, dump every pending
 * (unmet) active sync and every pooled/unused sync, with id, FW address,
 * current/next values and type.
 */
572 static void pvr_sync_debug_request(void *hDebugRequestHandle,
575 struct pvr_sync_native_sync_prim *sync;
/* Indexed by sync->type; BUG_ON below guards the bound. */
577 static const char *const type_names[] = {
578 "Timeline", "Fence", "Cleanup",
579 "Foreign Fence", "Foreign Cleanup"
582 if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_HIGH) {
583 mutex_lock(&sync_pool_mutex);
585 PVR_DUMPDEBUG_LOG(g_pfnDumpDebugPrintf,
586 "Dumping all pending android native syncs (Pool usage: %d%% - %d %d)",
/* NOTE(review): the usage arithmetic divides by sync_pool_reused; the
 * elided guard presumably checks it is non-zero, but the formula
 * (created+reused)*100/reused can exceed 100% — verify intent. */
589 ((sync_pool_created + sync_pool_reused) *
590 100 / sync_pool_reused)) : 0,
591 sync_pool_created, sync_pool_reused);
/* Pending syncs: skip anything already met. */
593 list_for_each_entry(sync, &sync_pool_active_list, list) {
594 if (is_sync_met(sync))
597 BUG_ON(sync->type >= ARRAY_SIZE(type_names));
599 PVR_DUMPDEBUG_LOG(g_pfnDumpDebugPrintf,
600 "\tID = %d, FWAddr = 0x%08x: Current = 0x%08x, Next = 0x%08x, %s (%s)",
601 sync->id, sync->vaddr,
602 get_sync_value(sync),
605 type_names[sync->type]);
608 PVR_DUMPDEBUG_LOG(g_pfnDumpDebugPrintf,
609 "Dumping all unused syncs");
610 list_for_each_entry(sync, &sync_pool_free_list, list) {
611 BUG_ON(sync->type >= ARRAY_SIZE(type_names));
613 PVR_DUMPDEBUG_LOG(g_pfnDumpDebugPrintf,
614 "\tID = %d, FWAddr = 0x%08x: Current = 0x%08x, Next = 0x%08x, %s (%s)",
615 sync->id, sync->vaddr,
616 get_sync_value(sync),
619 type_names[sync->type]);
622 mutex_unlock(&sync_pool_mutex);
/* sync_timeline_ops.dup: duplicate a sync point on the same timeline.
 * The new point shares the original's sync_data (kref incremented), so
 * both points observe the same HW sync state.
 */
626 static struct sync_pt *pvr_sync_dup(struct sync_pt *sync_pt)
628 struct pvr_sync_pt *pvr_pt_a = (struct pvr_sync_pt *)sync_pt;
629 struct pvr_sync_pt *pvr_pt_b = NULL;
631 DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt));
633 pvr_pt_b = (struct pvr_sync_pt *)
634 sync_pt_create(sync_pt_parent(sync_pt),
635 sizeof(struct pvr_sync_pt));
637 pr_err("pvr_sync: %s: Failed to dup sync pt", __func__);
/* Share (not copy) the sync_data between the two points. */
641 kref_get(&pvr_pt_a->sync_data->kref);
643 pvr_pt_b->sync_data = pvr_pt_a->sync_data;
646 return (struct sync_pt *)pvr_pt_b;
649 static int pvr_sync_has_signaled(struct sync_pt *sync_pt)
651 struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
653 DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt));
655 return pvr_sync_has_kernel_signaled(pvr_pt->sync_data->kernel);
/* sync_timeline_ops.compare: order two points by their timeline update
 * values, tolerant of u32 wrap-around (signed difference trick).
 * NOTE(review): the elided lines presumably return 0 when a1 == b1.
 */
658 static int pvr_sync_compare(struct sync_pt *a, struct sync_pt *b)
660 u32 a1 = ((struct pvr_sync_pt *)a)->sync_data->timeline_update_value;
661 u32 b1 = ((struct pvr_sync_pt *)b)->sync_data->timeline_update_value;
663 DPF("%s: a # %s", __func__, debug_info_sync_pt(a));
664 DPF("%s: b # %s", __func__, debug_info_sync_pt(b));
669 /* Take integer wrapping into account */
670 return ((s32)a1 - (s32)b1) < 0 ? -1 : 1;
/* Block (in process context) until @sync is met, or give up on error.
 * Waits on the global services event object, re-checking the sync value
 * after each wakeup/timeout. A NULL @sync returns immediately.
 */
673 static void wait_for_sync(struct pvr_sync_native_sync_prim *sync)
676 void *event_object = NULL;
677 enum PVRSRV_ERROR error = PVRSRV_OK;
679 while (sync && !is_sync_met(sync)) {
/* Lazily open the event object on first iteration. */
681 error = OSEventObjectOpen(
682 pvr_sync_data.event_object_handle,
684 if (error != PVRSRV_OK) {
685 pr_err("pvr_sync: %s: Error opening event object (%s)\n",
687 PVRSRVGetErrorStringKM(error));
/* Timeouts are expected; just loop and re-check the sync value. */
691 error = OSEventObjectWait(event_object);
692 if (error != PVRSRV_OK && error != PVRSRV_ERROR_TIMEOUT) {
693 pr_err("pvr_sync: %s: Error waiting on event object (%s)\n",
695 PVRSRVGetErrorStringKM(error));
700 OSEventObjectClose(event_object);
/* Queue @kernel onto the global defer-free list and kick the defer_free
 * workqueue. Safe to call from irq context (irqsave spinlock); the WQ
 * worker frees the syncs once the HW has finished with them.
 * NOTE(review): the elided line is presumably `unsigned long flags;`.
 */
704 static void pvr_sync_defer_free(struct pvr_sync_kernel_pair *kernel)
708 spin_lock_irqsave(&sync_prim_free_list_spinlock, flags);
709 list_add_tail(&kernel->list, &sync_prim_free_list);
710 spin_unlock_irqrestore(&sync_prim_free_list_spinlock, flags);
712 queue_work(pvr_sync_data.defer_free_wq, &pvr_sync_data.defer_free_work);
715 /* This function assumes the timeline_list_mutex is held while it runs */
/* kref release: defer-free the timeline's kernel syncs and unlink it from
 * the global timeline list. Caller must hold timeline_list_mutex.
 */
717 static void pvr_sync_destroy_timeline_locked(struct kref *kref)
719 struct pvr_sync_timeline *timeline = (struct pvr_sync_timeline *)
720 container_of(kref, struct pvr_sync_timeline, kref);
722 pvr_sync_defer_free(timeline->kernel);
723 list_del(&timeline->list);
/* kref release wrapper used when timeline_list_mutex is NOT already held:
 * take the list lock, then destroy. Statement order is load-bearing.
 */
727 static void pvr_sync_destroy_timeline(struct kref *kref)
729 mutex_lock(&timeline_list_mutex);
730 pvr_sync_destroy_timeline_locked(kref);
731 mutex_unlock(&timeline_list_mutex);
/* sync_timeline_ops.release_obj: the base sync_timeline is being destroyed.
 * Waits for the timeline sync to be met, severs the back-pointer to the
 * dying @obj, then drops our reference (possibly freeing our metadata too).
 */
734 static void pvr_sync_release_timeline(struct sync_timeline *obj)
736 struct pvr_sync_timeline *timeline = get_timeline(obj);
738 /* If pvr_sync_open failed after calling sync_timeline_create, this
739 * can be called with a timeline that has not got a timeline sync
740 * or been added to our timeline list. Use a NULL timeline to
741 * detect and handle this condition
746 DPF("%s: # %s", __func__, debug_info_timeline(timeline));
748 wait_for_sync(timeline->kernel->fence_sync);
750 /* Whether or not we're the last reference, obj is going away
751 * after this function returns, so remove our back reference
754 timeline->obj = NULL;
756 /* This might be the last reference to the timeline object.
757 * If so, we'll go ahead and delete it now.
759 kref_put(&timeline->kref, pvr_sync_destroy_timeline);
762 /* The print_obj() and print_pt() functions have been removed, so we're forced
763 * to use the timeline_value_str() and pt_value_str() functions. These are
764 * worse because we're limited to 64 characters, and the strings for sync
765 * pts have to be formatted like:
767 * pt active: pt_info / tl_info
769 * For us, the tl_info is complicated and doesn't need to be repeated over
770 * and over. So try to detect the way sync_print_pt() calls the two value_str
771 * functions and change what pvr_sync_timeline_value_str() returns dynamically.
773 static struct sync_timeline *last_pt_timeline;
/* sync_timeline_ops.timeline_value_str: emit timeline state into @str.
 * Full "id fw curr/next" form on the first call for a timeline; just the
 * current value on repeat calls (tracked via last_pt_timeline) to fit
 * sync_print_pt()'s 64-char limit.
 */
775 static void pvr_sync_timeline_value_str(struct sync_timeline *sync_timeline,
778 struct pvr_sync_timeline *timeline = get_timeline(sync_timeline);
780 if (sync_timeline != last_pt_timeline) {
781 snprintf(str, size, "%u 0x%x %u/%u",
782 timeline->kernel->fence_sync->id,
783 timeline->kernel->fence_sync->vaddr,
784 get_sync_value(timeline->kernel->fence_sync),
785 timeline->kernel->fence_sync->next_value);
/* Same timeline as the previous sync_pt print: keep it terse. */
787 snprintf(str, size, "%u",
788 get_sync_value(timeline->kernel->fence_sync));
/* sync_timeline_ops.pt_value_str: emit this point's state into @str
 * (<= 64 chars, see the layout sketch below). Fence sync only, fence +
 * cleanup sync, or "idle" when there is no kernel pair. Also records the
 * parent timeline in last_pt_timeline so timeline_value_str() can
 * abbreviate repeat output.
 */
792 static void pvr_sync_pt_value_str(struct sync_pt *sync_pt, char *str, int size)
794 struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
795 struct pvr_sync_kernel_pair *kernel;
797 if (!pvr_pt->sync_data)
800 kernel = pvr_pt->sync_data->kernel;
802 /* Messages must be at most 64 bytes (including the null terminator):
804 * 123456789012345678901234567890123456789012345678901234567890123
806 * ID FW ADDR C/N # REF TAKEN
807 * 123456 0xdeadbeef 0/1 # r=2 123456
809 * ID FW ADDR C/N # ID FW ADDR C/N # REF TAKEN
810 * 123456 0xdeadbeef 0/1 # 123456 0xdeadbeef 0/1 # r=2 123456
813 if (!kernel->cleanup_sync) {
815 "%u 0x%x %u/%u r=%d %u",
816 kernel->fence_sync->id,
817 kernel->fence_sync->vaddr,
818 get_sync_value(kernel->fence_sync),
819 kernel->fence_sync->next_value,
820 atomic_read(&pvr_pt->sync_data->kref.refcount),
821 pvr_pt->sync_data->timeline_update_value);
/* Cleanup sync present: longer two-sync form. */
824 "%u 0x%x %u/%u # %u 0x%x %u/%u # r=%d %u",
825 kernel->fence_sync->id,
826 kernel->fence_sync->vaddr,
827 get_sync_value(kernel->fence_sync),
828 kernel->fence_sync->next_value,
829 kernel->cleanup_sync->id,
830 kernel->cleanup_sync->vaddr,
831 get_sync_value(kernel->cleanup_sync),
832 kernel->cleanup_sync->next_value,
833 atomic_read(&pvr_pt->sync_data->kref.refcount),
834 pvr_pt->sync_data->timeline_update_value);
/* No kernel pair at all: the point is idle. */
837 snprintf(str, size, "idle # r=%d %u",
838 atomic_read(&pvr_pt->sync_data->kref.refcount),
839 pvr_pt->sync_data->timeline_update_value);
842 last_pt_timeline = sync_pt_parent(sync_pt);
/* Allocate and initialise a pvr_sync_data (refcounted) with its kernel
 * pair and a pooled fence sync. Returns NULL on failure; error paths
 * unwind the partial allocations (kernel pair freed below — the remaining
 * kfree/return lines are elided in this listing).
 */
845 static struct pvr_sync_data *
846 pvr_sync_create_sync_data(struct sync_timeline *obj)
848 struct pvr_sync_data *sync_data = NULL;
849 enum PVRSRV_ERROR error;
851 sync_data = kzalloc(sizeof(struct pvr_sync_data), GFP_KERNEL);
855 kref_init(&sync_data->kref);
858 kzalloc(sizeof(struct pvr_sync_kernel_pair),
861 if (!sync_data->kernel)
/* sync_pool_get() must run under the services bridge lock. */
864 OSAcquireBridgeLock();
865 error = sync_pool_get(&sync_data->kernel->fence_sync,
866 obj->name, SYNC_PT_FENCE_TYPE);
867 OSReleaseBridgeLock();
869 if (error != PVRSRV_OK) {
870 pr_err("pvr_sync: %s: Failed to allocate sync prim (%s)",
871 __func__, PVRSRVGetErrorStringKM(error));
872 goto err_free_kernel;
879 kfree(sync_data->kernel);
/* kref release for pvr_sync_data: hand the kernel pair to defer-free (the
 * HW may still reference the syncs) rather than freeing it synchronously.
 * NOTE(review): the elided tail presumably kfree()s sync_data itself.
 */
886 static void pvr_sync_free_sync_data(struct kref *kref)
888 struct pvr_sync_data *sync_data = (struct pvr_sync_data *)
889 container_of(kref, struct pvr_sync_data, kref);
891 if (sync_data->kernel)
892 pvr_sync_defer_free(sync_data->kernel);
896 static void pvr_sync_free_sync(struct sync_pt *sync_pt)
898 struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
900 DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt));
902 kref_put(&pvr_pt->sync_data->kref, pvr_sync_free_sync_data);
905 /* this function uses pvr_sync_timeline_ops defined below */
/* Forward declaration: pvr_sync_fill_driver_data is defined after the ops
 * table that references it. */
906 static int pvr_sync_fill_driver_data(struct sync_pt *, void *, int);
/* Android sync framework callbacks for PVR timelines. The ops pointer also
 * serves as the driver-ownership test in is_pvr_timeline(). */
908 static struct sync_timeline_ops pvr_sync_timeline_ops = {
909 .driver_name = PVRSYNC_MODNAME,
911 .has_signaled = pvr_sync_has_signaled,
912 .compare = pvr_sync_compare,
913 .free_pt = pvr_sync_free_sync,
914 .release_obj = pvr_sync_release_timeline,
915 .timeline_value_str = pvr_sync_timeline_value_str,
916 .pt_value_str = pvr_sync_pt_value_str,
917 .fill_driver_data = pvr_sync_fill_driver_data,
920 static inline bool is_pvr_timeline(struct sync_timeline *obj)
922 return obj->ops == &pvr_sync_timeline_ops;
925 static inline bool is_pvr_timeline_pt(struct sync_pt *pt)
927 return is_pvr_timeline(sync_pt_parent(pt));
/* sync_timeline_ops.fill_driver_data: copy this point's state into the
 * caller-supplied pvr_sync_pt_info buffer (for userspace debug/ioctl).
 * Returns the number of bytes written, or an error when @size is too
 * small (elided branch). Idle points report zeroed sync fields.
 */
931 pvr_sync_fill_driver_data(struct sync_pt *sync_pt, void *data, int size)
933 struct pvr_sync_pt_info *info = (struct pvr_sync_pt_info *)data;
934 struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
935 struct pvr_sync_data *sync_data = pvr_pt->sync_data;
936 struct pvr_sync_kernel_pair *kernel = sync_data->kernel;
938 if (size < sizeof(struct pvr_sync_pt_info))
941 info->ui32TlTaken = sync_data->timeline_update_value;
/* Kernel pair present: report live sync state. */
944 info->id = kernel->fence_sync->id;
945 info->ui32FWAddr = kernel->fence_sync->vaddr;
946 info->ui32CurrOp = get_sync_value(kernel->fence_sync);
947 info->ui32NextOp = kernel->fence_sync->next_value;
/* Idle point: zero the sync fields. */
950 info->ui32FWAddr = 0;
951 info->ui32CurrOp = 0;
952 info->ui32NextOp = 0;
955 return sizeof(struct pvr_sync_pt_info);
958 /* foreign sync handling */
/* Waiter callback fired when a foreign (non-PVR) fence signals. May run in
 * irq context: completes the shadow sync, defers both the fence 'put' and
 * the kernel-pair free to workqueues, then kicks check_status so dependent
 * work can progress.
 * NOTE(review): the elided declaration is presumably `unsigned long flags;`.
 */
960 static void pvr_sync_foreign_sync_pt_signaled(struct sync_fence *fence,
961 struct sync_fence_waiter *_waiter)
963 struct pvr_sync_fence_waiter *waiter =
964 (struct pvr_sync_fence_waiter *)_waiter;
967 /* Complete the SW operation and free the sync if we can. If we can't,
968 * it will be checked by a later workqueue kick. */
969 complete_sync(waiter->kernel->fence_sync);
971 /* We can 'put' the fence now, but this function might be called in
972 * irq context so we must defer to WQ.
973 * This WQ is triggered in pvr_sync_defer_free, so adding it to the
974 * put list before that should guarantee it's cleaned up on the next
976 spin_lock_irqsave(&sync_fence_put_list_spinlock, flags);
977 list_add_tail(&waiter->sync_fence->list, &sync_fence_put_list);
978 spin_unlock_irqrestore(&sync_fence_put_list_spinlock, flags);
980 pvr_sync_defer_free(waiter->kernel);
982 /* The completed sw-sync may allow other tasks to complete,
983 * so we need to allow them to progress */
984 queue_work(pvr_sync_data.check_status_wq,
985 &pvr_sync_data.check_status_work);
/* Build the "shadow" machinery for a foreign fence fd: take a reference on
 * the fence, allocate a kernel pair (fence + cleanup shadow syncs from the
 * pool), and register an async waiter whose callback completes the shadow
 * sync when the foreign fence signals. Returns the kernel pair, or NULL if
 * the fence was already signalled/broken or on any allocation failure.
 * Error paths unwind via the goto-cleanup labels at the bottom.
 */
990 static struct pvr_sync_kernel_pair *
991 pvr_sync_create_waiter_for_foreign_sync(int fd)
993 struct pvr_sync_kernel_pair *kernel = NULL;
994 struct pvr_sync_fence_waiter *waiter;
995 struct pvr_sync_fence *sync_fence;
996 struct sync_fence *fence;
997 enum PVRSRV_ERROR error;
1000 fence = sync_fence_fdget(fd);
1002 pr_err("pvr_sync: %s: Failed to take reference on fence",
1007 kernel = kmalloc(sizeof(struct pvr_sync_kernel_pair), GFP_KERNEL);
1009 pr_err("pvr_sync: %s: Failed to allocate sync kernel",
1014 sync_fence = kmalloc(sizeof(struct pvr_sync_fence), GFP_KERNEL);
1016 pr_err("pvr_sync: %s: Failed to allocate pvr sync fence",
1018 goto err_free_kernel;
1021 sync_fence->fence = fence;
/* Shadow fence sync: completed by the foreign-signal callback. */
1023 error = sync_pool_get(&kernel->fence_sync,
1024 fence->name, SYNC_PT_FOREIGN_FENCE_TYPE);
1025 if (error != PVRSRV_OK) {
1026 pr_err("pvr_sync: %s: Failed to allocate sync prim (%s)",
1027 __func__, PVRSRVGetErrorStringKM(error));
1028 goto err_free_sync_fence;
1031 kernel->fence_sync->next_value++;
/* Cleanup sync: lets defer-free detect when the HW is done with it. */
1033 error = sync_pool_get(&kernel->cleanup_sync,
1034 fence->name, SYNC_PT_FOREIGN_CLEANUP_TYPE);
1035 if (error != PVRSRV_OK) {
1036 pr_err("pvr_sync: %s: Failed to allocate cleanup sync prim (%s)",
1037 __func__, PVRSRVGetErrorStringKM(error));
1041 kernel->cleanup_sync->next_value++;
1043 /* The custom waiter structure is freed in the waiter callback */
1044 waiter = kmalloc(sizeof(struct pvr_sync_fence_waiter), GFP_KERNEL);
1046 pr_err("pvr_sync: %s: Failed to allocate waiter", __func__);
1047 goto err_free_cleanup_sync;
1050 waiter->kernel = kernel;
1051 waiter->sync_fence = sync_fence;
1053 sync_fence_waiter_init(&waiter->waiter,
1054 pvr_sync_foreign_sync_pt_signaled);
1056 err = sync_fence_wait_async(fence, &waiter->waiter);
1059 pr_err("pvr_sync: %s: Fence was in error state (%d)",
1064 /* -1 means the fence was broken, 1 means the fence already
1065 * signalled. In either case, roll back what we've done and
1066 * skip using this sync_pt for synchronization.
1068 goto err_free_waiter;
1075 err_free_cleanup_sync:
1076 sync_pool_put(kernel->cleanup_sync);
1078 sync_pool_put(kernel->fence_sync);
1079 err_free_sync_fence:
1085 sync_fence_put(fence);
1089 enum PVRSRV_ERROR pvr_sync_append_fences(
1091 const u32 nr_check_fences,
1092 const s32 *check_fence_fds,
1093 const s32 update_fence_fd,
1094 const u32 nr_updates,
1095 const PRGXFWIF_UFO_ADDR *update_ufo_addresses,
1096 const u32 *update_values,
1097 const u32 nr_checks,
1098 const PRGXFWIF_UFO_ADDR *check_ufo_addresses,
1099 const u32 *check_values,
1100 struct pvr_sync_append_data **append_sync_data)
1102 struct pvr_sync_append_data *sync_data;
1103 enum PVRSRV_ERROR err = PVRSRV_OK;
1104 struct pvr_sync_native_sync_prim **cleanup_sync_pos;
1105 PRGXFWIF_UFO_ADDR *update_address_pos;
1106 PRGXFWIF_UFO_ADDR *check_address_pos;
1107 u32 *update_value_pos;
1108 u32 *check_value_pos;
1109 unsigned num_used_sync_checks;
1110 unsigned num_used_sync_updates;
1111 struct pvr_sync_alloc_data *alloc_sync_data = NULL;
1114 if ((nr_updates && (!update_ufo_addresses || !update_values)) ||
1115 (nr_checks && (!check_ufo_addresses || !check_values)))
1116 return PVRSRV_ERROR_INVALID_PARAMS;
1119 kzalloc(sizeof(struct pvr_sync_append_data)
1120 + nr_check_fences * sizeof(struct sync_fence *),
1123 err = PVRSRV_ERROR_OUT_OF_MEMORY;
1127 sync_data->nr_checks = nr_checks;
1128 sync_data->nr_updates = nr_updates;
1130 sync_data->nr_fences = nr_check_fences;
1132 /* Loop through once to get the fences and count the total number of
1134 for (i = 0; i < nr_check_fences; i++) {
1135 struct sync_fence *fence = sync_fence_fdget(check_fence_fds[i]);
1136 struct pvr_sync_kernel_pair *sync_kernel;
1137 unsigned int points_on_fence = 0;
1138 bool has_foreign_point = false;
1139 struct sync_pt *sync_pt;
1143 pr_err("pvr_sync: %s: Failed to read sync private data for fd %d\n",
1144 __func__, check_fence_fds[i]);
1145 err = PVRSRV_ERROR_HANDLE_NOT_FOUND;
1146 goto err_free_append_data;
1149 sync_data->fences[i] = fence;
1151 for_each_sync_pt(sync_pt, fence, j) {
1152 struct pvr_sync_pt *pvr_pt;
1154 if (!is_pvr_timeline_pt(sync_pt)) {
1155 if (!sync_pt_get_status(sync_pt))
1156 has_foreign_point = true;
1160 pvr_pt = (struct pvr_sync_pt *)sync_pt;
1161 sync_kernel = pvr_pt->sync_data->kernel;
1164 is_sync_met(sync_kernel->fence_sync)) {
1167 /* We will use the above sync for "check" only. In this
1168 * case also insert a "cleanup" update command into the
1169 * opengl stream. This can later be used for checking if
1170 * the sync prim could be freed. */
1171 if (!sync_kernel->cleanup_sync) {
1172 err = sync_pool_get(&sync_kernel->cleanup_sync,
1173 sync_pt_parent(&pvr_pt->pt)->name,
1174 SYNC_PT_CLEANUP_TYPE);
1175 if (err != PVRSRV_OK) {
1176 pr_err("pvr_sync: %s: Failed to allocate cleanup sync prim (%s)",
1178 PVRSRVGetErrorStringKM(err));
1179 goto err_free_append_data;
1185 if (has_foreign_point)
1188 /* Each point has 1 check value, and 1 update value (for the
1190 sync_data->nr_checks += points_on_fence;
1191 sync_data->nr_updates += points_on_fence;
1192 sync_data->nr_cleaup_syncs += points_on_fence;
1195 if (update_fence_fd >= 0) {
1196 alloc_sync_data = pvr_sync_alloc_fence_fdget(update_fence_fd);
1197 if (!alloc_sync_data) {
1198 pr_err("pvr_sync: %s: Failed to read alloc sync private data for fd %d\n",
1199 __func__, update_fence_fd);
1200 err = PVRSRV_ERROR_HANDLE_NOT_FOUND;
1201 goto err_free_append_data;
1203 /* Store the alloc sync data now, so it's correctly fput()
1205 sync_data->update_sync_data = alloc_sync_data;
1206 /* If an alloc-sync has already been appended to a kick that
1207 * is an error (and the sync_data will be NULL */
1208 if (!alloc_sync_data->sync_data) {
1209 pr_err("pvr_sync: %s: Failed to read alloc sync sync_data for fd %d\n",
1210 __func__, update_fence_fd);
1211 err = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
1212 goto err_free_append_data;
1215 /* A fence update requires 2 update values (fence and timeline)
1217 sync_data->nr_updates += 2;
1220 sync_data->update_ufo_addresses =
1221 kzalloc(sizeof(PRGXFWIF_UFO_ADDR) * sync_data->nr_updates,
1223 if (!sync_data->update_ufo_addresses) {
1224 pr_err("pvr_sync: %s: Failed to allocate update UFO address list\n",
1226 err = PVRSRV_ERROR_OUT_OF_MEMORY;
1227 goto err_free_append_data;
1230 sync_data->update_values =
1231 kzalloc(sizeof(u32) * sync_data->nr_updates,
1233 if (!sync_data->update_values) {
1234 pr_err("pvr_sync: %s: Failed to allocate update value list\n",
1236 err = PVRSRV_ERROR_OUT_OF_MEMORY;
1237 goto err_free_append_data;
1240 sync_data->check_ufo_addresses =
1241 kzalloc(sizeof(PRGXFWIF_UFO_ADDR) * sync_data->nr_checks,
1243 if (!sync_data->check_ufo_addresses) {
1244 pr_err("pvr_sync: %s: Failed to allocate check UFO address list\n",
1246 err = PVRSRV_ERROR_OUT_OF_MEMORY;
1247 goto err_free_append_data;
1250 sync_data->check_values =
1251 kzalloc(sizeof(u32) * sync_data->nr_checks,
1253 if (!sync_data->check_values) {
1254 pr_err("pvr_sync: %s: Failed to allocate check value list\n",
1256 err = PVRSRV_ERROR_OUT_OF_MEMORY;
1257 goto err_free_append_data;
1260 sync_data->cleanup_syncs =
1261 kzalloc(sizeof(struct pvr_sync_native_sync_prim *) *
1262 sync_data->nr_cleaup_syncs, GFP_KERNEL);
1263 if (!sync_data->cleanup_syncs) {
1264 pr_err("pvr_sync: %s: Failed to allocate cleanup rollback list\n",
1266 err = PVRSRV_ERROR_OUT_OF_MEMORY;
1267 goto err_free_append_data;
1270 update_address_pos = sync_data->update_ufo_addresses;
1271 update_value_pos = sync_data->update_values;
1272 check_address_pos = sync_data->check_ufo_addresses;
1273 check_value_pos = sync_data->check_values;
1274 cleanup_sync_pos = sync_data->cleanup_syncs;
1277 /* Everything should be allocated/sanity checked. No errors are possible
1278 * after this point */
1280 /* Append any check syncs */
1281 for (i = 0; i < nr_check_fences; i++) {
1282 struct sync_fence *fence = sync_data->fences[i];
1283 bool has_foreign_point = false;
1284 struct sync_pt *sync_pt;
1287 for_each_sync_pt(sync_pt, fence, j) {
1288 struct pvr_sync_pt *pvr_pt;
1289 struct pvr_sync_kernel_pair *sync_kernel;
1291 if (!is_pvr_timeline_pt(sync_pt)) {
1292 if (!sync_pt_get_status(sync_pt))
1293 has_foreign_point = true;
1297 pvr_pt = (struct pvr_sync_pt *)sync_pt;
1298 sync_kernel = pvr_pt->sync_data->kernel;
1301 is_sync_met(sync_kernel->fence_sync)) {
1305 (*check_address_pos++).ui32Addr =
1306 sync_kernel->fence_sync->vaddr;
1307 *check_value_pos++ =
1308 sync_kernel->fence_sync->next_value;
1310 (*update_address_pos++).ui32Addr =
1311 sync_kernel->cleanup_sync->vaddr;
1312 *update_value_pos++ =
1313 ++sync_kernel->cleanup_sync->next_value;
1314 *cleanup_sync_pos++ = sync_kernel->cleanup_sync;
1317 if (has_foreign_point) {
1318 struct pvr_sync_kernel_pair *foreign_sync_kernel =
1319 pvr_sync_create_waiter_for_foreign_sync(
1320 check_fence_fds[i]);
1322 if (foreign_sync_kernel) {
1323 struct pvr_sync_native_sync_prim *fence_sync =
1324 foreign_sync_kernel->fence_sync;
1325 struct pvr_sync_native_sync_prim *cleanup_sync =
1326 foreign_sync_kernel->cleanup_sync;
1329 (*check_address_pos++).ui32Addr =
1331 *check_value_pos++ =
1332 fence_sync->next_value;
1334 (*update_address_pos++).ui32Addr =
1335 cleanup_sync->vaddr;
1336 *update_value_pos++ =
1337 ++cleanup_sync->next_value;
1338 *cleanup_sync_pos++ = cleanup_sync;
1343 /* Append the update sync (if supplied) */
1344 if (sync_data->update_sync_data) {
1345 struct pvr_sync_alloc_data *update_data =
1346 sync_data->update_sync_data;
1347 struct pvr_sync_timeline *timeline =
1348 update_data->timeline;
1349 struct pvr_sync_kernel_pair *sync_kernel =
1350 update_data->sync_data->kernel;
1352 (*update_address_pos++).ui32Addr =
1353 sync_kernel->fence_sync->vaddr;
1354 *update_value_pos++ =
1355 ++sync_kernel->fence_sync->next_value;
1357 (*update_address_pos++).ui32Addr =
1358 timeline->kernel->fence_sync->vaddr;
1360 /* Increment the timeline value... */
1361 update_data->sync_data->timeline_update_value =
1362 ++timeline->kernel->fence_sync->next_value;
1364 /* ...and set that to be updated when this kick is completed */
1365 *update_value_pos++ =
1366 update_data->sync_data->timeline_update_value;
1369 /* Reset the fencing enabled flag. If nobody sets this to 1
1370 * until the next fence point is inserted, we will do timeline
1371 * idle detection. */
1372 timeline->fencing_enabled = false;
1374 /* We count the total number of sync points we attach, as it's possible
1375 * some have become complete since the first loop through, or a waiter
1376 * for a foreign point skipped (But they can never become un-complete, so
1377 * it will only ever be the same or less, so the allocated arrays should
1378 * still be sufficiently sized) */
1379 num_used_sync_updates =
1380 update_address_pos - sync_data->update_ufo_addresses;
1381 num_used_sync_checks =
1382 check_address_pos - sync_data->check_ufo_addresses;
1385 sync_data->nr_checks = nr_checks + num_used_sync_checks;
1386 sync_data->nr_updates = nr_updates + num_used_sync_updates;
1387 /* Append original check and update sync values/addresses */
1388 if (update_ufo_addresses)
1389 memcpy(update_address_pos, update_ufo_addresses,
1390 sizeof(PRGXFWIF_UFO_ADDR) * nr_updates);
1392 memcpy(update_value_pos, update_values,
1393 sizeof(u32) * nr_updates);
1395 if (check_ufo_addresses)
1396 memcpy(check_address_pos, check_ufo_addresses,
1397 sizeof(PRGXFWIF_UFO_ADDR) * nr_checks);
1399 memcpy(check_value_pos, check_values,
1400 sizeof(u32) * nr_checks);
1402 *append_sync_data = sync_data;
1406 err_free_append_data:
1407 pvr_sync_free_append_fences_data(sync_data);
/* pvr_sync_get_updates - expose the merged "update" list built by the append
 * step: the count, firmware UFO addresses and values the caller should attach
 * to the GPU kick. The arrays remain owned by @sync_data.
 */
1412 void pvr_sync_get_updates(const struct pvr_sync_append_data *sync_data,
1413 u32 *nr_fences, PRGXFWIF_UFO_ADDR **ufo_addrs, u32 **values)
1415 *nr_fences = sync_data->nr_updates;
1416 *ufo_addrs = sync_data->update_ufo_addresses;
1417 *values = sync_data->update_values;
/* pvr_sync_get_checks - expose the merged "check" list built by the append
 * step: the count, firmware UFO addresses and values to be checked before the
 * GPU kick runs. The arrays remain owned by @sync_data.
 */
1420 void pvr_sync_get_checks(const struct pvr_sync_append_data *sync_data,
1421 u32 *nr_fences, PRGXFWIF_UFO_ADDR **ufo_addrs, u32 **values)
1423 *nr_fences = sync_data->nr_checks;
1424 *ufo_addrs = sync_data->check_ufo_addresses;
1425 *values = sync_data->check_values;
/* pvr_sync_rollback_append_fences - undo the next_value increments that were
 * taken while appending fences to a kick. Called when the kick fails after
 * the append step so the sync prims return to their pre-append state.
 * Safe to call with NULL.
 */
1428 void pvr_sync_rollback_append_fences(
1429 struct pvr_sync_append_data *sync_append_data)
1433 if (!sync_append_data)
1436 for (i = 0; i < sync_append_data->nr_cleaup_syncs; i++) {
1437 struct pvr_sync_native_sync_prim *cleanup_sync =
1438 sync_append_data->cleanup_syncs[i];
1439 /* If this cleanup was called on a partially-created data set
1440 * it's possible to have NULL cleanup sync pointers */
/* Roll back the ++next_value taken when the cleanup update was appended. */
1443 cleanup_sync->next_value--;
1446 if (sync_append_data->update_sync_data) {
1447 struct pvr_sync_data *sync_data =
1448 sync_append_data->update_sync_data->sync_data;
1449 struct pvr_sync_timeline *timeline =
1450 sync_append_data->update_sync_data->timeline;
1451 /* We can get a NULL sync_data if the corresponding
1452 * append failed with a re-used alloc sync */
/* Roll back both the fence and timeline update increments, and
 * re-arm fencing (the append path cleared fencing_enabled). */
1454 sync_data->kernel->fence_sync->next_value--;
1455 timeline->fencing_enabled = true;
1456 timeline->kernel->fence_sync->next_value--;
/* pvr_sync_free_append_fences_data - release everything owned by an append
 * data set: fence references taken with sync_fence_fdget(), the alloc-sync
 * file reference, the four UFO/value arrays, the cleanup-sync list, and the
 * struct itself. Tolerates partially-constructed data and NULL input.
 */
1461 void pvr_sync_free_append_fences_data(
1462 struct pvr_sync_append_data *sync_append_data)
1466 if (!sync_append_data)
1469 for (i = 0; i < sync_append_data->nr_fences; i++) {
1470 struct sync_fence *fence = sync_append_data->fences[i];
1471 /* If this cleanup was called on a partially-created data set
1472 * it's possible to have NULL sync data pointers */
1475 sync_fence_put(fence);
/* Drop the file reference taken on the alloc-sync fd, if one was attached. */
1478 if (sync_append_data->update_sync_data)
1479 fput(sync_append_data->update_sync_data->file);
/* kfree(NULL) is a no-op, so unallocated arrays are handled implicitly. */
1481 kfree(sync_append_data->update_ufo_addresses);
1482 kfree(sync_append_data->update_values);
1483 kfree(sync_append_data->check_ufo_addresses);
1484 kfree(sync_append_data->check_values);
1485 kfree(sync_append_data->cleanup_syncs);
1486 kfree(sync_append_data);
/* pvr_sync_nohw_complete_fences - software-complete all syncs attached to a
 * kick. Presumably used on no-hardware builds where the GPU never signals
 * them — TODO confirm against callers. Safe to call with NULL.
 */
1489 void pvr_sync_nohw_complete_fences(
1490 struct pvr_sync_append_data *sync_append_data)
1494 if (!sync_append_data)
1497 for (i = 0; i < sync_append_data->nr_cleaup_syncs; i++) {
1498 struct pvr_sync_native_sync_prim *cleanup_sync =
1499 sync_append_data->cleanup_syncs[i];
1504 complete_sync(cleanup_sync);
1506 if (sync_append_data->update_sync_data) {
1507 /* Skip any invalid update syncs (should only be hit on error) */
1508 if (sync_append_data->update_sync_data->sync_data) {
1509 struct pvr_sync_data *sync_data =
1510 sync_append_data->update_sync_data->sync_data;
1511 struct pvr_sync_timeline *timeline =
1512 sync_append_data->update_sync_data->timeline;
/* Complete the fence sync and advance the timeline sync to the
 * value recorded for this kick at append time. */
1513 complete_sync(sync_data->kernel->fence_sync);
1514 set_sync_value(timeline->kernel->fence_sync,
1515 sync_data->timeline_update_value);
1520 /* ioctl and fops handling */
/* pvr_sync_open - file_operations .open handler for the pvr_sync device.
 * Creates a new pvr timeline (a sync_timeline subclass, named after the
 * opening task), allocates its kernel sync prim from the pool, registers it
 * on the global timeline list, and stashes the wrapper in file->private_data.
 * Unwinds in reverse order on failure via goto labels.
 */
1522 static int pvr_sync_open(struct inode *inode, struct file *file)
1524 struct pvr_sync_timeline_wrapper *timeline_wrapper;
1525 struct pvr_sync_timeline *timeline;
1526 char task_comm[TASK_COMM_LEN];
1527 enum PVRSRV_ERROR error;
/* Name the timeline after the current task for debuggability. */
1530 get_task_comm(task_comm, current);
1532 timeline_wrapper = (struct pvr_sync_timeline_wrapper *)
1533 sync_timeline_create(&pvr_sync_timeline_ops,
1534 sizeof(struct pvr_sync_timeline_wrapper), task_comm);
1535 if (!timeline_wrapper) {
1536 pr_err("pvr_sync: %s: sync_timeline_create failed", __func__);
1540 timeline = kmalloc(sizeof(struct pvr_sync_timeline), GFP_KERNEL);
1542 pr_err("pvr_sync: %s: Out of memory", __func__);
1543 goto err_free_timeline_wrapper;
1546 timeline->kernel = kzalloc(sizeof(struct pvr_sync_kernel_pair),
1548 if (!timeline->kernel) {
1549 pr_err("pvr_sync: %s: Out of memory", __func__);
1550 goto err_free_timeline;
/* sync_pool_get() requires the services bridge lock. */
1553 OSAcquireBridgeLock();
1554 error = sync_pool_get(&timeline->kernel->fence_sync,
1555 task_comm, SYNC_TL_TYPE);
1556 OSReleaseBridgeLock();
1558 if (error != PVRSRV_OK) {
1559 pr_err("pvr_sync: %s: Failed to allocate sync prim (%s)",
1560 __func__, PVRSRVGetErrorStringKM(error));
1561 goto err_free_timeline_kernel;
1564 timeline_wrapper->timeline = timeline;
1566 timeline->obj = &timeline_wrapper->obj;
1567 timeline->fencing_enabled = true;
/* Refcount starts at 1; dropped on release / destroy paths. */
1568 kref_init(&timeline->kref);
/* Publish on the global list used by pvr_sync_update_all_timelines(). */
1570 mutex_lock(&timeline_list_mutex);
1571 list_add_tail(&timeline->list, &timeline_list);
1572 mutex_unlock(&timeline_list_mutex);
1574 DPF("%s: # %s", __func__, debug_info_timeline(timeline));
1576 file->private_data = timeline_wrapper;
/* --- error unwind --- */
1581 err_free_timeline_kernel:
1582 kfree(timeline->kernel);
1586 /* Use a NULL timeline to detect this partially-setup timeline in the
1587 * timeline release function (called by sync_timeline_destroy) and
1588 * handle it appropriately.
1590 timeline_wrapper->timeline = NULL;
1591 err_free_timeline_wrapper:
1592 sync_timeline_destroy(&timeline_wrapper->obj);
/* pvr_sync_close - file_operations .release handler. Destroys the timeline
 * stored in file->private_data. Works for both pvr timelines and (after a
 * FORCE_SW_ONLY conversion) sw_sync timelines, since both are sync_timelines.
 */
1596 static int pvr_sync_close(struct inode *inode, struct file *file)
1598 struct sync_timeline *obj = file->private_data;
1600 if (is_pvr_timeline(obj)) {
1601 DPF("%s: # %s", __func__,
1602 debug_info_timeline(get_timeline(obj)));
1605 sync_timeline_destroy(obj);
/* pvr_sync_alloc_release - .release handler for the anonymous alloc-sync
 * file (pvr_alloc_sync_fops). Drops the timeline reference taken at alloc
 * time, releases the sync_data if the fd was never turned into a fence, and
 * frees the alloc data itself.
 */
1609 static int pvr_sync_alloc_release(struct inode *inode, struct file *file)
1611 struct pvr_sync_alloc_data *alloc_sync_data = file->private_data;
1613 /* Drop alloc sync's reference to the raw timeline structure. We need
1614 * to hold the timeline list lock here too, so we don't race with
1615 * pvr_sync_update_all_timelines().
1617 if (kref_put_mutex(&alloc_sync_data->timeline->kref,
1618 pvr_sync_destroy_timeline_locked,
1619 &timeline_list_mutex)) {
/* kref_put_mutex() returns with the mutex held when the object died. */
1620 mutex_unlock(&timeline_list_mutex);
1623 /* Normally ->sync_data is NULL unless the fd was never used */
1624 if (alloc_sync_data->sync_data) {
1625 kref_put(&alloc_sync_data->sync_data->kref,
1626 pvr_sync_free_sync_data);
1629 kfree(alloc_sync_data);
/* File operations for the anonymous alloc-sync inode created by
 * pvr_sync_ioctl_alloc_fence(); only .release is needed. The ops pointer
 * also serves as a type tag in pvr_sync_alloc_fence_fdget(). */
1633 static const struct file_operations pvr_alloc_sync_fops = {
1634 .release = pvr_sync_alloc_release,
/* pvr_sync_alloc_fence_fdget - resolve an fd to its pvr_sync_alloc_data,
 * validating it really is an alloc-sync fd by comparing f_op. Takes a file
 * reference via fget(); on success the caller is responsible for fput()
 * (visible in the error paths of the ioctl handlers). Returns NULL on a
 * bad fd or wrong file type.
 */
1637 static struct pvr_sync_alloc_data *pvr_sync_alloc_fence_fdget(int fd)
1639 struct file *file = fget(fd);
1643 if (file->f_op != &pvr_alloc_sync_fops)
1645 return file->private_data;
/* pvr_sync_ioctl_create_fence - PVR_SYNC_IOC_CREATE_FENCE handler.
 * Converts a previously allocated alloc-sync fd (data.iAllocFenceFd) into a
 * real sync fence: takes ownership of its sync_data, wraps it in a new
 * sync_pt on this timeline, builds a fence, installs it on a fresh fd and
 * copies the result back to userspace. Returns -EFAULT on user-memory
 * errors; other failures unwind the fd/fence/pt as appropriate.
 */
1651 static long pvr_sync_ioctl_create_fence(struct pvr_sync_timeline *timeline,
1652 void __user *user_data)
1654 struct pvr_sync_create_fence_ioctl_data data;
1655 struct pvr_sync_alloc_data *alloc_sync_data;
1656 int err = -EFAULT, fd = get_unused_fd();
1657 struct pvr_sync_data *sync_data;
1658 struct pvr_sync_pt *pvr_pt;
1659 struct sync_fence *fence;
1662 pr_err("pvr_sync: %s: Failed to find unused fd (%d)",
1667 if (!access_ok(VERIFY_READ, user_data, sizeof(data)))
1670 if (copy_from_user(&data, user_data, sizeof(data)))
1673 alloc_sync_data = pvr_sync_alloc_fence_fdget(data.iAllocFenceFd);
1674 if (!alloc_sync_data) {
1675 pr_err("pvr_sync: %s: Invalid alloc sync fd (%d)\n",
1676 __func__, data.iAllocFenceFd);
/* The alloc sync must belong to the timeline the ioctl was issued on. */
1680 if (alloc_sync_data->timeline != timeline) {
1681 pr_err("pvr_sync: %s: Trying to create sync from alloc of timeline %p in timeline %p\n",
1682 __func__, alloc_sync_data->timeline, timeline);
1683 fput(alloc_sync_data->file);
1687 /* Take ownership of the sync_data */
1688 sync_data = alloc_sync_data->sync_data;
1689 alloc_sync_data->sync_data = NULL;
1691 pvr_pt = (struct pvr_sync_pt *)
1692 sync_pt_create(timeline->obj, sizeof(struct pvr_sync_pt));
1694 fput(alloc_sync_data->file);
1697 pr_err("pvr_sync: %s: Failed to create sync pt", __func__);
1698 kref_put(&sync_data->kref, pvr_sync_free_sync_data);
1703 /* Point owns the sync data now. Let sync_pt_free() deal with it. */
1704 pvr_pt->sync_data = sync_data;
/* Defensively NUL-terminate the user-supplied fence name. */
1706 data.szName[sizeof(data.szName) - 1] = '\0';
1708 DPF("%s: %d('%s') # %s", __func__,
1709 fd, data.szName, debug_info_timeline(timeline));
1711 fence = sync_fence_create(data.szName, &pvr_pt->pt);
1713 pr_err("pvr_sync: %s: Failed to create a fence (%d)",
1715 sync_pt_free(&pvr_pt->pt);
1722 if (!access_ok(VERIFY_WRITE, user_data, sizeof(data)))
1725 if (copy_to_user(user_data, &data, sizeof(data)))
/* Publish the fence on the fd only after the copy-out succeeded. */
1728 sync_fence_install(fence, fd);
1734 sync_fence_put(fence);
/* pvr_sync_ioctl_alloc_fence - PVR_SYNC_IOC_ALLOC_FENCE handler.
 * Pre-allocates the sync data for a future fence: creates a
 * pvr_sync_alloc_data backed by an anonymous inode (pvr_alloc_sync_fops),
 * pins the timeline with a kref, reports whether the timeline is currently
 * idle (bTimelineIdle), and installs the alloc-sync file on a new fd.
 * The fd is later consumed by PVR_SYNC_IOC_CREATE_FENCE.
 */
1740 static long pvr_sync_ioctl_alloc_fence(struct pvr_sync_timeline *timeline,
1741 void __user *user_data)
1743 struct pvr_sync_alloc_fence_ioctl_data data;
1744 struct pvr_sync_alloc_data *alloc_sync_data;
1745 int err = -EFAULT, fd = get_unused_fd();
1746 struct pvr_sync_data *sync_data;
1750 pr_err("pvr_sync: %s: Failed to find unused fd (%d)",
1755 if (!access_ok(VERIFY_READ, user_data, sizeof(data)))
1758 if (!access_ok(VERIFY_WRITE, user_data, sizeof(data)))
1762 kzalloc(sizeof(struct pvr_sync_alloc_data), GFP_KERNEL);
1763 if (!alloc_sync_data) {
1765 pr_err("pvr_sync: %s: Failed to alloc sync data\n", __func__);
1769 sync_data = pvr_sync_create_sync_data(timeline->obj);
1772 pr_err("pvr_sync: %s: Failed to create sync data\n", __func__);
1773 goto err_free_alloc_data;
1776 file = anon_inode_getfile("pvr_sync_alloc", &pvr_alloc_sync_fops,
1777 alloc_sync_data, 0);
1780 pr_err("pvr_sync: %s: Failed to create alloc inode\n",
1785 alloc_sync_data->file = file;
1786 alloc_sync_data->sync_data = sync_data;
1788 /* We pass the raw timeline pointer through to the alloc sync, but
1789 * to make sure the timeline data doesn't go away if the timeline
1790 * is destroyed, we increment the timeline reference count.
1792 alloc_sync_data->timeline = timeline;
1793 kref_get(&timeline->kref);
/* Idle = all prims signalled and no fence inserted since the flag was
 * last reset; userspace uses this for timeline idle detection. */
1795 data.bTimelineIdle = is_sync_met(timeline->kernel->fence_sync) &&
1796 timeline->fencing_enabled == false;
1800 if (!access_ok(VERIFY_WRITE, user_data, sizeof(data)))
1801 goto err_timeline_kref_put;
1803 if (copy_to_user(user_data, &data, sizeof(data)))
1804 goto err_timeline_kref_put;
1806 fd_install(fd, file);
/* --- error unwind --- */
1812 err_timeline_kref_put:
1813 if (kref_put_mutex(&timeline->kref,
1814 pvr_sync_destroy_timeline_locked,
1815 &timeline_list_mutex)) {
1816 mutex_unlock(&timeline_list_mutex);
1819 kref_put(&sync_data->kref, pvr_sync_free_sync_data);
1820 err_free_alloc_data:
1821 kfree(alloc_sync_data);
/* pvr_sync_ioctl_enable_fencing - PVR_SYNC_IOC_ENABLE_FENCING handler.
 * Copies a flag from userspace and sets timeline->fencing_enabled, which
 * feeds the idle detection reported by the alloc-fence ioctl.
 */
1827 static long pvr_sync_ioctl_enable_fencing(struct pvr_sync_timeline *timeline,
1828 void __user *user_data)
1830 struct pvr_sync_enable_fencing_ioctl_data data;
1833 if (!access_ok(VERIFY_READ, user_data, sizeof(data)))
1836 if (copy_from_user(&data, user_data, sizeof(data)))
1839 timeline->fencing_enabled = data.bFencingEnabled;
/* pvr_sync_ioctl_rename - PVR_SYNC_IOC_RENAME handler. Renames the
 * underlying sync_timeline object with a user-supplied, forcibly
 * NUL-terminated name (bounded copy via strlcpy).
 */
1845 static long pvr_sync_ioctl_rename(struct pvr_sync_timeline *timeline,
1846 void __user *user_data)
1849 struct pvr_sync_rename_ioctl_data data;
1851 if (!access_ok(VERIFY_READ, user_data, sizeof(data))) {
1856 if (copy_from_user(&data, user_data, sizeof(data))) {
1861 data.szName[sizeof(data.szName) - 1] = '\0';
1862 strlcpy(timeline->obj->name, data.szName, sizeof(timeline->obj->name));
1868 #ifndef CONFIG_SW_SYNC_USER
/* pvr_sync_ioctl_force_sw_only - PVR_SYNC_IOC_FORCE_SW_ONLY handler
 * (compiled only when the kernel's own SW_SYNC_USER support is absent).
 * Converts an empty GPU timeline into a sw_sync timeline in place by
 * swapping file->private_data, so subsequent ioctls take the sw path in
 * pvr_sync_ioctl(). Refuses if any point was ever taken on the timeline.
 */
1870 static long pvr_sync_ioctl_force_sw_only(struct pvr_sync_timeline *timeline,
1871 void **private_data)
1873 struct sw_sync_timeline *sw_sync_timeline;
1875 /* We can only convert an empty GPU timeline */
1876 if (timeline->kernel->fence_sync->next_value)
1879 /* Create a sw_sync timeline with the old GPU timeline's name */
1880 sw_sync_timeline = sw_sync_timeline_create(timeline->obj->name);
1881 if (!sw_sync_timeline)
1884 /* Destroy the old GPU timeline and update the struct file */
1885 DPF("%s: # %s", __func__, debug_info_timeline(timeline));
1887 sync_timeline_destroy(timeline->obj);
1888 *private_data = sw_sync_timeline;
/* pvr_sync_ioctl_sw_create_fence - SW_SYNC_IOC_CREATE_FENCE handler for
 * timelines converted to sw_sync. Creates a sw_sync point at data.value,
 * wraps it in a fence, installs it on a new fd and copies the result back.
 */
1892 static long pvr_sync_ioctl_sw_create_fence(struct sw_sync_timeline *timeline,
1893 void __user *user_data)
1895 struct sw_sync_create_fence_data data;
1896 struct sync_fence *fence;
1897 int fd = get_unused_fd();
1898 struct sync_pt *sync_pt;
1902 pr_err("pvr_sync: %s: Failed to find unused fd (%d)",
1907 if (copy_from_user(&data, user_data, sizeof(data)))
1910 sync_pt = sw_sync_pt_create(timeline, data.value);
1912 pr_err("pvr_sync: %s: Failed to create a sync point (%d)",
/* Defensively NUL-terminate the user-supplied fence name. */
1918 data.name[sizeof(data.name) - 1] = '\0';
1919 fence = sync_fence_create(data.name, sync_pt);
1921 pr_err("pvr_sync: %s: Failed to create a fence (%d)",
/* Fence creation failed: the point is not owned by a fence, free it. */
1923 sync_pt_free(sync_pt);
1930 if (copy_to_user(user_data, &data, sizeof(data)))
1933 sync_fence_install(fence, fd);
1938 sync_fence_put(fence);
/* pvr_sync_ioctl_sw_inc - SW_SYNC_IOC_INC handler: advances a sw_sync
 * timeline by a user-supplied amount, signalling any points that become met.
 */
1944 static long pvr_sync_ioctl_sw_inc(struct sw_sync_timeline *timeline,
1945 void __user *user_data)
1949 if (copy_from_user(&value, user_data, sizeof(value)))
1952 sw_sync_timeline_inc(timeline, value);
1956 #endif /* !CONFIG_SW_SYNC_USER */
/* pvr_sync_ioctl - unlocked/compat ioctl dispatcher. private_data holds
 * either a pvr timeline or (after FORCE_SW_ONLY) a sw_sync timeline;
 * is_pvr_timeline() picks the command table to use.
 */
1959 pvr_sync_ioctl(struct file *file, unsigned int cmd, unsigned long __user arg)
1961 struct sync_timeline *obj = file->private_data;
1962 void __user *user_data = (void __user *)arg;
1965 if (is_pvr_timeline(obj)) {
1966 struct pvr_sync_timeline *pvr = get_timeline(obj);
1969 case PVR_SYNC_IOC_CREATE_FENCE:
1970 err = pvr_sync_ioctl_create_fence(pvr, user_data);
1972 case PVR_SYNC_IOC_ENABLE_FENCING:
1973 err = pvr_sync_ioctl_enable_fencing(pvr, user_data);
1975 case PVR_SYNC_IOC_ALLOC_FENCE:
1976 err = pvr_sync_ioctl_alloc_fence(pvr, user_data);
1978 case PVR_SYNC_IOC_RENAME:
1979 err = pvr_sync_ioctl_rename(pvr, user_data);
1981 #ifndef CONFIG_SW_SYNC_USER
1982 case PVR_SYNC_IOC_FORCE_SW_ONLY:
/* May replace file->private_data with a sw_sync timeline. */
1983 err = pvr_sync_ioctl_force_sw_only(pvr,
1984 &file->private_data);
1986 #endif /* !CONFIG_SW_SYNC_USER */
1991 #ifndef CONFIG_SW_SYNC_USER
1992 struct sw_sync_timeline *sw = file->private_data;
1995 case SW_SYNC_IOC_CREATE_FENCE:
1996 err = pvr_sync_ioctl_sw_create_fence(sw, user_data);
1998 case SW_SYNC_IOC_INC:
1999 err = pvr_sync_ioctl_sw_inc(sw, user_data);
2004 #endif /* !CONFIG_SW_SYNC_USER */
/* Work item run on the check_status workqueue: re-evaluates device status
 * because a completed software sync operation may have unblocked the GPU. */
2011 pvr_sync_check_status_work_queue_function(struct work_struct *data)
2013 /* A completed SW operation may un-block the GPU */
2014 PVRSRVCheckStatus(NULL);
2017 /* Returns true if the freelist still has entries, else false if empty */
2019 pvr_sync_clean_freelist(void)
2021 struct pvr_sync_kernel_pair *kernel, *k;
2022 struct pvr_sync_fence *sync_fence, *f;
2023 LIST_HEAD(unlocked_free_list);
2024 unsigned long flags;
2025 bool freelist_empty;
2027 /* We can't call PVRSRVServerSyncFreeKM directly in this loop because
2028 * that will take the mmap mutex. We can't take mutexes while we have
2029 * this list locked with a spinlock. So move all the items we want to
2030 * free to another, local list (no locking required) and process it
2031 * in a second loop. */
2033 spin_lock_irqsave(&sync_prim_free_list_spinlock, flags);
2034 list_for_each_entry_safe(kernel, k, &sync_prim_free_list, list) {
2035 /* Check if this sync is not used anymore. */
/* Skip entries whose fence or cleanup sync the HW has not met yet. */
2036 if (!is_sync_met(kernel->fence_sync) ||
2037 (kernel->cleanup_sync &&
2038 !is_sync_met(kernel->cleanup_sync))) {
2042 /* Remove the entry from the free list. */
2043 list_move_tail(&kernel->list, &unlocked_free_list);
2046 /* Wait and loop if there are still syncs on the free list (IE
2047 * are still in use by the HW) */
2048 freelist_empty = list_empty(&sync_prim_free_list);
2050 spin_unlock_irqrestore(&sync_prim_free_list_spinlock, flags);
/* sync_pool_put() needs the services bridge lock; safe now that we
 * operate on the private, unlocked list. */
2052 OSAcquireBridgeLock();
2054 list_for_each_entry_safe(kernel, k, &unlocked_free_list, list) {
2055 list_del(&kernel->list);
2057 sync_pool_put(kernel->fence_sync);
2058 if (kernel->cleanup_sync)
2059 sync_pool_put(kernel->cleanup_sync);
2063 OSReleaseBridgeLock();
2065 /* sync_fence_put() must be called from process/WQ context
2066 * because it uses fput(), which is not allowed to be called
2067 * from interrupt context in kernels <3.6.
2069 INIT_LIST_HEAD(&unlocked_free_list);
/* Same drain pattern for deferred fence puts: move under spinlock,
 * release outside it. */
2071 spin_lock_irqsave(&sync_fence_put_list_spinlock, flags);
2072 list_for_each_entry_safe(sync_fence, f, &sync_fence_put_list, list) {
2073 list_move_tail(&sync_fence->list, &unlocked_free_list);
2075 spin_unlock_irqrestore(&sync_fence_put_list_spinlock, flags);
2077 list_for_each_entry_safe(sync_fence, f, &unlocked_free_list, list) {
2078 list_del(&sync_fence->list);
2079 sync_fence_put(sync_fence->fence);
2083 return !freelist_empty;
/* Work item run on the defer_free workqueue: repeatedly drains the sync
 * freelist, sleeping on the global event object between passes until no
 * entries remain in use by the HW.
 */
2087 pvr_sync_defer_free_work_queue_function(struct work_struct *data)
2089 enum PVRSRV_ERROR error = PVRSRV_OK;
2092 error = OSEventObjectOpen(pvr_sync_data.event_object_handle,
2094 if (error != PVRSRV_OK) {
2095 pr_err("pvr_sync: %s: Error opening event object (%s)\n",
2096 __func__, PVRSRVGetErrorStringKM(error));
2101 while (pvr_sync_clean_freelist()) {
2103 error = OSEventObjectWait(event_object);
2108 case PVRSRV_ERROR_TIMEOUT:
2109 /* Timeout is normal behaviour */
2112 pr_err("pvr_sync: %s: Error waiting for event object (%s)\n",
2113 __func__, PVRSRVGetErrorStringKM(error));
2117 error = OSEventObjectClose(event_object);
2118 if (error != PVRSRV_OK) {
2119 pr_err("pvr_sync: %s: Error closing event object (%s)\n",
2120 __func__, PVRSRVGetErrorStringKM(error));
/* File operations for the pvr_sync misc device. The same handler serves
 * both unlocked and compat ioctl paths. */
2124 static const struct file_operations pvr_sync_fops = {
2125 .owner = THIS_MODULE,
2126 .open = pvr_sync_open,
2127 .release = pvr_sync_close,
2128 .unlocked_ioctl = pvr_sync_ioctl,
2129 .compat_ioctl = pvr_sync_ioctl,
/* Misc character device registration for /dev/<PVRSYNC_MODNAME> with a
 * dynamically assigned minor number. */
2132 static struct miscdevice pvr_sync_device = {
2133 .minor = MISC_DYNAMIC_MINOR,
2134 .name = PVRSYNC_MODNAME,
2135 .fops = &pvr_sync_fops,
/* pvr_sync_update_all_timelines - command-complete callback: signals every
 * registered pvr timeline so sync points met on the GPU are echoed into the
 * Android sync framework. Uses a two-pass scheme under timeline_list_mutex:
 * first pin each timeline with kref_get_unless_zero(), then signal and
 * unpin, so it cannot race timeline destruction.
 */
2139 void pvr_sync_update_all_timelines(void *command_complete_handle)
2141 struct pvr_sync_timeline *timeline, *n;
2143 mutex_lock(&timeline_list_mutex);
2145 list_for_each_entry(timeline, &timeline_list, list) {
2146 /* If a timeline is destroyed via pvr_sync_release_timeline()
2147 * in parallel with a call to pvr_sync_update_all_timelines(),
2148 * the timeline_list_mutex will block destruction of the
2149 * 'timeline' pointer. Use kref_get_unless_zero() to detect
2150 * and handle this race. Skip the timeline if it's being
2151 * destroyed, blocked only on the timeline_list_mutex.
2154 kref_get_unless_zero(&timeline->kref) ? true : false;
2157 list_for_each_entry_safe(timeline, n, &timeline_list, list) {
2158 /* We know timeline is valid at this point because we're
2159 * holding the list lock (so pvr_sync_destroy_timeline() has
2162 void *obj = timeline->obj;
2164 /* If we're racing with pvr_sync_release_timeline(), ignore */
2165 if (!timeline->valid)
2168 /* If syncs have signaled on the GPU, echo this in pvr_sync.
2170 * At this point we know the timeline is valid, but obj might
2171 * have raced and been set to NULL. It's only important that
2172 * we use NULL / non-NULL consistently with the if() and call
2173 * to sync_timeline_signal() -- the timeline->obj can't be
2174 * freed (pvr_sync_release_timeline() will be stuck waiting
2175 * for the timeline_list_mutex) but it might have been made
2176 * invalid by the base sync driver, in which case this call
2177 * will bounce harmlessly.
2180 sync_timeline_signal(obj);
2182 /* We're already holding the timeline_list_mutex */
2183 kref_put(&timeline->kref, pvr_sync_destroy_timeline_locked);
2186 mutex_unlock(&timeline_list_mutex);
/* pvr_sync_init - module-level initialisation. In order: acquire the RGX
 * device data, acquire the global event object, create a sync prim context
 * (under the bridge lock), create the defer-free and check-status freezable
 * workqueues, register the command-complete and debug-request notifiers,
 * then register the misc device. On failure, unwinds everything already
 * set up in reverse order via the goto ladder.
 */
2189 enum PVRSRV_ERROR pvr_sync_init(void)
2191 enum PVRSRV_ERROR error;
2194 DPF("%s", __func__);
2196 atomic_set(&pvr_sync_data.sync_id, 0);
2198 error = PVRSRVAcquireDeviceDataKM(0, PVRSRV_DEVICE_TYPE_RGX,
2199 &pvr_sync_data.device_cookie);
2200 if (error != PVRSRV_OK) {
2201 pr_err("pvr_sync: %s: Failed to initialise services (%s)",
2202 __func__, PVRSRVGetErrorStringKM(error));
2206 error = AcquireGlobalEventObjectServer(
2207 &pvr_sync_data.event_object_handle);
2208 if (error != PVRSRV_OK) {
2209 pr_err("pvr_sync: %s: Failed to acquire global event object (%s)",
2210 __func__, PVRSRVGetErrorStringKM(error));
2211 goto err_release_device_data;
/* SyncPrimContextCreate requires the services bridge lock. */
2214 OSAcquireBridgeLock();
2216 error = SyncPrimContextCreate(0,
2217 pvr_sync_data.device_cookie,
2218 &pvr_sync_data.sync_prim_context);
2219 if (error != PVRSRV_OK) {
2220 pr_err("pvr_sync: %s: Failed to create sync prim context (%s)",
2221 __func__, PVRSRVGetErrorStringKM(error));
2222 OSReleaseBridgeLock();
2223 goto err_release_event_object;
2226 OSReleaseBridgeLock();
/* Freezable workqueues so deferred work does not fight suspend. */
2228 pvr_sync_data.defer_free_wq =
2229 create_freezable_workqueue("pvr_sync_defer_free_workqueue");
2230 if (!pvr_sync_data.defer_free_wq) {
2231 pr_err("pvr_sync: %s: Failed to create pvr_sync defer_free workqueue",
2233 goto err_free_sync_context;
2236 INIT_WORK(&pvr_sync_data.defer_free_work,
2237 pvr_sync_defer_free_work_queue_function);
2239 pvr_sync_data.check_status_wq =
2240 create_freezable_workqueue("pvr_sync_check_status_workqueue");
2241 if (!pvr_sync_data.check_status_wq) {
2242 pr_err("pvr_sync: %s: Failed to create pvr_sync check_status workqueue",
2244 goto err_destroy_defer_free_wq;
2247 INIT_WORK(&pvr_sync_data.check_status_work,
2248 pvr_sync_check_status_work_queue_function);
/* Have pvr_sync_update_all_timelines run on every command complete. */
2249 error = PVRSRVRegisterCmdCompleteNotify(
2250 &pvr_sync_data.command_complete_handle,
2251 &pvr_sync_update_all_timelines,
2252 &pvr_sync_data.device_cookie);
2253 if (error != PVRSRV_OK) {
2254 pr_err("pvr_sync: %s: Failed to register MISR notification (%s)",
2255 __func__, PVRSRVGetErrorStringKM(error));
2256 goto err_destroy_status_wq;
2259 error = PVRSRVRegisterDbgRequestNotify(
2260 &pvr_sync_data.debug_notify_handle,
2261 pvr_sync_debug_request,
2262 DEBUG_REQUEST_ANDROIDSYNC,
2264 if (error != PVRSRV_OK) {
2265 pr_err("pvr_sync: %s: Failed to register debug notifier (%s)",
2266 __func__, PVRSRVGetErrorStringKM(error));
2267 goto err_unregister_cmd_complete;
2270 err = misc_register(&pvr_sync_device);
2272 pr_err("pvr_sync: %s: Failed to register pvr_sync device (%d)",
2274 error = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
2275 goto err_unregister_dbg;
/* --- error unwind (reverse order of setup) --- */
2282 PVRSRVUnregisterDbgRequestNotify(pvr_sync_data.debug_notify_handle);
2283 err_unregister_cmd_complete:
2284 PVRSRVUnregisterCmdCompleteNotify(
2285 pvr_sync_data.command_complete_handle);
2286 err_destroy_status_wq:
2287 destroy_workqueue(pvr_sync_data.check_status_wq);
2288 err_destroy_defer_free_wq:
2289 destroy_workqueue(pvr_sync_data.defer_free_wq);
2290 err_free_sync_context:
2291 OSAcquireBridgeLock();
2292 SyncPrimContextDestroy(pvr_sync_data.sync_prim_context);
2293 OSReleaseBridgeLock();
2294 err_release_event_object:
2295 ReleaseGlobalEventObjectServer(pvr_sync_data.event_object_handle);
2296 err_release_device_data:
2297 PVRSRVReleaseDeviceDataKM(pvr_sync_data.device_cookie);
/* pvr_sync_deinit - module teardown; mirror of pvr_sync_init() in reverse:
 * deregister the device and notifiers, drain and destroy the workqueues
 * (which frees any remaining deferred syncs), destroy the sync prim context
 * under the bridge lock, then release the event object and device data.
 */
2303 void pvr_sync_deinit(void)
2305 DPF("%s", __func__);
2307 misc_deregister(&pvr_sync_device);
2309 PVRSRVUnregisterDbgRequestNotify(pvr_sync_data.debug_notify_handle);
2311 PVRSRVUnregisterCmdCompleteNotify(
2312 pvr_sync_data.command_complete_handle);
2314 /* This will drain the workqueue, so we guarantee that all deferred
2315 * syncs are free'd before returning */
2316 destroy_workqueue(pvr_sync_data.defer_free_wq);
2317 destroy_workqueue(pvr_sync_data.check_status_wq);
2319 OSAcquireBridgeLock();
2323 SyncPrimContextDestroy(pvr_sync_data.sync_prim_context);
2325 OSReleaseBridgeLock();
2327 ReleaseGlobalEventObjectServer(pvr_sync_data.event_object_handle);
2329 PVRSRVReleaseDeviceDataKM(pvr_sync_data.device_cookie);