RK3368 GPU version: Rogue M 1.31+
firefly-linux-kernel-4.4.55.git: drivers/gpu/rogue_m/kernel/drivers/staging/imgtec/pvr_sync.c
1 /*************************************************************************/ /*!
2 @File           pvr_sync.c
3 @Title          Kernel driver for Android's sync mechanism
4 @Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
5 @License        Dual MIT/GPLv2
6
7 The contents of this file are subject to the MIT license as set out below.
8
9 Permission is hereby granted, free of charge, to any person obtaining a copy
10 of this software and associated documentation files (the "Software"), to deal
11 in the Software without restriction, including without limitation the rights
12 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
13 copies of the Software, and to permit persons to whom the Software is
14 furnished to do so, subject to the following conditions:
15
16 The above copyright notice and this permission notice shall be included in
17 all copies or substantial portions of the Software.
18
19 Alternatively, the contents of this file may be used under the terms of
20 the GNU General Public License Version 2 ("GPL") in which case the provisions
21 of GPL are applicable instead of those above.
22
23 If you wish to allow use of your version of this file only under the terms of
24 GPL, and not to allow others to use your version of this file under the terms
25 of the MIT license, indicate your decision by deleting the provisions above
26 and replace them with the notice and other provisions required by GPL as set
27 out in the file called "GPL-COPYING" included in this distribution. If you do
28 not delete the provisions above, a recipient may use your version of this file
29 under the terms of either the MIT license or GPL.
30
31 This License is also included in this distribution in the file called
32 "MIT-COPYING".
33
34 EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
35 PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
36 BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
37 PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
38 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
39 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
40 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
41 */ /**************************************************************************/
42 /* vi: set ts=8: */
43
44 #include "pvr_sync.h"
45 #include "pvr_fd_sync_kernel.h"
46 #include "services_kernel_client.h"
47
48 #include <linux/slab.h>
49 #include <linux/file.h>
50 #include <linux/module.h>
51 #include <linux/uaccess.h>
52 #include <linux/version.h>
53 #include <linux/syscalls.h>
54 #include <linux/miscdevice.h>
55 #include <linux/anon_inodes.h>
56
57 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
58 #include <linux/sync.h>
59 #ifndef CONFIG_SW_SYNC_USER
60 #include <linux/sw_sync.h>
61 #endif
62 #else
63 #include <../drivers/staging/android/sync.h>
64 #ifndef CONFIG_SW_SYNC_USER
65 #include <../drivers/staging/android/sw_sync.h>
66 #endif
67 #endif
68
69 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
70
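/* Fallback for kernels earlier than 3.6, which lack kref_put_mutex(): the
 * mutex is taken only on the final put, and (as in the mainline helper) the
 * release() callback is invoked with the mutex still held. */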
71 static inline int kref_put_mutex(struct kref *kref,
72                                  void (*release)(struct kref *kref),
73                                  struct mutex *lock)
74 {
75         WARN_ON(release == NULL);
76         if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
77                 mutex_lock(lock);
78                 if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
79                         mutex_unlock(lock);
80                         return 0;
81                 }
82                 release(kref);
83                 return 1;
84         }
85         return 0;
86 }
87
88 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) */
89
90 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
91
92 static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
93 {
94         return pt->parent;
95 }
96
97 static inline int sync_pt_get_status(struct sync_pt *pt)
98 {
99         return pt->status;
100 }
101
102 #define for_each_sync_pt(s, f, c) \
103         (void)c; \
104         list_for_each_entry((s), &(f)->pt_list_head, pt_list)
105
106 #else /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) */
107
108 static inline int sync_pt_get_status(struct sync_pt *pt)
109 {
110         /* No error state for raw dma-buf fences */
111         return fence_is_signaled(&pt->base) ? 1 : 0;
112 }
113
114 /* Note: evaluate cbs[c] inside the loop condition so that cbs[] is never
    * read with c == num_fences (the original post-increment form read one
    * entry past the end of cbs[] on the final iteration). */
    #define for_each_sync_pt(s, f, c) \
115         for ((c) = 0; \
116              (c) < (f)->num_fences && \
117              ((s) = (struct sync_pt *)(f)->cbs[c].sync_pt, 1); \
                 (c)++)
118
119 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) */
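/* Summary of the compatibility layer above: before 3.18 a sync_pt carries its
 * own parent and status fields, and a fence links its points through
 * pt_list_head; from 3.18 onwards a sync_pt wraps a struct fence, status comes
 * from fence_is_signaled(), and a fence's points are reached via its cbs[]
 * array. */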
120
121 /* #define DEBUG_OUTPUT 1 */
122
123 #ifdef DEBUG_OUTPUT
124 #define DPF(fmt, ...) pr_err("pvr_sync: " fmt "\n", __VA_ARGS__)
125 #else
126 #define DPF(fmt, ...) do {} while (0)
127 #endif
128
129 #define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, fmt, ...) \
130         do { \
131                 if (pfnDumpDebugPrintf) { \
132                         pfnDumpDebugPrintf(fmt, __VA_ARGS__); \
133                 } else { \
134                         pr_info("pvr_sync: " fmt, __VA_ARGS__); \
135                 } \
136         } while (0)
137
138 #define SYNC_MAX_POOL_SIZE 10
139
140 enum {
141         SYNC_TL_TYPE = 0,
142         SYNC_PT_FENCE_TYPE = 1,
143         SYNC_PT_CLEANUP_TYPE = 2,
144         SYNC_PT_FOREIGN_FENCE_TYPE = 3,
145         SYNC_PT_FOREIGN_CLEANUP_TYPE = 4,
146 };
147
148 struct pvr_sync_append_data {
149         u32                             nr_updates;
150         PRGXFWIF_UFO_ADDR               *update_ufo_addresses;
151         u32                             *update_values;
152         u32                             nr_checks;
153         PRGXFWIF_UFO_ADDR               *check_ufo_addresses;
154         u32                             *check_values;
155
156         /* The cleanup list is needed for rollback; that is the only
157          * operation ever performed on it. */
158         u32                             nr_cleaup_syncs;
159         struct pvr_sync_native_sync_prim        **cleanup_syncs;
160
161         /* Keep the sync points around for fput and if rollback is needed */
162         struct pvr_sync_alloc_data      *update_sync_data;
163         u32                             nr_fences;
164         struct sync_fence               *fences[];
165 };
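/* Note: fences[] is a C99 flexible array member; the struct above is
 * allocated with room for nr_fences sync_fence pointers in a single block
 * (see the kzalloc() in pvr_sync_append_fences() below). */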
166
167
168 /* Services client sync prim wrapper. This is used to hold debug information
169  * and make it possible to cache unused syncs. */
170 struct pvr_sync_native_sync_prim {
171         /* List for the sync pool support. */
172         struct list_head list;
173
174         /* Base services sync prim structure */
175         struct PVRSRV_CLIENT_SYNC_PRIM *client_sync;
176
177         /* The next queued value which should be used */
178         u32 next_value;
179
180         /* Every sync data will get some unique id */
181         u32 id;
182
183         /* FWAddr used by the client sync */
184         u32 vaddr;
185
186         /* The type this sync is used for in our driver. Used in
187          * pvr_sync_debug_request. */
188         u8 type;
189
190         /* A debug class name also printed in pvr_sync_debug_request */
191         char class[32];
192 };
193
194 /* This is the actual timeline metadata. We might keep this around after the
195  * base sync driver has destroyed the pvr_sync_timeline_wrapper object.
196  */
197 struct pvr_sync_timeline {
198         /* Back reference to the sync_timeline. Not always valid */
199         struct sync_timeline *obj;
200
201         /* Global timeline list support */
202         struct list_head list;
203
204         /* Timeline sync */
205         struct pvr_sync_kernel_pair *kernel;
206
207         /* Should we do timeline idle detection when creating a new fence? */
208         bool fencing_enabled;
209
210         /* Reference count for this object */
211         struct kref kref;
212
213         /* Used only by pvr_sync_update_all_timelines(). False if the timeline
214          * has been detected as racing with pvr_sync_destroy_timeline().
215          */
216         bool valid;
217 };
218
219 /* This is the IMG extension of a sync_timeline */
220 struct pvr_sync_timeline_wrapper {
221         /* Original timeline struct. Needs to come first. */
222         struct sync_timeline obj;
223
224         /* Pointer to extra timeline data. Separated life-cycle. */
225         struct pvr_sync_timeline *timeline;
226 };
227
228 struct pvr_sync_kernel_pair {
229         /* Binary sync point representing the Android native sync in HW. */
230         struct pvr_sync_native_sync_prim *fence_sync;
231
232         /* Cleanup sync structure.
233          * If the base sync prim is used for "checking" only within a GL stream,
234          * there is no way of knowing when this has happened. So use a second
235          * sync prim which just gets updated and check the update count when
236          * freeing this struct. */
237         struct pvr_sync_native_sync_prim *cleanup_sync;
238
239         /* Sync points can go away when there are deferred hardware operations
240          * still outstanding. We must not free the SERVER_SYNC_PRIMITIVE until
241          * the hardware is finished, so we add it to a defer list which is
242          * processed periodically ("defer-free").
243          *
244          * Note that the defer-free list is global, not per-timeline.
245          */
246         struct list_head list;
247 };
248
249 struct pvr_sync_data {
250         /* Every sync point has a services sync object. This object is used
251          * by the hardware to enforce ordering -- it is attached as a source
252          * dependency to various commands.
253          */
254         struct pvr_sync_kernel_pair *kernel;
255
256         /* The timeline update value for this sync point. */
257         u32 timeline_update_value;
258
259         /* This refcount is incremented at create and dup time, and decremented
260          * at free time. It ensures the object doesn't start the defer-free
261          * process until it is no longer referenced.
262          */
263         struct kref kref;
264 };
265
266 struct pvr_sync_alloc_data {
267         struct pvr_sync_timeline *timeline;
268         struct pvr_sync_data *sync_data;
269         struct file *file;
270 };
271
272 /* This is the IMG extension of a sync_pt */
273 struct pvr_sync_pt {
274         /* Original sync_pt structure. Needs to come first. */
275         struct sync_pt pt;
276
277         /* Private shared data */
278         struct pvr_sync_data *sync_data;
279 };
280
281 /* This is the IMG extension of a sync_fence */
282 struct pvr_sync_fence {
283         /* Original sync_fence structure. Needs to come first. */
284         struct sync_fence *fence;
285
286         /* To ensure callbacks are always received for fences / sync_pts, even
287          * after the fence has been 'put' (freed), we must take a reference to
288          * the fence. We still need to 'put' the fence ourselves, but this might
289          * happen in irq context, where fput() is not allowed (in kernels <3.6).
290          * We must add the fence to a list which is processed in WQ context.
291          */
292         struct list_head list;
293 };
294
295 /* Any sync point from a foreign (non-PVR) timeline needs to have a "shadow"
296  * sync prim. This is modelled as a software operation. The foreign driver
297  * completes the operation by calling a callback we registered with it. */
298 struct pvr_sync_fence_waiter {
299         /* Base sync driver waiter structure */
300         struct sync_fence_waiter waiter;
301
302         /* "Shadow" sync prim backing the foreign driver's sync_pt */
303         struct pvr_sync_kernel_pair *kernel;
304
305         /* Optimizes lookup of fence for defer-put operation */
306         struct pvr_sync_fence *sync_fence;
307 };
308
309 /* Global data for the sync driver */
310 static struct {
311         /* Services connection */
312         void *device_cookie;
313
314         /* Complete notify handle */
315         void *command_complete_handle;
316
317         /* defer_free workqueue. Syncs may still be in use by the HW when freed,
318          * so we have to keep them around until the HW is done with them at
319          * some later time. This workqueue iterates over the list of freed
320          * syncs, checks whether they are still in use, and frees the sync
321          * device memory once they are not. */
322         struct workqueue_struct *defer_free_wq;
323         struct work_struct defer_free_work;
324
325         /* check_status workqueue: When a foreign point is completed, a SW
326          * operation marks the sync as completed to allow the operations to
327          * continue. This completion may require the hardware to be notified,
328          * which may be expensive/take locks, so we push that to a workqueue
329          */
330         struct workqueue_struct *check_status_wq;
331         struct work_struct check_status_work;
332
333         /* Context used to create client sync prims. */
334         struct SYNC_PRIM_CONTEXT *sync_prim_context;
335
336         /* Debug notify handle */
337         void *debug_notify_handle;
338
339         /* Unique id counter for the sync prims */
340         atomic_t sync_id;
341
342         /* The global event object (used to wait between checks for deferred-
343          * free sync status) */
344         void *event_object_handle;
345 } pvr_sync_data;
346
347 /* List of timelines created by this driver */
348 static LIST_HEAD(timeline_list);
349 static DEFINE_MUTEX(timeline_list_mutex);
350
351 /* Sync pool support */
352 static LIST_HEAD(sync_pool_free_list);
353 static LIST_HEAD(sync_pool_active_list);
354 static DEFINE_MUTEX(sync_pool_mutex);
355 static s32 sync_pool_size;
356 static u32 sync_pool_created;
357 static u32 sync_pool_reused;
358
359 /* The "defer-free" object list. Driver global. */
360 static LIST_HEAD(sync_prim_free_list);
361 static DEFINE_SPINLOCK(sync_prim_free_list_spinlock);
362
363 /* The "defer-put" object list. Driver global. */
364 static LIST_HEAD(sync_fence_put_list);
365 static DEFINE_SPINLOCK(sync_fence_put_list_spinlock);
366
367 static inline void set_sync_value(struct pvr_sync_native_sync_prim *sync,
368                                   u32 value)
369 {
370         *(sync->client_sync->pui32LinAddr) = value;
371 }
372
373 static inline u32 get_sync_value(struct pvr_sync_native_sync_prim *sync)
374 {
375         return *(sync->client_sync->pui32LinAddr);
376 }
377
378 static inline void complete_sync(struct pvr_sync_native_sync_prim *sync)
379 {
380         *(sync->client_sync->pui32LinAddr) = sync->next_value;
381 }
382
383 static inline int is_sync_met(struct pvr_sync_native_sync_prim *sync)
384 {
385         return *(sync->client_sync->pui32LinAddr) == sync->next_value;
386 }
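/* The helpers above encode the sync prim protocol: the current value lives at
 * client_sync->pui32LinAddr, while next_value is the value most recently
 * queued by the CPU. Each queued HW operation bumps next_value; the sync is
 * "met" once the firmware has written the current value up to next_value.
 * For example, after three kicks next_value == 3, and is_sync_met() returns
 * true only when the firmware has written 3 back to the prim. */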
387
388 static inline struct pvr_sync_timeline *get_timeline(struct sync_timeline *obj)
389 {
390         return ((struct pvr_sync_timeline_wrapper *)obj)->timeline;
391 }
392
393 static inline struct pvr_sync_timeline *get_timeline_pt(struct sync_pt *pt)
394 {
395         return get_timeline(sync_pt_parent(pt));
396 }
397
398 static inline int
399 pvr_sync_has_kernel_signaled(struct pvr_sync_kernel_pair *kernel)
400 {
401         /* Idle syncs are always signaled */
402         if (!kernel)
403                 return 1;
404
405         return is_sync_met(kernel->fence_sync);
406 }
407
408 static struct pvr_sync_alloc_data *pvr_sync_alloc_fence_fdget(int fd);
409
410 #ifdef DEBUG_OUTPUT
411
412 static char *debug_info_timeline(struct pvr_sync_timeline *timeline)
413 {
414         static char info[256];
415
416         snprintf(info, sizeof(info),
417                  "n='%s' id=%u fw=0x%x tl_curr=%u tl_next=%u",
418                  timeline->obj ? timeline->obj->name : "?",
419                  timeline->kernel->fence_sync->id,
420                  timeline->kernel->fence_sync->vaddr,
421                  get_sync_value(timeline->kernel->fence_sync),
422                  timeline->kernel->fence_sync->next_value);
423
424         return info;
425 }
426
427 static char *debug_info_sync_pt(struct sync_pt *pt)
428 {
429         struct pvr_sync_timeline *timeline = get_timeline_pt(pt);
430         struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)pt;
431         struct pvr_sync_kernel_pair *kernel = pvr_pt->sync_data->kernel;
432         static char info[256], info1[256];
433
434         if (kernel) {
435                 struct pvr_sync_native_sync_prim *cleanup_sync =
436                         kernel->cleanup_sync;
437
438                 if (cleanup_sync) {
439                         snprintf(info1, sizeof(info1),
440                                  " # cleanup: id=%u fw=0x%x curr=%u next=%u",
441                                  cleanup_sync->id,
442                                  cleanup_sync->vaddr,
443                                  get_sync_value(cleanup_sync),
444                                  cleanup_sync->next_value);
445                 } else {
446                         info1[0] = 0;
447                 }
448
449                 snprintf(info, sizeof(info),
450                          "status=%d tl_taken=%u ref=%d # sync: id=%u fw=0x%x curr=%u next=%u%s # tl: %s",
451                          pvr_sync_has_kernel_signaled(kernel),
452                          pvr_pt->sync_data->timeline_update_value,
453                          atomic_read(&pvr_pt->sync_data->kref.refcount),
454                          kernel->fence_sync->id,
455                          kernel->fence_sync->vaddr,
456                          get_sync_value(kernel->fence_sync),
457                          kernel->fence_sync->next_value,
458                          info1, debug_info_timeline(timeline));
459         } else {
460                 snprintf(info, sizeof(info),
461                          "status=%d tl_taken=%u ref=%d # sync: idle # tl: %s",
462                          pvr_sync_has_kernel_signaled(kernel),
463                          pvr_pt->sync_data->timeline_update_value,
464                          atomic_read(&pvr_pt->sync_data->kref.refcount),
465                          debug_info_timeline(timeline));
466         }
467
468         return info;
469 }
470
471 #endif /* DEBUG_OUTPUT */
472
473 static enum PVRSRV_ERROR
474 sync_pool_get(struct pvr_sync_native_sync_prim **_sync,
475               const char *class_name, u8 type)
476 {
477         struct pvr_sync_native_sync_prim *sync;
478         enum PVRSRV_ERROR error = PVRSRV_OK;
479
480         mutex_lock(&sync_pool_mutex);
481
482         if (list_empty(&sync_pool_free_list)) {
483                 /* If there is nothing in the pool, create a new sync prim. */
484                 sync = kmalloc(sizeof(struct pvr_sync_native_sync_prim),
485                                GFP_KERNEL);
486                 if (!sync) {
487                         pr_err("pvr_sync: %s: Failed to allocate sync data",
488                                __func__);
489                         error = PVRSRV_ERROR_OUT_OF_MEMORY;
490                         goto err_unlock;
491                 }
492
493                 error = SyncPrimAlloc(pvr_sync_data.sync_prim_context,
494                                       &sync->client_sync, class_name);
495                 if (error != PVRSRV_OK) {
496                         pr_err("pvr_sync: %s: Failed to allocate sync prim (%s)",
497                                __func__, PVRSRVGetErrorStringKM(error));
498                         goto err_free;
499                 }
500
501                 sync->vaddr = SyncPrimGetFirmwareAddr(sync->client_sync);
502
503                 list_add_tail(&sync->list, &sync_pool_active_list);
504                 ++sync_pool_created;
505         } else {
506                 sync = list_first_entry(&sync_pool_free_list,
507                                         struct pvr_sync_native_sync_prim, list);
508                 list_move_tail(&sync->list, &sync_pool_active_list);
509                 --sync_pool_size;
510                 ++sync_pool_reused;
511         }
512
513         sync->id = atomic_inc_return(&pvr_sync_data.sync_id);
514         sync->type = type;
515
516         strncpy(sync->class, class_name, sizeof(sync->class));
            /* strncpy() does not NUL-terminate when class_name fills the buffer */
            sync->class[sizeof(sync->class) - 1] = '\0';
517         /* It's crucial to reset the sync to zero */
518         set_sync_value(sync, 0);
519         sync->next_value = 0;
520
521         *_sync = sync;
522 err_unlock:
523         mutex_unlock(&sync_pool_mutex);
524         return error;
525
526 err_free:
527         kfree(sync);
528         goto err_unlock;
529 }
530
531 static void sync_pool_put(struct pvr_sync_native_sync_prim *sync)
532 {
533         mutex_lock(&sync_pool_mutex);
534
535         if (sync_pool_size < SYNC_MAX_POOL_SIZE) {
536                 /* Mark it as unused */
537                 set_sync_value(sync, 0xffffffff);
538
539                 list_move(&sync->list, &sync_pool_free_list);
540                 ++sync_pool_size;
541         } else {
542                 /* Mark it as invalid */
543                 set_sync_value(sync, 0xdeadbeef);
544
545                 list_del(&sync->list);
546                 SyncPrimFree(sync->client_sync);
547                 kfree(sync);
548         }
549
550         mutex_unlock(&sync_pool_mutex);
551 }
552
553 static void sync_pool_clear(void)
554 {
555         struct pvr_sync_native_sync_prim *sync, *n;
556
557         mutex_lock(&sync_pool_mutex);
558
559         list_for_each_entry_safe(sync, n, &sync_pool_free_list, list) {
560                 /* Mark it as invalid */
561                 set_sync_value(sync, 0xdeadbeef);
562
563                 list_del(&sync->list);
564                 SyncPrimFree(sync->client_sync);
565                 kfree(sync);
566                 --sync_pool_size;
567         }
568
569         mutex_unlock(&sync_pool_mutex);
570 }
571
572 static void pvr_sync_debug_request(void *hDebugRequestHandle,
573                                    u32 ui32VerbLevel)
574 {
575         struct pvr_sync_native_sync_prim *sync;
576
577         static const char *const type_names[] = {
578                 "Timeline", "Fence", "Cleanup",
579                 "Foreign Fence", "Foreign Cleanup"
580         };
581
582         if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_HIGH) {
583                 mutex_lock(&sync_pool_mutex);
584
585                 PVR_DUMPDEBUG_LOG(g_pfnDumpDebugPrintf,
586                                   "Dumping all pending android native syncs (Pool usage: %d%% - %d %d)",
587                                   sync_pool_reused ?
588                                   (10000 /
589                                    ((sync_pool_created + sync_pool_reused) *
590                                     100 / sync_pool_reused)) : 0,
591                                   sync_pool_created, sync_pool_reused);
592
593                 list_for_each_entry(sync, &sync_pool_active_list, list) {
594                         if (is_sync_met(sync))
595                                 continue;
596
597                         BUG_ON(sync->type >= ARRAY_SIZE(type_names));
598
599                         PVR_DUMPDEBUG_LOG(g_pfnDumpDebugPrintf,
600                                           "\tID = %d, FWAddr = 0x%08x: Current = 0x%08x, Next = 0x%08x, %s (%s)",
601                                           sync->id, sync->vaddr,
602                                           get_sync_value(sync),
603                                           sync->next_value,
604                                           sync->class,
605                                           type_names[sync->type]);
606                 }
607 #if 0
608                 PVR_DUMPDEBUG_LOG(g_pfnDumpDebugPrintf,
609                                   "Dumping all unused syncs");
610                 list_for_each_entry(sync, &sync_pool_free_list, list) {
611                         BUG_ON(sync->type >= ARRAY_SIZE(type_names));
612
613                         PVR_DUMPDEBUG_LOG(g_pfnDumpDebugPrintf,
614                                           "\tID = %d, FWAddr = 0x%08x: Current = 0x%08x, Next = 0x%08x, %s (%s)",
615                                           sync->id, sync->vaddr,
616                                           get_sync_value(sync),
617                                           sync->next_value,
618                                           sync->class,
619                                           type_names[sync->type]);
620                 }
621 #endif
622                 mutex_unlock(&sync_pool_mutex);
623         }
624 }
625
626 static struct sync_pt *pvr_sync_dup(struct sync_pt *sync_pt)
627 {
628         struct pvr_sync_pt *pvr_pt_a = (struct pvr_sync_pt *)sync_pt;
629         struct pvr_sync_pt *pvr_pt_b = NULL;
630
631         DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt));
632
633         pvr_pt_b = (struct pvr_sync_pt *)
634                 sync_pt_create(sync_pt_parent(sync_pt),
635                                sizeof(struct pvr_sync_pt));
636         if (!pvr_pt_b) {
637                 pr_err("pvr_sync: %s: Failed to dup sync pt", __func__);
638                 goto err_out;
639         }
640
641         kref_get(&pvr_pt_a->sync_data->kref);
642
643         pvr_pt_b->sync_data = pvr_pt_a->sync_data;
644
645 err_out:
646         return (struct sync_pt *)pvr_pt_b;
647 }
648
649 static int pvr_sync_has_signaled(struct sync_pt *sync_pt)
650 {
651         struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
652
653         DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt));
654
655         return pvr_sync_has_kernel_signaled(pvr_pt->sync_data->kernel);
656 }
657
658 static int pvr_sync_compare(struct sync_pt *a, struct sync_pt *b)
659 {
660         u32 a1 = ((struct pvr_sync_pt *)a)->sync_data->timeline_update_value;
661         u32 b1 = ((struct pvr_sync_pt *)b)->sync_data->timeline_update_value;
662
663         DPF("%s: a # %s", __func__, debug_info_sync_pt(a));
664         DPF("%s: b # %s", __func__, debug_info_sync_pt(b));
665
666         if (a1 == b1)
667                 return 0;
668
669         /* Take integer wrapping into account */
670         return ((s32)a1 - (s32)b1) < 0 ? -1 : 1;
671 }
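/* Worked example of the wrap-safe comparison above: for a1 = 0xfffffffe and
 * b1 = 0x00000001, (s32)a1 - (s32)b1 == -3 < 0, so a orders before b even
 * though a1 > b1 as unsigned values. */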
672
673 static void wait_for_sync(struct pvr_sync_native_sync_prim *sync)
674 {
675 #ifndef NO_HARDWARE
676         void *event_object = NULL;
677         enum PVRSRV_ERROR error = PVRSRV_OK;
678
679         while (sync && !is_sync_met(sync)) {
680                 if (!event_object) {
681                         error = OSEventObjectOpen(
682                                 pvr_sync_data.event_object_handle,
683                                 &event_object);
684                         if (error != PVRSRV_OK) {
685                                 pr_err("pvr_sync: %s: Error opening event object (%s)\n",
686                                         __func__,
687                                         PVRSRVGetErrorStringKM(error));
688                                 break;
689                         }
690                 }
691                 error = OSEventObjectWait(event_object);
692                 if (error != PVRSRV_OK && error != PVRSRV_ERROR_TIMEOUT) {
693                         pr_err("pvr_sync: %s: Error waiting on event object (%s)\n",
694                                 __func__,
695                                 PVRSRVGetErrorStringKM(error));
696                 }
697         }
698
699         if (event_object)
700                 OSEventObjectClose(event_object);
701 #endif
702 }
703
704 static void pvr_sync_defer_free(struct pvr_sync_kernel_pair *kernel)
705 {
706         unsigned long flags;
707
708         spin_lock_irqsave(&sync_prim_free_list_spinlock, flags);
709         list_add_tail(&kernel->list, &sync_prim_free_list);
710         spin_unlock_irqrestore(&sync_prim_free_list_spinlock, flags);
711
712         queue_work(pvr_sync_data.defer_free_wq, &pvr_sync_data.defer_free_work);
713 }
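/* pvr_sync_defer_free() only queues the kernel pair on the global
 * sync_prim_free_list; the defer_free_wq worker (not shown in this excerpt)
 * is then expected to test each entry with is_sync_met() and return its syncs
 * to the pool once the HW has finished with them. */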
714
715 /* This function assumes the timeline_list_mutex is held while it runs */
716
717 static void pvr_sync_destroy_timeline_locked(struct kref *kref)
718 {
719         struct pvr_sync_timeline *timeline = (struct pvr_sync_timeline *)
720                 container_of(kref, struct pvr_sync_timeline, kref);
721
722         pvr_sync_defer_free(timeline->kernel);
723         list_del(&timeline->list);
724         kfree(timeline);
725 }
726
727 static void pvr_sync_destroy_timeline(struct kref *kref)
728 {
729         mutex_lock(&timeline_list_mutex);
730         pvr_sync_destroy_timeline_locked(kref);
731         mutex_unlock(&timeline_list_mutex);
732 }
733
734 static void pvr_sync_release_timeline(struct sync_timeline *obj)
735 {
736         struct pvr_sync_timeline *timeline = get_timeline(obj);
737
738         /* If pvr_sync_open failed after calling sync_timeline_create, this
739          * can be called with a timeline that has not got a timeline sync
740          * or been added to our timeline list. Use a NULL timeline to
741          * detect and handle this condition
742          */
743         if (!timeline)
744                 return;
745
746         DPF("%s: # %s", __func__, debug_info_timeline(timeline));
747
748         wait_for_sync(timeline->kernel->fence_sync);
749
750         /* Whether or not we're the last reference, obj is going away
751          * after this function returns, so remove our back reference
752          * to it.
753          */
754         timeline->obj = NULL;
755
756         /* This might be the last reference to the timeline object.
757          * If so, we'll go ahead and delete it now.
758          */
759         kref_put(&timeline->kref, pvr_sync_destroy_timeline);
760 }
761
762 /* The print_obj() and print_pt() functions have been removed, so we're forced
763  * to use the timeline_value_str() and pt_value_str() functions. These are
764  * worse because we're limited to 64 characters, and the strings for sync
765  * pts have to be formatted like:
766  *
767  *   pt active: pt_info / tl_info
768  *
769  * For us, the tl_info is complicated and doesn't need to be repeated over
770  * and over. So try to detect the way sync_print_pt() calls the two value_str
771  * functions and change what pvr_sync_timeline_value_str() returns dynamically.
772  */
773 static struct sync_timeline *last_pt_timeline;
774
775 static void pvr_sync_timeline_value_str(struct sync_timeline *sync_timeline,
776                                         char *str, int size)
777 {
778         struct pvr_sync_timeline *timeline = get_timeline(sync_timeline);
779
780         if (sync_timeline != last_pt_timeline) {
781                 snprintf(str, size, "%u 0x%x %u/%u",
782                          timeline->kernel->fence_sync->id,
783                          timeline->kernel->fence_sync->vaddr,
784                          get_sync_value(timeline->kernel->fence_sync),
785                          timeline->kernel->fence_sync->next_value);
786         } else {
787                 snprintf(str, size, "%u",
788                          get_sync_value(timeline->kernel->fence_sync));
789         }
790 }
791
792 static void pvr_sync_pt_value_str(struct sync_pt *sync_pt, char *str, int size)
793 {
794         struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
795         struct pvr_sync_kernel_pair *kernel;
796
797         if (!pvr_pt->sync_data)
798                 return;
799
800         kernel = pvr_pt->sync_data->kernel;
801
802         /* Messages must be at most 64 bytes (including the null terminator):
803          *
804          * 123456789012345678901234567890123456789012345678901234567890123
805          *
806          * ID     FW ADDR    C/N # REF TAKEN
807          * 123456 0xdeadbeef 0/1 # r=2 123456
808          *
809          * ID     FW ADDR    C/N # ID     FW ADDR    C/N # REF TAKEN
810          * 123456 0xdeadbeef 0/1 # 123456 0xdeadbeef 0/1 # r=2 123456
811          */
812         if (kernel) {
813                 if (!kernel->cleanup_sync) {
814                         snprintf(str, size,
815                                  "%u 0x%x %u/%u r=%d %u",
816                                  kernel->fence_sync->id,
817                                  kernel->fence_sync->vaddr,
818                                  get_sync_value(kernel->fence_sync),
819                                  kernel->fence_sync->next_value,
820                                  atomic_read(&pvr_pt->sync_data->kref.refcount),
821                                  pvr_pt->sync_data->timeline_update_value);
822                 } else {
823                         snprintf(str, size,
824                                  "%u 0x%x %u/%u # %u 0x%x %u/%u # r=%d %u",
825                                  kernel->fence_sync->id,
826                                  kernel->fence_sync->vaddr,
827                                  get_sync_value(kernel->fence_sync),
828                                  kernel->fence_sync->next_value,
829                                  kernel->cleanup_sync->id,
830                                  kernel->cleanup_sync->vaddr,
831                                  get_sync_value(kernel->cleanup_sync),
832                                  kernel->cleanup_sync->next_value,
833                                  atomic_read(&pvr_pt->sync_data->kref.refcount),
834                                  pvr_pt->sync_data->timeline_update_value);
835                 }
836         } else {
837                 snprintf(str, size, "idle # r=%d %u",
838                          atomic_read(&pvr_pt->sync_data->kref.refcount),
839                          pvr_pt->sync_data->timeline_update_value);
840         }
841
842         last_pt_timeline = sync_pt_parent(sync_pt);
843 }
844
845 static struct pvr_sync_data *
846 pvr_sync_create_sync_data(struct sync_timeline *obj)
847 {
848         struct pvr_sync_data *sync_data = NULL;
849         enum PVRSRV_ERROR error;
850
851         sync_data = kzalloc(sizeof(struct pvr_sync_data), GFP_KERNEL);
852         if (!sync_data)
853                 goto err_out;
854
855         kref_init(&sync_data->kref);
856
857         sync_data->kernel =
858                 kzalloc(sizeof(struct pvr_sync_kernel_pair),
859                 GFP_KERNEL);
860
861         if (!sync_data->kernel)
862                 goto err_free_data;
863
864         OSAcquireBridgeLock();
865         error = sync_pool_get(&sync_data->kernel->fence_sync,
866                               obj->name, SYNC_PT_FENCE_TYPE);
867         OSReleaseBridgeLock();
868
869         if (error != PVRSRV_OK) {
870                 pr_err("pvr_sync: %s: Failed to allocate sync prim (%s)",
871                        __func__, PVRSRVGetErrorStringKM(error));
872                 goto err_free_kernel;
873         }
874
875 err_out:
876         return sync_data;
877
878 err_free_kernel:
879         kfree(sync_data->kernel);
880 err_free_data:
881         kfree(sync_data);
882         sync_data = NULL;
883         goto err_out;
884 }
885
886 static void pvr_sync_free_sync_data(struct kref *kref)
887 {
888         struct pvr_sync_data *sync_data = (struct pvr_sync_data *)
889                 container_of(kref, struct pvr_sync_data, kref);
890
891         if (sync_data->kernel)
892                 pvr_sync_defer_free(sync_data->kernel);
893         kfree(sync_data);
894 }
895
896 static void pvr_sync_free_sync(struct sync_pt *sync_pt)
897 {
898         struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
899
900         DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt));
901
902         kref_put(&pvr_pt->sync_data->kref, pvr_sync_free_sync_data);
903 }
904
905 /* Forward declaration: referenced by pvr_sync_timeline_ops below */
906 static int pvr_sync_fill_driver_data(struct sync_pt *, void *, int);
907
908 static struct sync_timeline_ops pvr_sync_timeline_ops = {
909         .driver_name        = PVRSYNC_MODNAME,
910         .dup                = pvr_sync_dup,
911         .has_signaled       = pvr_sync_has_signaled,
912         .compare            = pvr_sync_compare,
913         .free_pt            = pvr_sync_free_sync,
914         .release_obj        = pvr_sync_release_timeline,
915         .timeline_value_str = pvr_sync_timeline_value_str,
916         .pt_value_str       = pvr_sync_pt_value_str,
917         .fill_driver_data   = pvr_sync_fill_driver_data,
918 };
919
920 static inline bool is_pvr_timeline(struct sync_timeline *obj)
921 {
922         return obj->ops == &pvr_sync_timeline_ops;
923 }
924
925 static inline bool is_pvr_timeline_pt(struct sync_pt *pt)
926 {
927         return is_pvr_timeline(sync_pt_parent(pt));
928 }
929
930 static int
931 pvr_sync_fill_driver_data(struct sync_pt *sync_pt, void *data, int size)
932 {
933         struct pvr_sync_pt_info *info = (struct pvr_sync_pt_info *)data;
934         struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
935         struct pvr_sync_data *sync_data = pvr_pt->sync_data;
936         struct pvr_sync_kernel_pair *kernel = sync_data->kernel;
937
938         if (size < sizeof(struct pvr_sync_pt_info))
939                 return -ENOMEM;
940
941         info->ui32TlTaken = sync_data->timeline_update_value;
942
943         if (kernel) {
944                 info->id         = kernel->fence_sync->id;
945                 info->ui32FWAddr = kernel->fence_sync->vaddr;
946                 info->ui32CurrOp = get_sync_value(kernel->fence_sync);
947                 info->ui32NextOp = kernel->fence_sync->next_value;
948         } else {
949                 info->id         = 0;
950                 info->ui32FWAddr = 0;
951                 info->ui32CurrOp = 0;
952                 info->ui32NextOp = 0;
953         }
954
955         return sizeof(struct pvr_sync_pt_info);
956 }
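/* pvr_sync_fill_driver_data() backs the driver_data portion of the Android
 * staging sync SYNC_IOC_FENCE_INFO ioctl: userspace receives one
 * pvr_sync_pt_info per point, or -ENOMEM if the caller's buffer cannot hold
 * one. */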
957
958 /* foreign sync handling */
959
960 static void pvr_sync_foreign_sync_pt_signaled(struct sync_fence *fence,
961                                               struct sync_fence_waiter *_waiter)
962 {
963         struct pvr_sync_fence_waiter *waiter =
964                 (struct pvr_sync_fence_waiter *)_waiter;
965         unsigned long flags;
966
967         /* Complete the SW operation and free the sync if we can. If we can't,
968          * it will be checked by a later workqueue kick. */
969         complete_sync(waiter->kernel->fence_sync);
970
971         /* We can 'put' the fence now, but this function might be called in
972          * irq context, so we must defer to the WQ.
973          * That WQ is triggered in pvr_sync_defer_free, so adding the fence
974          * to the put list before that call guarantees it is cleaned up on
975          * the next WQ run. */
976         spin_lock_irqsave(&sync_fence_put_list_spinlock, flags);
977         list_add_tail(&waiter->sync_fence->list, &sync_fence_put_list);
978         spin_unlock_irqrestore(&sync_fence_put_list_spinlock, flags);
979
980         pvr_sync_defer_free(waiter->kernel);
981
982         /* The completed sw-sync may allow other tasks to complete,
983          * so we need to allow them to progress */
984         queue_work(pvr_sync_data.check_status_wq,
985                 &pvr_sync_data.check_status_work);
986
987         kfree(waiter);
988 }
989
990 static struct pvr_sync_kernel_pair *
991 pvr_sync_create_waiter_for_foreign_sync(int fd)
992 {
993         struct pvr_sync_kernel_pair *kernel = NULL;
994         struct pvr_sync_fence_waiter *waiter;
995         struct pvr_sync_fence *sync_fence;
996         struct sync_fence *fence;
997         enum PVRSRV_ERROR error;
998         int err;
999
1000         fence = sync_fence_fdget(fd);
1001         if (!fence) {
1002                 pr_err("pvr_sync: %s: Failed to take reference on fence",
1003                        __func__);
1004                 goto err_out;
1005         }
1006
1007         kernel = kmalloc(sizeof(struct pvr_sync_kernel_pair), GFP_KERNEL);
1008         if (!kernel) {
1009                 pr_err("pvr_sync: %s: Failed to allocate sync kernel",
1010                        __func__);
1011                 goto err_put_fence;
1012         }
1013
1014         sync_fence = kmalloc(sizeof(struct pvr_sync_fence), GFP_KERNEL);
1015         if (!sync_fence) {
1016                 pr_err("pvr_sync: %s: Failed to allocate pvr sync fence",
1017                        __func__);
1018                 goto err_free_kernel;
1019         }
1020
1021         sync_fence->fence = fence;
1022
1023         error = sync_pool_get(&kernel->fence_sync,
1024                               fence->name, SYNC_PT_FOREIGN_FENCE_TYPE);
1025         if (error != PVRSRV_OK) {
1026                 pr_err("pvr_sync: %s: Failed to allocate sync prim (%s)",
1027                        __func__, PVRSRVGetErrorStringKM(error));
1028                 goto err_free_sync_fence;
1029         }
1030
1031         kernel->fence_sync->next_value++;
1032
1033         error = sync_pool_get(&kernel->cleanup_sync,
1034                               fence->name, SYNC_PT_FOREIGN_CLEANUP_TYPE);
1035         if (error != PVRSRV_OK) {
1036                 pr_err("pvr_sync: %s: Failed to allocate cleanup sync prim (%s)",
1037                        __func__, PVRSRVGetErrorStringKM(error));
1038                 goto err_free_sync;
1039         }
1040
1041         kernel->cleanup_sync->next_value++;
1042
1043         /* The custom waiter structure is freed in the waiter callback */
1044         waiter = kmalloc(sizeof(struct pvr_sync_fence_waiter), GFP_KERNEL);
1045         if (!waiter) {
1046                 pr_err("pvr_sync: %s: Failed to allocate waiter", __func__);
1047                 goto err_free_cleanup_sync;
1048         }
1049
1050         waiter->kernel = kernel;
1051         waiter->sync_fence = sync_fence;
1052
1053         sync_fence_waiter_init(&waiter->waiter,
1054                                pvr_sync_foreign_sync_pt_signaled);
1055
1056         err = sync_fence_wait_async(fence, &waiter->waiter);
1057         if (err) {
1058                 if (err < 0) {
1059                         pr_err("pvr_sync: %s: Fence was in error state (%d)",
1060                                __func__, err);
1061                         /* Fall-thru */
1062                 }
1063
1064                 /* -1 means the fence was broken, 1 means the fence already
1065                  * signalled. In either case, roll back what we've done and
1066                  * skip using this sync_pt for synchronization.
1067                  */
1068                 goto err_free_waiter;
1069         }
1070
1071 err_out:
1072         return kernel;
1073 err_free_waiter:
1074         kfree(waiter);
1075 err_free_cleanup_sync:
1076         sync_pool_put(kernel->cleanup_sync);
1077 err_free_sync:
1078         sync_pool_put(kernel->fence_sync);
1079 err_free_sync_fence:
1080         kfree(sync_fence);
1081 err_free_kernel:
1082         kfree(kernel);
1083         kernel = NULL;
1084 err_put_fence:
1085         sync_fence_put(fence);
1086         goto err_out;
1087 }
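/* Lifecycle of the shadow syncs created above: fence_sync (next_value bumped
 * to 1) is completed from the waiter callback when the foreign fence signals,
 * while cleanup_sync is updated by the HW kick so that the defer-free path
 * can tell when the shadow pair is safe to release. */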
1088
1089 enum PVRSRV_ERROR pvr_sync_append_fences(
1090         const char                  *name,
1091         const u32                   nr_check_fences,
1092         const s32                   *check_fence_fds,
1093         const s32                   update_fence_fd,
1094         const u32                   nr_updates,
1095         const PRGXFWIF_UFO_ADDR     *update_ufo_addresses,
1096         const u32                   *update_values,
1097         const u32                   nr_checks,
1098         const PRGXFWIF_UFO_ADDR     *check_ufo_addresses,
1099         const u32                   *check_values,
1100         struct pvr_sync_append_data **append_sync_data)
1101 {
1102         struct pvr_sync_append_data *sync_data;
1103         enum PVRSRV_ERROR err = PVRSRV_OK;
1104         struct pvr_sync_native_sync_prim **cleanup_sync_pos;
1105         PRGXFWIF_UFO_ADDR *update_address_pos;
1106         PRGXFWIF_UFO_ADDR *check_address_pos;
1107         u32 *update_value_pos;
1108         u32 *check_value_pos;
1109         unsigned num_used_sync_checks;
1110         unsigned num_used_sync_updates;
1111         struct pvr_sync_alloc_data *alloc_sync_data = NULL;
1112         unsigned i;
1113
1114         if ((nr_updates && (!update_ufo_addresses || !update_values)) ||
1115                 (nr_checks && (!check_ufo_addresses || !check_values)))
1116                 return PVRSRV_ERROR_INVALID_PARAMS;
1117
1118         sync_data =
1119                 kzalloc(sizeof(struct pvr_sync_append_data)
1120                         + nr_check_fences * sizeof(struct sync_fence *),
1121                         GFP_KERNEL);
1122         if (!sync_data) {
1123                 err = PVRSRV_ERROR_OUT_OF_MEMORY;
1124                 goto err_out;
1125         }
1126
1127         sync_data->nr_checks = nr_checks;
1128         sync_data->nr_updates = nr_updates;
1129
1130         sync_data->nr_fences = nr_check_fences;
1131
1132         /* Loop through once to get the fences and count the total number of
1133          * points */
1134         for (i = 0; i < nr_check_fences; i++) {
1135                 struct sync_fence *fence = sync_fence_fdget(check_fence_fds[i]);
1136                 struct pvr_sync_kernel_pair *sync_kernel;
1137                 unsigned int points_on_fence = 0;
1138                 bool has_foreign_point = false;
1139                 struct sync_pt *sync_pt;
1140                 int j;
1141
1142                 if (!fence) {
1143                         pr_err("pvr_sync: %s: Failed to read sync private data for fd %d\n",
1144                                 __func__, check_fence_fds[i]);
1145                         err = PVRSRV_ERROR_HANDLE_NOT_FOUND;
1146                         goto err_free_append_data;
1147                 }
1148
1149                 sync_data->fences[i] = fence;
1150
1151                 for_each_sync_pt(sync_pt, fence, j) {
1152                         struct pvr_sync_pt *pvr_pt;
1153
1154                         if (!is_pvr_timeline_pt(sync_pt)) {
1155                                 if (!sync_pt_get_status(sync_pt))
1156                                         has_foreign_point = true;
1157                                 continue;
1158                         }
1159
1160                         pvr_pt = (struct pvr_sync_pt *)sync_pt;
1161                         sync_kernel = pvr_pt->sync_data->kernel;
1162
1163                         if (!sync_kernel ||
1164                             is_sync_met(sync_kernel->fence_sync)) {
1165                                 continue;
1166                         }
1167                         /* We will use the above sync for "check" only. In
1168                          * that case, also insert a "cleanup" update command
1169                          * into the OpenGL stream, which can later be used to
1170                          * check whether the sync prim can be freed. */
1171                         if (!sync_kernel->cleanup_sync) {
1172                                 err = sync_pool_get(&sync_kernel->cleanup_sync,
1173                                         sync_pt_parent(&pvr_pt->pt)->name,
1174                                         SYNC_PT_CLEANUP_TYPE);
1175                                 if (err != PVRSRV_OK) {
1176                                         pr_err("pvr_sync: %s: Failed to allocate cleanup sync prim (%s)",
1177                                                __func__,
1178                                                PVRSRVGetErrorStringKM(err));
1179                                         goto err_free_append_data;
1180                                 }
1181                         }
1182                         points_on_fence++;
1183                 }
1184
1185                 if (has_foreign_point)
1186                         points_on_fence++;
1187
1188                 /* Each point has 1 check value, and 1 update value (for the
1189                  * cleanup fence) */
1190                 sync_data->nr_checks += points_on_fence;
1191                 sync_data->nr_updates += points_on_fence;
1192                 sync_data->nr_cleaup_syncs += points_on_fence;
1193         }
1194
1195         if (update_fence_fd >= 0) {
1196                 alloc_sync_data = pvr_sync_alloc_fence_fdget(update_fence_fd);
1197                 if (!alloc_sync_data) {
1198                         pr_err("pvr_sync: %s: Failed to read alloc sync private data for fd %d\n",
1199                                 __func__, update_fence_fd);
1200                         err = PVRSRV_ERROR_HANDLE_NOT_FOUND;
1201                         goto err_free_append_data;
1202                 }
1203                 /* Store the alloc sync data now, so it's correctly fput()
1204                  * even on error */
1205                 sync_data->update_sync_data = alloc_sync_data;
1206                 /* If an alloc-sync has already been appended to a kick,
1207                  * that is an error (and its sync_data will be NULL). */
1208                 if (!alloc_sync_data->sync_data) {
1209                         pr_err("pvr_sync: %s: Failed to read alloc sync sync_data for fd %d\n",
1210                                 __func__, update_fence_fd);
1211                         err = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
1212                         goto err_free_append_data;
1213
1214                 }
1215                 /* A fence update requires 2 update values (fence and
1216                  * timeline). */
1217                 sync_data->nr_updates += 2;
1218         }
1219
1220         sync_data->update_ufo_addresses =
1221                 kzalloc(sizeof(PRGXFWIF_UFO_ADDR) * sync_data->nr_updates,
1222                         GFP_KERNEL);
1223         if (!sync_data->update_ufo_addresses) {
1224                 pr_err("pvr_sync: %s: Failed to allocate update UFO address list\n",
1225                         __func__);
1226                 err = PVRSRV_ERROR_OUT_OF_MEMORY;
1227                 goto err_free_append_data;
1228         }
1229
1230         sync_data->update_values =
1231                 kzalloc(sizeof(u32) * sync_data->nr_updates,
1232                         GFP_KERNEL);
1233         if (!sync_data->update_values) {
1234                 pr_err("pvr_sync: %s: Failed to allocate update value list\n",
1235                         __func__);
1236                 err = PVRSRV_ERROR_OUT_OF_MEMORY;
1237                 goto err_free_append_data;
1238         }
1239
1240         sync_data->check_ufo_addresses =
1241                 kzalloc(sizeof(PRGXFWIF_UFO_ADDR) * sync_data->nr_checks,
1242                         GFP_KERNEL);
1243         if (!sync_data->check_ufo_addresses) {
1244                 pr_err("pvr_sync: %s: Failed to allocate check UFO address list\n",
1245                         __func__);
1246                 err = PVRSRV_ERROR_OUT_OF_MEMORY;
1247                 goto err_free_append_data;
1248         }
1249
1250         sync_data->check_values =
1251                 kzalloc(sizeof(u32) * sync_data->nr_checks,
1252                         GFP_KERNEL);
1253         if (!sync_data->check_values) {
1254                 pr_err("pvr_sync: %s: Failed to allocate check value list\n",
1255                         __func__);
1256                 err = PVRSRV_ERROR_OUT_OF_MEMORY;
1257                 goto err_free_append_data;
1258         }
1259
1260         sync_data->cleanup_syncs =
1261                 kzalloc(sizeof(struct pvr_sync_native_sync_prim *) *
1262                         sync_data->nr_cleaup_syncs, GFP_KERNEL);
1263         if (!sync_data->cleanup_syncs) {
1264                 pr_err("pvr_sync: %s: Failed to allocate cleanup rollback list\n",
1265                         __func__);
1266                 err = PVRSRV_ERROR_OUT_OF_MEMORY;
1267                 goto err_free_append_data;
1268         }
1269
1270         update_address_pos = sync_data->update_ufo_addresses;
1271         update_value_pos = sync_data->update_values;
1272         check_address_pos = sync_data->check_ufo_addresses;
1273         check_value_pos = sync_data->check_values;
1274         cleanup_sync_pos = sync_data->cleanup_syncs;
1275
1276
1277         /* Everything should be allocated/sanity checked. No errors are possible
1278          * after this point */
1279
1280         /* Append any check syncs */
1281         for (i = 0; i < nr_check_fences; i++) {
1282                 struct sync_fence *fence = sync_data->fences[i];
1283                 bool has_foreign_point = false;
1284                 struct sync_pt *sync_pt;
1285                 int j;
1286
1287                 for_each_sync_pt(sync_pt, fence, j) {
1288                         struct pvr_sync_pt *pvr_pt;
1289                         struct pvr_sync_kernel_pair *sync_kernel;
1290
1291                         if (!is_pvr_timeline_pt(sync_pt)) {
1292                                 if (!sync_pt_get_status(sync_pt))
1293                                         has_foreign_point = true;
1294                                 continue;
1295                         }
1296
1297                         pvr_pt = (struct pvr_sync_pt *)sync_pt;
1298                         sync_kernel = pvr_pt->sync_data->kernel;
1299
1300                         if (!sync_kernel ||
1301                             is_sync_met(sync_kernel->fence_sync)) {
1302                                 continue;
1303                         }
1304
1305                         (*check_address_pos++).ui32Addr =
1306                                 sync_kernel->fence_sync->vaddr;
1307                         *check_value_pos++ =
1308                                 sync_kernel->fence_sync->next_value;
1309
1310                         (*update_address_pos++).ui32Addr =
1311                                 sync_kernel->cleanup_sync->vaddr;
1312                         *update_value_pos++ =
1313                                 ++sync_kernel->cleanup_sync->next_value;
1314                         *cleanup_sync_pos++ = sync_kernel->cleanup_sync;
1315                 }
1316
1317                 if (has_foreign_point) {
1318                         struct pvr_sync_kernel_pair *foreign_sync_kernel =
1319                                 pvr_sync_create_waiter_for_foreign_sync(
1320                                         check_fence_fds[i]);
1321
1322                         if (foreign_sync_kernel) {
1323                                 struct pvr_sync_native_sync_prim *fence_sync =
1324                                         foreign_sync_kernel->fence_sync;
1325                                 struct pvr_sync_native_sync_prim *cleanup_sync =
1326                                         foreign_sync_kernel->cleanup_sync;
1327
1328
1329                                 (*check_address_pos++).ui32Addr =
1330                                         fence_sync->vaddr;
1331                                 *check_value_pos++ =
1332                                         fence_sync->next_value;
1333
1334                                 (*update_address_pos++).ui32Addr =
1335                                         cleanup_sync->vaddr;
1336                                 *update_value_pos++ =
1337                                         ++cleanup_sync->next_value;
1338                                 *cleanup_sync_pos++ = cleanup_sync;
1339                         }
1340                 }
1341         }
1342
1343         /* Append the update sync (if supplied) */
1344         if (sync_data->update_sync_data) {
1345                 struct pvr_sync_alloc_data *update_data =
1346                         sync_data->update_sync_data;
1347                 struct pvr_sync_timeline *timeline =
1348                         update_data->timeline;
1349                 struct pvr_sync_kernel_pair *sync_kernel =
1350                         update_data->sync_data->kernel;
1351
1352                 (*update_address_pos++).ui32Addr =
1353                         sync_kernel->fence_sync->vaddr;
1354                 *update_value_pos++ =
1355                         ++sync_kernel->fence_sync->next_value;
1356
1357                 (*update_address_pos++).ui32Addr =
1358                         timeline->kernel->fence_sync->vaddr;
1359
1360                 /* Increment the timeline value... */
1361                 update_data->sync_data->timeline_update_value =
1362                         ++timeline->kernel->fence_sync->next_value;
1363
1364                 /* ...and set that to be updated when this kick is completed */
1365                 *update_value_pos++ =
1366                         update_data->sync_data->timeline_update_value;
1367
1368
1369                 /* Reset the fencing enabled flag. If nobody sets this to 1
1370                  * until the next fence point is inserted, we will do timeline
1371                  * idle detection. */
1372                 timeline->fencing_enabled = false;
1373         }
1374         /* Count the total number of sync points actually attached; some may
1375          * have become complete since the first loop through, or a waiter for
1376          * a foreign point may have been skipped. (Points can never become
1377          * un-complete, so the count can only stay the same or shrink, and the
1378          * allocated arrays remain sufficiently sized.) */
1379         num_used_sync_updates =
1380                 update_address_pos - sync_data->update_ufo_addresses;
1381         num_used_sync_checks =
1382                 check_address_pos - sync_data->check_ufo_addresses;
1383
1384
1385         sync_data->nr_checks = nr_checks + num_used_sync_checks;
1386         sync_data->nr_updates = nr_updates + num_used_sync_updates;
1387         /* Append original check and update sync values/addresses */
1388         if (update_ufo_addresses)
1389                 memcpy(update_address_pos, update_ufo_addresses,
1390                            sizeof(PRGXFWIF_UFO_ADDR) * nr_updates);
1391         if (update_values)
1392                 memcpy(update_value_pos, update_values,
1393                            sizeof(u32) * nr_updates);
1394
1395         if (check_ufo_addresses)
1396                 memcpy(check_address_pos, check_ufo_addresses,
1397                            sizeof(PRGXFWIF_UFO_ADDR) * nr_checks);
1398         if (check_values)
1399                 memcpy(check_value_pos, check_values,
1400                            sizeof(u32) * nr_checks);
1401
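        /* Hand the populated append data back to the caller, who is expected
         * to release it later with pvr_sync_free_append_fences_data() (after
         * pvr_sync_rollback_append_fences(), if the kick failed). */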
1402         *append_sync_data = sync_data;
1403
1404         return PVRSRV_OK;
1405
1406 err_free_append_data:
1407         pvr_sync_free_append_fences_data(sync_data);
1408 err_out:
1409         return err;
1410 }
1411
1412 void pvr_sync_get_updates(const struct pvr_sync_append_data *sync_data,
1413         u32 *nr_fences, PRGXFWIF_UFO_ADDR **ufo_addrs, u32 **values)
1414 {
1415         *nr_fences = sync_data->nr_updates;
1416         *ufo_addrs = sync_data->update_ufo_addresses;
1417         *values = sync_data->update_values;
1418 }
1419
1420 void pvr_sync_get_checks(const struct pvr_sync_append_data *sync_data,
1421         u32 *nr_fences, PRGXFWIF_UFO_ADDR **ufo_addrs, u32 **values)
1422 {
1423         *nr_fences = sync_data->nr_checks;
1424         *ufo_addrs = sync_data->check_ufo_addresses;
1425         *values = sync_data->check_values;
1426 }
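
/* A minimal, hypothetical kick-path sketch of these accessors: after a
 * successful append, the merged UFO arrays can be fetched and passed on:
 *
 *   u32 nr_updates, nr_checks, *update_values, *check_values;
 *   PRGXFWIF_UFO_ADDR *update_addrs, *check_addrs;
 *
 *   pvr_sync_get_updates(sync_data, &nr_updates, &update_addrs,
 *                        &update_values);
 *   pvr_sync_get_checks(sync_data, &nr_checks, &check_addrs,
 *                       &check_values);
 *
 * The arrays remain owned by sync_data and are freed by
 * pvr_sync_free_append_fences_data().
 */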
1427
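/* Undo the next_value pre-increments made while appending fences, so sync
 * prims are not left waiting for updates that will never be issued (e.g.
 * when a kick fails after its fences were already appended). */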
1428 void pvr_sync_rollback_append_fences(
1429         struct pvr_sync_append_data *sync_append_data)
1430 {
1431         unsigned i;
1432
1433         if (!sync_append_data)
1434                 return;
1435
1436         for (i = 0; i < sync_append_data->nr_cleaup_syncs; i++) {
1437                 struct pvr_sync_native_sync_prim *cleanup_sync =
1438                         sync_append_data->cleanup_syncs[i];
1439                 /* If this cleanup was called on a partially-created data set,
1440                  * it's possible to have NULL cleanup sync pointers. */
1441                 if (!cleanup_sync)
1442                         continue;
1443                 cleanup_sync->next_value--;
1444         }
1445
1446         if (sync_append_data->update_sync_data) {
1447                 struct pvr_sync_data *sync_data =
1448                         sync_append_data->update_sync_data->sync_data;
1449                 struct pvr_sync_timeline *timeline =
1450                         sync_append_data->update_sync_data->timeline;
1451                 /* We can get a NULL sync_data if the corresponding
1452                  * append failed with a re-used alloc sync */
1453                 if (sync_data) {
1454                         sync_data->kernel->fence_sync->next_value--;
1455                         timeline->fencing_enabled = true;
1456                         timeline->kernel->fence_sync->next_value--;
1457                 }
1458         }
1459 }
1460
1461 void pvr_sync_free_append_fences_data(
1462         struct pvr_sync_append_data *sync_append_data)
1463 {
1464         unsigned i;
1465
1466         if (!sync_append_data)
1467                 return;
1468
1469         for (i = 0; i < sync_append_data->nr_fences; i++) {
1470                 struct sync_fence *fence = sync_append_data->fences[i];
1471                 /* If this cleanup was called on a partially-created data set,
1472                  * it's possible to have NULL fence pointers. */
1473                 if (!fence)
1474                         continue;
1475                 sync_fence_put(fence);
1476         }
1477
1478         if (sync_append_data->update_sync_data)
1479                 fput(sync_append_data->update_sync_data->file);
1480
1481         kfree(sync_append_data->update_ufo_addresses);
1482         kfree(sync_append_data->update_values);
1483         kfree(sync_append_data->check_ufo_addresses);
1484         kfree(sync_append_data->check_values);
1485         kfree(sync_append_data->cleanup_syncs);
1486         kfree(sync_append_data);
1487 }
1488
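/* Complete the attached syncs from the CPU, presumably for no-hardware
 * builds where no GPU will ever signal them; otherwise dependent work
 * would block forever. */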
1489 void pvr_sync_nohw_complete_fences(
1490         struct pvr_sync_append_data *sync_append_data)
1491 {
1492         unsigned i;
1493
1494         if (!sync_append_data)
1495                 return;
1496
1497         for (i = 0; i < sync_append_data->nr_cleaup_syncs; i++) {
1498                 struct pvr_sync_native_sync_prim *cleanup_sync =
1499                         sync_append_data->cleanup_syncs[i];
1500
1501                 if (!cleanup_sync)
1502                         continue;
1503
1504                 complete_sync(cleanup_sync);
1505         }
1506         if (sync_append_data->update_sync_data) {
1507                 /* Skip any invalid update syncs (should only be hit on error) */
1508                 if (sync_append_data->update_sync_data->sync_data) {
1509                         struct pvr_sync_data *sync_data =
1510                                 sync_append_data->update_sync_data->sync_data;
1511                         struct pvr_sync_timeline *timeline =
1512                                 sync_append_data->update_sync_data->timeline;
1513                         complete_sync(sync_data->kernel->fence_sync);
1514                         set_sync_value(timeline->kernel->fence_sync,
1515                                 sync_data->timeline_update_value);
1516                 }
1517         }
1518 }
1519
1520 /* ioctl and fops handling */
1521
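/* Illustrative userspace flow (hypothetical snippet; the ioctl numbers and
 * struct layouts are the ones declared in pvr_fd_sync_kernel.h):
 *
 *   int tl = open("/dev/" PVRSYNC_MODNAME, O_RDWR | O_CLOEXEC);
 *
 *   struct pvr_sync_alloc_fence_ioctl_data alloc_data = { 0 };
 *   ioctl(tl, PVR_SYNC_IOC_ALLOC_FENCE, &alloc_data);
 *
 *   struct pvr_sync_create_fence_ioctl_data create_data = { 0 };
 *   create_data.iAllocFenceFd = alloc_data.iFenceFd;
 *   ioctl(tl, PVR_SYNC_IOC_CREATE_FENCE, &create_data);
 *   // create_data.iFenceFd now refers to an Android sync fence
 */
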
1522 static int pvr_sync_open(struct inode *inode, struct file *file)
1523 {
1524         struct pvr_sync_timeline_wrapper *timeline_wrapper;
1525         struct pvr_sync_timeline *timeline;
1526         char task_comm[TASK_COMM_LEN];
1527         enum PVRSRV_ERROR error;
1528         int err = -ENOMEM;
1529
1530         get_task_comm(task_comm, current);
1531
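        /* sync_timeline_create() allocates the full wrapper size passed in,
         * so driver-private data can live alongside the base sync_timeline. */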
1532         timeline_wrapper = (struct pvr_sync_timeline_wrapper *)
1533                 sync_timeline_create(&pvr_sync_timeline_ops,
1534                         sizeof(struct pvr_sync_timeline_wrapper), task_comm);
1535         if (!timeline_wrapper) {
1536                 pr_err("pvr_sync: %s: sync_timeline_create failed", __func__);
1537                 goto err_out;
1538         }
1539
1540         timeline = kmalloc(sizeof(struct pvr_sync_timeline), GFP_KERNEL);
1541         if (!timeline) {
1542                 pr_err("pvr_sync: %s: Out of memory", __func__);
1543                 goto err_free_timeline_wrapper;
1544         }
1545
1546         timeline->kernel = kzalloc(sizeof(struct pvr_sync_kernel_pair),
1547                                    GFP_KERNEL);
1548         if (!timeline->kernel) {
1549                 pr_err("pvr_sync: %s: Out of memory", __func__);
1550                 goto err_free_timeline;
1551         }
1552
1553         OSAcquireBridgeLock();
1554         error = sync_pool_get(&timeline->kernel->fence_sync,
1555                               task_comm, SYNC_TL_TYPE);
1556         OSReleaseBridgeLock();
1557
1558         if (error != PVRSRV_OK) {
1559                 pr_err("pvr_sync: %s: Failed to allocate sync prim (%s)",
1560                         __func__, PVRSRVGetErrorStringKM(error));
1561                 goto err_free_timeline_kernel;
1562         }
1563
1564         timeline_wrapper->timeline = timeline;
1565
1566         timeline->obj = &timeline_wrapper->obj;
1567         timeline->fencing_enabled = true;
1568         kref_init(&timeline->kref);
1569
1570         mutex_lock(&timeline_list_mutex);
1571         list_add_tail(&timeline->list, &timeline_list);
1572         mutex_unlock(&timeline_list_mutex);
1573
1574         DPF("%s: # %s", __func__, debug_info_timeline(timeline));
1575
1576         file->private_data = timeline_wrapper;
1577         err = 0;
1578 err_out:
1579         return err;
1580
1581 err_free_timeline_kernel:
1582         kfree(timeline->kernel);
1583 err_free_timeline:
1584         kfree(timeline);
1585
1586         /* Use a NULL timeline to detect this partially-setup timeline in the
1587          * timeline release function (called by sync_timeline_destroy) and
1588          * handle it appropriately.
1589          */
1590         timeline_wrapper->timeline = NULL;
1591 err_free_timeline_wrapper:
1592         sync_timeline_destroy(&timeline_wrapper->obj);
1593         goto err_out;
1594 }
1595
1596 static int pvr_sync_close(struct inode *inode, struct file *file)
1597 {
1598         struct sync_timeline *obj = file->private_data;
1599
1600         if (is_pvr_timeline(obj)) {
1601                 DPF("%s: # %s", __func__,
1602                     debug_info_timeline(get_timeline(obj)));
1603         }
1604
1605         sync_timeline_destroy(obj);
1606         return 0;
1607 }
1608
1609 static int pvr_sync_alloc_release(struct inode *inode, struct file *file)
1610 {
1611         struct pvr_sync_alloc_data *alloc_sync_data = file->private_data;
1612
1613         /* Drop alloc sync's reference to the raw timeline structure. We need
1614          * to hold the timeline list lock here too, so we don't race with
1615          * pvr_sync_update_all_timelines().
1616          */
1617         if (kref_put_mutex(&alloc_sync_data->timeline->kref,
1618                            pvr_sync_destroy_timeline_locked,
1619                            &timeline_list_mutex)) {
1620                 mutex_unlock(&timeline_list_mutex);
1621         }
1622
1623         /* ->sync_data is still set here only if the fd was never used */
1624         if (alloc_sync_data->sync_data) {
1625                 kref_put(&alloc_sync_data->sync_data->kref,
1626                          pvr_sync_free_sync_data);
1627         }
1628
1629         kfree(alloc_sync_data);
1630         return 0;
1631 }
1632
1633 static const struct file_operations pvr_alloc_sync_fops = {
1634         .release = pvr_sync_alloc_release,
1635 };
1636
1637 static struct pvr_sync_alloc_data *pvr_sync_alloc_fence_fdget(int fd)
1638 {
1639         struct file *file = fget(fd);
1640
1641         if (!file)
1642                 return NULL;
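        /* Verify the fd really names one of our alloc-sync files by comparing
         * its file_operations pointer. */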
1643         if (file->f_op != &pvr_alloc_sync_fops)
1644                 goto err;
1645         return file->private_data;
1646 err:
1647         fput(file);
1648         return NULL;
1649 }
1650
1651 static long pvr_sync_ioctl_create_fence(struct pvr_sync_timeline *timeline,
1652         void __user *user_data)
1653 {
1654         struct pvr_sync_create_fence_ioctl_data data;
1655         struct pvr_sync_alloc_data *alloc_sync_data;
1656         int err = -EFAULT, fd = get_unused_fd_flags(0);
1657         struct pvr_sync_data *sync_data;
1658         struct pvr_sync_pt *pvr_pt;
1659         struct sync_fence *fence;
1660
1661         if (fd < 0) {
1662                 pr_err("pvr_sync: %s: Failed to find unused fd (%d)",
1663                        __func__, fd);
1664                 goto err_out;
1665         }
1666
1667         if (!access_ok(VERIFY_READ, user_data, sizeof(data)))
1668                 goto err_put_fd;
1669
1670         if (copy_from_user(&data, user_data, sizeof(data)))
1671                 goto err_put_fd;
1672
1673         alloc_sync_data = pvr_sync_alloc_fence_fdget(data.iAllocFenceFd);
1674         if (!alloc_sync_data) {
1675                 pr_err("pvr_sync: %s: Invalid alloc sync fd (%d)\n",
1676                         __func__, data.iAllocFenceFd);
1677                 goto err_put_fd;
1678         }
1679
1680         if (alloc_sync_data->timeline != timeline) {
1681                 pr_err("pvr_sync: %s: Trying to create sync from alloc of timeline %p in timeline %p\n",
1682                         __func__, alloc_sync_data->timeline, timeline);
1683                 fput(alloc_sync_data->file);
1684                 goto err_put_fd;
1685         }
1686
1687         /* Take ownership of the sync_data */
1688         sync_data = alloc_sync_data->sync_data;
1689         alloc_sync_data->sync_data = NULL;
1690
1691         pvr_pt = (struct pvr_sync_pt *)
1692                 sync_pt_create(timeline->obj, sizeof(struct pvr_sync_pt));
1693
1694         fput(alloc_sync_data->file);
1695
1696         if (!pvr_pt) {
1697                 pr_err("pvr_sync: %s: Failed to create sync pt", __func__);
1698                 kref_put(&sync_data->kref, pvr_sync_free_sync_data);
1699                 err = -ENOMEM;
1700                 goto err_put_fd;
1701         }
1702
1703         /* Point owns the sync data now. Let sync_pt_free() deal with it. */
1704         pvr_pt->sync_data = sync_data;
1705
1706         data.szName[sizeof(data.szName) - 1] = '\0';
1707
1708         DPF("%s: %d('%s') # %s", __func__,
1709             fd, data.szName, debug_info_timeline(timeline));
1710
1711         fence = sync_fence_create(data.szName, &pvr_pt->pt);
1712         if (!fence) {
1713                 pr_err("pvr_sync: %s: Failed to create a fence (%d)",
1714                        __func__, fd);
1715                 sync_pt_free(&pvr_pt->pt);
1716                 err = -ENOMEM;
1717                 goto err_put_fd;
1718         }
1719
1720         data.iFenceFd = fd;
1721
1722         if (!access_ok(VERIFY_WRITE, user_data, sizeof(data)))
1723                 goto err_put_fence;
1724
1725         if (copy_to_user(user_data, &data, sizeof(data)))
1726                 goto err_put_fence;
1727
1728         sync_fence_install(fence, fd);
1729         err = 0;
1730 err_out:
1731         return err;
1732
1733 err_put_fence:
1734         sync_fence_put(fence);
1735 err_put_fd:
1736         put_unused_fd(fd);
1737         goto err_out;
1738 }
1739
1740 static long pvr_sync_ioctl_alloc_fence(struct pvr_sync_timeline *timeline,
1741         void __user *user_data)
1742 {
1743         struct pvr_sync_alloc_fence_ioctl_data data;
1744         struct pvr_sync_alloc_data *alloc_sync_data;
1745         int err = -EFAULT, fd = get_unused_fd_flags(0);
1746         struct pvr_sync_data *sync_data;
1747         struct file *file;
1748
1749         if (fd < 0) {
1750                 pr_err("pvr_sync: %s: Failed to find unused fd (%d)",
1751                        __func__, fd);
1752                 goto err_out;
1753         }
1754
1755         /* Nothing is read from the user buffer by this ioctl; the payload
1756          * is write-only from the kernel's point of view, so checking
1757          * writability is sufficient. */
1758         if (!access_ok(VERIFY_WRITE, user_data, sizeof(data)))
1759                 goto err_put_fd;
1760
1761         alloc_sync_data =
1762                 kzalloc(sizeof(struct pvr_sync_alloc_data), GFP_KERNEL);
1763         if (!alloc_sync_data) {
1764                 err = -ENOMEM;
1765                 pr_err("pvr_sync: %s: Failed to alloc sync data\n", __func__);
1766                 goto err_put_fd;
1767         }
1768
1769         sync_data = pvr_sync_create_sync_data(timeline->obj);
1770         if (!sync_data) {
1771                 err = -ENOMEM;
1772                 pr_err("pvr_sync: %s: Failed to create sync data\n", __func__);
1773                 goto err_free_alloc_data;
1774         }
1775
1776         file = anon_inode_getfile("pvr_sync_alloc", &pvr_alloc_sync_fops,
1777                 alloc_sync_data, 0);
1778         if (IS_ERR(file)) {
1779                 err = PTR_ERR(file);
1780                 pr_err("pvr_sync: %s: Failed to create alloc inode\n",
1781                         __func__);
1782                 goto err_free_data;
1783         }
1784
1785         alloc_sync_data->file = file;
1786         alloc_sync_data->sync_data = sync_data;
1787
1788         /* We pass the raw timeline pointer through to the alloc sync, but
1789          * to make sure the timeline data doesn't go away if the timeline
1790          * is destroyed, we increment the timeline reference count.
1791          */
1792         alloc_sync_data->timeline = timeline;
1793         kref_get(&timeline->kref);
1794
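        /* Report the timeline as idle only if every sync issued on it has
         * signalled and fencing has not been re-enabled since the last kick. */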
1795         data.bTimelineIdle = is_sync_met(timeline->kernel->fence_sync) &&
1796                 !timeline->fencing_enabled;
1797
1798         data.iFenceFd = fd;
1799
1800         if (!access_ok(VERIFY_WRITE, user_data, sizeof(data)))
1801                 goto err_timeline_kref_put;
1802
1803         if (copy_to_user(user_data, &data, sizeof(data)))
1804                 goto err_timeline_kref_put;
1805
1806         fd_install(fd, file);
1807         err = 0;
1808
1809 err_out:
1810         return err;
1811
1812 err_timeline_kref_put:
1813         if (kref_put_mutex(&timeline->kref,
1814                            pvr_sync_destroy_timeline_locked,
1815                            &timeline_list_mutex)) {
1816                 mutex_unlock(&timeline_list_mutex);
1817         }
1818 err_free_data:
1819         kref_put(&sync_data->kref, pvr_sync_free_sync_data);
1820 err_free_alloc_data:
1821         kfree(alloc_sync_data);
1822 err_put_fd:
1823         put_unused_fd(fd);
1824         goto err_out;
1825 }
1826
1827 static long pvr_sync_ioctl_enable_fencing(struct pvr_sync_timeline *timeline,
1828         void __user *user_data)
1829 {
1830         struct pvr_sync_enable_fencing_ioctl_data data;
1831         int err = -EFAULT;
1832
1833         if (!access_ok(VERIFY_READ, user_data, sizeof(data)))
1834                 goto err_out;
1835
1836         if (copy_from_user(&data, user_data, sizeof(data)))
1837                 goto err_out;
1838
1839         timeline->fencing_enabled = data.bFencingEnabled;
1840         err = 0;
1841 err_out:
1842         return err;
1843 }
1844
1845 static long pvr_sync_ioctl_rename(struct pvr_sync_timeline *timeline,
1846         void __user *user_data)
1847 {
1848         int err = 0;
1849         struct pvr_sync_rename_ioctl_data data;
1850
1851         if (!access_ok(VERIFY_READ, user_data, sizeof(data))) {
1852                 err = -EFAULT;
1853                 goto err;
1854         }
1855
1856         if (copy_from_user(&data, user_data, sizeof(data))) {
1857                 err = -EFAULT;
1858                 goto err;
1859         }
1860
1861         data.szName[sizeof(data.szName) - 1] = '\0';
1862         strlcpy(timeline->obj->name, data.szName, sizeof(timeline->obj->name));
1863
1864 err:
1865         return err;
1866 }
1867
1868 #ifndef CONFIG_SW_SYNC_USER
1869
1870 static long pvr_sync_ioctl_force_sw_only(struct pvr_sync_timeline *timeline,
1871         void **private_data)
1872 {
1873         struct sw_sync_timeline *sw_sync_timeline;
1874
1875         /* We can only convert an empty GPU timeline */
1876         if (timeline->kernel->fence_sync->next_value)
1877                 return -EFAULT;
1878
1879         /* Create a sw_sync timeline with the old GPU timeline's name */
1880         sw_sync_timeline = sw_sync_timeline_create(timeline->obj->name);
1881         if (!sw_sync_timeline)
1882                 return -ENOMEM;
1883
1884         /* Destroy the old GPU timeline and update the struct file */
1885         DPF("%s: # %s", __func__, debug_info_timeline(timeline));
1886
1887         sync_timeline_destroy(timeline->obj);
1888         *private_data = sw_sync_timeline;
1889         return 0;
1890 }
1891
1892 static long pvr_sync_ioctl_sw_create_fence(struct sw_sync_timeline *timeline,
1893         void __user *user_data)
1894 {
1895         struct sw_sync_create_fence_data data;
1896         struct sync_fence *fence;
1897         int fd = get_unused_fd_flags(0);
1898         struct sync_pt *sync_pt;
1899         int err = -EFAULT;
1900
1901         if (fd < 0) {
1902                 pr_err("pvr_sync: %s: Failed to find unused fd (%d)",
1903                        __func__, fd);
1904                 goto err_out;
1905         }
1906
1907         if (copy_from_user(&data, user_data, sizeof(data)))
1908                 goto err_put_fd;
1909
1910         sync_pt = sw_sync_pt_create(timeline, data.value);
1911         if (!sync_pt) {
1912                 pr_err("pvr_sync: %s: Failed to create a sync point (%d)",
1913                        __func__, fd);
1914                 err = -ENOMEM;
1915                 goto err_put_fd;
1916         }
1917
1918         data.name[sizeof(data.name) - 1] = '\0';
1919         fence = sync_fence_create(data.name, sync_pt);
1920         if (!fence) {
1921                 pr_err("pvr_sync: %s: Failed to create a fence (%d)",
1922                        __func__, fd);
1923                 sync_pt_free(sync_pt);
1924                 err = -ENOMEM;
1925                 goto err_put_fd;
1926         }
1927
1928         data.fence = fd;
1929
1930         if (copy_to_user(user_data, &data, sizeof(data)))
1931                 goto err_put_fence;
1932
1933         sync_fence_install(fence, fd);
1934         err = 0;
1935 err_out:
1936         return err;
1937 err_put_fence:
1938         sync_fence_put(fence);
1939 err_put_fd:
1940         put_unused_fd(fd);
1941         goto err_out;
1942 }
1943
1944 static long pvr_sync_ioctl_sw_inc(struct sw_sync_timeline *timeline,
1945         void __user *user_data)
1946 {
1947         u32 value;
1948
1949         if (copy_from_user(&value, user_data, sizeof(value)))
1950                 return -EFAULT;
1951
1952         sw_sync_timeline_inc(timeline, value);
1953         return 0;
1954 }
1955
1956 #endif /* !CONFIG_SW_SYNC_USER */
1957
1958 static long
1959 pvr_sync_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1960 {
1961         struct sync_timeline *obj = file->private_data;
1962         void __user *user_data = (void __user *)arg;
1963         long err = -ENOTTY;
1964
1965         if (is_pvr_timeline(obj)) {
1966                 struct pvr_sync_timeline *pvr = get_timeline(obj);
1967
1968                 switch (cmd) {
1969                 case PVR_SYNC_IOC_CREATE_FENCE:
1970                         err = pvr_sync_ioctl_create_fence(pvr, user_data);
1971                         break;
1972                 case PVR_SYNC_IOC_ENABLE_FENCING:
1973                         err = pvr_sync_ioctl_enable_fencing(pvr, user_data);
1974                         break;
1975                 case PVR_SYNC_IOC_ALLOC_FENCE:
1976                         err = pvr_sync_ioctl_alloc_fence(pvr, user_data);
1977                         break;
1978                 case PVR_SYNC_IOC_RENAME:
1979                         err = pvr_sync_ioctl_rename(pvr, user_data);
1980                         break;
1981 #ifndef CONFIG_SW_SYNC_USER
1982                 case PVR_SYNC_IOC_FORCE_SW_ONLY:
1983                         err = pvr_sync_ioctl_force_sw_only(pvr,
1984                                 &file->private_data);
1985                         break;
1986 #endif /* !CONFIG_SW_SYNC_USER */
1987                 default:
1988                         break;
1989                 }
1990         } else {
1991 #ifndef CONFIG_SW_SYNC_USER
1992                 struct sw_sync_timeline *sw = file->private_data;
1993
1994                 switch (cmd) {
1995                 case SW_SYNC_IOC_CREATE_FENCE:
1996                         err = pvr_sync_ioctl_sw_create_fence(sw, user_data);
1997                         break;
1998                 case SW_SYNC_IOC_INC:
1999                         err = pvr_sync_ioctl_sw_inc(sw, user_data);
2000                         break;
2001                 default:
2002                         break;
2003                 }
2004 #endif /* !CONFIG_SW_SYNC_USER */
2005         }
2006
2007         return err;
2008 }
2009
2010 static void
2011 pvr_sync_check_status_work_queue_function(struct work_struct *data)
2012 {
2013         /* A completed SW operation may unblock the GPU */
2014         PVRSRVCheckStatus(NULL);
2015 }
2016
2017 /* Returns true if the freelist still has entries, false once it is empty */
2018 static bool
2019 pvr_sync_clean_freelist(void)
2020 {
2021         struct pvr_sync_kernel_pair *kernel, *k;
2022         struct pvr_sync_fence *sync_fence, *f;
2023         LIST_HEAD(unlocked_free_list);
2024         unsigned long flags;
2025         bool freelist_empty;
2026
2027         /* We can't call PVRSRVServerSyncFreeKM directly in this loop because
2028          * that will take the mmap mutex. We can't take mutexes while we have
2029          * this list locked with a spinlock. So move all the items we want to
2030          * free to another, local list (no locking required) and process it
2031          * in a second loop. */
2032
2033         spin_lock_irqsave(&sync_prim_free_list_spinlock, flags);
2034         list_for_each_entry_safe(kernel, k, &sync_prim_free_list, list) {
2035                 /* Check if this sync is not used anymore. */
2036                 if (!is_sync_met(kernel->fence_sync) ||
2037                     (kernel->cleanup_sync &&
2038                      !is_sync_met(kernel->cleanup_sync))) {
2039                         continue;
2040                 }
2041
2042                 /* Remove the entry from the free list. */
2043                 list_move_tail(&kernel->list, &unlocked_free_list);
2044         }
2045
2046         /* Wait and loop if there are still syncs on the free list (i.e.
2047          * they are still in use by the HW) */
2048         freelist_empty = list_empty(&sync_prim_free_list);
2049
2050         spin_unlock_irqrestore(&sync_prim_free_list_spinlock, flags);
2051
2052         OSAcquireBridgeLock();
2053
2054         list_for_each_entry_safe(kernel, k, &unlocked_free_list, list) {
2055                 list_del(&kernel->list);
2056
2057                 sync_pool_put(kernel->fence_sync);
2058                 if (kernel->cleanup_sync)
2059                         sync_pool_put(kernel->cleanup_sync);
2060                 kfree(kernel);
2061         }
2062
2063         OSReleaseBridgeLock();
2064
2065         /* sync_fence_put() must be called from process/WQ context
2066          * because it uses fput(), which is not allowed to be called
2067          * from interrupt context in kernels <3.6.
2068          */
2069         INIT_LIST_HEAD(&unlocked_free_list);
2070
2071         spin_lock_irqsave(&sync_fence_put_list_spinlock, flags);
2072         list_for_each_entry_safe(sync_fence, f, &sync_fence_put_list, list) {
2073                 list_move_tail(&sync_fence->list, &unlocked_free_list);
2074         }
2075         spin_unlock_irqrestore(&sync_fence_put_list_spinlock, flags);
2076
2077         list_for_each_entry_safe(sync_fence, f, &unlocked_free_list, list) {
2078                 list_del(&sync_fence->list);
2079                 sync_fence_put(sync_fence->fence);
2080                 kfree(sync_fence);
2081         }
2082
2083         return !freelist_empty;
2084 }
2085
2086 static void
2087 pvr_sync_defer_free_work_queue_function(struct work_struct *data)
2088 {
2089         enum PVRSRV_ERROR error = PVRSRV_OK;
2090         void *event_object;
2091
2092         error = OSEventObjectOpen(pvr_sync_data.event_object_handle,
2093                 &event_object);
2094         if (error != PVRSRV_OK) {
2095                 pr_err("pvr_sync: %s: Error opening event object (%s)\n",
2096                         __func__, PVRSRVGetErrorStringKM(error));
2097                 return;
2098
2099         }
2100
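        /* Keep sweeping the freelist, sleeping on the global event object
         * between passes, until every deferred sync has been retired. */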
2101         while (pvr_sync_clean_freelist()) {
2102
2103                 error = OSEventObjectWait(event_object);
2104
2105                 switch (error) {
2106
2107                 case PVRSRV_OK:
2108                 case PVRSRV_ERROR_TIMEOUT:
2109                         /* Timeout is normal behaviour */
2110                         continue;
2111                 default:
2112                         pr_err("pvr_sync: %s: Error waiting for event object (%s)\n",
2113                                 __func__, PVRSRVGetErrorStringKM(error));
2114                         break;
2115                 }
2116         }
2117         error = OSEventObjectClose(event_object);
2118         if (error != PVRSRV_OK) {
2119                 pr_err("pvr_sync: %s: Error closing event object (%s)\n",
2120                         __func__, PVRSRVGetErrorStringKM(error));
2121         }
2122 }
2123
2124 static const struct file_operations pvr_sync_fops = {
2125         .owner          = THIS_MODULE,
2126         .open           = pvr_sync_open,
2127         .release        = pvr_sync_close,
2128         .unlocked_ioctl = pvr_sync_ioctl,
2129         .compat_ioctl   = pvr_sync_ioctl,
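        /* The native handler also serves 32-bit compat callers, presumably
         * because the ioctl payloads use only fixed-size fields. */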
2130 };
2131
2132 static struct miscdevice pvr_sync_device = {
2133         .minor          = MISC_DYNAMIC_MINOR,
2134         .name           = PVRSYNC_MODNAME,
2135         .fops           = &pvr_sync_fops,
2136 };
2137
2138 static
2139 void pvr_sync_update_all_timelines(void *command_complete_handle)
2140 {
2141         struct pvr_sync_timeline *timeline, *n;
2142
2143         mutex_lock(&timeline_list_mutex);
2144
2145         list_for_each_entry(timeline, &timeline_list, list) {
2146                 /* If a timeline is destroyed via pvr_sync_release_timeline()
2147                  * in parallel with a call to pvr_sync_update_all_timelines(),
2148                  * the timeline_list_mutex will block destruction of the
2149                  * 'timeline' pointer. Use kref_get_unless_zero() to detect
2150                  * and handle this race. Skip the timeline if it's being
2151                  * destroyed, blocked only on the timeline_list_mutex.
2152                  */
2153                 timeline->valid =
2154                         kref_get_unless_zero(&timeline->kref);
2155         }
2156
2157         list_for_each_entry_safe(timeline, n, &timeline_list, list) {
2158                 /* We know timeline is valid at this point because we're
2159                  * holding the list lock (so pvr_sync_destroy_timeline() has
2160                  * to wait).
2161                  */
2162                 void *obj = timeline->obj;
2163
2164                 /* If we're racing with pvr_sync_release_timeline(), ignore */
2165                 if (!timeline->valid)
2166                         continue;
2167
2168                 /* If syncs have signaled on the GPU, echo this in pvr_sync.
2169                  *
2170                  * At this point we know the timeline is valid, but obj might
2171                  * have raced and been set to NULL. It's only important that
2172                  * we use NULL / non-NULL consistently with the if() and call
2173                  * to sync_timeline_signal() -- the timeline->obj can't be
2174                  * freed (pvr_sync_release_timeline() will be stuck waiting
2175                  * for the timeline_list_mutex) but it might have been made
2176                  * invalid by the base sync driver, in which case this call
2177                  * will bounce harmlessly.
2178                  */
2179                 if (obj)
2180                         sync_timeline_signal(obj);
2181
2182                 /* We're already holding the timeline_list_mutex */
2183                 kref_put(&timeline->kref, pvr_sync_destroy_timeline_locked);
2184         }
2185
2186         mutex_unlock(&timeline_list_mutex);
2187 }
2188
2189 enum PVRSRV_ERROR pvr_sync_init(void)
2190 {
2191         enum PVRSRV_ERROR error;
2192         int err;
2193
2194         DPF("%s", __func__);
2195
2196         atomic_set(&pvr_sync_data.sync_id, 0);
2197
2198         error = PVRSRVAcquireDeviceDataKM(0, PVRSRV_DEVICE_TYPE_RGX,
2199                                           &pvr_sync_data.device_cookie);
2200         if (error != PVRSRV_OK) {
2201                 pr_err("pvr_sync: %s: Failed to initialise services (%s)",
2202                        __func__, PVRSRVGetErrorStringKM(error));
2203                 goto err_out;
2204         }
2205
2206         error = AcquireGlobalEventObjectServer(
2207                 &pvr_sync_data.event_object_handle);
2208         if (error != PVRSRV_OK) {
2209                 pr_err("pvr_sync: %s: Failed to acquire global event object (%s)",
2210                         __func__, PVRSRVGetErrorStringKM(error));
2211                 goto err_release_device_data;
2212         }
2213
2214         OSAcquireBridgeLock();
2215
2216         error = SyncPrimContextCreate(0,
2217                                       pvr_sync_data.device_cookie,
2218                                       &pvr_sync_data.sync_prim_context);
2219         if (error != PVRSRV_OK) {
2220                 pr_err("pvr_sync: %s: Failed to create sync prim context (%s)",
2221                        __func__, PVRSRVGetErrorStringKM(error));
2222                 OSReleaseBridgeLock();
2223                 goto err_release_event_object;
2224         }
2225
2226         OSReleaseBridgeLock();
2227
2228         pvr_sync_data.defer_free_wq =
2229                 create_freezable_workqueue("pvr_sync_defer_free_workqueue");
2230         if (!pvr_sync_data.defer_free_wq) {
2231                 pr_err("pvr_sync: %s: Failed to create pvr_sync defer_free workqueue", __func__);
2232                 error = PVRSRV_ERROR_OUT_OF_MEMORY;
2233                 goto err_free_sync_context;
2234         }
2235
2236         INIT_WORK(&pvr_sync_data.defer_free_work,
2237                 pvr_sync_defer_free_work_queue_function);
2238
2239         pvr_sync_data.check_status_wq =
2240                 create_freezable_workqueue("pvr_sync_check_status_workqueue");
2241         if (!pvr_sync_data.check_status_wq) {
2242                 pr_err("pvr_sync: %s: Failed to create pvr_sync check_status workqueue", __func__);
2243                 error = PVRSRV_ERROR_OUT_OF_MEMORY;
2244                 goto err_destroy_defer_free_wq;
2245         }
2246
2247         INIT_WORK(&pvr_sync_data.check_status_work,
2248                 pvr_sync_check_status_work_queue_function);
2249         error = PVRSRVRegisterCmdCompleteNotify(
2250                         &pvr_sync_data.command_complete_handle,
2251                         &pvr_sync_update_all_timelines,
2252                         &pvr_sync_data.device_cookie);
2253         if (error != PVRSRV_OK) {
2254                 pr_err("pvr_sync: %s: Failed to register MISR notification (%s)",
2255                        __func__, PVRSRVGetErrorStringKM(error));
2256                 goto err_destroy_status_wq;
2257         }
2258
2259         error = PVRSRVRegisterDbgRequestNotify(
2260                         &pvr_sync_data.debug_notify_handle,
2261                         pvr_sync_debug_request,
2262                         DEBUG_REQUEST_ANDROIDSYNC,
2263                         NULL);
2264         if (error != PVRSRV_OK) {
2265                 pr_err("pvr_sync: %s: Failed to register debug notifier (%s)",
2266                         __func__, PVRSRVGetErrorStringKM(error));
2267                 goto err_unregister_cmd_complete;
2268         }
2269
2270         err = misc_register(&pvr_sync_device);
2271         if (err) {
2272                 pr_err("pvr_sync: %s: Failed to register pvr_sync device (%d)",
2273                        __func__, err);
2274                 error = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
2275                 goto err_unregister_dbg;
2276         }
2277
2278         error = PVRSRV_OK;
2279         return error;
2280
2281 err_unregister_dbg:
2282         PVRSRVUnregisterDbgRequestNotify(pvr_sync_data.debug_notify_handle);
2283 err_unregister_cmd_complete:
2284         PVRSRVUnregisterCmdCompleteNotify(
2285                 pvr_sync_data.command_complete_handle);
2286 err_destroy_status_wq:
2287         destroy_workqueue(pvr_sync_data.check_status_wq);
2288 err_destroy_defer_free_wq:
2289         destroy_workqueue(pvr_sync_data.defer_free_wq);
2290 err_free_sync_context:
2291         OSAcquireBridgeLock();
2292         SyncPrimContextDestroy(pvr_sync_data.sync_prim_context);
2293         OSReleaseBridgeLock();
2294 err_release_event_object:
2295         ReleaseGlobalEventObjectServer(pvr_sync_data.event_object_handle);
2296 err_release_device_data:
2297         PVRSRVReleaseDeviceDataKM(pvr_sync_data.device_cookie);
2298 err_out:
2299
2300         return error;
2301 }
2302
2303 void pvr_sync_deinit(void)
2304 {
2305         DPF("%s", __func__);
2306
2307         misc_deregister(&pvr_sync_device);
2308
2309         PVRSRVUnregisterDbgRequestNotify(pvr_sync_data.debug_notify_handle);
2310
2311         PVRSRVUnregisterCmdCompleteNotify(
2312                 pvr_sync_data.command_complete_handle);
2313
2314         /* These calls drain the workqueues, so we guarantee that all
2315          * deferred syncs are freed before returning. */
2316         destroy_workqueue(pvr_sync_data.defer_free_wq);
2317         destroy_workqueue(pvr_sync_data.check_status_wq);
2318
2319         OSAcquireBridgeLock();
2320
2321         sync_pool_clear();
2322
2323         SyncPrimContextDestroy(pvr_sync_data.sync_prim_context);
2324
2325         OSReleaseBridgeLock();
2326
2327         ReleaseGlobalEventObjectServer(pvr_sync_data.event_object_handle);
2328
2329         PVRSRVReleaseDeviceDataKM(pvr_sync_data.device_cookie);
2330 }