ANDROID: goldfish: Add goldfish sync driver
[firefly-linux-kernel-4.4.55.git] / drivers / staging / goldfish / goldfish_sync.c
1 /*
2  * Copyright (C) 2016 Google, Inc.
3  *
4  * This software is licensed under the terms of the GNU General Public
5  * License version 2, as published by the Free Software Foundation, and
6  * may be copied, distributed, and modified under those terms.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  */
14
15 #include <linux/fdtable.h>
16 #include <linux/file.h>
17 #include <linux/init.h>
18 #include <linux/miscdevice.h>
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/platform_device.h>
22
23 #include <linux/interrupt.h>
24 #include <linux/kref.h>
25 #include <linux/spinlock.h>
26 #include <linux/types.h>
27
28 #include <linux/io.h>
29 #include <linux/mm.h>
30 #include <linux/acpi.h>
31
32 #include <linux/string.h>
33 #include <linux/syscalls.h>
34
35 #include "sw_sync.h"
36 #include "sync.h"
37
/* Logging helpers.
 * Fix: no trailing semicolon in the expansions -- every call site already
 * supplies its own ';', so the old definitions produced empty extra
 * statements ("...;;") and would miscompile in constructs like
 * "if (x) ERR(...); else ...". */
#define ERR(...) printk(KERN_ERR __VA_ARGS__)

#define INFO(...) printk(KERN_INFO __VA_ARGS__)

#define DPRINT(...) pr_debug(__VA_ARGS__)

#define DTRACE() DPRINT("%s: enter", __func__)
45
/* The Goldfish sync driver is designed to provide an interface
47  * between the underlying host's sync device and the kernel's
48  * sw_sync.
49  * The purpose of the device/driver is to enable lightweight
50  * creation and signaling of timelines and fences
51  * in order to synchronize the guest with host-side graphics events.
52  *
53  * Each time the interrupt trips, the driver
54  * may perform a sw_sync operation.
55  */
56
57 /* The operations are: */
58
59 /* Ready signal - used to mark when irq should lower */
60 #define CMD_SYNC_READY            0
61
62 /* Create a new timeline. writes timeline handle */
63 #define CMD_CREATE_SYNC_TIMELINE  1
64
65 /* Create a fence object. reads timeline handle and time argument.
66  * Writes fence fd to the SYNC_REG_HANDLE register. */
67 #define CMD_CREATE_SYNC_FENCE     2
68
69 /* Increments timeline. reads timeline handle and time argument */
70 #define CMD_SYNC_TIMELINE_INC     3
71
72 /* Destroys a timeline. reads timeline handle */
73 #define CMD_DESTROY_SYNC_TIMELINE 4
74
75 /* Starts a wait on the host with
76  * the given glsync object and sync thread handle. */
77 #define CMD_TRIGGER_HOST_WAIT     5
78
79 /* The register layout is: */
80
81 #define SYNC_REG_BATCH_COMMAND                0x00 /* host->guest batch commands */
82 #define SYNC_REG_BATCH_GUESTCOMMAND           0x04 /* guest->host batch commands */
83 #define SYNC_REG_BATCH_COMMAND_ADDR           0x08 /* communicate physical address of host->guest batch commands */
84 #define SYNC_REG_BATCH_COMMAND_ADDR_HIGH      0x0c /* 64-bit part */
85 #define SYNC_REG_BATCH_GUESTCOMMAND_ADDR      0x10 /* communicate physical address of guest->host commands */
86 #define SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH 0x14 /* 64-bit part */
87 #define SYNC_REG_INIT                         0x18 /* signals that the device has been probed */
88
89 /* There is an ioctl associated with goldfish sync driver.
90  * Make it conflict with ioctls that are not likely to be used
91  * in the emulator.
92  *
93  * '@'  00-0F   linux/radeonfb.h        conflict!
94  * '@'  00-0F   drivers/video/aty/aty128fb.c    conflict!
95  */
96 #define GOLDFISH_SYNC_IOC_MAGIC '@'
97
98 #define GOLDFISH_SYNC_IOC_QUEUE_WORK    _IOWR(GOLDFISH_SYNC_IOC_MAGIC, 0, struct goldfish_sync_ioctl_info)
99
100 /* The above definitions (command codes, register layout, ioctl definitions)
101  * need to be in sync with the following files:
102  *
103  * Host-side (emulator):
104  * external/qemu/android/emulation/goldfish_sync.h
105  * external/qemu-android/hw/misc/goldfish_sync.c
106  *
107  * Guest-side (system image):
108  * device/generic/goldfish-opengl/system/egl/goldfish_sync.h
109  * device/generic/goldfish/ueventd.ranchu.rc
110  * platform/build/target/board/generic/sepolicy/file_contexts
111  */
/* One host->guest command. The host writes a record of this shape into
 * |batch_hostcmd|, and the IRQ handler copies it into the |to_do| queue.
 * The field layout is ABI with the host-side emulator (see the file list
 * above) -- do not reorder or resize fields. */
struct goldfish_sync_hostcmd {
        /* sorted for alignment */
        uint64_t handle;         /* guest timeline object pointer, carried as u64 */
        uint64_t hostcmd_handle; /* opaque host handle, echoed back in replies */
        uint32_t cmd;            /* one of the CMD_* codes above */
        uint32_t time_arg;       /* timeline value / increment argument */
};

/* One guest->host command, written into |batch_guestcmd|. Every field is
 * uint64_t so the layout is identical on 32- and 64-bit guests. */
struct goldfish_sync_guestcmd {
        uint64_t host_command; /* uint64_t for alignment */
        uint64_t glsync_handle;
        uint64_t thread_handle;
        uint64_t guest_timeline_handle;
};
126
/* Maximum number of host commands that can be buffered between runs of
 * the work item (see the rate discussion in goldfish_sync_work_item_fn). */
#define GOLDFISH_SYNC_MAX_CMDS 64

/* Per-device state. There is exactly one instance (|global_sync_state|). */
struct goldfish_sync_state {
        char __iomem *reg_base; /* mapped SYNC_REG_* register bank */
        int irq;

        /* Spinlock protects |to_do| / |to_do_end|. */
        spinlock_t lock;
        /* |mutex_lock| protects all concurrent access
         * to timelines for both kernel and user space. */
        struct mutex mutex_lock;

        /* Buffer holding commands issued from host. */
        struct goldfish_sync_hostcmd to_do[GOLDFISH_SYNC_MAX_CMDS];
        uint32_t to_do_end; /* number of valid entries in |to_do| */

        /* Addresses for the reading or writing
         * of individual commands. The host can directly write
         * to |batch_hostcmd| (and then this driver immediately
         * copies contents to |to_do|). This driver either replies
         * through |batch_hostcmd| or simply issues a
         * guest->host command through |batch_guestcmd|.
         */
        struct goldfish_sync_hostcmd *batch_hostcmd;
        struct goldfish_sync_guestcmd *batch_guestcmd;

        /* Used to give this struct itself to a work queue
         * function for executing actual sync commands. */
        struct work_struct work_item;
};

/* The single device instance; this driver supports one goldfish sync
 * device per system. */
static struct goldfish_sync_state global_sync_state[1];
159
/* Refcounted wrapper around a sw_sync timeline. See the long comment on
 * |kref| below for the lifetime rules. */
struct goldfish_sync_timeline_obj {
        struct sw_sync_timeline *sw_sync_tl;
        uint32_t current_time; /* last timeline value signaled from here */
        /* We need to be careful about when we deallocate
         * this |goldfish_sync_timeline_obj| struct.
         * In order to ensure proper cleanup, we need to
         * consider the triggered host-side wait that may
         * still be in flight when the guest close()'s a
         * goldfish_sync device's sync context fd (and
         * destroys the |sw_sync_tl| field above).
         * The host-side wait may raise IRQ
         * and tell the kernel to increment the timeline _after_
         * the |sw_sync_tl| has already been set to null.
         *
         * From observations on OpenGL apps and CTS tests, this
         * happens at some very low probability upon context
         * destruction or process close, but it does happen
         * and it needs to be handled properly. Otherwise,
         * if we clean up the surrounding |goldfish_sync_timeline_obj|
         * too early, any |handle| field of any host->guest command
         * might not even point to a null |sw_sync_tl| field,
         * but to garbage memory or even a reclaimed |sw_sync_tl|.
         * If we do not count such "pending waits" and kfree the object
         * immediately upon |goldfish_sync_timeline_destroy|,
         * we might get mysterious RCU stalls after running a long
         * time because the garbage memory that is being read
         * happens to be interpretable as a |spinlock_t| struct
         * that is currently in the locked state.
         *
         * To track when to free the |goldfish_sync_timeline_obj|
         * itself, we maintain a kref.
         * The kref essentially counts the timeline itself plus
         * the number of waits in flight. kref_init/kref_put
         * are issued on
         * |goldfish_sync_timeline_create|/|goldfish_sync_timeline_destroy|
         * and kref_get/kref_put are issued on
         * |goldfish_sync_fence_create|/|goldfish_sync_timeline_inc|.
         *
         * The timeline is destroyed after reference count
         * reaches zero, which would happen after
         * |goldfish_sync_timeline_destroy| and all pending
         * |goldfish_sync_timeline_inc|'s are fulfilled.
         *
         * NOTE (1): We assume that |fence_create| and
         * |timeline_inc| calls are 1:1, otherwise the kref scheme
         * will not work. This is a valid assumption as long
         * as the host-side virtual device implementation
         * does not insert any timeline increments
         * that we did not trigger from here.
         *
         * NOTE (2): The use of kref by itself requires no locks,
         * but this does not mean everything works without locks.
         * Related timeline operations do require a lock of some sort,
         * or at least are not proven to work without it.
         * In particular, we assume that all the operations
         * done on the |kref| field above are done in contexts where
         * |global_sync_state->mutex_lock| is held. Do not
         * remove that lock until everything is proven to work
         * without it!!! */
        struct kref kref;
};
221
222 /* We will call |delete_timeline_obj| when the last reference count
223  * of the kref is decremented. This deletes the sw_sync
224  * timeline object along with the wrapper itself. */
225 static void delete_timeline_obj(struct kref* kref) {
226         struct goldfish_sync_timeline_obj* obj =
227                 container_of(kref, struct goldfish_sync_timeline_obj, kref);
228
229         sync_timeline_destroy(&obj->sw_sync_tl->obj);
230         obj->sw_sync_tl = NULL;
231         kfree(obj);
232 }
233
/* Monotonically increasing counter backing gensym(). */
static uint64_t gensym_ctr;

/* Write a driver-unique name of the form "goldfish_sync:gensym:<n>"
 * into |dst|. Callers pass buffers large enough for the fixed prefix
 * plus a 20-digit decimal counter (256 bytes in this file). */
static void gensym(char *dst)
{
	sprintf(dst, "goldfish_sync:gensym:%llu",
		(unsigned long long)gensym_ctr++);
}
240
241 /* |goldfish_sync_timeline_create| assumes that |global_sync_state->mutex_lock|
242  * is held. */
243 static struct goldfish_sync_timeline_obj*
244 goldfish_sync_timeline_create(void)
245 {
246
247         char timeline_name[256];
248         struct sw_sync_timeline *res_sync_tl = NULL;
249         struct goldfish_sync_timeline_obj *res;
250
251         DTRACE();
252
253         gensym(timeline_name);
254
255         res_sync_tl = sw_sync_timeline_create(timeline_name);
256         if (!res_sync_tl) {
257                 ERR("Failed to create sw_sync timeline.");
258                 return NULL;
259         }
260
261         res = kzalloc(sizeof(struct goldfish_sync_timeline_obj), GFP_KERNEL);
262         res->sw_sync_tl = res_sync_tl;
263         res->current_time = 0;
264         kref_init(&res->kref);
265
266         DPRINT("new timeline_obj=0x%p", res);
267         return res;
268 }
269
/* |goldfish_sync_fence_create| assumes that |global_sync_state->mutex_lock|
 * is held.
 *
 * Creates a sync point at time |val| on |obj|'s timeline, wraps it in a
 * sync fence, and installs that fence into a freshly allocated fd.
 * On success, takes an extra reference on |obj| that is released by the
 * matching |goldfish_sync_timeline_inc| (see NOTE (1) in the
 * |goldfish_sync_timeline_obj| comment for the 1:1 pairing assumption).
 *
 * Returns the new fence fd, or -1 on any failure (no reference taken). */
static int
goldfish_sync_fence_create(struct goldfish_sync_timeline_obj *obj,
							uint32_t val)
{

	int fd;
	char fence_name[256];
	struct sync_pt *syncpt = NULL;
	struct sync_fence *sync_obj = NULL;
	struct sw_sync_timeline *tl;

	DTRACE();

	if (!obj) return -1;

	tl = obj->sw_sync_tl;

	syncpt = sw_sync_pt_create(tl, val);
	if (!syncpt) {
		ERR("could not create sync point! "
			"sync_timeline=0x%p val=%d",
			   tl, val);
		return -1;
	}

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		ERR("could not get unused fd for sync fence. "
			"errno=%d", fd);
		goto err_cleanup_pt;
	}

	gensym(fence_name);

	/* On success the fence owns |syncpt|, which is why |syncpt| is
	 * only freed manually on the error paths below. */
	sync_obj = sync_fence_create(fence_name, syncpt);
	if (!sync_obj) {
		ERR("could not create sync fence! "
			"sync_timeline=0x%p val=%d sync_pt=0x%p",
			   tl, val, syncpt);
		goto err_cleanup_fd_pt;
	}

	DPRINT("installing sync fence into fd %d sync_obj=0x%p", fd, sync_obj);
	sync_fence_install(sync_obj, fd);
	/* Count the soon-to-be in-flight wait against the timeline's
	 * lifetime; paired with kref_put in goldfish_sync_timeline_inc. */
	kref_get(&obj->kref);

	return fd;

err_cleanup_fd_pt:
	put_unused_fd(fd);
err_cleanup_pt:
	sync_pt_free(syncpt);
	return -1;
}
326
/* |goldfish_sync_timeline_inc| assumes that |global_sync_state->mutex_lock|
 * is held.
 *
 * Advances |obj|'s sw_sync timeline by |inc| and drops the reference
 * taken by the matching |goldfish_sync_fence_create|. */
static void
goldfish_sync_timeline_inc(struct goldfish_sync_timeline_obj *obj, uint32_t inc)
{
	DTRACE();
	/* Just give up if someone else nuked the timeline.
	 * Whoever it was won't care that it doesn't get signaled. */
	if (!obj) return;

	DPRINT("timeline_obj=0x%p", obj);
	sw_sync_timeline_inc(obj->sw_sync_tl, inc);
	DPRINT("incremented timeline. increment max_time");
	obj->current_time += inc;

	/* Here, we will end up deleting the timeline object if it
	 * turns out that this call was a pending increment after
	 * |goldfish_sync_timeline_destroy| was called.
	 * This must stay the last use of |obj|: the kref_put may free it. */
	kref_put(&obj->kref, delete_timeline_obj);
	DPRINT("done");
}
348
349 /* |goldfish_sync_timeline_destroy| assumes
350  * that |global_sync_state->mutex_lock| is held. */
351 static void
352 goldfish_sync_timeline_destroy(struct goldfish_sync_timeline_obj *obj)
353 {
354         DTRACE();
355         /* See description of |goldfish_sync_timeline_obj| for why we
356          * should not immediately destroy |obj| */
357         kref_put(&obj->kref, delete_timeline_obj);
358 }
359
360 static inline void
361 goldfish_sync_cmd_queue(struct goldfish_sync_state *sync_state,
362                                                 uint32_t cmd,
363                                                 uint64_t handle,
364                                                 uint32_t time_arg,
365                                                 uint64_t hostcmd_handle)
366 {
367         struct goldfish_sync_hostcmd *to_add;
368
369         DTRACE();
370
371         BUG_ON(sync_state->to_do_end == GOLDFISH_SYNC_MAX_CMDS);
372
373         to_add = &sync_state->to_do[sync_state->to_do_end];
374
375         to_add->cmd = cmd;
376         to_add->handle = handle;
377         to_add->time_arg = time_arg;
378         to_add->hostcmd_handle = hostcmd_handle;
379
380         sync_state->to_do_end += 1;
381 }
382
383 static inline void
384 goldfish_sync_hostcmd_reply(struct goldfish_sync_state *sync_state,
385                                                         uint32_t cmd,
386                                                         uint64_t handle,
387                                                         uint32_t time_arg,
388                                                         uint64_t hostcmd_handle)
389 {
390         unsigned long irq_flags;
391         struct goldfish_sync_hostcmd *batch_hostcmd =
392                 sync_state->batch_hostcmd;
393
394         DTRACE();
395
396         spin_lock_irqsave(&sync_state->lock, irq_flags);
397
398         batch_hostcmd->cmd = cmd;
399         batch_hostcmd->handle = handle;
400         batch_hostcmd->time_arg = time_arg;
401         batch_hostcmd->hostcmd_handle = hostcmd_handle;
402         writel(0, sync_state->reg_base + SYNC_REG_BATCH_COMMAND);
403
404         spin_unlock_irqrestore(&sync_state->lock, irq_flags);
405 }
406
407 static inline void
408 goldfish_sync_send_guestcmd(struct goldfish_sync_state *sync_state,
409                                                         uint32_t cmd,
410                                                         uint64_t glsync_handle,
411                                                         uint64_t thread_handle,
412                                                         uint64_t timeline_handle)
413 {
414         unsigned long irq_flags;
415         struct goldfish_sync_guestcmd *batch_guestcmd =
416                 sync_state->batch_guestcmd;
417
418         DTRACE();
419
420         spin_lock_irqsave(&sync_state->lock, irq_flags);
421
422         batch_guestcmd->host_command = (uint64_t)cmd;
423         batch_guestcmd->glsync_handle = (uint64_t)glsync_handle;
424         batch_guestcmd->thread_handle = (uint64_t)thread_handle;
425         batch_guestcmd->guest_timeline_handle = (uint64_t)timeline_handle;
426         writel(0, sync_state->reg_base + SYNC_REG_BATCH_GUESTCOMMAND);
427
428         spin_unlock_irqrestore(&sync_state->lock, irq_flags);
429 }
430
431 /* |goldfish_sync_interrupt| handles IRQ raises from the virtual device.
432  * In the context of OpenGL, this interrupt will fire whenever we need
433  * to signal a fence fd in the guest, with the command
434  * |CMD_SYNC_TIMELINE_INC|.
435  * However, because this function will be called in an interrupt context,
436  * it is necessary to do the actual work of signaling off of interrupt context.
437  * The shared work queue is used for this purpose. At the end when
438  * all pending commands are intercepted by the interrupt handler,
439  * we call |schedule_work|, which will later run the actual
440  * desired sync command in |goldfish_sync_work_item_fn|.
441  */
442 static irqreturn_t goldfish_sync_interrupt(int irq, void *dev_id)
443 {
444
445         struct goldfish_sync_state *sync_state = dev_id;
446
447         uint32_t nextcmd;
448         uint32_t command_r;
449         uint64_t handle_rw;
450         uint32_t time_r;
451         uint64_t hostcmd_handle_rw;
452
453         int count = 0;
454
455         DTRACE();
456
457         sync_state = dev_id;
458
459         spin_lock(&sync_state->lock);
460
461         for (;;) {
462
463                 readl(sync_state->reg_base + SYNC_REG_BATCH_COMMAND);
464                 nextcmd = sync_state->batch_hostcmd->cmd;
465
466                 if (nextcmd == 0)
467                         break;
468
469                 command_r = nextcmd;
470                 handle_rw = sync_state->batch_hostcmd->handle;
471                 time_r = sync_state->batch_hostcmd->time_arg;
472                 hostcmd_handle_rw = sync_state->batch_hostcmd->hostcmd_handle;
473
474                 goldfish_sync_cmd_queue(
475                                 sync_state,
476                                 command_r,
477                                 handle_rw,
478                                 time_r,
479                                 hostcmd_handle_rw);
480
481                 count++;
482         }
483
484         spin_unlock(&sync_state->lock);
485
486         schedule_work(&sync_state->work_item);
487
488         return (count == 0) ? IRQ_NONE : IRQ_HANDLED;
489 }
490
491 /* |goldfish_sync_work_item_fn| does the actual work of servicing
492  * host->guest sync commands. This function is triggered whenever
493  * the IRQ for the goldfish sync device is raised. Once it starts
494  * running, it grabs the contents of the buffer containing the
495  * commands it needs to execute (there may be multiple, because
496  * our IRQ is active high and not edge triggered), and then
497  * runs all of them one after the other.
498  */
499 static void goldfish_sync_work_item_fn(struct work_struct *input)
500 {
501
502         struct goldfish_sync_state *sync_state;
503         int sync_fence_fd;
504
505         struct goldfish_sync_timeline_obj *timeline;
506         uint64_t timeline_ptr;
507
508         uint64_t hostcmd_handle;
509
510         uint32_t cmd;
511         uint64_t handle;
512         uint32_t time_arg;
513
514         struct goldfish_sync_hostcmd *todo;
515         uint32_t todo_end;
516
517         unsigned long irq_flags;
518
519         struct goldfish_sync_hostcmd to_run[GOLDFISH_SYNC_MAX_CMDS];
520         uint32_t i = 0;
521
522         sync_state = container_of(input, struct goldfish_sync_state, work_item);
523
524         mutex_lock(&sync_state->mutex_lock);
525
526         spin_lock_irqsave(&sync_state->lock, irq_flags); {
527
528                 todo_end = sync_state->to_do_end;
529
530                 DPRINT("num sync todos: %u", sync_state->to_do_end);
531
532                 for (i = 0; i < todo_end; i++)
533                         to_run[i] = sync_state->to_do[i];
534
535                 /* We expect that commands will come in at a slow enough rate
536                  * so that incoming items will not be more than
537                  * GOLDFISH_SYNC_MAX_CMDS.
538                  *
539                  * This is because the way the sync device is used,
540                  * it's only for managing buffer data transfers per frame,
541                  * with a sequential dependency between putting things in
542                  * to_do and taking them out. Once a set of commands is
543                  * queued up in to_do, the user of the device waits for
544                  * them to be processed before queuing additional commands,
545                  * which limits the rate at which commands come in
546                  * to the rate at which we take them out here.
547                  *
548                  * We also don't expect more than MAX_CMDS to be issued
549                  * at once; there is a correspondence between
550                  * which buffers need swapping to the (display / buffer queue)
551                  * to particular commands, and we don't expect there to be
552                  * enough display or buffer queues in operation at once
553                  * to overrun GOLDFISH_SYNC_MAX_CMDS.
554                  */
555                 sync_state->to_do_end = 0;
556
557         } spin_unlock_irqrestore(&sync_state->lock, irq_flags);
558
559         for (i = 0; i < todo_end; i++) {
560                 DPRINT("todo index: %u", i);
561
562                 todo = &to_run[i];
563
564                 cmd = todo->cmd;
565
566                 handle = (uint64_t)todo->handle;
567                 time_arg = todo->time_arg;
568                 hostcmd_handle = (uint64_t)todo->hostcmd_handle;
569
570                 DTRACE();
571
572                 timeline = (struct goldfish_sync_timeline_obj *)(uintptr_t)handle;
573
574                 switch (cmd) {
575                 case CMD_SYNC_READY:
576                         break;
577                 case CMD_CREATE_SYNC_TIMELINE:
578                         DPRINT("exec CMD_CREATE_SYNC_TIMELINE: "
579                                         "handle=0x%llx time_arg=%d",
580                                         handle, time_arg);
581                         timeline = goldfish_sync_timeline_create();
582                         timeline_ptr = (uintptr_t)timeline;
583                         goldfish_sync_hostcmd_reply(sync_state, CMD_CREATE_SYNC_TIMELINE,
584                                                                                 timeline_ptr,
585                                                                                 0,
586                                                                                 hostcmd_handle);
587                         DPRINT("sync timeline created: %p", timeline);
588                         break;
589                 case CMD_CREATE_SYNC_FENCE:
590                         DPRINT("exec CMD_CREATE_SYNC_FENCE: "
591                                         "handle=0x%llx time_arg=%d",
592                                         handle, time_arg);
593                         sync_fence_fd = goldfish_sync_fence_create(timeline, time_arg);
594                         goldfish_sync_hostcmd_reply(sync_state, CMD_CREATE_SYNC_FENCE,
595                                                                                 sync_fence_fd,
596                                                                                 0,
597                                                                                 hostcmd_handle);
598                         break;
599                 case CMD_SYNC_TIMELINE_INC:
600                         DPRINT("exec CMD_SYNC_TIMELINE_INC: "
601                                         "handle=0x%llx time_arg=%d",
602                                         handle, time_arg);
603                         goldfish_sync_timeline_inc(timeline, time_arg);
604                         break;
605                 case CMD_DESTROY_SYNC_TIMELINE:
606                         DPRINT("exec CMD_DESTROY_SYNC_TIMELINE: "
607                                         "handle=0x%llx time_arg=%d",
608                                         handle, time_arg);
609                         goldfish_sync_timeline_destroy(timeline);
610                         break;
611                 }
612                 DPRINT("Done executing sync command");
613         }
614         mutex_unlock(&sync_state->mutex_lock);
615 }
616
617 /* Guest-side interface: file operations */
618
619 /* Goldfish sync context and ioctl info.
620  *
621  * When a sync context is created by open()-ing the goldfish sync device, we
622  * create a sync context (|goldfish_sync_context|).
623  *
624  * Currently, the only data required to track is the sync timeline itself
625  * along with the current time, which are all packed up in the
626  * |goldfish_sync_timeline_obj| field. We use a |goldfish_sync_context|
627  * as the filp->private_data.
628  *
629  * Next, when a sync context user requests that work be queued and a fence
630  * fd provided, we use the |goldfish_sync_ioctl_info| struct, which holds
631  * information about which host handles to touch for this particular
632  * queue-work operation. We need to know about the host-side sync thread
633  * and the particular host-side GLsync object. We also possibly write out
634  * a file descriptor.
635  */
/* Per-open() state, stored in filp->private_data. |timeline| is created
 * lazily by the first QUEUE_WORK ioctl and destroyed on release(). */
struct goldfish_sync_context {
        struct goldfish_sync_timeline_obj *timeline;
};

/* Payload of GOLDFISH_SYNC_IOC_QUEUE_WORK. The layout is ABI with
 * userspace (goldfish_sync.h in goldfish-opengl) -- do not reorder.
 * NOTE(review): the uint64_t members make the struct's size/alignment
 * differ between some 32-bit ABIs and 64-bit kernels; confirm the
 * compat_ioctl path if 32-bit userspace must be supported. */
struct goldfish_sync_ioctl_info {
        uint64_t host_glsync_handle_in;     /* host-side GLsync object handle */
        uint64_t host_syncthread_handle_in; /* host-side sync thread handle */
        int fence_fd_out;                   /* written back by the ioctl */
};
645
646 static int goldfish_sync_open(struct inode *inode, struct file *file)
647 {
648
649         struct goldfish_sync_context *sync_context;
650
651         DTRACE();
652
653         mutex_lock(&global_sync_state->mutex_lock);
654
655         sync_context = kzalloc(sizeof(struct goldfish_sync_context), GFP_KERNEL);
656
657         if (sync_context == NULL) {
658                 ERR("Creation of goldfish sync context failed!");
659                 mutex_unlock(&global_sync_state->mutex_lock);
660                 return -ENOMEM;
661         }
662
663         sync_context->timeline = NULL;
664
665         file->private_data = sync_context;
666
667         DPRINT("successfully create a sync context @0x%p", sync_context);
668
669         mutex_unlock(&global_sync_state->mutex_lock);
670
671         return 0;
672 }
673
674 static int goldfish_sync_release(struct inode *inode, struct file *file)
675 {
676
677         struct goldfish_sync_context *sync_context;
678
679         DTRACE();
680
681         mutex_lock(&global_sync_state->mutex_lock);
682
683         sync_context = file->private_data;
684
685         if (sync_context->timeline)
686                 goldfish_sync_timeline_destroy(sync_context->timeline);
687
688         sync_context->timeline = NULL;
689
690         kfree(sync_context);
691
692         mutex_unlock(&global_sync_state->mutex_lock);
693
694         return 0;
695 }
696
697 /* |goldfish_sync_ioctl| is the guest-facing interface of goldfish sync
698  * and is used in conjunction with eglCreateSyncKHR to queue up the
699  * actual work of waiting for the EGL sync command to complete,
700  * possibly returning a fence fd to the guest.
701  */
702 static long goldfish_sync_ioctl(struct file *file,
703                                                                 unsigned int cmd,
704                                                                 unsigned long arg)
705 {
706         struct goldfish_sync_context *sync_context_data;
707         struct goldfish_sync_timeline_obj *timeline;
708         int fd_out;
709         struct goldfish_sync_ioctl_info ioctl_data;
710
711         DTRACE();
712
713         sync_context_data = file->private_data;
714         fd_out = -1;
715
716         switch (cmd) {
717         case GOLDFISH_SYNC_IOC_QUEUE_WORK:
718
719                 DPRINT("exec GOLDFISH_SYNC_IOC_QUEUE_WORK");
720
721                 mutex_lock(&global_sync_state->mutex_lock);
722
723                 if (copy_from_user(&ioctl_data,
724                                                 (void __user *)arg,
725                                                 sizeof(ioctl_data))) {
726                         ERR("Failed to copy memory for ioctl_data from user.");
727                         mutex_unlock(&global_sync_state->mutex_lock);
728                         return -EFAULT;
729                 }
730
731                 if (ioctl_data.host_syncthread_handle_in == 0) {
732                         DPRINT("Error: zero host syncthread handle!!!");
733                         mutex_unlock(&global_sync_state->mutex_lock);
734                         return -EFAULT;
735                 }
736
737                 if (!sync_context_data->timeline) {
738                         DPRINT("no timeline yet, create one.");
739                         sync_context_data->timeline = goldfish_sync_timeline_create();
740                         DPRINT("timeline: 0x%p", &sync_context_data->timeline);
741                 }
742
743                 timeline = sync_context_data->timeline;
744                 fd_out = goldfish_sync_fence_create(timeline,
745                                                                                         timeline->current_time + 1);
746                 DPRINT("Created fence with fd %d and current time %u (timeline: 0x%p)",
747                            fd_out,
748                            sync_context_data->timeline->current_time + 1,
749                            sync_context_data->timeline);
750
751                 ioctl_data.fence_fd_out = fd_out;
752
753                 if (copy_to_user((void __user *)arg,
754                                                 &ioctl_data,
755                                                 sizeof(ioctl_data))) {
756                         DPRINT("Error, could not copy to user!!!");
757
758                         sys_close(fd_out);
759                         /* We won't be doing an increment, kref_put immediately. */
760                         kref_put(&timeline->kref, delete_timeline_obj);
761                         mutex_unlock(&global_sync_state->mutex_lock);
762                         return -EFAULT;
763                 }
764
765                 /* We are now about to trigger a host-side wait;
766                  * accumulate on |pending_waits|. */
767                 goldfish_sync_send_guestcmd(global_sync_state,
768                                 CMD_TRIGGER_HOST_WAIT,
769                                 ioctl_data.host_glsync_handle_in,
770                                 ioctl_data.host_syncthread_handle_in,
771                                 (uint64_t)(uintptr_t)(sync_context_data->timeline));
772
773                 mutex_unlock(&global_sync_state->mutex_lock);
774                 return 0;
775         default:
776                 return -ENOTTY;
777         }
778 }
779
/* File operations backing the /dev/goldfish_sync misc device.
 * open/release manage per-fd sync context state; all commands go
 * through the single ioctl entry point.
 */
static const struct file_operations goldfish_sync_fops = {
	.owner = THIS_MODULE,
	.open = goldfish_sync_open,
	.release = goldfish_sync_release,
	.unlocked_ioctl = goldfish_sync_ioctl,
	/* NOTE(review): the native handler is reused for 32-bit compat
	 * ioctls with no compat_ptr() translation — presumably the ioctl
	 * payload layout is identical for 32/64-bit userspace; confirm
	 * against the ioctl struct definition. */
	.compat_ioctl = goldfish_sync_ioctl,
};
787
788 static struct miscdevice goldfish_sync_device = {
789         .name = "goldfish_sync",
790         .fops = &goldfish_sync_fops,
791 };
792
793
794 static bool setup_verify_batch_cmd_addr(struct goldfish_sync_state *sync_state,
795                                                                                 void *batch_addr,
796                                                                                 uint32_t addr_offset,
797                                                                                 uint32_t addr_offset_high)
798 {
799         uint64_t batch_addr_phys;
800         uint32_t batch_addr_phys_test_lo;
801         uint32_t batch_addr_phys_test_hi;
802
803         if (!batch_addr) {
804                 ERR("Could not use batch command address!");
805                 return false;
806         }
807
808         batch_addr_phys = virt_to_phys(batch_addr);
809         writel((uint32_t)(batch_addr_phys),
810                         sync_state->reg_base + addr_offset);
811         writel((uint32_t)(batch_addr_phys >> 32),
812                         sync_state->reg_base + addr_offset_high);
813
814         batch_addr_phys_test_lo =
815                 readl(sync_state->reg_base + addr_offset);
816         batch_addr_phys_test_hi =
817                 readl(sync_state->reg_base + addr_offset_high);
818
819         if (virt_to_phys(batch_addr) !=
820                         (((uint64_t)batch_addr_phys_test_hi << 32) |
821                          batch_addr_phys_test_lo)) {
822                 ERR("Invalid batch command address!");
823                 return false;
824         }
825
826         return true;
827 }
828
829 int goldfish_sync_probe(struct platform_device *pdev)
830 {
831         struct resource *ioresource;
832         struct goldfish_sync_state *sync_state = global_sync_state;
833         int status;
834
835         DTRACE();
836
837         sync_state->to_do_end = 0;
838
839         spin_lock_init(&sync_state->lock);
840         mutex_init(&sync_state->mutex_lock);
841
842         platform_set_drvdata(pdev, sync_state);
843
844         ioresource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
845         if (ioresource == NULL) {
846                 ERR("platform_get_resource failed");
847                 return -ENODEV;
848         }
849
850         sync_state->reg_base = devm_ioremap(&pdev->dev, ioresource->start, PAGE_SIZE);
851         if (sync_state->reg_base == NULL) {
852                 ERR("Could not ioremap");
853                 return -ENOMEM;
854         }
855
856         sync_state->irq = platform_get_irq(pdev, 0);
857         if (sync_state->irq < 0) {
858                 ERR("Could not platform_get_irq");
859                 return -ENODEV;
860         }
861
862         status = devm_request_irq(&pdev->dev,
863                                                         sync_state->irq,
864                                                         goldfish_sync_interrupt,
865                                                         IRQF_SHARED,
866                                                         pdev->name,
867                                                         sync_state);
868         if (status) {
869                 ERR("request_irq failed");
870                 return -ENODEV;
871         }
872
873         INIT_WORK(&sync_state->work_item,
874                           goldfish_sync_work_item_fn);
875
876         misc_register(&goldfish_sync_device);
877
878         /* Obtain addresses for batch send/recv of commands. */
879         {
880                 struct goldfish_sync_hostcmd *batch_addr_hostcmd;
881                 struct goldfish_sync_guestcmd *batch_addr_guestcmd;
882
883                 batch_addr_hostcmd = devm_kzalloc(&pdev->dev, sizeof(struct goldfish_sync_hostcmd),
884                                 GFP_KERNEL);
885                 batch_addr_guestcmd = devm_kzalloc(&pdev->dev, sizeof(struct goldfish_sync_guestcmd),
886                                 GFP_KERNEL);
887
888                 if (!setup_verify_batch_cmd_addr(sync_state,
889                                         batch_addr_hostcmd,
890                                         SYNC_REG_BATCH_COMMAND_ADDR,
891                                         SYNC_REG_BATCH_COMMAND_ADDR_HIGH)) {
892                         ERR("goldfish_sync: Could not setup batch command address");
893                         return -ENODEV;
894                 }
895
896                 if (!setup_verify_batch_cmd_addr(sync_state,
897                                         batch_addr_guestcmd,
898                                         SYNC_REG_BATCH_GUESTCOMMAND_ADDR,
899                                         SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH)) {
900                         ERR("goldfish_sync: Could not setup batch guest command address");
901                         return -ENODEV;
902                 }
903
904                 sync_state->batch_hostcmd = batch_addr_hostcmd;
905                 sync_state->batch_guestcmd = batch_addr_guestcmd;
906         }
907
908         INFO("goldfish_sync: Initialized goldfish sync device");
909
910         writel(0, sync_state->reg_base + SYNC_REG_INIT);
911
912         return 0;
913 }
914
915 static int goldfish_sync_remove(struct platform_device *pdev)
916 {
917         struct goldfish_sync_state *sync_state = global_sync_state;
918
919         DTRACE();
920
921         misc_deregister(&goldfish_sync_device);
922         memset(sync_state, 0, sizeof(struct goldfish_sync_state));
923         return 0;
924 }
925
/* Device-tree match table: binds this driver to nodes declaring
 * compatible = "google,goldfish-sync". */
static const struct of_device_id goldfish_sync_of_match[] = {
	{ .compatible = "google,goldfish-sync", },
	{},
};
MODULE_DEVICE_TABLE(of, goldfish_sync_of_match);
931
/* ACPI match table: the emulator enumerates the sync device with
 * hardware ID "GFSH0006" on ACPI-based guest configurations. */
static const struct acpi_device_id goldfish_sync_acpi_match[] = {
	{ "GFSH0006", 0 },
	{ },
};

MODULE_DEVICE_TABLE(acpi, goldfish_sync_acpi_match);
938
/* Platform driver glue: matches via device tree or ACPI (see the
 * tables above) and wires up probe/remove. */
static struct platform_driver goldfish_sync = {
	.probe = goldfish_sync_probe,
	.remove = goldfish_sync_remove,
	.driver = {
		.name = "goldfish_sync",
		.of_match_table = goldfish_sync_of_match,
		.acpi_match_table = ACPI_PTR(goldfish_sync_acpi_match),
	}
};
948
/* Standard module boilerplate: generates module init/exit that
 * register/unregister the platform driver. */
module_platform_driver(goldfish_sync);

MODULE_AUTHOR("Google, Inc.");
MODULE_DESCRIPTION("Android QEMU Sync Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
955
956 /* This function is only to run a basic test of sync framework.
957  * It creates a timeline and fence object whose signal point is at 1.
958  * The timeline is incremented, and we use the sync framework's
959  * sync_fence_wait on that fence object. If everything works out,
960  * we should not hang in the wait and return immediately.
961  * There is no way to explicitly run this test yet, but it
962  * can be used by inserting it at the end of goldfish_sync_probe.
963  */
/* Basic smoke test of the sync plumbing: create a timeline, create a
 * fence signaling at count 1, increment the timeline to 1, then wait
 * on the fence. If everything works, the wait returns immediately.
 *
 * Fixes over the original:
 *  - sync_fence_fdget() can return NULL (e.g. bad fd); the original
 *    passed it straight into sync_fence_wait(), risking a NULL deref;
 *  - timeline/fence creation failures are now detected instead of
 *    being dereferenced;
 *  - the fence reference taken by fdget is dropped and the test fd
 *    is closed, so the test no longer leaks them.
 */
void test_kernel_sync(void)
{
	struct goldfish_sync_timeline_obj *test_timeline;
	struct sync_fence *test_fence;
	int test_fence_fd;

	DTRACE();

	DPRINT("test sw_sync");

	test_timeline = goldfish_sync_timeline_create();
	DPRINT("sw_sync_timeline_create -> 0x%p", test_timeline);
	if (!test_timeline) {
		ERR("test_kernel_sync: could not create timeline");
		return;
	}

	test_fence_fd = goldfish_sync_fence_create(test_timeline, 1);
	DPRINT("sync_fence_create -> %d", test_fence_fd);
	if (test_fence_fd < 0) {
		ERR("test_kernel_sync: could not create fence");
		return;
	}

	DPRINT("incrementing test timeline");
	goldfish_sync_timeline_inc(test_timeline, 1);

	DPRINT("test waiting (should NOT hang)");
	test_fence = sync_fence_fdget(test_fence_fd);
	if (test_fence) {
		sync_fence_wait(test_fence, -1);
		/* Drop the reference taken by sync_fence_fdget(). */
		sync_fence_put(test_fence);
	} else {
		ERR("test_kernel_sync: could not get fence from fd");
	}

	sys_close(test_fence_fd);

	DPRINT("test waiting (afterward)");
}