/* Copyright (C) 2016 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/acpi.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>		/* readl/writel, virt_to_phys */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>		/* kzalloc/kfree */
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/uaccess.h>	/* copy_from_user/copy_to_user */
/* Logging helpers.
 * Note: no trailing semicolon inside the macro bodies — the semicolon
 * belongs at the call site. A semicolon baked into the expansion turns
 * "if (x) ERR(...); else ..." into a syntax error (stray empty
 * statement before the else). */
#define ERR(...) printk(KERN_ERR __VA_ARGS__)

#define INFO(...) printk(KERN_INFO __VA_ARGS__)

#define DPRINT(...) pr_debug(__VA_ARGS__)

/* Trace entry into the enclosing function (debug builds only). */
#define DTRACE() DPRINT("%s: enter", __func__)
/* The Goldfish sync driver is designed to provide an interface
 * between the underlying host's sync device and the kernel's
 * sw_sync.
 * The purpose of the device/driver is to enable lightweight
 * creation and signaling of timelines and fences
 * in order to synchronize the guest with host-side graphics events.
 *
 * Each time the interrupt trips, the driver
 * may perform a sw_sync operation.
 */
/* The operations are: */

/* Ready signal - used to mark when irq should lower */
#define CMD_SYNC_READY            0

/* Create a new timeline. writes timeline handle */
#define CMD_CREATE_SYNC_TIMELINE  1

/* Create a fence object. reads timeline handle and time argument.
 * Writes fence fd to the SYNC_REG_HANDLE register. */
#define CMD_CREATE_SYNC_FENCE     2

/* Increments timeline. reads timeline handle and time argument */
#define CMD_SYNC_TIMELINE_INC     3

/* Destroys a timeline. reads timeline handle */
#define CMD_DESTROY_SYNC_TIMELINE 4

/* Starts a wait on the host with
 * the given glsync object and sync thread handle. */
#define CMD_TRIGGER_HOST_WAIT     5

/* The register layout is: */

#define SYNC_REG_BATCH_COMMAND                0x00 /* host->guest batch commands */
#define SYNC_REG_BATCH_GUESTCOMMAND           0x04 /* guest->host batch commands */
#define SYNC_REG_BATCH_COMMAND_ADDR           0x08 /* communicate physical address of host->guest batch commands */
#define SYNC_REG_BATCH_COMMAND_ADDR_HIGH      0x0c /* 64-bit part */
#define SYNC_REG_BATCH_GUESTCOMMAND_ADDR      0x10 /* communicate physical address of guest->host commands */
#define SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH 0x14 /* 64-bit part */
#define SYNC_REG_INIT                         0x18 /* signals that the device has been probed */

/* There is an ioctl associated with goldfish sync driver.
 * Make it conflict with ioctls that are not likely to be used
 * in the emulator.
 *
 * '@' 00-0F  linux/radeonfb.h  conflict!
 * '@' 00-0F  drivers/video/aty/aty128fb.c  conflict!
 */
#define GOLDFISH_SYNC_IOC_MAGIC '@'

#define GOLDFISH_SYNC_IOC_QUEUE_WORK \
	_IOWR(GOLDFISH_SYNC_IOC_MAGIC, 0, struct goldfish_sync_ioctl_info)
/* The above definitions (command codes, register layout, ioctl definitions)
 * need to be in sync with the following files:
 *
 * Host-side (emulator):
 * external/qemu/android/emulation/goldfish_sync.h
 * external/qemu-android/hw/misc/goldfish_sync.c
 *
 * Guest-side (system image):
 * device/generic/goldfish-opengl/system/egl/goldfish_sync.h
 * device/generic/goldfish/ueventd.ranchu.rc
 * platform/build/target/board/generic/sepolicy/file_contexts
 */
112 struct goldfish_sync_hostcmd {
113 /* sorted for alignment */
115 uint64_t hostcmd_handle;
120 struct goldfish_sync_guestcmd {
121 uint64_t host_command; /* uint64_t for alignment */
122 uint64_t glsync_handle;
123 uint64_t thread_handle;
124 uint64_t guest_timeline_handle;
127 #define GOLDFISH_SYNC_MAX_CMDS 64
129 struct goldfish_sync_state {
130 char __iomem *reg_base;
133 /* Spinlock protects |to_do| / |to_do_end|. */
135 /* |mutex_lock| protects all concurrent access
136 * to timelines for both kernel and user space. */
137 struct mutex mutex_lock;
139 /* Buffer holding commands issued from host. */
140 struct goldfish_sync_hostcmd to_do[GOLDFISH_SYNC_MAX_CMDS];
143 /* Addresses for the reading or writing
144 * of individual commands. The host can directly write
145 * to |batch_hostcmd| (and then this driver immediately
146 * copies contents to |to_do|). This driver either replies
147 * through |batch_hostcmd| or simply issues a
148 * guest->host command through |batch_guestcmd|.
150 struct goldfish_sync_hostcmd *batch_hostcmd;
151 struct goldfish_sync_guestcmd *batch_guestcmd;
153 /* Used to give this struct itself to a work queue
154 * function for executing actual sync commands. */
155 struct work_struct work_item;
158 static struct goldfish_sync_state global_sync_state[1];
160 struct goldfish_sync_timeline_obj {
161 struct sw_sync_timeline *sw_sync_tl;
162 uint32_t current_time;
163 /* We need to be careful about when we deallocate
164 * this |goldfish_sync_timeline_obj| struct.
165 * In order to ensure proper cleanup, we need to
166 * consider the triggered host-side wait that may
167 * still be in flight when the guest close()'s a
168 * goldfish_sync device's sync context fd (and
169 * destroys the |sw_sync_tl| field above).
170 * The host-side wait may raise IRQ
171 * and tell the kernel to increment the timeline _after_
172 * the |sw_sync_tl| has already been set to null.
174 * From observations on OpenGL apps and CTS tests, this
175 * happens at some very low probability upon context
176 * destruction or process close, but it does happen
177 * and it needs to be handled properly. Otherwise,
178 * if we clean up the surrounding |goldfish_sync_timeline_obj|
179 * too early, any |handle| field of any host->guest command
180 * might not even point to a null |sw_sync_tl| field,
181 * but to garbage memory or even a reclaimed |sw_sync_tl|.
182 * If we do not count such "pending waits" and kfree the object
183 * immediately upon |goldfish_sync_timeline_destroy|,
184 * we might get mysterous RCU stalls after running a long
185 * time because the garbage memory that is being read
186 * happens to be interpretable as a |spinlock_t| struct
187 * that is currently in the locked state.
189 * To track when to free the |goldfish_sync_timeline_obj|
190 * itself, we maintain a kref.
191 * The kref essentially counts the timeline itself plus
192 * the number of waits in flight. kref_init/kref_put
194 * |goldfish_sync_timeline_create|/|goldfish_sync_timeline_destroy|
195 * and kref_get/kref_put are issued on
196 * |goldfish_sync_fence_create|/|goldfish_sync_timeline_inc|.
198 * The timeline is destroyed after reference count
199 * reaches zero, which would happen after
200 * |goldfish_sync_timeline_destroy| and all pending
201 * |goldfish_sync_timeline_inc|'s are fulfilled.
203 * NOTE (1): We assume that |fence_create| and
204 * |timeline_inc| calls are 1:1, otherwise the kref scheme
205 * will not work. This is a valid assumption as long
206 * as the host-side virtual device implementation
207 * does not insert any timeline increments
208 * that we did not trigger from here.
210 * NOTE (2): The use of kref by itself requires no locks,
211 * but this does not mean everything works without locks.
212 * Related timeline operations do require a lock of some sort,
213 * or at least are not proven to work without it.
214 * In particualr, we assume that all the operations
215 * done on the |kref| field above are done in contexts where
216 * |global_sync_state->mutex_lock| is held. Do not
217 * remove that lock until everything is proven to work
222 /* We will call |delete_timeline_obj| when the last reference count
223 * of the kref is decremented. This deletes the sw_sync
224 * timeline object along with the wrapper itself. */
225 static void delete_timeline_obj(struct kref* kref) {
226 struct goldfish_sync_timeline_obj* obj =
227 container_of(kref, struct goldfish_sync_timeline_obj, kref);
229 sync_timeline_destroy(&obj->sw_sync_tl->obj);
230 obj->sw_sync_tl = NULL;
234 static uint64_t gensym_ctr;
235 static void gensym(char *dst)
237 sprintf(dst, "goldfish_sync:gensym:%llu", gensym_ctr);
241 /* |goldfish_sync_timeline_create| assumes that |global_sync_state->mutex_lock|
243 static struct goldfish_sync_timeline_obj*
244 goldfish_sync_timeline_create(void)
247 char timeline_name[256];
248 struct sw_sync_timeline *res_sync_tl = NULL;
249 struct goldfish_sync_timeline_obj *res;
253 gensym(timeline_name);
255 res_sync_tl = sw_sync_timeline_create(timeline_name);
257 ERR("Failed to create sw_sync timeline.");
261 res = kzalloc(sizeof(struct goldfish_sync_timeline_obj), GFP_KERNEL);
262 res->sw_sync_tl = res_sync_tl;
263 res->current_time = 0;
264 kref_init(&res->kref);
266 DPRINT("new timeline_obj=0x%p", res);
270 /* |goldfish_sync_fence_create| assumes that |global_sync_state->mutex_lock|
273 goldfish_sync_fence_create(struct goldfish_sync_timeline_obj *obj,
278 char fence_name[256];
279 struct sync_pt *syncpt = NULL;
280 struct sync_fence *sync_obj = NULL;
281 struct sw_sync_timeline *tl;
287 tl = obj->sw_sync_tl;
289 syncpt = sw_sync_pt_create(tl, val);
291 ERR("could not create sync point! "
292 "sync_timeline=0x%p val=%d",
297 fd = get_unused_fd_flags(O_CLOEXEC);
299 ERR("could not get unused fd for sync fence. "
306 sync_obj = sync_fence_create(fence_name, syncpt);
308 ERR("could not create sync fence! "
309 "sync_timeline=0x%p val=%d sync_pt=0x%p",
311 goto err_cleanup_fd_pt;
314 DPRINT("installing sync fence into fd %d sync_obj=0x%p", fd, sync_obj);
315 sync_fence_install(sync_obj, fd);
316 kref_get(&obj->kref);
323 sync_pt_free(syncpt);
327 /* |goldfish_sync_timeline_inc| assumes that |global_sync_state->mutex_lock|
330 goldfish_sync_timeline_inc(struct goldfish_sync_timeline_obj *obj, uint32_t inc)
333 /* Just give up if someone else nuked the timeline.
334 * Whoever it was won't care that it doesn't get signaled. */
337 DPRINT("timeline_obj=0x%p", obj);
338 sw_sync_timeline_inc(obj->sw_sync_tl, inc);
339 DPRINT("incremented timeline. increment max_time");
340 obj->current_time += inc;
342 /* Here, we will end up deleting the timeline object if it
343 * turns out that this call was a pending increment after
344 * |goldfish_sync_timeline_destroy| was called. */
345 kref_put(&obj->kref, delete_timeline_obj);
349 /* |goldfish_sync_timeline_destroy| assumes
350 * that |global_sync_state->mutex_lock| is held. */
352 goldfish_sync_timeline_destroy(struct goldfish_sync_timeline_obj *obj)
355 /* See description of |goldfish_sync_timeline_obj| for why we
356 * should not immediately destroy |obj| */
357 kref_put(&obj->kref, delete_timeline_obj);
361 goldfish_sync_cmd_queue(struct goldfish_sync_state *sync_state,
365 uint64_t hostcmd_handle)
367 struct goldfish_sync_hostcmd *to_add;
371 BUG_ON(sync_state->to_do_end == GOLDFISH_SYNC_MAX_CMDS);
373 to_add = &sync_state->to_do[sync_state->to_do_end];
376 to_add->handle = handle;
377 to_add->time_arg = time_arg;
378 to_add->hostcmd_handle = hostcmd_handle;
380 sync_state->to_do_end += 1;
384 goldfish_sync_hostcmd_reply(struct goldfish_sync_state *sync_state,
388 uint64_t hostcmd_handle)
390 unsigned long irq_flags;
391 struct goldfish_sync_hostcmd *batch_hostcmd =
392 sync_state->batch_hostcmd;
396 spin_lock_irqsave(&sync_state->lock, irq_flags);
398 batch_hostcmd->cmd = cmd;
399 batch_hostcmd->handle = handle;
400 batch_hostcmd->time_arg = time_arg;
401 batch_hostcmd->hostcmd_handle = hostcmd_handle;
402 writel(0, sync_state->reg_base + SYNC_REG_BATCH_COMMAND);
404 spin_unlock_irqrestore(&sync_state->lock, irq_flags);
408 goldfish_sync_send_guestcmd(struct goldfish_sync_state *sync_state,
410 uint64_t glsync_handle,
411 uint64_t thread_handle,
412 uint64_t timeline_handle)
414 unsigned long irq_flags;
415 struct goldfish_sync_guestcmd *batch_guestcmd =
416 sync_state->batch_guestcmd;
420 spin_lock_irqsave(&sync_state->lock, irq_flags);
422 batch_guestcmd->host_command = (uint64_t)cmd;
423 batch_guestcmd->glsync_handle = (uint64_t)glsync_handle;
424 batch_guestcmd->thread_handle = (uint64_t)thread_handle;
425 batch_guestcmd->guest_timeline_handle = (uint64_t)timeline_handle;
426 writel(0, sync_state->reg_base + SYNC_REG_BATCH_GUESTCOMMAND);
428 spin_unlock_irqrestore(&sync_state->lock, irq_flags);
431 /* |goldfish_sync_interrupt| handles IRQ raises from the virtual device.
432 * In the context of OpenGL, this interrupt will fire whenever we need
433 * to signal a fence fd in the guest, with the command
434 * |CMD_SYNC_TIMELINE_INC|.
435 * However, because this function will be called in an interrupt context,
436 * it is necessary to do the actual work of signaling off of interrupt context.
437 * The shared work queue is used for this purpose. At the end when
438 * all pending commands are intercepted by the interrupt handler,
439 * we call |schedule_work|, which will later run the actual
440 * desired sync command in |goldfish_sync_work_item_fn|.
442 static irqreturn_t goldfish_sync_interrupt(int irq, void *dev_id)
445 struct goldfish_sync_state *sync_state = dev_id;
451 uint64_t hostcmd_handle_rw;
459 spin_lock(&sync_state->lock);
463 readl(sync_state->reg_base + SYNC_REG_BATCH_COMMAND);
464 nextcmd = sync_state->batch_hostcmd->cmd;
470 handle_rw = sync_state->batch_hostcmd->handle;
471 time_r = sync_state->batch_hostcmd->time_arg;
472 hostcmd_handle_rw = sync_state->batch_hostcmd->hostcmd_handle;
474 goldfish_sync_cmd_queue(
484 spin_unlock(&sync_state->lock);
486 schedule_work(&sync_state->work_item);
488 return (count == 0) ? IRQ_NONE : IRQ_HANDLED;
491 /* |goldfish_sync_work_item_fn| does the actual work of servicing
492 * host->guest sync commands. This function is triggered whenever
493 * the IRQ for the goldfish sync device is raised. Once it starts
494 * running, it grabs the contents of the buffer containing the
495 * commands it needs to execute (there may be multiple, because
496 * our IRQ is active high and not edge triggered), and then
497 * runs all of them one after the other.
499 static void goldfish_sync_work_item_fn(struct work_struct *input)
502 struct goldfish_sync_state *sync_state;
505 struct goldfish_sync_timeline_obj *timeline;
506 uint64_t timeline_ptr;
508 uint64_t hostcmd_handle;
514 struct goldfish_sync_hostcmd *todo;
517 unsigned long irq_flags;
519 struct goldfish_sync_hostcmd to_run[GOLDFISH_SYNC_MAX_CMDS];
522 sync_state = container_of(input, struct goldfish_sync_state, work_item);
524 mutex_lock(&sync_state->mutex_lock);
526 spin_lock_irqsave(&sync_state->lock, irq_flags); {
528 todo_end = sync_state->to_do_end;
530 DPRINT("num sync todos: %u", sync_state->to_do_end);
532 for (i = 0; i < todo_end; i++)
533 to_run[i] = sync_state->to_do[i];
535 /* We expect that commands will come in at a slow enough rate
536 * so that incoming items will not be more than
537 * GOLDFISH_SYNC_MAX_CMDS.
539 * This is because the way the sync device is used,
540 * it's only for managing buffer data transfers per frame,
541 * with a sequential dependency between putting things in
542 * to_do and taking them out. Once a set of commands is
543 * queued up in to_do, the user of the device waits for
544 * them to be processed before queuing additional commands,
545 * which limits the rate at which commands come in
546 * to the rate at which we take them out here.
548 * We also don't expect more than MAX_CMDS to be issued
549 * at once; there is a correspondence between
550 * which buffers need swapping to the (display / buffer queue)
551 * to particular commands, and we don't expect there to be
552 * enough display or buffer queues in operation at once
553 * to overrun GOLDFISH_SYNC_MAX_CMDS.
555 sync_state->to_do_end = 0;
557 } spin_unlock_irqrestore(&sync_state->lock, irq_flags);
559 for (i = 0; i < todo_end; i++) {
560 DPRINT("todo index: %u", i);
566 handle = (uint64_t)todo->handle;
567 time_arg = todo->time_arg;
568 hostcmd_handle = (uint64_t)todo->hostcmd_handle;
572 timeline = (struct goldfish_sync_timeline_obj *)(uintptr_t)handle;
577 case CMD_CREATE_SYNC_TIMELINE:
578 DPRINT("exec CMD_CREATE_SYNC_TIMELINE: "
579 "handle=0x%llx time_arg=%d",
581 timeline = goldfish_sync_timeline_create();
582 timeline_ptr = (uintptr_t)timeline;
583 goldfish_sync_hostcmd_reply(sync_state, CMD_CREATE_SYNC_TIMELINE,
587 DPRINT("sync timeline created: %p", timeline);
589 case CMD_CREATE_SYNC_FENCE:
590 DPRINT("exec CMD_CREATE_SYNC_FENCE: "
591 "handle=0x%llx time_arg=%d",
593 sync_fence_fd = goldfish_sync_fence_create(timeline, time_arg);
594 goldfish_sync_hostcmd_reply(sync_state, CMD_CREATE_SYNC_FENCE,
599 case CMD_SYNC_TIMELINE_INC:
600 DPRINT("exec CMD_SYNC_TIMELINE_INC: "
601 "handle=0x%llx time_arg=%d",
603 goldfish_sync_timeline_inc(timeline, time_arg);
605 case CMD_DESTROY_SYNC_TIMELINE:
606 DPRINT("exec CMD_DESTROY_SYNC_TIMELINE: "
607 "handle=0x%llx time_arg=%d",
609 goldfish_sync_timeline_destroy(timeline);
612 DPRINT("Done executing sync command");
614 mutex_unlock(&sync_state->mutex_lock);
617 /* Guest-side interface: file operations */
619 /* Goldfish sync context and ioctl info.
621 * When a sync context is created by open()-ing the goldfish sync device, we
622 * create a sync context (|goldfish_sync_context|).
624 * Currently, the only data required to track is the sync timeline itself
625 * along with the current time, which are all packed up in the
626 * |goldfish_sync_timeline_obj| field. We use a |goldfish_sync_context|
627 * as the filp->private_data.
629 * Next, when a sync context user requests that work be queued and a fence
630 * fd provided, we use the |goldfish_sync_ioctl_info| struct, which holds
631 * information about which host handles to touch for this particular
632 * queue-work operation. We need to know about the host-side sync thread
633 * and the particular host-side GLsync object. We also possibly write out
636 struct goldfish_sync_context {
637 struct goldfish_sync_timeline_obj *timeline;
640 struct goldfish_sync_ioctl_info {
641 uint64_t host_glsync_handle_in;
642 uint64_t host_syncthread_handle_in;
646 static int goldfish_sync_open(struct inode *inode, struct file *file)
649 struct goldfish_sync_context *sync_context;
653 mutex_lock(&global_sync_state->mutex_lock);
655 sync_context = kzalloc(sizeof(struct goldfish_sync_context), GFP_KERNEL);
657 if (sync_context == NULL) {
658 ERR("Creation of goldfish sync context failed!");
659 mutex_unlock(&global_sync_state->mutex_lock);
663 sync_context->timeline = NULL;
665 file->private_data = sync_context;
667 DPRINT("successfully create a sync context @0x%p", sync_context);
669 mutex_unlock(&global_sync_state->mutex_lock);
674 static int goldfish_sync_release(struct inode *inode, struct file *file)
677 struct goldfish_sync_context *sync_context;
681 mutex_lock(&global_sync_state->mutex_lock);
683 sync_context = file->private_data;
685 if (sync_context->timeline)
686 goldfish_sync_timeline_destroy(sync_context->timeline);
688 sync_context->timeline = NULL;
692 mutex_unlock(&global_sync_state->mutex_lock);
697 /* |goldfish_sync_ioctl| is the guest-facing interface of goldfish sync
698 * and is used in conjunction with eglCreateSyncKHR to queue up the
699 * actual work of waiting for the EGL sync command to complete,
700 * possibly returning a fence fd to the guest.
702 static long goldfish_sync_ioctl(struct file *file,
706 struct goldfish_sync_context *sync_context_data;
707 struct goldfish_sync_timeline_obj *timeline;
709 struct goldfish_sync_ioctl_info ioctl_data;
713 sync_context_data = file->private_data;
717 case GOLDFISH_SYNC_IOC_QUEUE_WORK:
719 DPRINT("exec GOLDFISH_SYNC_IOC_QUEUE_WORK");
721 mutex_lock(&global_sync_state->mutex_lock);
723 if (copy_from_user(&ioctl_data,
725 sizeof(ioctl_data))) {
726 ERR("Failed to copy memory for ioctl_data from user.");
727 mutex_unlock(&global_sync_state->mutex_lock);
731 if (ioctl_data.host_syncthread_handle_in == 0) {
732 DPRINT("Error: zero host syncthread handle!!!");
733 mutex_unlock(&global_sync_state->mutex_lock);
737 if (!sync_context_data->timeline) {
738 DPRINT("no timeline yet, create one.");
739 sync_context_data->timeline = goldfish_sync_timeline_create();
740 DPRINT("timeline: 0x%p", &sync_context_data->timeline);
743 timeline = sync_context_data->timeline;
744 fd_out = goldfish_sync_fence_create(timeline,
745 timeline->current_time + 1);
746 DPRINT("Created fence with fd %d and current time %u (timeline: 0x%p)",
748 sync_context_data->timeline->current_time + 1,
749 sync_context_data->timeline);
751 ioctl_data.fence_fd_out = fd_out;
753 if (copy_to_user((void __user *)arg,
755 sizeof(ioctl_data))) {
756 DPRINT("Error, could not copy to user!!!");
759 /* We won't be doing an increment, kref_put immediately. */
760 kref_put(&timeline->kref, delete_timeline_obj);
761 mutex_unlock(&global_sync_state->mutex_lock);
765 /* We are now about to trigger a host-side wait;
766 * accumulate on |pending_waits|. */
767 goldfish_sync_send_guestcmd(global_sync_state,
768 CMD_TRIGGER_HOST_WAIT,
769 ioctl_data.host_glsync_handle_in,
770 ioctl_data.host_syncthread_handle_in,
771 (uint64_t)(uintptr_t)(sync_context_data->timeline));
773 mutex_unlock(&global_sync_state->mutex_lock);
780 static const struct file_operations goldfish_sync_fops = {
781 .owner = THIS_MODULE,
782 .open = goldfish_sync_open,
783 .release = goldfish_sync_release,
784 .unlocked_ioctl = goldfish_sync_ioctl,
785 .compat_ioctl = goldfish_sync_ioctl,
788 static struct miscdevice goldfish_sync_device = {
789 .name = "goldfish_sync",
790 .fops = &goldfish_sync_fops,
794 static bool setup_verify_batch_cmd_addr(struct goldfish_sync_state *sync_state,
796 uint32_t addr_offset,
797 uint32_t addr_offset_high)
799 uint64_t batch_addr_phys;
800 uint32_t batch_addr_phys_test_lo;
801 uint32_t batch_addr_phys_test_hi;
804 ERR("Could not use batch command address!");
808 batch_addr_phys = virt_to_phys(batch_addr);
809 writel((uint32_t)(batch_addr_phys),
810 sync_state->reg_base + addr_offset);
811 writel((uint32_t)(batch_addr_phys >> 32),
812 sync_state->reg_base + addr_offset_high);
814 batch_addr_phys_test_lo =
815 readl(sync_state->reg_base + addr_offset);
816 batch_addr_phys_test_hi =
817 readl(sync_state->reg_base + addr_offset_high);
819 if (virt_to_phys(batch_addr) !=
820 (((uint64_t)batch_addr_phys_test_hi << 32) |
821 batch_addr_phys_test_lo)) {
822 ERR("Invalid batch command address!");
829 int goldfish_sync_probe(struct platform_device *pdev)
831 struct resource *ioresource;
832 struct goldfish_sync_state *sync_state = global_sync_state;
837 sync_state->to_do_end = 0;
839 spin_lock_init(&sync_state->lock);
840 mutex_init(&sync_state->mutex_lock);
842 platform_set_drvdata(pdev, sync_state);
844 ioresource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
845 if (ioresource == NULL) {
846 ERR("platform_get_resource failed");
850 sync_state->reg_base = devm_ioremap(&pdev->dev, ioresource->start, PAGE_SIZE);
851 if (sync_state->reg_base == NULL) {
852 ERR("Could not ioremap");
856 sync_state->irq = platform_get_irq(pdev, 0);
857 if (sync_state->irq < 0) {
858 ERR("Could not platform_get_irq");
862 status = devm_request_irq(&pdev->dev,
864 goldfish_sync_interrupt,
869 ERR("request_irq failed");
873 INIT_WORK(&sync_state->work_item,
874 goldfish_sync_work_item_fn);
876 misc_register(&goldfish_sync_device);
878 /* Obtain addresses for batch send/recv of commands. */
880 struct goldfish_sync_hostcmd *batch_addr_hostcmd;
881 struct goldfish_sync_guestcmd *batch_addr_guestcmd;
883 batch_addr_hostcmd = devm_kzalloc(&pdev->dev, sizeof(struct goldfish_sync_hostcmd),
885 batch_addr_guestcmd = devm_kzalloc(&pdev->dev, sizeof(struct goldfish_sync_guestcmd),
888 if (!setup_verify_batch_cmd_addr(sync_state,
890 SYNC_REG_BATCH_COMMAND_ADDR,
891 SYNC_REG_BATCH_COMMAND_ADDR_HIGH)) {
892 ERR("goldfish_sync: Could not setup batch command address");
896 if (!setup_verify_batch_cmd_addr(sync_state,
898 SYNC_REG_BATCH_GUESTCOMMAND_ADDR,
899 SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH)) {
900 ERR("goldfish_sync: Could not setup batch guest command address");
904 sync_state->batch_hostcmd = batch_addr_hostcmd;
905 sync_state->batch_guestcmd = batch_addr_guestcmd;
908 INFO("goldfish_sync: Initialized goldfish sync device");
910 writel(0, sync_state->reg_base + SYNC_REG_INIT);
915 static int goldfish_sync_remove(struct platform_device *pdev)
917 struct goldfish_sync_state *sync_state = global_sync_state;
921 misc_deregister(&goldfish_sync_device);
922 memset(sync_state, 0, sizeof(struct goldfish_sync_state));
926 static const struct of_device_id goldfish_sync_of_match[] = {
927 { .compatible = "google,goldfish-sync", },
930 MODULE_DEVICE_TABLE(of, goldfish_sync_of_match);
932 static const struct acpi_device_id goldfish_sync_acpi_match[] = {
937 MODULE_DEVICE_TABLE(acpi, goldfish_sync_acpi_match);
939 static struct platform_driver goldfish_sync = {
940 .probe = goldfish_sync_probe,
941 .remove = goldfish_sync_remove,
943 .name = "goldfish_sync",
944 .of_match_table = goldfish_sync_of_match,
945 .acpi_match_table = ACPI_PTR(goldfish_sync_acpi_match),
949 module_platform_driver(goldfish_sync);
951 MODULE_AUTHOR("Google, Inc.");
952 MODULE_DESCRIPTION("Android QEMU Sync Driver");
953 MODULE_LICENSE("GPL");
954 MODULE_VERSION("1.0");
/* This function is only to run a basic test of sync framework.
 * It creates a timeline and fence object whose signal point is at 1.
 * The timeline is incremented, and we use the sync framework's
 * sync_fence_wait on that fence object. If everything works out,
 * we should not hang in the wait and return immediately.
 * There is no way to explicitly run this test yet, but it
 * can be used by inserting it at the end of goldfish_sync_probe.
 */
void test_kernel_sync(void)
{
	struct goldfish_sync_timeline_obj *test_timeline;
	int test_fence_fd;

	DPRINT("test sw_sync");

	test_timeline = goldfish_sync_timeline_create();
	DPRINT("sw_sync_timeline_create -> 0x%p", test_timeline);

	test_fence_fd = goldfish_sync_fence_create(test_timeline, 1);
	DPRINT("sync_fence_create -> %d", test_fence_fd);

	DPRINT("incrementing test timeline");
	goldfish_sync_timeline_inc(test_timeline, 1);

	DPRINT("test waiting (should NOT hang)");
	sync_fence_wait(
			sync_fence_fdget(test_fence_fd), -1);
	DPRINT("test waiting (afterward)");
}