2 * drivers/video/tegra/host/dev.c
4 * Tegra Graphics Host Driver Entrypoint
6 * Copyright (c) 2010, NVIDIA Corporation.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
25 #include <linux/slab.h>
26 #include <linux/string.h>
27 #include <linux/spinlock.h>
29 #include <linux/cdev.h>
30 #include <linux/platform_device.h>
31 #include <linux/uaccess.h>
32 #include <linux/file.h>
33 #include <linux/clk.h>
37 #include <mach/nvhost.h>
38 #include <mach/nvmap.h>
40 #define DRIVER_NAME "tegra_grhost"
41 #define IFACE_NAME "nvhost"
/* Character-device numbering for the nvhost nodes.  nvhost_major may be
 * rewritten at runtime by nvhost_user_init() if the preferred region is
 * unavailable and a dynamic major is allocated instead. */
43 static int nvhost_major = NVHOST_MAJOR;
44 static int nvhost_minor = NVHOST_CHANNEL_BASE;
/*
 * Per-open-fd state for a channel device node; installed in
 * filp->private_data by nvhost_channelopen().
 *
 * NOTE(review): this view of the file is elided -- fields referenced
 * elsewhere in this file (pinarray_size, cmdbufs_pending, relocs_pending,
 * waitchk_pending, num_gathers, num_waitchks, syncpt_id, syncpt_incrs,
 * waitchk_mask, waitchk_ref, ...) are declared on lines not shown here.
 */
46 struct nvhost_channel_userctx {
47 struct nvhost_channel *ch;
48 struct nvhost_hwctx *hwctx;
/* nvmap handle for the gather buffer and its CPU mapping (op pairs) */
55 struct nvmap_handle_ref *gather_mem;
56 struct nvhost_op_pair *gathers;
/* pending pin/patch requests, and the handles to unpin after submit */
59 struct nvmap_pinarray_elem pinarray[NVHOST_MAX_HANDLES];
60 struct nvmap_handle *unpinarray[NVHOST_MAX_HANDLES];
61 struct nvmap_client *nvmap;
/* stale-wait checks queued by nvhost_channelwrite() */
62 struct nvhost_waitchk waitchks[NVHOST_MAX_WAIT_CHECKS];
/* Per-open-fd state for the control device node.  mod_locks[0] records a
 * host-module busy reference; entries 1..N-1 record module mutexes held
 * by this fd (released in nvhost_ctrlrelease()). */
67 struct nvhost_ctrl_userctx {
68 struct nvhost_master *dev;
69 u32 mod_locks[NV_HOST1X_NB_MLOCKS];
/*
 * Close a channel fd: drop the channel reference, release the hardware
 * context, unmap/free the gather buffer, and drop the nvmap client ref.
 * NOTE(review): guard conditions (e.g. NULL checks before ctxhandler.put
 * and nvmap_munmap) live on lines elided from this view -- confirm
 * against the full source before editing.
 */
72 static int nvhost_channelrelease(struct inode *inode, struct file *filp)
74 struct nvhost_channel_userctx *priv = filp->private_data;
76 filp->private_data = NULL;
78 nvhost_putchannel(priv->ch, priv->hwctx);
/* release the hw context back to the channel's context handler */
81 priv->ch->ctxhandler.put(priv->hwctx);
84 nvmap_munmap(priv->gather_mem, priv->gathers);
86 if (!IS_ERR_OR_NULL(priv->gather_mem))
87 nvmap_free(priv->ch->dev->nvmap, priv->gather_mem);
/* nvmap_client_put(NULL-safe?) -- set via SET_NVMAP_FD ioctl */
89 nvmap_client_put(priv->nvmap);
/*
 * Open a channel device node: take a channel reference, allocate the
 * per-fd context, allocate and map the gather buffer, and (optionally)
 * allocate a hardware context.  On any failure after the priv context is
 * installed, cleanup funnels through nvhost_channelrelease().
 * NOTE(review): several error-check lines are elided from this view.
 */
94 static int nvhost_channelopen(struct inode *inode, struct file *filp)
96 struct nvhost_channel_userctx *priv;
97 struct nvhost_channel *ch;
/* the cdev is embedded in the channel, so recover the channel from it */
100 ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
101 ch = nvhost_getchannel(ch);
105 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
/* allocation-failure path: drop the channel ref taken above */
107 nvhost_putchannel(ch, NULL);
110 filp->private_data = priv;
/* room for NVHOST_MAX_GATHERS op pairs, 32-byte aligned, cacheable */
112 gather_size = sizeof(struct nvhost_op_pair) * NVHOST_MAX_GATHERS;
113 priv->gather_mem = nvmap_alloc(ch->dev->nvmap, gather_size, 32,
114 NVMAP_HANDLE_CACHEABLE);
115 if (IS_ERR(priv->gather_mem))
/* hw-context allocation is optional -- only for modules providing it */
118 if (ch->ctxhandler.alloc) {
119 priv->hwctx = ch->ctxhandler.alloc(ch);
124 priv->gathers = (struct nvhost_op_pair *)nvmap_mmap(priv->gather_mem);
/* common error exit: tear down everything built so far */
128 nvhost_channelrelease(inode, filp);
/*
 * Queue a GATHER op at gathers[idx] and record a pin/patch request so
 * that the physical address of mem_id (+offset) is written into op2 of
 * the pair when the pinarray is processed at submit (flush) time.
 */
132 static void add_gather(struct nvhost_channel_userctx *ctx, int idx,
133 u32 mem_id, u32 words, u32 offset)
135 struct nvmap_pinarray_elem *pin;
136 pin = &ctx->pinarray[ctx->pinarray_size++];
/* patch target: op2 of gather pair idx inside the gather buffer */
137 pin->patch_mem = (u32)nvmap_ref_to_handle(ctx->gather_mem);
138 pin->patch_offset = (idx * sizeof(struct nvhost_op_pair)) +
139 offsetof(struct nvhost_op_pair, op2);
140 pin->pin_mem = mem_id;
141 pin->pin_offset = offset;
/* op1 carries the opcode/word-count; op2 gets the address via the pin */
142 ctx->gathers[idx].op1 = nvhost_opcode_gather(0, words);
/* Reset the write-side submit state machine (used on error to drop a
 * partially-received submit). */
145 static void reset_submit(struct nvhost_channel_userctx *ctx)
147 ctx->cmdbufs_pending = 0;
148 ctx->relocs_pending = 0;
149 ctx->waitchk_pending = 0;
/*
 * write() on a channel fd streams a submit in stages, driven by the
 * *_pending counters in the per-fd context:
 *   1. all counters zero  -> expect a struct nvhost_submit_hdr, which
 *      (re)initializes the counters (hdr fields land starting at
 *      priv->syncpt_id -- layout-dependent copy, note the elided struct);
 *   2. cmdbufs_pending    -> one struct nvhost_cmdbuf per iteration,
 *      each turned into a gather via add_gather();
 *   3. relocs_pending     -> a batch of struct nvhost_reloc appended
 *      straight into the pinarray;
 *   4. waitchk_pending    -> one struct nvhost_waitchk per iteration.
 * Returns the number of bytes consumed.  NOTE(review): the surrounding
 * loop, short-read returns, and error exits are on elided lines.
 */
152 static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
153 size_t count, loff_t *offp)
155 struct nvhost_channel_userctx *priv = filp->private_data;
156 size_t remaining = count;
/* stage 1: submit header */
161 if (!priv->relocs_pending && !priv->cmdbufs_pending && !priv->waitchk_pending) {
162 consumed = sizeof(struct nvhost_submit_hdr);
163 if (remaining < consumed)
165 if (copy_from_user(&priv->syncpt_id, buf, consumed)) {
169 if (!priv->cmdbufs_pending) {
173 /* leave room for ctx switch */
174 priv->num_gathers = 2;
175 priv->pinarray_size = 0;
176 priv->waitchk_mask |= priv->waitchk_ref;
/* stage 2: command buffers -> gathers */
177 } else if (priv->cmdbufs_pending) {
178 struct nvhost_cmdbuf cmdbuf;
179 consumed = sizeof(cmdbuf);
180 if (remaining < consumed)
182 if (copy_from_user(&cmdbuf, buf, consumed)) {
186 add_gather(priv, priv->num_gathers++,
187 cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
188 priv->cmdbufs_pending--;
/* stage 3: relocations, copied in bulk into the pinarray */
189 } else if (priv->relocs_pending) {
190 int numrelocs = remaining / sizeof(struct nvhost_reloc);
193 numrelocs = min_t(int, numrelocs, priv->relocs_pending);
194 consumed = numrelocs * sizeof(struct nvhost_reloc);
195 if (copy_from_user(&priv->pinarray[priv->pinarray_size],
200 priv->pinarray_size += numrelocs;
201 priv->relocs_pending -= numrelocs;
/* stage 4: wait checks */
202 } else if (priv->waitchk_pending) {
203 struct nvhost_waitchk *waitp;
204 consumed = sizeof(struct nvhost_waitchk);
205 if (remaining < consumed)
207 waitp = &priv->waitchks[priv->num_waitchks];
208 if (copy_from_user(waitp, buf, consumed)) {
212 priv->num_waitchks++;
213 priv->waitchk_pending--;
218 remaining -= consumed;
/* error path: reported once, submit state reset on elided lines */
223 dev_err(&priv->ch->dev->pdev->dev, "channel write error\n");
228 return (count - remaining);
/*
 * FLUSH ioctl: submit the work accumulated via nvhost_channelwrite().
 * Sequence: validate submit state -> keep module powered -> pin/patch
 * buffers -> take the channel submit lock -> drop stale waits -> inject
 * context save/restore gathers if the hw context changed -> compute the
 * absolute syncpoint target -> hand everything to nvhost_channel_submit()
 * and schedule the completion interrupt.  Returns the absolute sync value
 * in args->value.  NOTE(review): early-return/error lines and some locals
 * (num_unpin, gather_idx, num_intrs, syncval, err) are on elided lines.
 */
231 static int nvhost_ioctl_channel_flush(struct nvhost_channel_userctx *ctx,
232 struct nvhost_get_param_args *args)
234 struct nvhost_cpuinterrupt ctxsw;
/* a non-zero pending counter means userspace stopped mid-submit */
241 if (ctx->relocs_pending || ctx->cmdbufs_pending || ctx->waitchk_pending) {
243 dev_err(&ctx->ch->dev->pdev->dev, "channel submit out of sync\n");
247 dev_err(&ctx->ch->dev->pdev->dev, "no nvmap context set\n");
/* nothing beyond the two reserved ctx-switch slots: nothing to do */
250 if (ctx->num_gathers <= 2)
253 /* keep module powered */
254 nvhost_module_busy(&ctx->ch->mod);
256 /* pin mem handles and patch physical addresses */
257 num_unpin = nvmap_pin_array(ctx->nvmap,
258 nvmap_ref_to_handle(ctx->gather_mem),
259 ctx->pinarray, ctx->pinarray_size,
262 dev_warn(&ctx->ch->dev->pdev->dev, "nvmap_pin_array failed: "
264 nvhost_module_idle(&ctx->ch->mod);
268 /* get submit lock */
269 err = mutex_lock_interruptible(&ctx->ch->submitlock);
/* lock failed: unwind the pin and the power reference */
271 nvmap_unpin_handles(ctx->nvmap, ctx->unpinarray, num_unpin);
272 nvhost_module_idle(&ctx->ch->mod);
276 /* remove stale waits */
277 if (ctx->num_waitchks) {
278 err = nvhost_syncpt_wait_check(ctx->nvmap,
279 &ctx->ch->dev->syncpt, ctx->waitchk_mask,
280 ctx->waitchks, ctx->num_waitchks);
282 dev_warn(&ctx->ch->dev->pdev->dev,
283 "nvhost_syncpt_wait_check failed: %d\n", err);
284 mutex_unlock(&ctx->ch->submitlock);
285 nvmap_unpin_handles(ctx->nvmap, ctx->unpinarray, num_unpin);
286 nvhost_module_idle(&ctx->ch->mod);
289 ctx->num_waitchks = 0;
290 ctx->waitchk_mask = 0;
/* context switch: restore the incoming ctx, save the outgoing one */
294 if (ctx->ch->cur_ctx != ctx->hwctx) {
295 struct nvhost_hwctx *hw = ctx->hwctx;
296 if (hw && hw->valid) {
298 ctx->gathers[gather_idx].op1 =
299 nvhost_opcode_gather(0, hw->restore_size);
300 ctx->gathers[gather_idx].op2 = hw->restore_phys;
301 ctx->syncpt_incrs += hw->restore_incrs;
303 hw = ctx->ch->cur_ctx;
306 ctx->gathers[gather_idx].op1 =
307 nvhost_opcode_gather(0, hw->save_size);
308 ctx->gathers[gather_idx].op2 = hw->save_phys;
309 ctx->syncpt_incrs += hw->save_incrs;
/* interrupt fires one increment before the save completes */
311 ctxsw.syncpt_val = hw->save_incrs - 1;
312 ctxsw.intr_data = hw;
/* hold a ref on the outgoing ctx until its save completes */
314 ctx->ch->ctxhandler.get(hw);
316 ctx->ch->cur_ctx = ctx->hwctx;
319 /* add a setclass for modules that require it */
320 if (gather_idx == 2 && ctx->ch->desc->class) {
322 ctx->gathers[gather_idx].op1 =
323 nvhost_opcode_setclass(ctx->ch->desc->class, 0, 0);
324 ctx->gathers[gather_idx].op2 = NVHOST_OPCODE_NOOP;
327 /* get absolute sync value */
328 if (BIT(ctx->syncpt_id) & NVSYNCPTS_CLIENT_MANAGED)
329 syncval = nvhost_syncpt_set_max(&ctx->ch->dev->syncpt,
330 ctx->syncpt_id, ctx->syncpt_incrs);
332 syncval = nvhost_syncpt_incr_max(&ctx->ch->dev->syncpt,
333 ctx->syncpt_id, ctx->syncpt_incrs);
335 /* patch absolute syncpt value into interrupt triggers */
336 ctxsw.syncpt_val += syncval - ctx->syncpt_incrs;
338 nvhost_channel_submit(ctx->ch, ctx->nvmap, &ctx->gathers[gather_idx],
339 ctx->num_gathers - gather_idx, &ctxsw, num_intrs,
340 ctx->unpinarray, num_unpin,
341 ctx->syncpt_id, syncval);
343 /* schedule a submit complete interrupt */
344 nvhost_intr_add_action(&ctx->ch->dev->intr, ctx->syncpt_id, syncval,
345 NVHOST_INTR_ACTION_SUBMIT_COMPLETE, ctx->ch, NULL);
347 mutex_unlock(&ctx->ch->submitlock);
/* report the absolute fence value back to userspace */
348 args->value = syncval;
/*
 * ioctl dispatcher for channel fds.  Arguments are staged through an
 * on-stack buffer: copied in for _IOC_WRITE commands, handler invoked,
 * then copied back out for _IOC_READ commands.
 * NOTE(review): break statements and some error returns are elided.
 */
352 static long nvhost_channelctl(struct file *filp,
353 unsigned int cmd, unsigned long arg)
355 struct nvhost_channel_userctx *priv = filp->private_data;
356 u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
/* reject commands outside this driver's magic/number range */
359 if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
360 (_IOC_NR(cmd) == 0) ||
361 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST))
/* compile-time-known sizes only; stack buffer must be large enough */
364 BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE);
366 if (_IOC_DIR(cmd) & _IOC_WRITE) {
367 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
372 case NVHOST_IOCTL_CHANNEL_FLUSH:
373 err = nvhost_ioctl_channel_flush(priv, (void *)buf);
375 case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
376 /* host syncpt ID is used by the RM (and never be given out) */
377 BUG_ON(priv->ch->desc->syncpts & (1 << NVSYNCPT_GRAPHICS_HOST));
378 ((struct nvhost_get_param_args *)buf)->value =
379 priv->ch->desc->syncpts;
381 case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
382 ((struct nvhost_get_param_args *)buf)->value =
383 priv->ch->desc->waitbases;
385 case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
386 ((struct nvhost_get_param_args *)buf)->value =
387 priv->ch->desc->modulemutexes;
389 case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
391 int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
392 struct nvmap_client *new_client = nvmap_client_get_file(fd);
394 if (IS_ERR(new_client)) {
395 err = PTR_ERR(new_client);
/* swap in the new client, dropping the ref on the old one */
400 nvmap_client_put(priv->nvmap);
402 priv->nvmap = new_client;
410 if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
411 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
/* file_operations for the per-channel device nodes */
416 static struct file_operations nvhost_channelops = {
417 .owner = THIS_MODULE,
418 .release = nvhost_channelrelease,
419 .open = nvhost_channelopen,
420 .write = nvhost_channelwrite,
421 .unlocked_ioctl = nvhost_channelctl
/*
 * Close the control fd: drop any power reference held via mod_locks[0]
 * and release all module mutexes this fd still holds.
 */
424 static int nvhost_ctrlrelease(struct inode *inode, struct file *filp)
426 struct nvhost_ctrl_userctx *priv = filp->private_data;
429 filp->private_data = NULL;
/* slot 0 is the host module busy reference, not a mutex */
430 if (priv->mod_locks[0])
431 nvhost_module_idle(&priv->dev->mod);
432 for (i = 1; i < NV_HOST1X_NB_MLOCKS; i++)
433 if (priv->mod_locks[i])
434 nvhost_mutex_unlock(&priv->dev->cpuaccess, i);
/*
 * Open the control device node: allocate a zeroed per-fd context bound
 * to the owning nvhost_master (recovered from the embedded cdev).
 * NOTE(review): the allocation-failure return is on an elided line.
 */
439 static int nvhost_ctrlopen(struct inode *inode, struct file *filp)
441 struct nvhost_master *host = container_of(inode->i_cdev, struct nvhost_master, cdev);
442 struct nvhost_ctrl_userctx *priv;
444 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
449 filp->private_data = priv;
/* SYNCPT_READ ioctl: return the current value of syncpoint args->id
 * (id is range-checked against the number of hardware syncpoints). */
453 static int nvhost_ioctl_ctrl_syncpt_read(
454 struct nvhost_ctrl_userctx *ctx,
455 struct nvhost_ctrl_syncpt_read_args *args)
457 if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
459 args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
/* SYNCPT_INCR ioctl: increment syncpoint args->id from the CPU side
 * (id is range-checked against the number of hardware syncpoints). */
463 static int nvhost_ioctl_ctrl_syncpt_incr(
464 struct nvhost_ctrl_userctx *ctx,
465 struct nvhost_ctrl_syncpt_incr_args *args)
467 if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
469 nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
/*
 * SYNCPT_WAIT ioctl: block until syncpoint args->id reaches
 * args->thresh, or the (millisecond) timeout expires.
 * NVHOST_NO_TIMEOUT maps to an unbounded wait.
 */
473 static int nvhost_ioctl_ctrl_syncpt_wait(
474 struct nvhost_ctrl_userctx *ctx,
475 struct nvhost_ctrl_syncpt_wait_args *args)
478 if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
480 if (args->timeout == NVHOST_NO_TIMEOUT)
481 timeout = MAX_SCHEDULE_TIMEOUT;
483 timeout = (u32)msecs_to_jiffies(args->timeout);
485 return nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
486 args->thresh, timeout);
/*
 * MODULE_MUTEX ioctl: acquire or release a hardware module mutex on
 * behalf of this fd.  Ownership is tracked in ctx->mod_locks[] so that
 * nvhost_ctrlrelease() can clean up if userspace dies while holding one.
 * NOTE(review): the special-casing of id 0 (busy/idle instead of a real
 * mutex) matches nvhost_ctrlrelease(); the branch structure around it
 * is partially on elided lines -- confirm against the full source.
 */
489 static int nvhost_ioctl_ctrl_module_mutex(
490 struct nvhost_ctrl_userctx *ctx,
491 struct nvhost_ctrl_module_mutex_args *args)
494 if (args->id >= NV_HOST1X_NB_MLOCKS ||
/* lock request for a mutex this fd does not already hold */
498 if (args->lock && !ctx->mod_locks[args->id]) {
500 nvhost_module_busy(&ctx->dev->mod);
502 err = nvhost_mutex_try_lock(&ctx->dev->cpuaccess, args->id);
504 ctx->mod_locks[args->id] = 1;
/* unlock request for a mutex this fd holds */
506 else if (!args->lock && ctx->mod_locks[args->id]) {
508 nvhost_module_idle(&ctx->dev->mod);
510 nvhost_mutex_unlock(&ctx->dev->cpuaccess, args->id);
511 ctx->mod_locks[args->id] = 0;
/*
 * MODULE_REGRDWR ioctl: read or write a set of module register blocks.
 * For each of num_offsets offsets, block_size bytes are transferred in
 * batches of at most 64 words through an on-stack staging buffer.
 * Access is gated by nvhost_access_module_regs().
 * NOTE(review): the read/write direction test, the inner loop bounds,
 * and the pointer advancement are on elided lines.
 */
516 static int nvhost_ioctl_ctrl_module_regrdwr(
517 struct nvhost_ctrl_userctx *ctx,
518 struct nvhost_ctrl_module_regrdwr_args *args)
520 u32 num_offsets = args->num_offsets;
521 u32 *offsets = args->offsets;
522 void *values = args->values;
/* refuse access to modules this context may not touch */
525 if (!nvhost_access_module_regs(&ctx->dev->cpuaccess, args->id) ||
529 while (num_offsets--) {
530 u32 remaining = args->block_size;
532 if (get_user(offs, offsets))
/* cap each chunk at 64 words to bound the staging buffer */
536 u32 batch = min(remaining, 64*sizeof(u32));
538 if (copy_from_user(vals, values, batch))
540 nvhost_write_module_regs(&ctx->dev->cpuaccess,
541 args->id, offs, batch, vals);
543 nvhost_read_module_regs(&ctx->dev->cpuaccess,
544 args->id, offs, batch, vals);
545 if (copy_to_user(values, vals, batch))
/*
 * ioctl dispatcher for the control fd.  Same staging pattern as
 * nvhost_channelctl(): copy-in for _IOC_WRITE, dispatch, copy-out for
 * _IOC_READ.  NOTE(review): break statements and the default case are
 * on elided lines.
 */
557 static long nvhost_ctrlctl(struct file *filp,
558 unsigned int cmd, unsigned long arg)
560 struct nvhost_ctrl_userctx *priv = filp->private_data;
561 u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE];
/* reject commands outside this driver's magic/number range */
564 if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
565 (_IOC_NR(cmd) == 0) ||
566 (_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST))
569 BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE);
571 if (_IOC_DIR(cmd) & _IOC_WRITE) {
572 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
577 case NVHOST_IOCTL_CTRL_SYNCPT_READ:
578 err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf);
580 case NVHOST_IOCTL_CTRL_SYNCPT_INCR:
581 err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf);
583 case NVHOST_IOCTL_CTRL_SYNCPT_WAIT:
584 err = nvhost_ioctl_ctrl_syncpt_wait(priv, (void *)buf);
586 case NVHOST_IOCTL_CTRL_MODULE_MUTEX:
587 err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf);
589 case NVHOST_IOCTL_CTRL_MODULE_REGRDWR:
590 err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf);
597 if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
598 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
/* file_operations for the control device node */
603 static struct file_operations nvhost_ctrlops = {
604 .owner = THIS_MODULE,
605 .release = nvhost_ctrlrelease,
606 .open = nvhost_ctrlopen,
607 .unlocked_ioctl = nvhost_ctrlctl
/*
 * Power callback for the host1x module.  On power-on, reprogram the
 * interrupt unit for the current clock rate; on power-off, suspend all
 * channels and snapshot syncpoint state so it can be restored later.
 */
610 static void power_host(struct nvhost_module *mod, enum nvhost_power_action action)
612 struct nvhost_master *dev = container_of(mod, struct nvhost_master, mod);
614 if (action == NVHOST_POWER_ACTION_ON) {
615 nvhost_intr_configure(&dev->intr, clk_get_rate(mod->clk[0]));
617 else if (action == NVHOST_POWER_ACTION_OFF) {
619 for (i = 0; i < NVHOST_NUMCHANNELS; i++)
620 nvhost_channel_suspend(&dev->channels[i]);
621 nvhost_syncpt_save(&dev->syncpt);
/*
 * Create the userspace interface: a device class, a chrdev region of
 * NVHOST_NUMCHANNELS + 1 minors (one per channel plus the control node),
 * and cdev/device pairs for each.  Falls back to a dynamically allocated
 * major if the preferred region is taken.
 * NOTE(review): goto-based error unwinding lines are elided.
 */
625 static int __devinit nvhost_user_init(struct nvhost_master *host)
629 host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME);
630 if (IS_ERR(host->nvhost_class)) {
631 err = PTR_ERR(host->nvhost_class);
632 dev_err(&host->pdev->dev, "failed to create class\n");
/* try the static major first, then fall back to dynamic allocation */
637 devno = MKDEV(nvhost_major, nvhost_minor);
638 err = register_chrdev_region(devno, NVHOST_NUMCHANNELS + 1, IFACE_NAME);
640 err = alloc_chrdev_region(&devno, nvhost_minor,
641 NVHOST_NUMCHANNELS + 1, IFACE_NAME);
642 nvhost_major = MAJOR(devno);
645 dev_err(&host->pdev->dev, "failed to reserve chrdev region\n");
/* one cdev + class device per channel */
649 for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
650 struct nvhost_channel *ch = &host->channels[i];
652 cdev_init(&ch->cdev, &nvhost_channelops);
653 ch->cdev.owner = THIS_MODULE;
655 devno = MKDEV(nvhost_major, nvhost_minor + i);
656 err = cdev_add(&ch->cdev, devno, 1);
658 dev_err(&host->pdev->dev, "failed to add chan %i cdev\n", i);
661 ch->node = device_create(host->nvhost_class, NULL, devno, NULL,
662 IFACE_NAME "-%s", ch->desc->name);
663 if (IS_ERR(ch->node)) {
664 err = PTR_ERR(ch->node);
665 dev_err(&host->pdev->dev, "failed to create chan %i device\n", i);
/* the control node uses the minor just past the channel range */
670 cdev_init(&host->cdev, &nvhost_ctrlops);
671 host->cdev.owner = THIS_MODULE;
672 devno = MKDEV(nvhost_major, nvhost_minor + NVHOST_NUMCHANNELS);
673 err = cdev_add(&host->cdev, devno, 1);
676 host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL,
678 if (IS_ERR(host->ctrl)) {
679 err = PTR_ERR(host->ctrl);
680 dev_err(&host->pdev->dev, "failed to create ctrl device\n");
/*
 * Platform probe: gather MEM/IRQ resources, allocate the nvhost_master,
 * create the nvmap client, map registers, initialize channels, CPU
 * access, interrupts, the user interface, and the host1x power module,
 * then reset syncpoints and register with the nvhost bus.
 * NOTE(review): the goto-labelled error-unwind section is almost
 * entirely elided from this view (see the TODO near the end).
 */
689 static int __devinit nvhost_probe(struct platform_device *pdev)
691 struct nvhost_master *host;
692 struct resource *regs, *intr0, *intr1;
/* two IRQs: intr0/intr1 are passed to nvhost_intr_init() below */
695 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
696 intr0 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
697 intr1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
699 if (!regs || !intr0 || !intr1) {
700 dev_err(&pdev->dev, "missing required platform resources\n");
704 host = kzalloc(sizeof(*host), GFP_KERNEL);
710 host->nvmap = nvmap_create_client(nvmap_dev, "nvhost");
712 dev_err(&pdev->dev, "unable to create nvmap client\n");
717 host->reg_mem = request_mem_region(regs->start,
718 resource_size(regs), pdev->name);
719 if (!host->reg_mem) {
720 dev_err(&pdev->dev, "failed to get host register memory\n");
724 host->aperture = ioremap(regs->start, resource_size(regs));
725 if (!host->aperture) {
726 dev_err(&pdev->dev, "failed to remap host registers\n");
/* sync registers live at a fixed offset inside channel 0's aperture */
730 host->sync_aperture = host->aperture +
731 (NV_HOST1X_CHANNEL0_BASE +
732 HOST1X_CHANNEL_SYNC_REG_BASE);
734 for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
735 struct nvhost_channel *ch = &host->channels[i];
736 err = nvhost_channel_init(ch, host, i);
738 dev_err(&pdev->dev, "failed to init channel %d\n", i);
743 err = nvhost_cpuaccess_init(&host->cpuaccess, pdev);
745 err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
747 err = nvhost_user_init(host);
749 err = nvhost_module_init(&host->mod, "host1x", power_host, NULL, &pdev->dev);
752 platform_set_drvdata(pdev, host);
/* clock must be running while touching syncpoint registers */
754 clk_enable(host->mod.clk[0]);
755 nvhost_syncpt_reset(&host->syncpt);
756 clk_disable(host->mod.clk[0]);
758 nvhost_bus_register(host);
760 nvhost_debug_init(host);
762 dev_info(&pdev->dev, "initialized\n");
/* error path: incomplete unwind, acknowledged by the original author */
767 nvmap_client_put(host->nvmap);
768 /* TODO: [ahatala 2010-05-04] */
/* Platform remove callback -- body is on elided lines; from the visible
 * source it appears to be a stub (registered via __exit_p below). */
773 static int __exit nvhost_remove(struct platform_device *pdev)
/*
 * Legacy platform PM suspend: power down the host1x module, then save
 * syncpoint state with the clock temporarily enabled so the registers
 * can be read.
 */
778 static int nvhost_suspend(struct platform_device *pdev, pm_message_t state)
780 struct nvhost_master *host = platform_get_drvdata(pdev);
781 dev_info(&pdev->dev, "suspending\n");
782 nvhost_module_suspend(&host->mod);
783 clk_enable(host->mod.clk[0]);
784 nvhost_syncpt_save(&host->syncpt);
785 clk_disable(host->mod.clk[0]);
786 dev_info(&pdev->dev, "suspended\n");
/*
 * Legacy platform PM resume: restore syncpoint state (saved at suspend)
 * with the clock temporarily enabled.
 */
790 static int nvhost_resume(struct platform_device *pdev)
792 struct nvhost_master *host = platform_get_drvdata(pdev);
793 dev_info(&pdev->dev, "resuming\n");
794 clk_enable(host->mod.clk[0]);
795 nvhost_syncpt_reset(&host->syncpt);
796 clk_disable(host->mod.clk[0]);
797 dev_info(&pdev->dev, "resumed\n");
/* Platform driver; .probe is supplied via platform_driver_probe() in
 * nvhost_mod_init() rather than set here. */
801 static struct platform_driver nvhost_driver = {
802 .remove = __exit_p(nvhost_remove),
803 .suspend = nvhost_suspend,
804 .resume = nvhost_resume,
806 .owner = THIS_MODULE,
/* Module init: register the driver and probe immediately (non-hotplug). */
811 static int __init nvhost_mod_init(void)
813 return platform_driver_probe(&nvhost_driver, nvhost_probe);
/* Module exit: unregister the platform driver. */
816 static void __exit nvhost_mod_exit(void)
818 platform_driver_unregister(&nvhost_driver);
/* Module entry/exit hooks and metadata. */
821 module_init(nvhost_mod_init);
822 module_exit(nvhost_mod_exit);
824 MODULE_AUTHOR("NVIDIA");
825 MODULE_DESCRIPTION("Graphics host driver for Tegra products");
826 MODULE_VERSION("1.0");
827 MODULE_LICENSE("Dual BSD/GPL");
828 MODULE_ALIAS("platform-nvhost");