Merge remote branch 'common/android-2.6.36' into android-tegra-2.6.36
[firefly-linux-kernel-4.4.55.git] / drivers / video / tegra / host / dev.c
1 /*
2  * drivers/video/tegra/host/dev.c
3  *
4  * Tegra Graphics Host Driver Entrypoint
5  *
6  * Copyright (c) 2010, NVIDIA Corporation.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include "dev.h"
24
25 #include <linux/slab.h>
26 #include <linux/string.h>
27 #include <linux/spinlock.h>
28 #include <linux/fs.h>
29 #include <linux/cdev.h>
30 #include <linux/platform_device.h>
31 #include <linux/uaccess.h>
32 #include <linux/file.h>
33 #include <linux/clk.h>
34
35 #include <asm/io.h>
36
37 #include <mach/nvhost.h>
38 #include <mach/nvmap.h>
39
40 #define DRIVER_NAME "tegra_grhost"
41 #define IFACE_NAME "nvhost"
42
/* Char-dev numbering; major may be reassigned at runtime by
 * alloc_chrdev_region() when NVHOST_MAJOR is 0. */
static int nvhost_major = NVHOST_MAJOR;
static int nvhost_minor = NVHOST_CHANNEL_BASE;
45
/* Per-open-fd state for a channel node; accumulates one submit at a time
 * streamed in via write() and flushed via ioctl. */
struct nvhost_channel_userctx {
        struct nvhost_channel *ch;      /* channel this fd refers to */
        struct nvhost_hwctx *hwctx;     /* hw context, if class uses one */
        u32 syncpt_id;                  /* submit header: target syncpt */
        u32 syncpt_incrs;               /* submit header: increments expected */
        u32 cmdbufs_pending;            /* cmdbufs still to be streamed in */
        u32 relocs_pending;             /* relocs still to be streamed in */
        u32 waitchk_pending;            /* wait checks still to be streamed in */
        u32 waitchk_ref;                /* syncpt mask from submit header */
        struct nvmap_handle_ref *gather_mem; /* backing store for gathers[] */
        struct nvhost_op_pair *gathers; /* CPU mapping of gather_mem */
        int num_gathers;                /* slots used (starts at 2 for ctxsw) */
        int pinarray_size;              /* entries used in pinarray[] */
        struct nvmap_pinarray_elem pinarray[NVHOST_MAX_HANDLES];
        struct nvmap_handle *unpinarray[NVHOST_MAX_HANDLES];
        struct nvmap_client *nvmap;     /* set via SET_NVMAP_FD ioctl */
        struct nvhost_waitchk waitchks[NVHOST_MAX_WAIT_CHECKS];
        u32 num_waitchks;               /* entries used in waitchks[] */
        u32 waitchk_mask;               /* accumulated waitchk syncpt mask */
};
66
/* Per-open-fd state for the control node; mod_locks[0] tracks the host1x
 * power refcount, the rest track hardware module mutexes held by this fd. */
struct nvhost_ctrl_userctx {
        struct nvhost_master *dev;
        u32 mod_locks[NV_HOST1X_NB_MLOCKS];
};
71
/*
 * Release a channel fd: drop the channel reference, free the hw context,
 * and tear down the gather buffer mapping/allocation.  Also used as the
 * error-unwind path of nvhost_channelopen(), so every field is checked
 * before use.
 */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
        struct nvhost_channel_userctx *priv = filp->private_data;

        filp->private_data = NULL;

        /* NOTE(review): priv->ch is dereferenced below after the channel
         * reference is dropped here — confirm channels are never freed on
         * the last put, otherwise these uses are use-after-put */
        nvhost_putchannel(priv->ch, priv->hwctx);

        if (priv->hwctx)
                priv->ch->ctxhandler.put(priv->hwctx);

        if (priv->gathers)
                nvmap_munmap(priv->gather_mem, priv->gathers);

        /* gather_mem may be an ERR_PTR when open failed mid-way */
        if (!IS_ERR_OR_NULL(priv->gather_mem))
                nvmap_free(priv->ch->dev->nvmap, priv->gather_mem);

        nvmap_client_put(priv->nvmap);
        kfree(priv);
        return 0;
}
93
94 static int nvhost_channelopen(struct inode *inode, struct file *filp)
95 {
96         struct nvhost_channel_userctx *priv;
97         struct nvhost_channel *ch;
98         size_t gather_size;
99
100         ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
101         ch = nvhost_getchannel(ch);
102         if (!ch)
103                 return -ENOMEM;
104
105         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
106         if (!priv) {
107                 nvhost_putchannel(ch, NULL);
108                 return -ENOMEM;
109         }
110         filp->private_data = priv;
111         priv->ch = ch;
112         gather_size = sizeof(struct nvhost_op_pair) * NVHOST_MAX_GATHERS;
113         priv->gather_mem = nvmap_alloc(ch->dev->nvmap, gather_size, 32,
114                                        NVMAP_HANDLE_CACHEABLE);
115         if (IS_ERR(priv->gather_mem))
116                 goto fail;
117
118         if (ch->ctxhandler.alloc) {
119                 priv->hwctx = ch->ctxhandler.alloc(ch);
120                 if (!priv->hwctx)
121                         goto fail;
122         }
123
124         priv->gathers = (struct nvhost_op_pair *)nvmap_mmap(priv->gather_mem);
125
126         return 0;
127 fail:
128         nvhost_channelrelease(inode, filp);
129         return -ENOMEM;
130 }
131
132 static void add_gather(struct nvhost_channel_userctx *ctx, int idx,
133                        u32 mem_id, u32 words, u32 offset)
134 {
135         struct nvmap_pinarray_elem *pin;
136         pin = &ctx->pinarray[ctx->pinarray_size++];
137         pin->patch_mem = (u32)nvmap_ref_to_handle(ctx->gather_mem);
138         pin->patch_offset = (idx * sizeof(struct nvhost_op_pair)) +
139                 offsetof(struct nvhost_op_pair, op2);
140         pin->pin_mem = mem_id;
141         pin->pin_offset = offset;
142         ctx->gathers[idx].op1 = nvhost_opcode_gather(0, words);
143 }
144
145 static void reset_submit(struct nvhost_channel_userctx *ctx)
146 {
147         ctx->cmdbufs_pending = 0;
148         ctx->relocs_pending = 0;
149         ctx->waitchk_pending = 0;
150 }
151
152 static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
153                                 size_t count, loff_t *offp)
154 {
155         struct nvhost_channel_userctx *priv = filp->private_data;
156         size_t remaining = count;
157         int err = 0;
158
159         while (remaining) {
160                 size_t consumed;
161                 if (!priv->relocs_pending && !priv->cmdbufs_pending && !priv->waitchk_pending) {
162                         consumed = sizeof(struct nvhost_submit_hdr);
163                         if (remaining < consumed)
164                                 break;
165                         if (copy_from_user(&priv->syncpt_id, buf, consumed)) {
166                                 err = -EFAULT;
167                                 break;
168                         }
169                         if (!priv->cmdbufs_pending) {
170                                 err = -EFAULT;
171                                 break;
172                         }
173                         /* leave room for ctx switch */
174                         priv->num_gathers = 2;
175                         priv->pinarray_size = 0;
176                         priv->waitchk_mask |= priv->waitchk_ref;
177                 } else if (priv->cmdbufs_pending) {
178                         struct nvhost_cmdbuf cmdbuf;
179                         consumed = sizeof(cmdbuf);
180                         if (remaining < consumed)
181                                 break;
182                         if (copy_from_user(&cmdbuf, buf, consumed)) {
183                                 err = -EFAULT;
184                                 break;
185                         }
186                         add_gather(priv, priv->num_gathers++,
187                                    cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
188                         priv->cmdbufs_pending--;
189                 } else if (priv->relocs_pending) {
190                         int numrelocs = remaining / sizeof(struct nvhost_reloc);
191                         if (!numrelocs)
192                                 break;
193                         numrelocs = min_t(int, numrelocs, priv->relocs_pending);
194                         consumed = numrelocs * sizeof(struct nvhost_reloc);
195                         if (copy_from_user(&priv->pinarray[priv->pinarray_size],
196                                                 buf, consumed)) {
197                                 err = -EFAULT;
198                                 break;
199                         }
200                         priv->pinarray_size += numrelocs;
201                         priv->relocs_pending -= numrelocs;
202                 } else if (priv->waitchk_pending) {
203                         struct nvhost_waitchk *waitp;
204                         consumed = sizeof(struct nvhost_waitchk);
205                         if (remaining < consumed)
206                                 break;
207                         waitp = &priv->waitchks[priv->num_waitchks];
208                         if (copy_from_user(waitp, buf, consumed)) {
209                                 err = -EFAULT;
210                                 break;
211                         }
212                         priv->num_waitchks++;
213                         priv->waitchk_pending--;
214                 } else {
215                         err = -EFAULT;
216                         break;
217                 }
218                 remaining -= consumed;
219                 buf += consumed;
220         }
221
222         if (err < 0) {
223                 dev_err(&priv->ch->dev->pdev->dev, "channel write error\n");
224                 reset_submit(priv);
225                 return err;
226         }
227
228         return (count - remaining);
229 }
230
/*
 * Flush the submit accumulated in @ctx to the channel.
 *
 * Gather slots 0 and 1 are reserved for an optional context save/restore
 * pair; gather_idx walks backwards from 2 as those slots (or a setclass)
 * are filled in.  On success *args->value receives the absolute syncpoint
 * value the submit will reach.  Returns 0, or a negative errno with all
 * acquired resources (pins, power ref, submit lock) released.
 */
static int nvhost_ioctl_channel_flush(struct nvhost_channel_userctx *ctx,
                                      struct nvhost_get_param_args *args)
{
        struct nvhost_cpuinterrupt ctxsw;
        int gather_idx = 2;
        int num_intrs = 0;
        u32 syncval;
        int num_unpin;
        int err;

        /* an unfinished write() stream means the submit is malformed */
        if (ctx->relocs_pending || ctx->cmdbufs_pending || ctx->waitchk_pending) {
                reset_submit(ctx);
                dev_err(&ctx->ch->dev->pdev->dev, "channel submit out of sync\n");
                return -EFAULT;
        }
        if (!ctx->nvmap) {
                dev_err(&ctx->ch->dev->pdev->dev, "no nvmap context set\n");
                return -EFAULT;
        }
        /* nothing beyond the reserved ctxsw slots: empty submit */
        if (ctx->num_gathers <= 2)
                return 0;

        /* keep module powered */
        nvhost_module_busy(&ctx->ch->mod);

        /* pin mem handles and patch physical addresses */
        num_unpin = nvmap_pin_array(ctx->nvmap,
                                    nvmap_ref_to_handle(ctx->gather_mem),
                                    ctx->pinarray, ctx->pinarray_size,
                                    ctx->unpinarray);
        if (num_unpin < 0) {
                dev_warn(&ctx->ch->dev->pdev->dev, "nvmap_pin_array failed: "
                         "%d\n", num_unpin);
                nvhost_module_idle(&ctx->ch->mod);
                return num_unpin;
        }

        /* get submit lock */
        err = mutex_lock_interruptible(&ctx->ch->submitlock);
        if (err) {
                nvmap_unpin_handles(ctx->nvmap, ctx->unpinarray, num_unpin);
                nvhost_module_idle(&ctx->ch->mod);
                return err;
        }

        /* remove stale waits */
        if (ctx->num_waitchks) {
                err = nvhost_syncpt_wait_check(ctx->nvmap,
                                &ctx->ch->dev->syncpt, ctx->waitchk_mask,
                                ctx->waitchks, ctx->num_waitchks);
                if (err) {
                        dev_warn(&ctx->ch->dev->pdev->dev,
                                "nvhost_syncpt_wait_check failed: %d\n", err);
                        mutex_unlock(&ctx->ch->submitlock);
                        nvmap_unpin_handles(ctx->nvmap, ctx->unpinarray, num_unpin);
                        nvhost_module_idle(&ctx->ch->mod);
                        return err;
                }
                ctx->num_waitchks = 0;
                ctx->waitchk_mask = 0;
        }

        /* context switch */
        if (ctx->ch->cur_ctx != ctx->hwctx) {
                struct nvhost_hwctx *hw = ctx->hwctx;
                /* restore our context (slot 1) if it has saved state */
                if (hw && hw->valid) {
                        gather_idx--;
                        ctx->gathers[gather_idx].op1 =
                                nvhost_opcode_gather(0, hw->restore_size);
                        ctx->gathers[gather_idx].op2 = hw->restore_phys;
                        ctx->syncpt_incrs += hw->restore_incrs;
                }
                /* save the currently-resident context (slot 0) */
                hw = ctx->ch->cur_ctx;
                if (hw) {
                        gather_idx--;
                        ctx->gathers[gather_idx].op1 =
                                nvhost_opcode_gather(0, hw->save_size);
                        ctx->gathers[gather_idx].op2 = hw->save_phys;
                        ctx->syncpt_incrs += hw->save_incrs;
                        num_intrs = 1;
                        ctxsw.syncpt_val = hw->save_incrs - 1;
                        ctxsw.intr_data = hw;
                        hw->valid = true;
                        ctx->ch->ctxhandler.get(hw);
                }
                ctx->ch->cur_ctx = ctx->hwctx;
        }

        /* add a setclass for modules that require it */
        if (gather_idx == 2 && ctx->ch->desc->class) {
                gather_idx--;
                ctx->gathers[gather_idx].op1 =
                        nvhost_opcode_setclass(ctx->ch->desc->class, 0, 0);
                ctx->gathers[gather_idx].op2 = NVHOST_OPCODE_NOOP;
        }

        /* get absolute sync value */
        if (BIT(ctx->syncpt_id) & NVSYNCPTS_CLIENT_MANAGED)
                syncval = nvhost_syncpt_set_max(&ctx->ch->dev->syncpt,
                                                ctx->syncpt_id, ctx->syncpt_incrs);
        else
                syncval = nvhost_syncpt_incr_max(&ctx->ch->dev->syncpt,
                                                ctx->syncpt_id, ctx->syncpt_incrs);

        /* patch absolute syncpt value into interrupt triggers */
        /* NOTE(review): when no context save was queued (num_intrs == 0)
         * ctxsw.syncpt_val is read here uninitialized; the value is unused
         * downstream, but this should be confirmed/cleaned up */
        ctxsw.syncpt_val += syncval - ctx->syncpt_incrs;

        nvhost_channel_submit(ctx->ch, ctx->nvmap, &ctx->gathers[gather_idx],
                              ctx->num_gathers - gather_idx, &ctxsw, num_intrs,
                              ctx->unpinarray, num_unpin,
                              ctx->syncpt_id, syncval);

        /* schedule a submit complete interrupt */
        nvhost_intr_add_action(&ctx->ch->dev->intr, ctx->syncpt_id, syncval,
                        NVHOST_INTR_ACTION_SUBMIT_COMPLETE, ctx->ch, NULL);

        mutex_unlock(&ctx->ch->submitlock);
        args->value = syncval;
        return 0;
}
351
352 static long nvhost_channelctl(struct file *filp,
353         unsigned int cmd, unsigned long arg)
354 {
355         struct nvhost_channel_userctx *priv = filp->private_data;
356         u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
357         int err = 0;
358
359         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
360                 (_IOC_NR(cmd) == 0) ||
361                 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST))
362                 return -EFAULT;
363
364         BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE);
365
366         if (_IOC_DIR(cmd) & _IOC_WRITE) {
367                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
368                         return -EFAULT;
369         }
370
371         switch (cmd) {
372         case NVHOST_IOCTL_CHANNEL_FLUSH:
373                 err = nvhost_ioctl_channel_flush(priv, (void *)buf);
374                 break;
375         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
376                 /* host syncpt ID is used by the RM (and never be given out) */
377                 BUG_ON(priv->ch->desc->syncpts & (1 << NVSYNCPT_GRAPHICS_HOST));
378                 ((struct nvhost_get_param_args *)buf)->value =
379                         priv->ch->desc->syncpts;
380                 break;
381         case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
382                 ((struct nvhost_get_param_args *)buf)->value =
383                         priv->ch->desc->waitbases;
384                 break;
385         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
386                 ((struct nvhost_get_param_args *)buf)->value =
387                         priv->ch->desc->modulemutexes;
388                 break;
389         case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
390         {
391                 int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
392                 struct nvmap_client *new_client = nvmap_client_get_file(fd);
393
394                 if (IS_ERR(new_client)) {
395                         err = PTR_ERR(new_client);
396                         break;
397                 }
398
399                 if (priv->nvmap)
400                         nvmap_client_put(priv->nvmap);
401
402                 priv->nvmap = new_client;
403                 break;
404         }
405         default:
406                 err = -ENOTTY;
407                 break;
408         }
409
410         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
411                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
412
413         return err;
414 }
415
416 static struct file_operations nvhost_channelops = {
417         .owner = THIS_MODULE,
418         .release = nvhost_channelrelease,
419         .open = nvhost_channelopen,
420         .write = nvhost_channelwrite,
421         .unlocked_ioctl = nvhost_channelctl
422 };
423
424 static int nvhost_ctrlrelease(struct inode *inode, struct file *filp)
425 {
426         struct nvhost_ctrl_userctx *priv = filp->private_data;
427         int i;
428
429         filp->private_data = NULL;
430         if (priv->mod_locks[0])
431                 nvhost_module_idle(&priv->dev->mod);
432         for (i = 1; i < NV_HOST1X_NB_MLOCKS; i++)
433                 if (priv->mod_locks[i])
434                         nvhost_mutex_unlock(&priv->dev->cpuaccess, i);
435         kfree(priv);
436         return 0;
437 }
438
439 static int nvhost_ctrlopen(struct inode *inode, struct file *filp)
440 {
441         struct nvhost_master *host = container_of(inode->i_cdev, struct nvhost_master, cdev);
442         struct nvhost_ctrl_userctx *priv;
443
444         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
445         if (!priv)
446                 return -ENOMEM;
447
448         priv->dev = host;
449         filp->private_data = priv;
450         return 0;
451 }
452
453 static int nvhost_ioctl_ctrl_syncpt_read(
454         struct nvhost_ctrl_userctx *ctx,
455         struct nvhost_ctrl_syncpt_read_args *args)
456 {
457         if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
458                 return -EINVAL;
459         args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
460         return 0;
461 }
462
463 static int nvhost_ioctl_ctrl_syncpt_incr(
464         struct nvhost_ctrl_userctx *ctx,
465         struct nvhost_ctrl_syncpt_incr_args *args)
466 {
467         if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
468                 return -EINVAL;
469         nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
470         return 0;
471 }
472
473 static int nvhost_ioctl_ctrl_syncpt_wait(
474         struct nvhost_ctrl_userctx *ctx,
475         struct nvhost_ctrl_syncpt_wait_args *args)
476 {
477         u32 timeout;
478         if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
479                 return -EINVAL;
480         if (args->timeout == NVHOST_NO_TIMEOUT)
481                 timeout = MAX_SCHEDULE_TIMEOUT;
482         else
483                 timeout = (u32)msecs_to_jiffies(args->timeout);
484
485         return nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
486                                         args->thresh, timeout);
487 }
488
/*
 * Lock or unlock a hardware module mutex on behalf of this fd.
 *
 * Mutex id 0 is special: it maps to the host1x power refcount rather than
 * a hardware mutex.  Per-fd mod_locks[] makes the operation idempotent and
 * lets release() clean up anything still held.  Returns 0 or a negative
 * errno from the try-lock.
 */
static int nvhost_ioctl_ctrl_module_mutex(
        struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_module_mutex_args *args)
{
        int err = 0;
        if (args->id >= NV_HOST1X_NB_MLOCKS ||
            args->lock > 1)
                return -EINVAL;

        /* take: only if this fd does not already hold it */
        if (args->lock && !ctx->mod_locks[args->id]) {
                if (args->id == 0)
                        nvhost_module_busy(&ctx->dev->mod);
                else
                        err = nvhost_mutex_try_lock(&ctx->dev->cpuaccess, args->id);
                if (!err)
                        ctx->mod_locks[args->id] = 1;
        }
        /* drop: only if this fd actually holds it */
        else if (!args->lock && ctx->mod_locks[args->id]) {
                if (args->id == 0)
                        nvhost_module_idle(&ctx->dev->mod);
                else
                        nvhost_mutex_unlock(&ctx->dev->cpuaccess, args->id);
                ctx->mod_locks[args->id] = 0;
        }
        return err;
}
515
516 static int nvhost_ioctl_ctrl_module_regrdwr(
517         struct nvhost_ctrl_userctx *ctx,
518         struct nvhost_ctrl_module_regrdwr_args *args)
519 {
520         u32 num_offsets = args->num_offsets;
521         u32 *offsets = args->offsets;
522         void *values = args->values;
523         u32 vals[64];
524
525         if (!nvhost_access_module_regs(&ctx->dev->cpuaccess, args->id) ||
526             (num_offsets == 0))
527                 return -EINVAL;
528
529         while (num_offsets--) {
530                 u32 remaining = args->block_size;
531                 u32 offs;
532                 if (get_user(offs, offsets))
533                         return -EFAULT;
534                 offsets++;
535                 while (remaining) {
536                         u32 batch = min(remaining, 64*sizeof(u32));
537                         if (args->write) {
538                                 if (copy_from_user(vals, values, batch))
539                                         return -EFAULT;
540                                 nvhost_write_module_regs(&ctx->dev->cpuaccess,
541                                                         args->id, offs, batch, vals);
542                         } else {
543                                 nvhost_read_module_regs(&ctx->dev->cpuaccess,
544                                                         args->id, offs, batch, vals);
545                                 if (copy_to_user(values, vals, batch))
546                                         return -EFAULT;
547                         }
548                         remaining -= batch;
549                         offs += batch;
550                         values += batch;
551                 }
552         }
553
554         return 0;
555 }
556
557 static long nvhost_ctrlctl(struct file *filp,
558         unsigned int cmd, unsigned long arg)
559 {
560         struct nvhost_ctrl_userctx *priv = filp->private_data;
561         u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE];
562         int err = 0;
563
564         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
565                 (_IOC_NR(cmd) == 0) ||
566                 (_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST))
567                 return -EFAULT;
568
569         BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE);
570
571         if (_IOC_DIR(cmd) & _IOC_WRITE) {
572                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
573                         return -EFAULT;
574         }
575
576         switch (cmd) {
577         case NVHOST_IOCTL_CTRL_SYNCPT_READ:
578                 err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf);
579                 break;
580         case NVHOST_IOCTL_CTRL_SYNCPT_INCR:
581                 err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf);
582                 break;
583         case NVHOST_IOCTL_CTRL_SYNCPT_WAIT:
584                 err = nvhost_ioctl_ctrl_syncpt_wait(priv, (void *)buf);
585                 break;
586         case NVHOST_IOCTL_CTRL_MODULE_MUTEX:
587                 err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf);
588                 break;
589         case NVHOST_IOCTL_CTRL_MODULE_REGRDWR:
590                 err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf);
591                 break;
592         default:
593                 err = -ENOTTY;
594                 break;
595         }
596
597         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
598                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
599
600         return err;
601 }
602
603 static struct file_operations nvhost_ctrlops = {
604         .owner = THIS_MODULE,
605         .release = nvhost_ctrlrelease,
606         .open = nvhost_ctrlopen,
607         .unlocked_ioctl = nvhost_ctrlctl
608 };
609
610 static void power_host(struct nvhost_module *mod, enum nvhost_power_action action)
611 {
612         struct nvhost_master *dev = container_of(mod, struct nvhost_master, mod);
613
614         if (action == NVHOST_POWER_ACTION_ON) {
615                 nvhost_intr_configure(&dev->intr, clk_get_rate(mod->clk[0]));
616         }
617         else if (action == NVHOST_POWER_ACTION_OFF) {
618                 int i;
619                 for (i = 0; i < NVHOST_NUMCHANNELS; i++)
620                         nvhost_channel_suspend(&dev->channels[i]);
621                 nvhost_syncpt_save(&dev->syncpt);
622         }
623 }
624
625 static int __devinit nvhost_user_init(struct nvhost_master *host)
626 {
627         int i, err, devno;
628
629         host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME);
630         if (IS_ERR(host->nvhost_class)) {
631                 err = PTR_ERR(host->nvhost_class);
632                 dev_err(&host->pdev->dev, "failed to create class\n");
633                 goto fail;
634         }
635
636         if (nvhost_major) {
637                 devno = MKDEV(nvhost_major, nvhost_minor);
638                 err = register_chrdev_region(devno, NVHOST_NUMCHANNELS + 1, IFACE_NAME);
639         } else {
640                 err = alloc_chrdev_region(&devno, nvhost_minor,
641                                         NVHOST_NUMCHANNELS + 1, IFACE_NAME);
642                 nvhost_major = MAJOR(devno);
643         }
644         if (err < 0) {
645                 dev_err(&host->pdev->dev, "failed to reserve chrdev region\n");
646                 goto fail;
647         }
648
649         for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
650                 struct nvhost_channel *ch = &host->channels[i];
651
652                 cdev_init(&ch->cdev, &nvhost_channelops);
653                 ch->cdev.owner = THIS_MODULE;
654
655                 devno = MKDEV(nvhost_major, nvhost_minor + i);
656                 err = cdev_add(&ch->cdev, devno, 1);
657                 if (err < 0) {
658                         dev_err(&host->pdev->dev, "failed to add chan %i cdev\n", i);
659                         goto fail;
660                 }
661                 ch->node = device_create(host->nvhost_class, NULL, devno, NULL,
662                                 IFACE_NAME "-%s", ch->desc->name);
663                 if (IS_ERR(ch->node)) {
664                         err = PTR_ERR(ch->node);
665                         dev_err(&host->pdev->dev, "failed to create chan %i device\n", i);
666                         goto fail;
667                 }
668         }
669
670         cdev_init(&host->cdev, &nvhost_ctrlops);
671         host->cdev.owner = THIS_MODULE;
672         devno = MKDEV(nvhost_major, nvhost_minor + NVHOST_NUMCHANNELS);
673         err = cdev_add(&host->cdev, devno, 1);
674         if (err < 0)
675                 goto fail;
676         host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL,
677                         IFACE_NAME "-ctrl");
678         if (IS_ERR(host->ctrl)) {
679                 err = PTR_ERR(host->ctrl);
680                 dev_err(&host->pdev->dev, "failed to create ctrl device\n");
681                 goto fail;
682         }
683
684         return 0;
685 fail:
686         return err;
687 }
688
689 static int __devinit nvhost_probe(struct platform_device *pdev)
690 {
691         struct nvhost_master *host;
692         struct resource *regs, *intr0, *intr1;
693         int i, err;
694
695         regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
696         intr0 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
697         intr1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
698
699         if (!regs || !intr0 || !intr1) {
700                 dev_err(&pdev->dev, "missing required platform resources\n");
701                 return -ENXIO;
702         }
703
704         host = kzalloc(sizeof(*host), GFP_KERNEL);
705         if (!host)
706                 return -ENOMEM;
707
708         host->pdev = pdev;
709
710         host->nvmap = nvmap_create_client(nvmap_dev, "nvhost");
711         if (!host->nvmap) {
712                 dev_err(&pdev->dev, "unable to create nvmap client\n");
713                 err = -EIO;
714                 goto fail;
715         }
716
717         host->reg_mem = request_mem_region(regs->start,
718                                         resource_size(regs), pdev->name);
719         if (!host->reg_mem) {
720                 dev_err(&pdev->dev, "failed to get host register memory\n");
721                 err = -ENXIO;
722                 goto fail;
723         }
724         host->aperture = ioremap(regs->start, resource_size(regs));
725         if (!host->aperture) {
726                 dev_err(&pdev->dev, "failed to remap host registers\n");
727                 err = -ENXIO;
728                 goto fail;
729         }
730         host->sync_aperture = host->aperture +
731                 (NV_HOST1X_CHANNEL0_BASE +
732                         HOST1X_CHANNEL_SYNC_REG_BASE);
733
734         for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
735                 struct nvhost_channel *ch = &host->channels[i];
736                 err = nvhost_channel_init(ch, host, i);
737                 if (err < 0) {
738                         dev_err(&pdev->dev, "failed to init channel %d\n", i);
739                         goto fail;
740                 }
741         }
742
743         err = nvhost_cpuaccess_init(&host->cpuaccess, pdev);
744         if (err) goto fail;
745         err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
746         if (err) goto fail;
747         err = nvhost_user_init(host);
748         if (err) goto fail;
749         err = nvhost_module_init(&host->mod, "host1x", power_host, NULL, &pdev->dev);
750         if (err) goto fail;
751
752         platform_set_drvdata(pdev, host);
753
754         clk_enable(host->mod.clk[0]);
755         nvhost_syncpt_reset(&host->syncpt);
756         clk_disable(host->mod.clk[0]);
757
758         nvhost_bus_register(host);
759
760         nvhost_debug_init(host);
761
762         dev_info(&pdev->dev, "initialized\n");
763         return 0;
764
765 fail:
766         if (host->nvmap)
767                 nvmap_client_put(host->nvmap);
768         /* TODO: [ahatala 2010-05-04] */
769         kfree(host);
770         return err;
771 }
772
/* No teardown implemented; resources persist for the life of the system. */
static int __exit nvhost_remove(struct platform_device *pdev)
{
        return 0;
}
777
778 static int nvhost_suspend(struct platform_device *pdev, pm_message_t state)
779 {
780         struct nvhost_master *host = platform_get_drvdata(pdev);
781         dev_info(&pdev->dev, "suspending\n");
782         nvhost_module_suspend(&host->mod);
783         clk_enable(host->mod.clk[0]);
784         nvhost_syncpt_save(&host->syncpt);
785         clk_disable(host->mod.clk[0]);
786         dev_info(&pdev->dev, "suspended\n");
787         return 0;
788 }
789
790 static int nvhost_resume(struct platform_device *pdev)
791 {
792         struct nvhost_master *host = platform_get_drvdata(pdev);
793         dev_info(&pdev->dev, "resuming\n");
794         clk_enable(host->mod.clk[0]);
795         nvhost_syncpt_reset(&host->syncpt);
796         clk_disable(host->mod.clk[0]);
797         dev_info(&pdev->dev, "resumed\n");
798         return 0;
799 }
800
/* Platform driver; probe is supplied separately via
 * platform_driver_probe() in nvhost_mod_init(). */
static struct platform_driver nvhost_driver = {
        .remove = __exit_p(nvhost_remove),
        .suspend = nvhost_suspend,
        .resume = nvhost_resume,
        .driver = {
                .owner = THIS_MODULE,
                .name = DRIVER_NAME
        }
};
810
/* Module entry: register the driver and run probe for the (non-hotplug)
 * host1x device. */
static int __init nvhost_mod_init(void)
{
        return platform_driver_probe(&nvhost_driver, nvhost_probe);
}
815
/* Module exit: unregister the platform driver. */
static void __exit nvhost_mod_exit(void)
{
        platform_driver_unregister(&nvhost_driver);
}
820
/* Standard module registration and metadata. */
module_init(nvhost_mod_init);
module_exit(nvhost_mod_exit);

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("Graphics host driver for Tegra products");
MODULE_VERSION("1.0");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform-nvhost");