/* drivers/gpu/drm/rockchip/rockchip_drm_rga.c */
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <drm/drmP.h>
#include <drm/rockchip_drm.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_rga.h"

#define RGA_MODE_BASE_REG		0x0100
#define RGA_MODE_MAX_REG		0x017C

#define RGA_SYS_CTRL			0x0000
#define RGA_CMD_CTRL			0x0004
#define RGA_CMD_BASE			0x0008
#define RGA_INT				0x0010
#define RGA_MMU_CTRL0			0x0014
#define RGA_VERSION_INFO		0x0028

#define RGA_SRC_Y_RGB_BASE_ADDR		0x0108
#define RGA_SRC_CB_BASE_ADDR		0x010C
#define RGA_SRC_CR_BASE_ADDR		0x0110
#define RGA_SRC1_RGB_BASE_ADDR		0x0114
#define RGA_DST_Y_RGB_BASE_ADDR		0x013C
#define RGA_DST_CB_BASE_ADDR		0x0140
#define RGA_DST_CR_BASE_ADDR		0x014C
#define RGA_MMU_CTRL1			0x016C
#define RGA_MMU_SRC_BASE		0x0170
#define RGA_MMU_SRC1_BASE		0x0174
#define RGA_MMU_DST_BASE		0x0178

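/*
 * The cmdlist pool and the MMU page tables are written by the CPU and
 * fetched by the RGA, so flush them out of the CPU caches before the
 * hardware is kicked.
 */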
static void rga_dma_flush_range(void *ptr, int size)
{
#ifdef CONFIG_ARM
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(virt_to_phys(ptr), virt_to_phys(ptr + size));
#elif defined(CONFIG_ARM64)
	__dma_flush_range(ptr, ptr + size);
#endif
}

static inline void rga_write(struct rockchip_rga *rga, u32 reg, u32 value)
{
	writel(value, rga->regs + reg);
}

static inline u32 rga_read(struct rockchip_rga *rga, u32 reg)
{
	return readl(rga->regs + reg);
}

static inline void rga_mod(struct rockchip_rga *rga, u32 reg, u32 val, u32 mask)
{
	u32 temp = rga_read(rga, reg) & ~(mask);

	temp |= val & mask;
	rga_write(rga, reg, temp);
}

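/*
 * The RGA takes three clocks: the core clock (sclk), the AXI bus clock
 * (aclk) and the AHB register-access clock (hclk).  Enable them in that
 * order and unwind in reverse on failure.
 */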
static int rga_enable_clocks(struct rockchip_rga *rga)
{
	int ret;

	ret = clk_prepare_enable(rga->sclk);
	if (ret) {
		dev_err(rga->dev, "Cannot enable rga sclk: %d\n", ret);
		return ret;
	}

	ret = clk_prepare_enable(rga->aclk);
	if (ret) {
		dev_err(rga->dev, "Cannot enable rga aclk: %d\n", ret);
		goto err_disable_sclk;
	}

	ret = clk_prepare_enable(rga->hclk);
	if (ret) {
		dev_err(rga->dev, "Cannot enable rga hclk: %d\n", ret);
		goto err_disable_aclk;
	}

	return 0;

err_disable_aclk:
	clk_disable_unprepare(rga->aclk);
err_disable_sclk:
	clk_disable_unprepare(rga->sclk);

	return ret;
}

static void rga_disable_clocks(struct rockchip_rga *rga)
{
	clk_disable_unprepare(rga->hclk);
	clk_disable_unprepare(rga->aclk);
	clk_disable_unprepare(rga->sclk);
}

static void rga_init_cmdlist(struct rockchip_rga *rga)
{
	struct rga_cmdlist_node *node;
	int nr;

	node = rga->cmdlist_node;

	for (nr = 0; nr < ARRAY_SIZE(rga->cmdlist_node); nr++)
		list_add_tail(&node[nr].list, &rga->free_cmdlist);
}

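/*
 * Collect every queued cmdlist into a single write-combined DMA pool
 * that the RGA command fetcher walks.  Each node gets a slot of
 * RGA_CMDLIST_SIZE 32-bit words, indexed by register offset relative
 * to RGA_MODE_BASE_REG, plus the MMU table bases for any GEM buffers.
 */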
static int rga_alloc_dma_buf_for_cmdlist(struct rga_runqueue_node *runqueue)
{
	struct list_head *run_cmdlist = &runqueue->run_cmdlist;
	struct device *dev = runqueue->dev;
	struct dma_attrs cmdlist_dma_attrs;
	struct rga_cmdlist_node *node;
	void *cmdlist_pool_virt;
	dma_addr_t cmdlist_pool;
	int cmdlist_cnt = 0;
	int count = 0;

	list_for_each_entry(node, run_cmdlist, list)
		cmdlist_cnt++;

	init_dma_attrs(&cmdlist_dma_attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &cmdlist_dma_attrs);

	/* each cmdlist slot holds RGA_CMDLIST_SIZE 32-bit registers */
	cmdlist_pool_virt = dma_alloc_attrs(dev,
					    cmdlist_cnt * RGA_CMDLIST_SIZE * 4,
					    &cmdlist_pool, GFP_KERNEL,
					    &cmdlist_dma_attrs);
	if (!cmdlist_pool_virt) {
		dev_err(dev, "failed to allocate cmdlist dma memory\n");
		return -ENOMEM;
	}

	/*
	 * Fill in the RGA operation registers from the cmdlist command
	 * buffer, and also fill in the MMU TLB base information.
	 */
	list_for_each_entry(node, run_cmdlist, list) {
		struct rga_cmdlist *cmdlist = &node->cmdlist;
		unsigned int mmu_ctrl = 0;
		unsigned int reg;
		u32 *dest;
		int i;

		dest = cmdlist_pool_virt + RGA_CMDLIST_SIZE * 4 * count++;

		for (i = 0; i < cmdlist->last / 2; i++) {
			reg = (node->cmdlist.data[2 * i] - RGA_MODE_BASE_REG);
			/* skip the buffer-type entries handled below */
			if (reg > RGA_MODE_MAX_REG - RGA_MODE_BASE_REG)
				continue;
			dest[reg >> 2] = cmdlist->data[2 * i + 1];
		}

		if (cmdlist->src_mmu_pages) {
			reg = RGA_MMU_SRC_BASE - RGA_MODE_BASE_REG;
			dest[reg >> 2] = virt_to_phys(cmdlist->src_mmu_pages) >> 4;
			mmu_ctrl |= 0x7;
		}

		if (cmdlist->dst_mmu_pages) {
			reg = RGA_MMU_DST_BASE - RGA_MODE_BASE_REG;
			dest[reg >> 2] = virt_to_phys(cmdlist->dst_mmu_pages) >> 4;
			mmu_ctrl |= 0x7 << 8;
		}

		if (cmdlist->src1_mmu_pages) {
			reg = RGA_MMU_SRC1_BASE - RGA_MODE_BASE_REG;
			dest[reg >> 2] = virt_to_phys(cmdlist->src1_mmu_pages) >> 4;
			mmu_ctrl |= 0x7 << 4;
		}

		reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
		dest[reg >> 2] = mmu_ctrl;
	}

	rga_dma_flush_range(cmdlist_pool_virt, cmdlist_cnt * RGA_CMDLIST_SIZE * 4);

	runqueue->cmdlist_dma_attrs = cmdlist_dma_attrs;
	runqueue->cmdlist_pool_virt = cmdlist_pool_virt;
	runqueue->cmdlist_pool = cmdlist_pool;
	runqueue->cmdlist_cnt = cmdlist_cnt;

	return 0;
}

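/*
 * Validate every register offset coming from userspace before it is
 * accepted: only GEM-backed source/destination base addresses and
 * word-aligned offsets inside the mode-register window are allowed.
 */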
static int rga_check_reg_offset(struct device *dev,
				struct rga_cmdlist_node *node)
{
	struct rga_cmdlist *cmdlist = &node->cmdlist;
	int index;
	int reg;
	int i;

	for (i = 0; i < cmdlist->last / 2; i++) {
		index = cmdlist->last - 2 * (i + 1);
		reg = cmdlist->data[index];

		switch (reg) {
		case RGA_BUF_TYPE_GEMFD | RGA_DST_Y_RGB_BASE_ADDR:
		case RGA_BUF_TYPE_GEMFD | RGA_SRC_Y_RGB_BASE_ADDR:
			break;

		/* userptr buffers are not supported */
		case RGA_BUF_TYPE_USERPTR | RGA_DST_Y_RGB_BASE_ADDR:
		case RGA_BUF_TYPE_USERPTR | RGA_SRC_Y_RGB_BASE_ADDR:
			goto err;

		default:
			if (reg < RGA_MODE_BASE_REG || reg > RGA_MODE_MAX_REG)
				goto err;

			if (reg % 4)
				goto err;
		}
	}

	return 0;

err:
	dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
	return -EINVAL;
}

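/*
 * Turn a dma-buf fd into a flat page table for the RGA MMU: attach to
 * the buffer, walk its scatterlist and record each physical page
 * address in a 32KB table that the hardware reads linearly.
 */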
static struct dma_buf_attachment *
rga_gem_buf_to_pages(struct rockchip_rga *rga, void **mmu_pages, int fd)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dmabuf;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	unsigned int mapped_size = 0;
	phys_addr_t address;
	unsigned int len;
	unsigned int i, p;
	unsigned int *pages;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf)) {
		dev_err(rga->dev, "Failed to get dma_buf with fd %d\n", fd);
		return ERR_PTR(-EINVAL);
	}

	attach = dma_buf_attach(dmabuf, rga->dev);
	if (IS_ERR(attach)) {
		dev_err(rga->dev, "Failed to attach dma_buf\n");
		ret = PTR_ERR(attach);
		goto failed_attach;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dev_err(rga->dev, "Failed to map dma_buf attachment\n");
		ret = PTR_ERR(sgt);
		goto failed_detach;
	}

	/*
	 * Allocate 2^3 pages = 32KB for storing page addresses: 8192
	 * 32-bit entries, enough to map 8192 * 4KB = 32MB of buffer.
	 */
	pages = (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
	if (!pages) {
		ret = -ENOMEM;
		goto failed_unmap;
	}

	for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
		len = sg_dma_len(sgl) >> PAGE_SHIFT;
		address = sg_phys(sgl);

		for (p = 0; p < len; p++) {
			dma_addr_t phys = address + (p << PAGE_SHIFT);
			void *virt = phys_to_virt(phys);

			rga_dma_flush_range(virt, 4 * 1024);
			pages[mapped_size + p] = phys;
		}

		mapped_size += len;
	}

	rga_dma_flush_range(pages, 32 * 1024);

	*mmu_pages = pages;

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);

	return attach;

failed_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
failed_detach:
	dma_buf_detach(dmabuf, attach);
failed_attach:
	dma_buf_put(dmabuf);

	return ERR_PTR(ret);
}

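/*
 * Scan a validated cmdlist for GEM fd entries and map each referenced
 * dma-buf, keeping the attachment and the MMU page table so they can
 * be released once the commands have completed.
 */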
static int rga_map_cmdlist_gem(struct rockchip_rga *rga,
			       struct rga_cmdlist_node *node,
			       struct drm_device *drm_dev,
			       struct drm_file *file)
{
	struct rga_cmdlist *cmdlist = &node->cmdlist;
	struct dma_buf_attachment *attach;
	void *mmu_pages;
	int fd;
	int i;

	for (i = 0; i < cmdlist->last / 2; i++) {
		int index = cmdlist->last - 2 * (i + 1);

		switch (cmdlist->data[index]) {
		case RGA_SRC_Y_RGB_BASE_ADDR | RGA_BUF_TYPE_GEMFD:
			fd = cmdlist->data[index + 1];
			attach = rga_gem_buf_to_pages(rga, &mmu_pages, fd);
			if (IS_ERR(attach))
				return PTR_ERR(attach);

			cmdlist->src_attach = attach;
			cmdlist->src_mmu_pages = mmu_pages;
			break;

		case RGA_DST_Y_RGB_BASE_ADDR | RGA_BUF_TYPE_GEMFD:
			fd = cmdlist->data[index + 1];
			attach = rga_gem_buf_to_pages(rga, &mmu_pages, fd);
			if (IS_ERR(attach))
				return PTR_ERR(attach);

			cmdlist->dst_attach = attach;
			cmdlist->dst_mmu_pages = mmu_pages;
			break;
		}
	}

	return 0;
}

static void rga_unmap_cmdlist_gem(struct rockchip_rga *rga,
				  struct rga_cmdlist_node *node)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = node->cmdlist.src_attach;
	if (attach) {
		dma_buf = attach->dmabuf;
		dma_buf_detach(dma_buf, attach);
		dma_buf_put(dma_buf);
	}
	node->cmdlist.src_attach = NULL;

	attach = node->cmdlist.dst_attach;
	if (attach) {
		dma_buf = attach->dmabuf;
		dma_buf_detach(dma_buf, attach);
		dma_buf_put(dma_buf);
	}
	node->cmdlist.dst_attach = NULL;

	if (node->cmdlist.src_mmu_pages)
		free_pages((unsigned long)node->cmdlist.src_mmu_pages, 3);
	node->cmdlist.src_mmu_pages = NULL;

	if (node->cmdlist.src1_mmu_pages)
		free_pages((unsigned long)node->cmdlist.src1_mmu_pages, 3);
	node->cmdlist.src1_mmu_pages = NULL;

	if (node->cmdlist.dst_mmu_pages)
		free_pages((unsigned long)node->cmdlist.dst_mmu_pages, 3);
	node->cmdlist.dst_mmu_pages = NULL;
}

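/*
 * Start one runqueue on the hardware: take a runtime PM reference,
 * point the command fetcher at the DMA pool, unmask the interrupts
 * and write the command count to kick off the run.
 */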
static void rga_cmd_start(struct rockchip_rga *rga,
			  struct rga_runqueue_node *runqueue)
{
	int ret;

	ret = pm_runtime_get_sync(rga->dev);
	if (ret < 0)
		return;

	rga_write(rga, RGA_SYS_CTRL, 0x00);

	rga_write(rga, RGA_CMD_BASE, runqueue->cmdlist_pool);

	rga_write(rga, RGA_SYS_CTRL, 0x22);

	rga_write(rga, RGA_INT, 0x600);

	/* the command count is programmed minus one; bit 0 starts the run */
	rga_write(rga, RGA_CMD_CTRL, ((runqueue->cmdlist_cnt - 1) << 3) | 0x1);
}

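/*
 * Tear down a finished runqueue: free the command pool, unmap the GEM
 * buffers referenced by each completed cmdlist and return the nodes to
 * the free list.
 */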
static void rga_free_runqueue_node(struct rockchip_rga *rga,
				   struct rga_runqueue_node *runqueue)
{
	struct rga_cmdlist_node *node;

	if (!runqueue)
		return;

	if (runqueue->cmdlist_pool_virt && runqueue->cmdlist_pool)
		dma_free_attrs(rga->dev,
			       runqueue->cmdlist_cnt * RGA_CMDLIST_SIZE * 4,
			       runqueue->cmdlist_pool_virt,
			       runqueue->cmdlist_pool,
			       &runqueue->cmdlist_dma_attrs);

	mutex_lock(&rga->cmdlist_mutex);
	/*
	 * The commands in run_cmdlist have completed, so unmap all GEM
	 * objects in each command node to drop their references.
	 */
	list_for_each_entry(node, &runqueue->run_cmdlist, list)
		rga_unmap_cmdlist_gem(rga, node);
	list_splice_tail_init(&runqueue->run_cmdlist, &rga->free_cmdlist);
	mutex_unlock(&rga->cmdlist_mutex);

	kmem_cache_free(rga->runqueue_slab, runqueue);
}

static struct rga_runqueue_node *rga_get_runqueue(struct rockchip_rga *rga)
{
	struct rga_runqueue_node *runqueue;

	if (list_empty(&rga->runqueue_list))
		return NULL;

	runqueue = list_first_entry(&rga->runqueue_list,
				    struct rga_runqueue_node, list);
	list_del_init(&runqueue->list);

	return runqueue;
}

static void rga_exec_runqueue(struct rockchip_rga *rga)
{
	rga->runqueue_node = rga_get_runqueue(rga);
	if (rga->runqueue_node)
		rga_cmd_start(rga, rga->runqueue_node);
}

static struct rga_cmdlist_node *rga_get_cmdlist(struct rockchip_rga *rga)
{
	struct rga_cmdlist_node *node;
	struct device *dev = rga->dev;

	mutex_lock(&rga->cmdlist_mutex);
	if (list_empty(&rga->free_cmdlist)) {
		dev_err(dev, "there is no free cmdlist\n");
		mutex_unlock(&rga->cmdlist_mutex);
		return NULL;
	}

	node = list_first_entry(&rga->free_cmdlist,
				struct rga_cmdlist_node, list);
	list_del_init(&node->list);
	mutex_unlock(&rga->cmdlist_mutex);

	return node;
}

static void rga_add_cmdlist_to_inuse(struct rockchip_drm_rga_private *rga_priv,
				     struct rga_cmdlist_node *node)
{
	list_add_tail(&node->list, &rga_priv->inuse_cmdlist);
}

/*
 * IOCTL function for userspace to query the RGA hardware version.
 */
int rockchip_rga_get_ver_ioctl(struct drm_device *drm_dev, void *data,
			       struct drm_file *file)
{
	struct rockchip_drm_file_private *file_priv = file->driver_priv;
	struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
	struct drm_rockchip_rga_get_ver *ver = data;
	struct rockchip_rga *rga;
	struct device *dev;

	if (!rga_priv)
		return -ENODEV;

	dev = rga_priv->dev;
	if (!dev)
		return -ENODEV;

	rga = dev_get_drvdata(dev);
	if (!rga)
		return -EFAULT;

	ver->major = rga->version.major;
	ver->minor = rga->version.minor;

	return 0;
}

/*
 * IOCTL function for userspace to send an RGA request.
 */
int rockchip_rga_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
				   struct drm_file *file)
{
	struct rockchip_drm_file_private *file_priv = file->driver_priv;
	struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
	struct drm_rockchip_rga_set_cmdlist *req = data;
	struct rga_cmdlist_node *node;
	struct rga_cmdlist *cmdlist;
	struct rockchip_rga *rga;
	int ret;

	if (!rga_priv)
		return -ENODEV;

	if (!rga_priv->dev)
		return -ENODEV;

	rga = dev_get_drvdata(rga_priv->dev);
	if (!rga)
		return -EFAULT;

	if (req->cmd_nr > RGA_CMDLIST_SIZE || req->cmd_buf_nr > RGA_CMDBUF_SIZE) {
		dev_err(rga->dev, "cmdlist size is too big\n");
		return -EINVAL;
	}

	node = rga_get_cmdlist(rga);
	if (!node)
		return -ENOMEM;

	cmdlist = &node->cmdlist;
	cmdlist->last = 0;

	/*
	 * Copy the command / buffer register settings from userspace; each
	 * command is a pair of 32-bit words, a register offset followed by
	 * the value to program into it.
	 */
	if (copy_from_user(cmdlist->data, (void __user *)req->cmd,
			   sizeof(struct drm_rockchip_rga_cmd) * req->cmd_nr)) {
		ret = -EFAULT;
		goto err_free_node;
	}
	cmdlist->last += req->cmd_nr * 2;

	if (copy_from_user(&cmdlist->data[cmdlist->last],
			   (void __user *)req->cmd_buf,
			   sizeof(struct drm_rockchip_rga_cmd) * req->cmd_buf_nr)) {
		ret = -EFAULT;
		goto err_free_node;
	}
	cmdlist->last += req->cmd_buf_nr * 2;

	/*
	 * Check the userspace register offsets, then map any referenced
	 * framebuffers and build the RGA MMU page tables for them.
	 */
	ret = rga_check_reg_offset(rga->dev, node);
	if (ret < 0)
		goto err_free_node;

	ret = rga_map_cmdlist_gem(rga, node, drm_dev, file);
	if (ret < 0)
		goto err_unmap;

	rga_add_cmdlist_to_inuse(rga_priv, node);

	return 0;

err_unmap:
	rga_unmap_cmdlist_gem(rga, node);
err_free_node:
	mutex_lock(&rga->cmdlist_mutex);
	list_add_tail(&node->list, &rga->free_cmdlist);
	mutex_unlock(&rga->cmdlist_mutex);

	return ret;
}

/*
 * IOCTL function for userspace to start an RGA transform.
 */
int rockchip_rga_exec_ioctl(struct drm_device *drm_dev, void *data,
			    struct drm_file *file)
{
	struct rockchip_drm_file_private *file_priv = file->driver_priv;
	struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
	struct rga_runqueue_node *runqueue;
	struct rockchip_rga *rga;
	struct device *dev;
	int ret;

	if (!rga_priv)
		return -ENODEV;

	dev = rga_priv->dev;
	if (!dev)
		return -ENODEV;

	rga = dev_get_drvdata(dev);
	if (!rga)
		return -EFAULT;

	runqueue = kmem_cache_alloc(rga->runqueue_slab, GFP_KERNEL);
	if (!runqueue) {
		dev_err(rga->dev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	runqueue->dev = rga->dev;
	/* the slab is not zeroed; clear what rga_free_runqueue_node checks */
	runqueue->cmdlist_pool_virt = NULL;
	runqueue->cmdlist_pool = 0;

	init_completion(&runqueue->complete);

	INIT_LIST_HEAD(&runqueue->run_cmdlist);

	list_splice_init(&rga_priv->inuse_cmdlist, &runqueue->run_cmdlist);

	if (list_empty(&runqueue->run_cmdlist)) {
		dev_err(rga->dev, "there is no inuse cmdlist\n");
		kmem_cache_free(rga->runqueue_slab, runqueue);
		return -EPERM;
	}

	ret = rga_alloc_dma_buf_for_cmdlist(runqueue);
	if (ret < 0) {
		dev_err(rga->dev, "cmdlist init failed\n");
		rga_free_runqueue_node(rga, runqueue);
		return ret;
	}

	mutex_lock(&rga->runqueue_mutex);
	runqueue->pid = current->pid;
	runqueue->file = file;
	list_add_tail(&runqueue->list, &rga->runqueue_list);
	if (!rga->runqueue_node)
		rga_exec_runqueue(rga);
	mutex_unlock(&rga->runqueue_mutex);

	wait_for_completion(&runqueue->complete);
	rga_free_runqueue_node(rga, runqueue);

	return 0;
}

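/*
 * Per-file subdrv hooks: allocate the client's RGA state on open, and
 * on close release any cmdlists the client still has in flight.
 */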
static int rockchip_rga_open(struct drm_device *drm_dev, struct device *dev,
			     struct drm_file *file)
{
	struct rockchip_drm_file_private *file_priv = file->driver_priv;
	struct rockchip_drm_rga_private *rga_priv;

	rga_priv = kzalloc(sizeof(*rga_priv), GFP_KERNEL);
	if (!rga_priv)
		return -ENOMEM;

	rga_priv->dev = dev;
	file_priv->rga_priv = rga_priv;

	INIT_LIST_HEAD(&rga_priv->inuse_cmdlist);

	return 0;
}

static void rockchip_rga_close(struct drm_device *drm_dev, struct device *dev,
			       struct drm_file *file)
{
	struct rockchip_drm_file_private *file_priv = file->driver_priv;
	struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
	struct rga_cmdlist_node *node, *n;
	struct rockchip_rga *rga;

	if (!dev)
		return;

	rga = dev_get_drvdata(dev);
	if (!rga)
		return;

	mutex_lock(&rga->cmdlist_mutex);
	list_for_each_entry_safe(node, n, &rga_priv->inuse_cmdlist, list) {
		/*
		 * Unmap all GEM objects that were never executed: if the
		 * process was terminated forcibly, commands may still sit
		 * in inuse_cmdlist, so unmap them here.
		 */
		rga_unmap_cmdlist_gem(rga, node);
		list_move_tail(&node->list, &rga->free_cmdlist);
	}
	mutex_unlock(&rga->cmdlist_mutex);

	kfree(file_priv->rga_priv);
}

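/*
 * Bottom half of the completion interrupt: drop the runtime PM
 * reference taken in rga_cmd_start(), wake the waiting ioctl and start
 * the next queued runqueue unless we are suspending.
 */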
static void rga_runqueue_worker(struct work_struct *work)
{
	struct rockchip_rga *rga = container_of(work, struct rockchip_rga,
						runqueue_work);

	mutex_lock(&rga->runqueue_mutex);
	pm_runtime_put_sync(rga->dev);

	complete(&rga->runqueue_node->complete);

	if (rga->suspended)
		rga->runqueue_node = NULL;
	else
		rga_exec_runqueue(rga);

	mutex_unlock(&rga->runqueue_mutex);
}

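/*
 * Top half: latch and acknowledge the pending interrupt bits, and on
 * completion hand off to the workqueue.
 */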
static irqreturn_t rga_irq_handler(int irq, void *dev_id)
{
	struct rockchip_rga *rga = dev_id;
	int intr;

	intr = rga_read(rga, RGA_INT) & 0xf;

	/* write the matching clear bits to acknowledge what we saw */
	rga_mod(rga, RGA_INT, intr << 4, 0xf << 4);

	if (intr & 0x04)
		queue_work(rga->rga_workq, &rga->runqueue_work);

	return IRQ_HANDLED;
}

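/*
 * Pull the resets and clocks out of the device tree: pulse the core,
 * AXI and AHB resets to put the engine into a known state, then grab
 * its three clocks and enable them.
 */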
static int rga_parse_dt(struct rockchip_rga *rga)
{
	struct reset_control *core_rst, *axi_rst, *ahb_rst;

	core_rst = devm_reset_control_get(rga->dev, "core");
	if (IS_ERR(core_rst)) {
		dev_err(rga->dev, "failed to get core reset controller\n");
		return PTR_ERR(core_rst);
	}

	axi_rst = devm_reset_control_get(rga->dev, "axi");
	if (IS_ERR(axi_rst)) {
		dev_err(rga->dev, "failed to get axi reset controller\n");
		return PTR_ERR(axi_rst);
	}

	ahb_rst = devm_reset_control_get(rga->dev, "ahb");
	if (IS_ERR(ahb_rst)) {
		dev_err(rga->dev, "failed to get ahb reset controller\n");
		return PTR_ERR(ahb_rst);
	}

	reset_control_assert(core_rst);
	udelay(1);
	reset_control_deassert(core_rst);

	reset_control_assert(axi_rst);
	udelay(1);
	reset_control_deassert(axi_rst);

	reset_control_assert(ahb_rst);
	udelay(1);
	reset_control_deassert(ahb_rst);

	rga->sclk = devm_clk_get(rga->dev, "sclk");
	if (IS_ERR(rga->sclk)) {
		dev_err(rga->dev, "failed to get sclk clock\n");
		return PTR_ERR(rga->sclk);
	}

	rga->aclk = devm_clk_get(rga->dev, "aclk");
	if (IS_ERR(rga->aclk)) {
		dev_err(rga->dev, "failed to get aclk clock\n");
		return PTR_ERR(rga->aclk);
	}

	rga->hclk = devm_clk_get(rga->dev, "hclk");
	if (IS_ERR(rga->hclk)) {
		dev_err(rga->dev, "failed to get hclk clock\n");
		return PTR_ERR(rga->hclk);
	}

	return rga_enable_clocks(rga);
}

static const struct of_device_id rockchip_rga_dt_ids[] = {
	{ .compatible = "rockchip,rk3288-rga", },
	{ .compatible = "rockchip,rk3228-rga", },
	{ .compatible = "rockchip,rk3399-rga", },
	{},
};
MODULE_DEVICE_TABLE(of, rockchip_rga_dt_ids);

static int rga_probe(struct platform_device *pdev)
{
	struct drm_rockchip_subdrv *subdrv;
	struct rockchip_rga *rga;
	struct resource *iores;
	int irq;
	int ret;

	if (!pdev->dev.of_node)
		return -ENODEV;

	rga = devm_kzalloc(&pdev->dev, sizeof(*rga), GFP_KERNEL);
	if (!rga)
		return -ENOMEM;

	rga->dev = &pdev->dev;

	rga->runqueue_slab = kmem_cache_create("rga_runqueue_slab",
					       sizeof(struct rga_runqueue_node),
					       0, 0, NULL);
	if (!rga->runqueue_slab)
		return -ENOMEM;

	rga->rga_workq = create_singlethread_workqueue("rga");
	if (!rga->rga_workq) {
		dev_err(rga->dev, "failed to create workqueue\n");
		ret = -ENOMEM;
		goto err_destroy_slab;
	}

	INIT_WORK(&rga->runqueue_work, rga_runqueue_worker);
	INIT_LIST_HEAD(&rga->runqueue_list);
	mutex_init(&rga->runqueue_mutex);

	INIT_LIST_HEAD(&rga->free_cmdlist);
	mutex_init(&rga->cmdlist_mutex);

	rga_init_cmdlist(rga);

	ret = rga_parse_dt(rga);
	if (ret) {
		dev_err(rga->dev, "Unable to parse OF data\n");
		goto err_destroy_workqueue;
	}

	pm_runtime_enable(rga->dev);

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	rga->regs = devm_ioremap_resource(rga->dev, iores);
	if (IS_ERR(rga->regs)) {
		ret = PTR_ERR(rga->regs);
		goto err_disable_pm;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(rga->dev, "failed to get irq\n");
		ret = irq;
		goto err_disable_pm;
	}

	ret = devm_request_irq(rga->dev, irq, rga_irq_handler, 0,
			       dev_name(rga->dev), rga);
	if (ret < 0) {
		dev_err(rga->dev, "failed to request irq\n");
		goto err_disable_pm;
	}

	platform_set_drvdata(pdev, rga);

	rga->version.major = (rga_read(rga, RGA_VERSION_INFO) >> 24) & 0xFF;
	rga->version.minor = (rga_read(rga, RGA_VERSION_INFO) >> 20) & 0x0F;

	subdrv = &rga->subdrv;
	subdrv->dev = rga->dev;
	subdrv->open = rockchip_rga_open;
	subdrv->close = rockchip_rga_close;

	rockchip_drm_register_subdrv(subdrv);

	return 0;

err_disable_pm:
	pm_runtime_disable(rga->dev);
err_destroy_workqueue:
	destroy_workqueue(rga->rga_workq);
err_destroy_slab:
	kmem_cache_destroy(rga->runqueue_slab);

	return ret;
}

static int rga_remove(struct platform_device *pdev)
{
	struct rockchip_rga *rga = platform_get_drvdata(pdev);

	cancel_work_sync(&rga->runqueue_work);

	while (rga->runqueue_node) {
		rga_free_runqueue_node(rga, rga->runqueue_node);
		rga->runqueue_node = rga_get_runqueue(rga);
	}

	rockchip_drm_unregister_subdrv(&rga->subdrv);

	pm_runtime_disable(rga->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int rga_suspend(struct device *dev)
{
	struct rockchip_rga *rga = dev_get_drvdata(dev);

	mutex_lock(&rga->runqueue_mutex);
	rga->suspended = true;
	mutex_unlock(&rga->runqueue_mutex);

	flush_work(&rga->runqueue_work);

	return 0;
}

static int rga_resume(struct device *dev)
{
	struct rockchip_rga *rga = dev_get_drvdata(dev);

	mutex_lock(&rga->runqueue_mutex);
	rga->suspended = false;
	rga_exec_runqueue(rga);
	mutex_unlock(&rga->runqueue_mutex);

	return 0;
}
#endif

#ifdef CONFIG_PM
static int rga_runtime_suspend(struct device *dev)
{
	struct rockchip_rga *rga = dev_get_drvdata(dev);

	rga_disable_clocks(rga);

	return 0;
}

static int rga_runtime_resume(struct device *dev)
{
	struct rockchip_rga *rga = dev_get_drvdata(dev);

	return rga_enable_clocks(rga);
}
#endif

static const struct dev_pm_ops rga_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rga_suspend, rga_resume)
	SET_RUNTIME_PM_OPS(rga_runtime_suspend,
			   rga_runtime_resume, NULL)
};

static struct platform_driver rga_pltfm_driver = {
	.probe  = rga_probe,
	.remove = rga_remove,
	.driver = {
		.name = "rockchip-rga",
		.pm = &rga_pm,
		.of_match_table = rockchip_rga_dt_ids,
	},
};

module_platform_driver(rga_pltfm_driver);

MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip RGA Driver Extension");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:rockchip-rga");