drm/rockchip: add support for src1
[firefly-linux-kernel-4.4.55.git] / drivers / gpu / drm / rockchip / rockchip_drm_rga.c
1 #include <linux/clk.h>
2 #include <linux/debugfs.h>
3 #include <linux/delay.h>
4 #include <linux/dma-buf.h>
5 #include <linux/dma-mapping.h>
6 #include <linux/interrupt.h>
7 #include <linux/of.h>
8 #include <linux/of_address.h>
9 #include <linux/of_device.h>
10 #include <linux/pm_runtime.h>
11 #include <linux/reset.h>
12 #include <linux/seq_file.h>
13 #include <linux/slab.h>
14 #include <linux/uaccess.h>
15
16 #include <asm/cacheflush.h>
17 #include <drm/drmP.h>
18 #include <drm/rockchip_drm.h>
19
20 #include "rockchip_drm_drv.h"
21 #include "rockchip_drm_rga.h"
22
/*
 * Window of per-command "mode" registers that is copied from each cmdlist
 * into the DMA command buffer (offsets are relative to the register file).
 */
#define RGA_MODE_BASE_REG               0x0100
#define RGA_MODE_MAX_REG                0x017C

/* Global control / status registers. */
#define RGA_SYS_CTRL                    0x0000
#define RGA_CMD_CTRL                    0x0004
#define RGA_CMD_BASE                    0x0008
#define RGA_INT                         0x0010
#define RGA_MMU_CTRL0                   0x0014
#define RGA_VERSION_INFO                0x0028

/* Buffer base-address registers for the src, src1 and dst images. */
#define RGA_SRC_Y_RGB_BASE_ADDR         0x0108
#define RGA_SRC_CB_BASE_ADDR            0x010C
#define RGA_SRC_CR_BASE_ADDR            0x0110
#define RGA_SRC1_RGB_BASE_ADDR          0x0114
#define RGA_DST_Y_RGB_BASE_ADDR         0x013C
#define RGA_DST_CB_BASE_ADDR            0x0140
#define RGA_DST_CR_BASE_ADDR            0x014C
/* Per-channel MMU page-table base registers (values are phys >> 4). */
#define RGA_MMU_CTRL1                   0x016C
#define RGA_MMU_SRC_BASE                0x0170
#define RGA_MMU_SRC1_BASE               0x0174
#define RGA_MMU_DST_BASE                0x0178
44
45 static void __user *rga_compat_ptr(u64 value)
46 {
47 #ifdef CONFIG_ARM64
48         return (void __user *)(value);
49 #else
50         return (void __user *)((u32)(value));
51 #endif
52 }
53
/*
 * Flush CPU caches for [ptr, ptr + size) so the RGA engine observes data
 * written by the CPU. On ARM this flushes both the inner cache
 * (dmac_flush_range) and the outer/L2 cache; on ARM64 __dma_flush_range
 * covers the whole hierarchy. On any other architecture this is a no-op.
 */
static void rga_dma_flush_range(void *ptr, int size)
{
#ifdef CONFIG_ARM
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(virt_to_phys(ptr), virt_to_phys(ptr + size));
#elif defined CONFIG_ARM64
	__dma_flush_range(ptr, ptr + size);
#endif
}
63
/* Write @value to the RGA register at byte offset @reg. */
static inline void rga_write(struct rockchip_rga *rga, u32 reg, u32 value)
{
	writel(value, rga->regs + reg);
}
68
/* Read the RGA register at byte offset @reg. */
static inline u32 rga_read(struct rockchip_rga *rga, u32 reg)
{
	return readl(rga->regs + reg);
}
73
74 static inline void rga_mod(struct rockchip_rga *rga, u32 reg, u32 val, u32 mask)
75 {
76         u32 temp = rga_read(rga, reg) & ~(mask);
77
78         temp |= val & mask;
79         rga_write(rga, reg, temp);
80 }
81
82 static int rga_enable_clocks(struct rockchip_rga *rga)
83 {
84         int ret;
85
86         ret = clk_prepare_enable(rga->sclk);
87         if (ret) {
88                 dev_err(rga->dev, "Cannot enable rga sclk: %d\n", ret);
89                 return ret;
90         }
91
92         ret = clk_prepare_enable(rga->aclk);
93         if (ret) {
94                 dev_err(rga->dev, "Cannot enable rga aclk: %d\n", ret);
95                 goto err_disable_sclk;
96         }
97
98         ret = clk_prepare_enable(rga->hclk);
99         if (ret) {
100                 dev_err(rga->dev, "Cannot enable rga hclk: %d\n", ret);
101                 goto err_disable_aclk;
102         }
103
104         return 0;
105
106 err_disable_sclk:
107         clk_disable_unprepare(rga->sclk);
108 err_disable_aclk:
109         clk_disable_unprepare(rga->aclk);
110
111         return ret;
112 }
113
/*
 * Disable and unprepare the clocks enabled by rga_enable_clocks().
 * NOTE(review): the order (sclk, hclk, aclk) is not the strict reverse of
 * the enable order (sclk, aclk, hclk) — confirm the hardware tolerates it.
 */
static void rga_disable_clocks(struct rockchip_rga *rga)
{
	clk_disable_unprepare(rga->sclk);
	clk_disable_unprepare(rga->hclk);
	clk_disable_unprepare(rga->aclk);
}
120
121 static void rga_init_cmdlist(struct rockchip_rga *rga)
122 {
123         struct rga_cmdlist_node *node;
124         int nr;
125
126         node = rga->cmdlist_node;
127
128         for (nr = 0; nr < ARRAY_SIZE(rga->cmdlist_node); nr++)
129                 list_add_tail(&node[nr].list, &rga->free_cmdlist);
130 }
131
132 static int rga_alloc_dma_buf_for_cmdlist(struct rga_runqueue_node *runqueue)
133 {
134         struct list_head *run_cmdlist = &runqueue->run_cmdlist;
135         struct device *dev = runqueue->dev;
136         struct dma_attrs cmdlist_dma_attrs;
137         struct rga_cmdlist_node *node;
138         void *cmdlist_pool_virt;
139         dma_addr_t cmdlist_pool;
140         int cmdlist_cnt = 0;
141         int count = 0;
142
143         list_for_each_entry(node, run_cmdlist, list)
144                 cmdlist_cnt++;
145
146         init_dma_attrs(&cmdlist_dma_attrs);
147         dma_set_attr(DMA_ATTR_WRITE_COMBINE, &runqueue->cmdlist_dma_attrs);
148
149         cmdlist_pool_virt = dma_alloc_attrs(dev, cmdlist_cnt * RGA_CMDLIST_SIZE,
150                                             &cmdlist_pool, GFP_KERNEL,
151                                             &cmdlist_dma_attrs);
152         if (!cmdlist_pool_virt) {
153                 dev_err(dev, "failed to allocate cmdlist dma memory\n");
154                 return -ENOMEM;
155         }
156
157         /*
158          * Fill in the RGA operation registers from cmdlist command buffer,
159          * and also filled in the MMU TLB base information.
160          */
161         list_for_each_entry(node, run_cmdlist, list) {
162                 struct rga_cmdlist *cmdlist = &node->cmdlist;
163                 unsigned int mmu_ctrl = 0;
164                 unsigned int reg;
165                 u32 *dest;
166                 int i;
167
168                 dest = cmdlist_pool_virt + RGA_CMDLIST_SIZE * 4 * count++;
169
170                 for (i = 0; i < cmdlist->last / 2; i++) {
171                         reg = (node->cmdlist.data[2 * i] - RGA_MODE_BASE_REG);
172                         if (reg > RGA_MODE_BASE_REG)
173                                 continue;
174                         dest[reg >> 2] = cmdlist->data[2 * i + 1];
175                 }
176
177                 if (cmdlist->src_mmu_pages) {
178                         reg = RGA_MMU_SRC_BASE - RGA_MODE_BASE_REG;
179                         dest[reg >> 2] = virt_to_phys(cmdlist->src_mmu_pages) >> 4;
180                         mmu_ctrl |= 0x7;
181                 }
182
183                 if (cmdlist->dst_mmu_pages) {
184                         reg = RGA_MMU_DST_BASE - RGA_MODE_BASE_REG;
185                         dest[reg >> 2] = virt_to_phys(cmdlist->dst_mmu_pages) >> 4;
186                         mmu_ctrl |= 0x7 << 8;
187                 }
188
189                 if (cmdlist->src1_mmu_pages) {
190                         reg = RGA_MMU_SRC1_BASE - RGA_MODE_BASE_REG;
191                         dest[reg >> 2] = virt_to_phys(cmdlist->src1_mmu_pages) >> 4;
192                         mmu_ctrl |= 0x7 << 4;
193                 }
194
195                 reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
196                 dest[reg >> 2] = mmu_ctrl;
197         }
198
199         rga_dma_flush_range(cmdlist_pool_virt, cmdlist_cnt * RGA_CMDLIST_SIZE);
200
201         runqueue->cmdlist_dma_attrs = cmdlist_dma_attrs;
202         runqueue->cmdlist_pool_virt = cmdlist_pool_virt;
203         runqueue->cmdlist_pool = cmdlist_pool;
204         runqueue->cmdlist_cnt = cmdlist_cnt;
205
206         return 0;
207 }
208
209 static int rga_check_reg_offset(struct device *dev,
210                                 struct rga_cmdlist_node *node)
211 {
212         struct rga_cmdlist *cmdlist = &node->cmdlist;
213         int index;
214         int reg;
215         int i;
216
217         for (i = 0; i < cmdlist->last / 2; i++) {
218                 index = cmdlist->last - 2 * (i + 1);
219                 reg = cmdlist->data[index];
220
221                 switch (reg) {
222                 case RGA_BUF_TYPE_GEMFD | RGA_DST_Y_RGB_BASE_ADDR:
223                 case RGA_BUF_TYPE_GEMFD | RGA_SRC_Y_RGB_BASE_ADDR:
224                 case RGA_BUF_TYPE_GEMFD | RGA_SRC1_RGB_BASE_ADDR:
225                         break;
226
227                 case RGA_BUF_TYPE_USERPTR | RGA_DST_Y_RGB_BASE_ADDR:
228                 case RGA_BUF_TYPE_USERPTR | RGA_SRC_Y_RGB_BASE_ADDR:
229                 case RGA_BUF_TYPE_USERPTR | RGA_SRC1_RGB_BASE_ADDR:
230                         goto err;
231
232                 default:
233                         if (reg < RGA_MODE_BASE_REG || reg > RGA_MODE_MAX_REG)
234                                 goto err;
235
236                         if (reg % 4)
237                                 goto err;
238                 }
239         }
240
241         return 0;
242
243 err:
244         dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
245         return -EINVAL;
246 }
247
248 static struct dma_buf_attachment *
249 rga_gem_buf_to_pages(struct rockchip_rga *rga, void **mmu_pages, int fd)
250 {
251         struct dma_buf_attachment *attach;
252         struct dma_buf *dmabuf;
253         struct sg_table *sgt;
254         struct scatterlist *sgl;
255         unsigned int mapped_size = 0;
256         unsigned int address;
257         unsigned int len;
258         unsigned int i, p;
259         unsigned int *pages;
260         int ret;
261
262         dmabuf = dma_buf_get(fd);
263         if (IS_ERR(dmabuf)) {
264                 dev_err(rga->dev, "Failed to get dma_buf with fd %d\n", fd);
265                 return ERR_PTR(-EINVAL);
266         }
267
268         attach = dma_buf_attach(dmabuf, rga->dev);
269         if (IS_ERR(attach)) {
270                 dev_err(rga->dev, "Failed to attach dma_buf\n");
271                 ret = PTR_ERR(attach);
272                 goto failed_attach;
273         }
274
275         sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
276         if (IS_ERR(sgt)) {
277                 dev_err(rga->dev, "Failed to map dma_buf attachment\n");
278                 ret = PTR_ERR(sgt);
279                 goto failed_detach;
280         }
281
282         /*
283          * Alloc (2^3 * 4K) = 32K byte for storing pages, those space could
284          * cover 32K * 4K = 128M ram address.
285          */
286         pages = (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
287
288         for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
289                 len = sg_dma_len(sgl) >> PAGE_SHIFT;
290                 address = sg_phys(sgl);
291
292                 for (p = 0; p < len; p++) {
293                         dma_addr_t phys = address + (p << PAGE_SHIFT);
294
295                         pages[mapped_size + p] = phys;
296                 }
297
298                 mapped_size += len;
299         }
300
301         rga_dma_flush_range(pages, 32 * 1024);
302
303         *mmu_pages = pages;
304
305         dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
306
307         return attach;
308
309 failed_detach:
310         dma_buf_detach(dmabuf, attach);
311 failed_attach:
312         dma_buf_put(dmabuf);
313
314         return ERR_PTR(ret);
315 }
316
317 static int rga_map_cmdlist_gem(struct rockchip_rga *rga,
318                                struct rga_cmdlist_node *node,
319                                struct drm_device *drm_dev,
320                                struct drm_file *file)
321 {
322         struct rga_cmdlist *cmdlist = &node->cmdlist;
323         struct dma_buf_attachment *attach;
324         void *mmu_pages;
325         int fd;
326         int i;
327
328         for (i = 0; i < cmdlist->last / 2; i++) {
329                 int index = cmdlist->last - 2 * (i + 1);
330
331                 switch (cmdlist->data[index]) {
332                 case RGA_SRC1_RGB_BASE_ADDR | RGA_BUF_TYPE_GEMFD:
333                         fd = cmdlist->data[index + 1];
334                         attach = rga_gem_buf_to_pages(rga, &mmu_pages, fd);
335                         if (IS_ERR(attach))
336                                 return PTR_ERR(attach);
337
338                         cmdlist->src1_attach = attach;
339                         cmdlist->src1_mmu_pages = mmu_pages;
340                         break;
341
342                 case RGA_SRC_Y_RGB_BASE_ADDR | RGA_BUF_TYPE_GEMFD:
343                         fd = cmdlist->data[index + 1];
344                         attach = rga_gem_buf_to_pages(rga, &mmu_pages, fd);
345                         if (IS_ERR(attach))
346                                 return PTR_ERR(attach);
347
348                         cmdlist->src_attach = attach;
349                         cmdlist->src_mmu_pages = mmu_pages;
350                         break;
351
352                 case RGA_DST_Y_RGB_BASE_ADDR | RGA_BUF_TYPE_GEMFD:
353                         fd = cmdlist->data[index + 1];
354                         attach = rga_gem_buf_to_pages(rga, &mmu_pages, fd);
355                         if (IS_ERR(attach))
356                                 return PTR_ERR(attach);
357
358                         cmdlist->dst_attach = attach;
359                         cmdlist->dst_mmu_pages = mmu_pages;
360                         break;
361                 }
362         }
363
364         return 0;
365 }
366
367 static void rga_unmap_cmdlist_gem(struct rockchip_rga *rga,
368                                   struct rga_cmdlist_node *node)
369 {
370         struct dma_buf_attachment *attach;
371         struct dma_buf *dma_buf;
372
373         attach = node->cmdlist.src_attach;
374         if (attach) {
375                 dma_buf = attach->dmabuf;
376                 dma_buf_detach(dma_buf, attach);
377                 dma_buf_put(dma_buf);
378         }
379         node->cmdlist.src_attach = NULL;
380
381         attach = node->cmdlist.src1_attach;
382         if (attach) {
383                 dma_buf = attach->dmabuf;
384                 dma_buf_detach(dma_buf, attach);
385                 dma_buf_put(dma_buf);
386         }
387         node->cmdlist.src1_attach = NULL;
388
389         attach = node->cmdlist.dst_attach;
390         if (attach) {
391                 dma_buf = attach->dmabuf;
392                 dma_buf_detach(dma_buf, attach);
393                 dma_buf_put(dma_buf);
394         }
395         node->cmdlist.dst_attach = NULL;
396
397         if (node->cmdlist.src_mmu_pages)
398                 free_pages((unsigned long)node->cmdlist.src_mmu_pages, 3);
399         node->cmdlist.src_mmu_pages = NULL;
400
401         if (node->cmdlist.src1_mmu_pages)
402                 free_pages((unsigned long)node->cmdlist.src1_mmu_pages, 3);
403         node->cmdlist.src1_mmu_pages = NULL;
404
405         if (node->cmdlist.dst_mmu_pages)
406                 free_pages((unsigned long)node->cmdlist.dst_mmu_pages, 3);
407         node->cmdlist.dst_mmu_pages = NULL;
408 }
409
/*
 * Power up the block and kick off execution of @runqueue's command-list
 * pool. The matching pm_runtime_put_sync() happens in
 * rga_runqueue_worker() once the completion interrupt fires.
 */
static void rga_cmd_start(struct rockchip_rga *rga,
			  struct rga_runqueue_node *runqueue)
{
	int ret;

	/* NOTE(review): on resume failure we silently skip the submit;
	 * the waiting ioctl would then block forever — confirm intent. */
	ret = pm_runtime_get_sync(rga->dev);
	if (ret < 0)
		return;

	rga_write(rga, RGA_SYS_CTRL, 0x00);

	/* DMA address of the command-list pool built earlier. */
	rga_write(rga, RGA_CMD_BASE, runqueue->cmdlist_pool);

	/* Magic values carried over from the vendor code — presumably 0x22
	 * selects command-list mode and 0x600 enables/acks the interrupts
	 * used below; verify against the RGA TRM. */
	rga_write(rga, RGA_SYS_CTRL, 0x22);

	rga_write(rga, RGA_INT, 0x600);

	/* (number of command buffers - 1) << 3, plus the start bit. */
	rga_write(rga, RGA_CMD_CTRL, ((runqueue->cmdlist_cnt - 1) << 3) | 0x1);
}
429
430 static void rga_free_runqueue_node(struct rockchip_rga *rga,
431                                    struct rga_runqueue_node *runqueue)
432 {
433         struct rga_cmdlist_node *node;
434
435         if (!runqueue)
436                 return;
437
438         if (runqueue->cmdlist_pool_virt && runqueue->cmdlist_pool)
439                 dma_free_attrs(rga->dev, runqueue->cmdlist_cnt * RGA_CMDLIST_SIZE,
440                                runqueue->cmdlist_pool_virt,
441                                runqueue->cmdlist_pool,
442                                &runqueue->cmdlist_dma_attrs);
443
444         mutex_lock(&rga->cmdlist_mutex);
445         /*
446          * commands in run_cmdlist have been completed so unmap all gem
447          * objects in each command node so that they are unreferenced.
448          */
449         list_for_each_entry(node, &runqueue->run_cmdlist, list)
450                 rga_unmap_cmdlist_gem(rga, node);
451         list_splice_tail_init(&runqueue->run_cmdlist, &rga->free_cmdlist);
452         mutex_unlock(&rga->cmdlist_mutex);
453
454         kmem_cache_free(rga->runqueue_slab, runqueue);
455 }
456
457 static struct rga_runqueue_node *rga_get_runqueue(struct rockchip_rga *rga)
458 {
459         struct rga_runqueue_node *runqueue;
460
461         if (list_empty(&rga->runqueue_list))
462                 return NULL;
463
464         runqueue = list_first_entry(&rga->runqueue_list,
465                                     struct rga_runqueue_node, list);
466         list_del_init(&runqueue->list);
467
468         return runqueue;
469 }
470
471 static void rga_exec_runqueue(struct rockchip_rga *rga)
472 {
473         rga->runqueue_node = rga_get_runqueue(rga);
474         if (rga->runqueue_node)
475                 rga_cmd_start(rga, rga->runqueue_node);
476 }
477
478 static struct rga_cmdlist_node *rga_get_cmdlist(struct rockchip_rga *rga)
479 {
480         struct rga_cmdlist_node *node;
481         struct device *dev = rga->dev;
482
483         mutex_lock(&rga->cmdlist_mutex);
484         if (list_empty(&rga->free_cmdlist)) {
485                 dev_err(dev, "there is no free cmdlist\n");
486                 mutex_unlock(&rga->cmdlist_mutex);
487                 return NULL;
488         }
489
490         node = list_first_entry(&rga->free_cmdlist,
491                                 struct rga_cmdlist_node, list);
492         list_del_init(&node->list);
493         mutex_unlock(&rga->cmdlist_mutex);
494
495         return node;
496 }
497
/* Return @node to the free cmdlist pool (counterpart of rga_get_cmdlist). */
static void rga_put_cmdlist(struct rockchip_rga *rga, struct rga_cmdlist_node *node)
{
	mutex_lock(&rga->cmdlist_mutex);
	list_move_tail(&node->list, &rga->free_cmdlist);
	mutex_unlock(&rga->cmdlist_mutex);
}
504
505 static void rga_add_cmdlist_to_inuse(struct rockchip_drm_rga_private *rga_priv,
506                                      struct rga_cmdlist_node *node)
507 {
508         struct rga_cmdlist_node *lnode;
509
510         if (list_empty(&rga_priv->inuse_cmdlist))
511                 goto add_to_list;
512
513         /* this links to base address of new cmdlist */
514         lnode = list_entry(rga_priv->inuse_cmdlist.prev,
515                            struct rga_cmdlist_node, list);
516
517 add_to_list:
518         list_add_tail(&node->list, &rga_priv->inuse_cmdlist);
519 }
520
/*
 * IOCTL handler for userspace to query the RGA hardware version.
 */
524 int rockchip_rga_get_ver_ioctl(struct drm_device *drm_dev, void *data,
525                                struct drm_file *file)
526 {
527         struct rockchip_drm_file_private *file_priv = file->driver_priv;
528         struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
529         struct drm_rockchip_rga_get_ver *ver = data;
530         struct rockchip_rga *rga;
531         struct device *dev;
532
533         if (!rga_priv)
534                 return -ENODEV;
535
536         dev = rga_priv->dev;
537         if (!dev)
538                 return -ENODEV;
539
540         rga = dev_get_drvdata(dev);
541         if (!rga)
542                 return -EFAULT;
543
544         ver->major = rga->version.major;
545         ver->minor = rga->version.minor;
546
547         return 0;
548 }
549
/*
 * IOCTL handler for userspace to submit an RGA command list.
 */
553 int rockchip_rga_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
554                                    struct drm_file *file)
555 {
556         struct rockchip_drm_file_private *file_priv = file->driver_priv;
557         struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
558         struct drm_rockchip_rga_set_cmdlist *req = data;
559         struct rga_cmdlist_node *node;
560         struct rga_cmdlist *cmdlist;
561         struct rockchip_rga *rga;
562         int ret;
563
564         if (!rga_priv)
565                 return -ENODEV;
566
567         if (!rga_priv->dev)
568                 return -ENODEV;
569
570         rga = dev_get_drvdata(rga_priv->dev);
571         if (!rga)
572                 return -EFAULT;
573
574         if (req->cmd_nr > RGA_CMDLIST_SIZE || req->cmd_buf_nr > RGA_CMDBUF_SIZE) {
575                 dev_err(rga->dev, "cmdlist size is too big\n");
576                 return -EINVAL;
577         }
578
579         node = rga_get_cmdlist(rga);
580         if (!node)
581                 return -ENOMEM;
582
583         cmdlist = &node->cmdlist;
584         cmdlist->last = 0;
585
586         /*
587          * Copy the command / buffer registers setting from userspace, each
588          * command have two integer, one for register offset, another for
589          * register value.
590          */
591         if (copy_from_user(cmdlist->data, rga_compat_ptr(req->cmd),
592                            sizeof(struct drm_rockchip_rga_cmd) * req->cmd_nr))
593                 return -EFAULT;
594         cmdlist->last += req->cmd_nr * 2;
595
596         if (copy_from_user(&cmdlist->data[cmdlist->last],
597                            rga_compat_ptr(req->cmd_buf),
598                            sizeof(struct drm_rockchip_rga_cmd) * req->cmd_buf_nr))
599                 return -EFAULT;
600         cmdlist->last += req->cmd_buf_nr * 2;
601
602         /*
603          * Check the userspace command registers, and mapping the framebuffer,
604          * create the RGA mmu pages or get the framebuffer dma address.
605          */
606         ret = rga_check_reg_offset(rga->dev, node);
607         if (ret < 0) {
608                 dev_err(rga->dev, "Check reg offset failed\n");
609                 goto err_free_cmdlist;
610         }
611
612         ret = rga_map_cmdlist_gem(rga, node, drm_dev, file);
613         if (ret < 0) {
614                 dev_err(rga->dev, "Failed to map cmdlist\n");
615                 goto err_unmap_cmdlist;
616         }
617
618         rga_add_cmdlist_to_inuse(rga_priv, node);
619
620         return 0;
621
622 err_unmap_cmdlist:
623         rga_unmap_cmdlist_gem(rga, node);
624 err_free_cmdlist:
625         rga_put_cmdlist(rga, node);
626
627         return ret;
628 }
629
/*
 * IOCTL handler for userspace to start the queued RGA transforms.
 */
633 int rockchip_rga_exec_ioctl(struct drm_device *drm_dev, void *data,
634                             struct drm_file *file)
635 {
636         struct rockchip_drm_file_private *file_priv = file->driver_priv;
637         struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
638         struct rga_runqueue_node *runqueue;
639         struct rockchip_rga *rga;
640         struct device *dev;
641         int ret;
642
643         if (!rga_priv)
644                 return -ENODEV;
645
646         dev = rga_priv->dev;
647         if (!dev)
648                 return -ENODEV;
649
650         rga = dev_get_drvdata(dev);
651         if (!rga)
652                 return -EFAULT;
653
654         runqueue = kmem_cache_alloc(rga->runqueue_slab, GFP_KERNEL);
655         if (!runqueue) {
656                 dev_err(rga->dev, "failed to allocate memory\n");
657                 return -ENOMEM;
658         }
659
660         runqueue->dev = rga->dev;
661
662         init_completion(&runqueue->complete);
663
664         INIT_LIST_HEAD(&runqueue->run_cmdlist);
665
666         list_splice_init(&rga_priv->inuse_cmdlist, &runqueue->run_cmdlist);
667
668         if (list_empty(&runqueue->run_cmdlist)) {
669                 dev_err(rga->dev, "there is no inuse cmdlist\n");
670                 kmem_cache_free(rga->runqueue_slab, runqueue);
671                 return -EPERM;
672         }
673
674         ret = rga_alloc_dma_buf_for_cmdlist(runqueue);
675         if (ret < 0) {
676                 dev_err(rga->dev, "cmdlist init failed\n");
677                 return ret;
678         }
679
680         mutex_lock(&rga->runqueue_mutex);
681         runqueue->pid = current->pid;
682         runqueue->file = file;
683         list_add_tail(&runqueue->list, &rga->runqueue_list);
684         if (!rga->runqueue_node)
685                 rga_exec_runqueue(rga);
686         mutex_unlock(&rga->runqueue_mutex);
687
688         wait_for_completion(&runqueue->complete);
689         rga_free_runqueue_node(rga, runqueue);
690
691         return 0;
692 }
693
694 static int rockchip_rga_open(struct drm_device *drm_dev, struct device *dev,
695                              struct drm_file *file)
696 {
697         struct rockchip_drm_file_private *file_priv = file->driver_priv;
698         struct rockchip_drm_rga_private *rga_priv;
699
700         rga_priv = kzalloc(sizeof(*rga_priv), GFP_KERNEL);
701         if (!rga_priv)
702                 return -ENOMEM;
703
704         rga_priv->dev = dev;
705         file_priv->rga_priv = rga_priv;
706
707         INIT_LIST_HEAD(&rga_priv->inuse_cmdlist);
708
709         return 0;
710 }
711
712 static void rockchip_rga_close(struct drm_device *drm_dev, struct device *dev,
713                                struct drm_file *file)
714 {
715         struct rockchip_drm_file_private *file_priv = file->driver_priv;
716         struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
717         struct rga_cmdlist_node *node, *n;
718         struct rockchip_rga *rga;
719
720         if (!dev)
721                 return;
722
723         rga = dev_get_drvdata(dev);
724         if (!rga)
725                 return;
726
727         mutex_lock(&rga->cmdlist_mutex);
728         list_for_each_entry_safe(node, n, &rga_priv->inuse_cmdlist, list) {
729                 /*
730                  * unmap all gem objects not completed.
731                  *
732                  * P.S. if current process was terminated forcely then
733                  * there may be some commands in inuse_cmdlist so unmap
734                  * them.
735                  */
736                 rga_unmap_cmdlist_gem(rga, node);
737                 list_move_tail(&node->list, &rga->free_cmdlist);
738         }
739         mutex_unlock(&rga->cmdlist_mutex);
740
741         kfree(file_priv->rga_priv);
742 }
743
/*
 * Work item queued by the IRQ handler when the current runqueue finishes:
 * drop the runtime-PM reference taken in rga_cmd_start(), wake the ioctl
 * blocked in rockchip_rga_exec_ioctl() and, unless a system suspend is in
 * progress, start the next pending runqueue.
 */
static void rga_runqueue_worker(struct work_struct *work)
{
	struct rockchip_rga *rga = container_of(work, struct rockchip_rga,
					    runqueue_work);

	mutex_lock(&rga->runqueue_mutex);
	pm_runtime_put_sync(rga->dev);

	/* The waiting RGA_EXEC ioctl reclaims the finished node itself. */
	complete(&rga->runqueue_node->complete);

	if (rga->suspended)
		rga->runqueue_node = NULL;
	else
		rga_exec_runqueue(rga);

	mutex_unlock(&rga->runqueue_mutex);
}
761
/*
 * RGA interrupt handler: acknowledge the pending status bits and defer
 * completion handling to the runqueue worker.
 */
static irqreturn_t rga_irq_handler(int irq, void *dev_id)
{
	struct rockchip_rga *rga = dev_id;
	int intr;

	/* Pending interrupt status lives in the low nibble of RGA_INT. */
	intr = rga_read(rga, RGA_INT) & 0xf;

	/* Ack: the corresponding clear bits sit one nibble higher. */
	rga_mod(rga, RGA_INT, intr << 4, 0xf << 4);

	/* Bit 2 is treated as "command list finished" here — presumably the
	 * all-commands-done flag; verify against the RGA TRM. */
	if (intr & 0x04)
		queue_work(rga->rga_workq, &rga->runqueue_work);

	return IRQ_HANDLED;
}
776
777 static int rga_parse_dt(struct rockchip_rga *rga)
778 {
779         struct reset_control *core_rst, *axi_rst, *ahb_rst;
780
781         core_rst = devm_reset_control_get(rga->dev, "core");
782         if (IS_ERR(core_rst)) {
783                 dev_err(rga->dev, "failed to get core reset controller\n");
784                 return PTR_ERR(core_rst);
785         }
786
787         axi_rst = devm_reset_control_get(rga->dev, "axi");
788         if (IS_ERR(axi_rst)) {
789                 dev_err(rga->dev, "failed to get axi reset controller\n");
790                 return PTR_ERR(axi_rst);
791         }
792
793         ahb_rst = devm_reset_control_get(rga->dev, "ahb");
794         if (IS_ERR(ahb_rst)) {
795                 dev_err(rga->dev, "failed to get ahb reset controller\n");
796                 return PTR_ERR(ahb_rst);
797         }
798
799         reset_control_assert(core_rst);
800         udelay(1);
801         reset_control_deassert(core_rst);
802
803         reset_control_assert(axi_rst);
804         udelay(1);
805         reset_control_deassert(axi_rst);
806
807         reset_control_assert(ahb_rst);
808         udelay(1);
809         reset_control_deassert(ahb_rst);
810
811         rga->sclk = devm_clk_get(rga->dev, "sclk");
812         if (IS_ERR(rga->sclk)) {
813                 dev_err(rga->dev, "failed to get sclk clock\n");
814                 return PTR_ERR(rga->sclk);
815         }
816
817         rga->aclk = devm_clk_get(rga->dev, "aclk");
818         if (IS_ERR(rga->aclk)) {
819                 dev_err(rga->dev, "failed to get aclk clock\n");
820                 return PTR_ERR(rga->aclk);
821         }
822
823         rga->hclk = devm_clk_get(rga->dev, "hclk");
824         if (IS_ERR(rga->hclk)) {
825                 dev_err(rga->dev, "failed to get hclk clock\n");
826                 return PTR_ERR(rga->hclk);
827         }
828
829         return rga_enable_clocks(rga);
830 }
831
/* SoCs known to carry this RGA block. */
static const struct of_device_id rockchip_rga_dt_ids[] = {
	{ .compatible = "rockchip,rk3288-rga", },
	{ .compatible = "rockchip,rk3228-rga", },
	{ .compatible = "rockchip,rk3399-rga", },
	{},
};
MODULE_DEVICE_TABLE(of, rockchip_rga_dt_ids);
839
840 static int rga_probe(struct platform_device *pdev)
841 {
842         struct drm_rockchip_subdrv *subdrv;
843         struct rockchip_rga *rga;
844         struct resource *iores;
845         int irq;
846         int ret;
847
848         if (!pdev->dev.of_node)
849                 return -ENODEV;
850
851         rga = devm_kzalloc(&pdev->dev, sizeof(*rga), GFP_KERNEL);
852         if (!rga)
853                 return -ENOMEM;
854
855         rga->dev = &pdev->dev;
856
857         rga->runqueue_slab = kmem_cache_create("rga_runqueue_slab",
858                                                sizeof(struct rga_runqueue_node),
859                                                0, 0, NULL);
860         if (!rga->runqueue_slab)
861                 return -ENOMEM;
862
863         rga->rga_workq = create_singlethread_workqueue("rga");
864         if (!rga->rga_workq) {
865                 dev_err(rga->dev, "failed to create workqueue\n");
866                 ret = -ENOMEM;
867                 goto err_destroy_slab;
868         }
869
870         INIT_WORK(&rga->runqueue_work, rga_runqueue_worker);
871         INIT_LIST_HEAD(&rga->runqueue_list);
872         mutex_init(&rga->runqueue_mutex);
873
874         INIT_LIST_HEAD(&rga->free_cmdlist);
875         mutex_init(&rga->cmdlist_mutex);
876
877         rga_init_cmdlist(rga);
878
879         ret = rga_parse_dt(rga);
880         if (ret) {
881                 dev_err(rga->dev, "Unable to parse OF data\n");
882                 goto err_destroy_workqueue;
883         }
884
885         pm_runtime_enable(rga->dev);
886
887         iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
888
889         rga->regs = devm_ioremap_resource(rga->dev, iores);
890         if (IS_ERR(rga->regs)) {
891                 ret = PTR_ERR(rga->regs);
892                 goto err_put_clk;
893         }
894
895         irq = platform_get_irq(pdev, 0);
896         if (irq < 0) {
897                 dev_err(rga->dev, "failed to get irq\n");
898                 ret = irq;
899                 goto err_put_clk;
900         }
901
902         ret = devm_request_irq(rga->dev, irq, rga_irq_handler, 0,
903                                dev_name(rga->dev), rga);
904         if (ret < 0) {
905                 dev_err(rga->dev, "failed to request irq\n");
906                 goto err_put_clk;
907         }
908
909         platform_set_drvdata(pdev, rga);
910
911         rga->version.major = (rga_read(rga, RGA_VERSION_INFO) >> 24) & 0xFF;
912         rga->version.minor = (rga_read(rga, RGA_VERSION_INFO) >> 20) & 0x0F;
913
914         subdrv = &rga->subdrv;
915         subdrv->dev = rga->dev;
916         subdrv->open = rockchip_rga_open;
917         subdrv->close = rockchip_rga_close;
918
919         rockchip_drm_register_subdrv(subdrv);
920
921         return 0;
922
923 err_put_clk:
924         pm_runtime_disable(rga->dev);
925 err_destroy_workqueue:
926         destroy_workqueue(rga->rga_workq);
927 err_destroy_slab:
928         kmem_cache_destroy(rga->runqueue_slab);
929
930         return ret;
931 }
932
933 static int rga_remove(struct platform_device *pdev)
934 {
935         struct rockchip_rga *rga = platform_get_drvdata(pdev);
936
937         cancel_work_sync(&rga->runqueue_work);
938
939         while (rga->runqueue_node) {
940                 rga_free_runqueue_node(rga, rga->runqueue_node);
941                 rga->runqueue_node = rga_get_runqueue(rga);
942         }
943
944         rockchip_drm_unregister_subdrv(&rga->subdrv);
945
946         pm_runtime_disable(rga->dev);
947
948         return 0;
949 }
950
951 static int rga_suspend(struct device *dev)
952 {
953         struct rockchip_rga *rga = dev_get_drvdata(dev);
954
955         mutex_lock(&rga->runqueue_mutex);
956         rga->suspended = true;
957         mutex_unlock(&rga->runqueue_mutex);
958
959         flush_work(&rga->runqueue_work);
960
961         return 0;
962 }
963
964 static int rga_resume(struct device *dev)
965 {
966         struct rockchip_rga *rga = dev_get_drvdata(dev);
967
968         rga->suspended = false;
969         rga_exec_runqueue(rga);
970
971         return 0;
972 }
973
974 #ifdef CONFIG_PM
/* Runtime suspend: gate the RGA clocks while the block is idle. */
static int rga_runtime_suspend(struct device *dev)
{
	struct rockchip_rga *rga = dev_get_drvdata(dev);

	rga_disable_clocks(rga);

	return 0;
}
983
/* Runtime resume: re-enable the RGA clocks before touching registers. */
static int rga_runtime_resume(struct device *dev)
{
	struct rockchip_rga *rga = dev_get_drvdata(dev);

	return rga_enable_clocks(rga);
}
990 #endif
991
/* System sleep drains the queue; runtime PM gates the clocks. */
static const struct dev_pm_ops rga_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rga_suspend, rga_resume)
	SET_RUNTIME_PM_OPS(rga_runtime_suspend,
			   rga_runtime_resume, NULL)
};
997
/* Platform glue: probe/remove, PM ops and the OF match table above. */
static struct platform_driver rga_pltfm_driver = {
	.probe  = rga_probe,
	.remove = rga_remove,
	.driver = {
		.name = "rockchip-rga",
		.pm = &rga_pm,
		.of_match_table = rockchip_rga_dt_ids,
	},
};

module_platform_driver(rga_pltfm_driver);

MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip RGA Driver Extension");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:rockchip-rga");