drm/rockchip: rga: replace primitive api with dma sync api
[firefly-linux-kernel-4.4.55.git] / drivers / gpu / drm / rockchip / rockchip_drm_rga.c
1 #include <linux/clk.h>
2 #include <linux/debugfs.h>
3 #include <linux/delay.h>
4 #include <linux/dma-buf.h>
5 #include <linux/dma-mapping.h>
6 #include <linux/interrupt.h>
7 #include <linux/of.h>
8 #include <linux/of_address.h>
9 #include <linux/of_device.h>
10 #include <linux/pm_runtime.h>
11 #include <linux/reset.h>
12 #include <linux/seq_file.h>
13 #include <linux/slab.h>
14 #include <linux/uaccess.h>
15
16 #include <asm/cacheflush.h>
17 #include <drm/drmP.h>
18 #include <drm/rockchip_drm.h>
19
20 #include "rockchip_drm_drv.h"
21 #include "rockchip_drm_rga.h"
22
/* Command-list window: mode registers a cmdlist is allowed to program. */
#define RGA_MODE_BASE_REG               0x0100
#define RGA_MODE_MAX_REG                0x017C

/* Global control/status registers (outside the cmdlist window). */
#define RGA_SYS_CTRL                    0x0000
#define RGA_CMD_CTRL                    0x0004
#define RGA_CMD_BASE                    0x0008
#define RGA_INT                         0x0010
#define RGA_MMU_CTRL0                   0x0014
#define RGA_VERSION_INFO                0x0028

/* Framebuffer plane address registers and RGA MMU page-table bases. */
#define RGA_SRC_Y_RGB_BASE_ADDR         0x0108
#define RGA_SRC_CB_BASE_ADDR            0x010C
#define RGA_SRC_CR_BASE_ADDR            0x0110
#define RGA_SRC1_RGB_BASE_ADDR          0x0114
#define RGA_DST_Y_RGB_BASE_ADDR         0x013C
#define RGA_DST_CB_BASE_ADDR            0x0140
#define RGA_DST_CR_BASE_ADDR            0x014C
#define RGA_MMU_CTRL1                   0x016C
#define RGA_MMU_SRC_BASE                0x0170
#define RGA_MMU_SRC1_BASE               0x0174
#define RGA_MMU_DST_BASE                0x0178

45 static void __user *rga_compat_ptr(u64 value)
46 {
47 #ifdef CONFIG_ARM64
48         return (void __user *)(value);
49 #else
50         return (void __user *)((u32)(value));
51 #endif
52 }
53
/* Write a 32-bit value to an RGA MMIO register at byte offset @reg. */
static inline void rga_write(struct rockchip_rga *rga, u32 reg, u32 value)
{
	writel(value, rga->regs + reg);
}
58
/* Read a 32-bit RGA MMIO register at byte offset @reg. */
static inline u32 rga_read(struct rockchip_rga *rga, u32 reg)
{
	return readl(rga->regs + reg);
}
63
64 static inline void rga_mod(struct rockchip_rga *rga, u32 reg, u32 val, u32 mask)
65 {
66         u32 temp = rga_read(rga, reg) & ~(mask);
67
68         temp |= val & mask;
69         rga_write(rga, reg, temp);
70 }
71
72 static int rga_enable_clocks(struct rockchip_rga *rga)
73 {
74         int ret;
75
76         ret = clk_prepare_enable(rga->sclk);
77         if (ret) {
78                 dev_err(rga->dev, "Cannot enable rga sclk: %d\n", ret);
79                 return ret;
80         }
81
82         ret = clk_prepare_enable(rga->aclk);
83         if (ret) {
84                 dev_err(rga->dev, "Cannot enable rga aclk: %d\n", ret);
85                 goto err_disable_sclk;
86         }
87
88         ret = clk_prepare_enable(rga->hclk);
89         if (ret) {
90                 dev_err(rga->dev, "Cannot enable rga hclk: %d\n", ret);
91                 goto err_disable_aclk;
92         }
93
94         return 0;
95
96 err_disable_sclk:
97         clk_disable_unprepare(rga->sclk);
98 err_disable_aclk:
99         clk_disable_unprepare(rga->aclk);
100
101         return ret;
102 }
103
/*
 * Disable the clocks enabled by rga_enable_clocks().
 * NOTE(review): the order here (sclk, hclk, aclk) is not the exact
 * reverse of the enable order — presumed harmless; confirm if reworked.
 */
static void rga_disable_clocks(struct rockchip_rga *rga)
{
	clk_disable_unprepare(rga->sclk);
	clk_disable_unprepare(rga->hclk);
	clk_disable_unprepare(rga->aclk);
}
110
111 static void rga_init_cmdlist(struct rockchip_rga *rga)
112 {
113         struct rga_cmdlist_node *node;
114         int nr;
115
116         node = rga->cmdlist_node;
117
118         for (nr = 0; nr < ARRAY_SIZE(rga->cmdlist_node); nr++)
119                 list_add_tail(&node[nr].list, &rga->free_cmdlist);
120 }
121
122 static int rga_alloc_dma_buf_for_cmdlist(struct rga_runqueue_node *runqueue)
123 {
124         struct list_head *run_cmdlist = &runqueue->run_cmdlist;
125         struct device *dev = runqueue->dev;
126         struct dma_attrs cmdlist_dma_attrs;
127         struct rga_cmdlist_node *node;
128         void *cmdlist_pool_virt;
129         dma_addr_t cmdlist_pool;
130         int cmdlist_cnt = 0;
131         int count = 0;
132
133         list_for_each_entry(node, run_cmdlist, list)
134                 cmdlist_cnt++;
135
136         init_dma_attrs(&cmdlist_dma_attrs);
137         dma_set_attr(DMA_ATTR_WRITE_COMBINE, &runqueue->cmdlist_dma_attrs);
138
139         cmdlist_pool_virt = dma_alloc_attrs(dev, cmdlist_cnt * RGA_CMDLIST_SIZE,
140                                             &cmdlist_pool, GFP_KERNEL,
141                                             &cmdlist_dma_attrs);
142         if (!cmdlist_pool_virt) {
143                 dev_err(dev, "failed to allocate cmdlist dma memory\n");
144                 return -ENOMEM;
145         }
146
147         /*
148          * Fill in the RGA operation registers from cmdlist command buffer,
149          * and also filled in the MMU TLB base information.
150          */
151         list_for_each_entry(node, run_cmdlist, list) {
152                 struct rga_cmdlist *cmdlist = &node->cmdlist;
153                 unsigned int mmu_ctrl = 0;
154                 unsigned int reg;
155                 u32 *dest;
156                 int i;
157
158                 dest = cmdlist_pool_virt + RGA_CMDLIST_SIZE * 4 * count++;
159
160                 for (i = 0; i < cmdlist->last / 2; i++) {
161                         reg = (node->cmdlist.data[2 * i] - RGA_MODE_BASE_REG);
162                         if (reg > RGA_MODE_BASE_REG)
163                                 continue;
164                         dest[reg >> 2] = cmdlist->data[2 * i + 1];
165                 }
166
167                 if (cmdlist->src_mmu_pages) {
168                         reg = RGA_MMU_SRC_BASE - RGA_MODE_BASE_REG;
169                         dest[reg >> 2] = virt_to_phys(cmdlist->src_mmu_pages) >> 4;
170                         mmu_ctrl |= 0x7;
171                 }
172
173                 if (cmdlist->dst_mmu_pages) {
174                         reg = RGA_MMU_DST_BASE - RGA_MODE_BASE_REG;
175                         dest[reg >> 2] = virt_to_phys(cmdlist->dst_mmu_pages) >> 4;
176                         mmu_ctrl |= 0x7 << 8;
177                 }
178
179                 if (cmdlist->src1_mmu_pages) {
180                         reg = RGA_MMU_SRC1_BASE - RGA_MODE_BASE_REG;
181                         dest[reg >> 2] = virt_to_phys(cmdlist->src1_mmu_pages) >> 4;
182                         mmu_ctrl |= 0x7 << 4;
183                 }
184
185                 reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
186                 dest[reg >> 2] = mmu_ctrl;
187         }
188
189         dma_sync_single_for_device(runqueue->drm_dev->dev,
190                                    virt_to_phys(cmdlist_pool_virt),
191                                    PAGE_SIZE, DMA_TO_DEVICE);
192
193         runqueue->cmdlist_dma_attrs = cmdlist_dma_attrs;
194         runqueue->cmdlist_pool_virt = cmdlist_pool_virt;
195         runqueue->cmdlist_pool = cmdlist_pool;
196         runqueue->cmdlist_cnt = cmdlist_cnt;
197
198         return 0;
199 }
200
201 static int rga_check_reg_offset(struct device *dev,
202                                 struct rga_cmdlist_node *node)
203 {
204         struct rga_cmdlist *cmdlist = &node->cmdlist;
205         int index;
206         int reg;
207         int i;
208
209         for (i = 0; i < cmdlist->last / 2; i++) {
210                 index = cmdlist->last - 2 * (i + 1);
211                 reg = cmdlist->data[index];
212
213                 switch (reg) {
214                 case RGA_BUF_TYPE_GEMFD | RGA_DST_Y_RGB_BASE_ADDR:
215                 case RGA_BUF_TYPE_GEMFD | RGA_SRC_Y_RGB_BASE_ADDR:
216                 case RGA_BUF_TYPE_GEMFD | RGA_SRC1_RGB_BASE_ADDR:
217                         break;
218
219                 case RGA_BUF_TYPE_USERPTR | RGA_DST_Y_RGB_BASE_ADDR:
220                 case RGA_BUF_TYPE_USERPTR | RGA_SRC_Y_RGB_BASE_ADDR:
221                 case RGA_BUF_TYPE_USERPTR | RGA_SRC1_RGB_BASE_ADDR:
222                         goto err;
223
224                 default:
225                         if (reg < RGA_MODE_BASE_REG || reg > RGA_MODE_MAX_REG)
226                                 goto err;
227
228                         if (reg % 4)
229                                 goto err;
230                 }
231         }
232
233         return 0;
234
235 err:
236         dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
237         return -EINVAL;
238 }
239
240 static struct dma_buf_attachment *
241 rga_gem_buf_to_pages(struct rockchip_rga *rga, void **mmu_pages, int fd)
242 {
243         struct dma_buf_attachment *attach;
244         struct dma_buf *dmabuf;
245         struct sg_table *sgt;
246         struct scatterlist *sgl;
247         unsigned int mapped_size = 0;
248         unsigned int address;
249         unsigned int len;
250         unsigned int i, p;
251         unsigned int *pages;
252         int ret;
253
254         dmabuf = dma_buf_get(fd);
255         if (IS_ERR(dmabuf)) {
256                 dev_err(rga->dev, "Failed to get dma_buf with fd %d\n", fd);
257                 return ERR_PTR(-EINVAL);
258         }
259
260         attach = dma_buf_attach(dmabuf, rga->dev);
261         if (IS_ERR(attach)) {
262                 dev_err(rga->dev, "Failed to attach dma_buf\n");
263                 ret = PTR_ERR(attach);
264                 goto failed_attach;
265         }
266
267         sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
268         if (IS_ERR(sgt)) {
269                 dev_err(rga->dev, "Failed to map dma_buf attachment\n");
270                 ret = PTR_ERR(sgt);
271                 goto failed_detach;
272         }
273
274         /*
275          * Alloc (2^3 * 4K) = 32K byte for storing pages, those space could
276          * cover 32K * 4K = 128M ram address.
277          */
278         pages = (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
279
280         for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
281                 len = sg_dma_len(sgl) >> PAGE_SHIFT;
282                 address = sg_phys(sgl);
283
284                 for (p = 0; p < len; p++) {
285                         dma_addr_t phys = address + (p << PAGE_SHIFT);
286
287                         pages[mapped_size + p] = phys;
288                 }
289
290                 mapped_size += len;
291         }
292
293         dma_sync_single_for_device(rga->drm_dev->dev, virt_to_phys(pages),
294                                    8 * PAGE_SIZE, DMA_TO_DEVICE);
295
296         *mmu_pages = pages;
297
298         dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
299
300         return attach;
301
302 failed_detach:
303         dma_buf_detach(dmabuf, attach);
304 failed_attach:
305         dma_buf_put(dmabuf);
306
307         return ERR_PTR(ret);
308 }
309
310 static int rga_map_cmdlist_gem(struct rockchip_rga *rga,
311                                struct rga_cmdlist_node *node,
312                                struct drm_device *drm_dev,
313                                struct drm_file *file)
314 {
315         struct rga_cmdlist *cmdlist = &node->cmdlist;
316         struct dma_buf_attachment *attach;
317         void *mmu_pages;
318         int fd;
319         int i;
320
321         for (i = 0; i < cmdlist->last / 2; i++) {
322                 int index = cmdlist->last - 2 * (i + 1);
323
324                 switch (cmdlist->data[index]) {
325                 case RGA_SRC1_RGB_BASE_ADDR | RGA_BUF_TYPE_GEMFD:
326                         fd = cmdlist->data[index + 1];
327                         attach = rga_gem_buf_to_pages(rga, &mmu_pages, fd);
328                         if (IS_ERR(attach))
329                                 return PTR_ERR(attach);
330
331                         cmdlist->src1_attach = attach;
332                         cmdlist->src1_mmu_pages = mmu_pages;
333                         break;
334
335                 case RGA_SRC_Y_RGB_BASE_ADDR | RGA_BUF_TYPE_GEMFD:
336                         fd = cmdlist->data[index + 1];
337                         attach = rga_gem_buf_to_pages(rga, &mmu_pages, fd);
338                         if (IS_ERR(attach))
339                                 return PTR_ERR(attach);
340
341                         cmdlist->src_attach = attach;
342                         cmdlist->src_mmu_pages = mmu_pages;
343                         break;
344
345                 case RGA_DST_Y_RGB_BASE_ADDR | RGA_BUF_TYPE_GEMFD:
346                         fd = cmdlist->data[index + 1];
347                         attach = rga_gem_buf_to_pages(rga, &mmu_pages, fd);
348                         if (IS_ERR(attach))
349                                 return PTR_ERR(attach);
350
351                         cmdlist->dst_attach = attach;
352                         cmdlist->dst_mmu_pages = mmu_pages;
353                         break;
354                 }
355         }
356
357         return 0;
358 }
359
/*
 * Drop every dma-buf attachment held by @node's cmdlist and free the
 * order-3 MMU page tables built by rga_gem_buf_to_pages(). Safe to call
 * on a partially-mapped node: each slot is checked and NULLed, so the
 * function is also idempotent.
 */
static void rga_unmap_cmdlist_gem(struct rockchip_rga *rga,
				  struct rga_cmdlist_node *node)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = node->cmdlist.src_attach;
	if (attach) {
		dma_buf = attach->dmabuf;
		dma_buf_detach(dma_buf, attach);
		/* Drops the reference taken by dma_buf_get() at map time. */
		dma_buf_put(dma_buf);
	}
	node->cmdlist.src_attach = NULL;

	attach = node->cmdlist.src1_attach;
	if (attach) {
		dma_buf = attach->dmabuf;
		dma_buf_detach(dma_buf, attach);
		dma_buf_put(dma_buf);
	}
	node->cmdlist.src1_attach = NULL;

	attach = node->cmdlist.dst_attach;
	if (attach) {
		dma_buf = attach->dmabuf;
		dma_buf_detach(dma_buf, attach);
		dma_buf_put(dma_buf);
	}
	node->cmdlist.dst_attach = NULL;

	/* Page tables were allocated with __get_free_pages(order = 3). */
	if (node->cmdlist.src_mmu_pages)
		free_pages((unsigned long)node->cmdlist.src_mmu_pages, 3);
	node->cmdlist.src_mmu_pages = NULL;

	if (node->cmdlist.src1_mmu_pages)
		free_pages((unsigned long)node->cmdlist.src1_mmu_pages, 3);
	node->cmdlist.src1_mmu_pages = NULL;

	if (node->cmdlist.dst_mmu_pages)
		free_pages((unsigned long)node->cmdlist.dst_mmu_pages, 3);
	node->cmdlist.dst_mmu_pages = NULL;
}
402
403 static void rga_cmd_start(struct rockchip_rga *rga,
404                           struct rga_runqueue_node *runqueue)
405 {
406         int ret;
407
408         ret = pm_runtime_get_sync(rga->dev);
409         if (ret < 0)
410                 return;
411
412         rga_write(rga, RGA_SYS_CTRL, 0x00);
413
414         rga_write(rga, RGA_CMD_BASE, runqueue->cmdlist_pool);
415
416         rga_write(rga, RGA_SYS_CTRL, 0x22);
417
418         rga_write(rga, RGA_INT, 0x600);
419
420         rga_write(rga, RGA_CMD_CTRL, ((runqueue->cmdlist_cnt - 1) << 3) | 0x1);
421 }
422
/*
 * Release a finished runqueue: free its DMA command pool (if one was
 * allocated), unmap every cmdlist's GEM buffers, return the cmdlist
 * nodes to the free pool, and free the runqueue object itself.
 */
static void rga_free_runqueue_node(struct rockchip_rga *rga,
				   struct rga_runqueue_node *runqueue)
{
	struct rga_cmdlist_node *node;

	if (!runqueue)
		return;

	/* Both fields are set only after a successful pool allocation. */
	if (runqueue->cmdlist_pool_virt && runqueue->cmdlist_pool)
		dma_free_attrs(rga->dev, runqueue->cmdlist_cnt * RGA_CMDLIST_SIZE,
			       runqueue->cmdlist_pool_virt,
			       runqueue->cmdlist_pool,
			       &runqueue->cmdlist_dma_attrs);

	mutex_lock(&rga->cmdlist_mutex);
	/*
	 * commands in run_cmdlist have been completed so unmap all gem
	 * objects in each command node so that they are unreferenced.
	 */
	list_for_each_entry(node, &runqueue->run_cmdlist, list)
		rga_unmap_cmdlist_gem(rga, node);
	list_splice_tail_init(&runqueue->run_cmdlist, &rga->free_cmdlist);
	mutex_unlock(&rga->cmdlist_mutex);

	kmem_cache_free(rga->runqueue_slab, runqueue);
}
449
450 static struct rga_runqueue_node *rga_get_runqueue(struct rockchip_rga *rga)
451 {
452         struct rga_runqueue_node *runqueue;
453
454         if (list_empty(&rga->runqueue_list))
455                 return NULL;
456
457         runqueue = list_first_entry(&rga->runqueue_list,
458                                     struct rga_runqueue_node, list);
459         list_del_init(&runqueue->list);
460
461         return runqueue;
462 }
463
/*
 * Pop the next pending runqueue (if any) and start the hardware on it.
 * Callers normally hold runqueue_mutex (exec ioctl, worker);
 * rga_resume() presumably relies on no concurrent activity — confirm.
 */
static void rga_exec_runqueue(struct rockchip_rga *rga)
{
	rga->runqueue_node = rga_get_runqueue(rga);
	if (rga->runqueue_node)
		rga_cmd_start(rga, rga->runqueue_node);
}
470
471 static struct rga_cmdlist_node *rga_get_cmdlist(struct rockchip_rga *rga)
472 {
473         struct rga_cmdlist_node *node;
474         struct device *dev = rga->dev;
475
476         mutex_lock(&rga->cmdlist_mutex);
477         if (list_empty(&rga->free_cmdlist)) {
478                 dev_err(dev, "there is no free cmdlist\n");
479                 mutex_unlock(&rga->cmdlist_mutex);
480                 return NULL;
481         }
482
483         node = list_first_entry(&rga->free_cmdlist,
484                                 struct rga_cmdlist_node, list);
485         list_del_init(&node->list);
486         mutex_unlock(&rga->cmdlist_mutex);
487
488         return node;
489 }
490
/* Return a cmdlist node to the free pool. */
static void rga_put_cmdlist(struct rockchip_rga *rga, struct rga_cmdlist_node *node)
{
	mutex_lock(&rga->cmdlist_mutex);
	list_move_tail(&node->list, &rga->free_cmdlist);
	mutex_unlock(&rga->cmdlist_mutex);
}
497
498 static void rga_add_cmdlist_to_inuse(struct rockchip_drm_rga_private *rga_priv,
499                                      struct rga_cmdlist_node *node)
500 {
501         struct rga_cmdlist_node *lnode;
502
503         if (list_empty(&rga_priv->inuse_cmdlist))
504                 goto add_to_list;
505
506         /* this links to base address of new cmdlist */
507         lnode = list_entry(rga_priv->inuse_cmdlist.prev,
508                            struct rga_cmdlist_node, list);
509
510 add_to_list:
511         list_add_tail(&node->list, &rga_priv->inuse_cmdlist);
512 }
513
514 /*
515  * IOCRL functions for userspace to get RGA version.
516  */
517 int rockchip_rga_get_ver_ioctl(struct drm_device *drm_dev, void *data,
518                                struct drm_file *file)
519 {
520         struct rockchip_drm_file_private *file_priv = file->driver_priv;
521         struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
522         struct drm_rockchip_rga_get_ver *ver = data;
523         struct rockchip_rga *rga;
524         struct device *dev;
525
526         if (!rga_priv)
527                 return -ENODEV;
528
529         dev = rga_priv->dev;
530         if (!dev)
531                 return -ENODEV;
532
533         rga = dev_get_drvdata(dev);
534         if (!rga)
535                 return -EFAULT;
536
537         ver->major = rga->version.major;
538         ver->minor = rga->version.minor;
539
540         return 0;
541 }
542
543 /*
544  * IOCRL functions for userspace to send an RGA request.
545  */
546 int rockchip_rga_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
547                                    struct drm_file *file)
548 {
549         struct rockchip_drm_file_private *file_priv = file->driver_priv;
550         struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
551         struct drm_rockchip_rga_set_cmdlist *req = data;
552         struct rga_cmdlist_node *node;
553         struct rga_cmdlist *cmdlist;
554         struct rockchip_rga *rga;
555         int ret;
556
557         if (!rga_priv)
558                 return -ENODEV;
559
560         if (!rga_priv->dev)
561                 return -ENODEV;
562
563         rga = dev_get_drvdata(rga_priv->dev);
564         if (!rga)
565                 return -EFAULT;
566
567         if (req->cmd_nr > RGA_CMDLIST_SIZE || req->cmd_buf_nr > RGA_CMDBUF_SIZE) {
568                 dev_err(rga->dev, "cmdlist size is too big\n");
569                 return -EINVAL;
570         }
571
572         node = rga_get_cmdlist(rga);
573         if (!node)
574                 return -ENOMEM;
575
576         cmdlist = &node->cmdlist;
577         cmdlist->last = 0;
578
579         /*
580          * Copy the command / buffer registers setting from userspace, each
581          * command have two integer, one for register offset, another for
582          * register value.
583          */
584         if (copy_from_user(cmdlist->data, rga_compat_ptr(req->cmd),
585                            sizeof(struct drm_rockchip_rga_cmd) * req->cmd_nr))
586                 return -EFAULT;
587         cmdlist->last += req->cmd_nr * 2;
588
589         if (copy_from_user(&cmdlist->data[cmdlist->last],
590                            rga_compat_ptr(req->cmd_buf),
591                            sizeof(struct drm_rockchip_rga_cmd) * req->cmd_buf_nr))
592                 return -EFAULT;
593         cmdlist->last += req->cmd_buf_nr * 2;
594
595         /*
596          * Check the userspace command registers, and mapping the framebuffer,
597          * create the RGA mmu pages or get the framebuffer dma address.
598          */
599         ret = rga_check_reg_offset(rga->dev, node);
600         if (ret < 0) {
601                 dev_err(rga->dev, "Check reg offset failed\n");
602                 goto err_free_cmdlist;
603         }
604
605         ret = rga_map_cmdlist_gem(rga, node, drm_dev, file);
606         if (ret < 0) {
607                 dev_err(rga->dev, "Failed to map cmdlist\n");
608                 goto err_unmap_cmdlist;
609         }
610
611         rga_add_cmdlist_to_inuse(rga_priv, node);
612
613         return 0;
614
615 err_unmap_cmdlist:
616         rga_unmap_cmdlist_gem(rga, node);
617 err_free_cmdlist:
618         rga_put_cmdlist(rga, node);
619
620         return ret;
621 }
622
623 /*
624  * IOCRL functions for userspace to start RGA transform.
625  */
626 int rockchip_rga_exec_ioctl(struct drm_device *drm_dev, void *data,
627                             struct drm_file *file)
628 {
629         struct rockchip_drm_file_private *file_priv = file->driver_priv;
630         struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
631         struct rga_runqueue_node *runqueue;
632         struct rockchip_rga *rga;
633         struct device *dev;
634         int ret;
635
636         if (!rga_priv)
637                 return -ENODEV;
638
639         dev = rga_priv->dev;
640         if (!dev)
641                 return -ENODEV;
642
643         rga = dev_get_drvdata(dev);
644         if (!rga)
645                 return -EFAULT;
646
647         runqueue = kmem_cache_alloc(rga->runqueue_slab, GFP_KERNEL);
648         if (!runqueue) {
649                 dev_err(rga->dev, "failed to allocate memory\n");
650                 return -ENOMEM;
651         }
652
653         runqueue->drm_dev = drm_dev;
654         runqueue->dev = rga->dev;
655
656         init_completion(&runqueue->complete);
657
658         INIT_LIST_HEAD(&runqueue->run_cmdlist);
659
660         list_splice_init(&rga_priv->inuse_cmdlist, &runqueue->run_cmdlist);
661
662         if (list_empty(&runqueue->run_cmdlist)) {
663                 dev_err(rga->dev, "there is no inuse cmdlist\n");
664                 kmem_cache_free(rga->runqueue_slab, runqueue);
665                 return -EPERM;
666         }
667
668         ret = rga_alloc_dma_buf_for_cmdlist(runqueue);
669         if (ret < 0) {
670                 dev_err(rga->dev, "cmdlist init failed\n");
671                 return ret;
672         }
673
674         mutex_lock(&rga->runqueue_mutex);
675         runqueue->pid = current->pid;
676         runqueue->file = file;
677         list_add_tail(&runqueue->list, &rga->runqueue_list);
678         if (!rga->runqueue_node)
679                 rga_exec_runqueue(rga);
680         mutex_unlock(&rga->runqueue_mutex);
681
682         wait_for_completion(&runqueue->complete);
683         rga_free_runqueue_node(rga, runqueue);
684
685         return 0;
686 }
687
688 static int rockchip_rga_open(struct drm_device *drm_dev, struct device *dev,
689                              struct drm_file *file)
690 {
691         struct rockchip_drm_file_private *file_priv = file->driver_priv;
692         struct rockchip_drm_rga_private *rga_priv;
693         struct rockchip_rga *rga;
694
695         rga = dev_get_drvdata(dev);
696         rga->drm_dev = drm_dev;
697
698         rga_priv = kzalloc(sizeof(*rga_priv), GFP_KERNEL);
699         if (!rga_priv)
700                 return -ENOMEM;
701
702         rga_priv->dev = dev;
703         file_priv->rga_priv = rga_priv;
704
705         INIT_LIST_HEAD(&rga_priv->inuse_cmdlist);
706
707         return 0;
708 }
709
/*
 * Per-file teardown: unmap and reclaim any cmdlists the process queued
 * but never executed, then free its private data.
 * NOTE(review): rga_priv is dereferenced without a NULL check — presumed
 * safe because close only runs after a successful open; confirm.
 */
static void rockchip_rga_close(struct drm_device *drm_dev, struct device *dev,
			       struct drm_file *file)
{
	struct rockchip_drm_file_private *file_priv = file->driver_priv;
	struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
	struct rga_cmdlist_node *node, *n;
	struct rockchip_rga *rga;

	if (!dev)
		return;

	rga = dev_get_drvdata(dev);
	if (!rga)
		return;

	mutex_lock(&rga->cmdlist_mutex);
	list_for_each_entry_safe(node, n, &rga_priv->inuse_cmdlist, list) {
		/*
		 * unmap all gem objects not completed.
		 *
		 * P.S. if current process was terminated forcely then
		 * there may be some commands in inuse_cmdlist so unmap
		 * them.
		 */
		rga_unmap_cmdlist_gem(rga, node);
		list_move_tail(&node->list, &rga->free_cmdlist);
	}
	mutex_unlock(&rga->cmdlist_mutex);

	kfree(file_priv->rga_priv);
}
741
/*
 * Deferred completion handler queued by the IRQ handler: drop the
 * runtime-PM reference taken in rga_cmd_start(), wake the exec ioctl
 * waiting on the finished runqueue, and start the next one unless the
 * device is suspending.
 */
static void rga_runqueue_worker(struct work_struct *work)
{
	struct rockchip_rga *rga = container_of(work, struct rockchip_rga,
					    runqueue_work);

	mutex_lock(&rga->runqueue_mutex);
	/* Balances pm_runtime_get_sync() in rga_cmd_start(). */
	pm_runtime_put_sync(rga->dev);

	complete(&rga->runqueue_node->complete);

	if (rga->suspended)
		rga->runqueue_node = NULL;
	else
		rga_exec_runqueue(rga);

	mutex_unlock(&rga->runqueue_mutex);
}
759
/*
 * RGA interrupt handler: latch the low status nibble, acknowledge it,
 * and defer completion handling to the workqueue.
 */
static irqreturn_t rga_irq_handler(int irq, void *dev_id)
{
	struct rockchip_rga *rga = dev_id;
	int intr;

	intr = rga_read(rga, RGA_INT) & 0xf;

	/* Writing the status bits back shifted into [7:4] clears them —
	 * presumed ack semantics; confirm against the RGA TRM. */
	rga_mod(rga, RGA_INT, intr << 4, 0xf << 4);

	/* Bit 2 presumably signals "all command lists finished". */
	if (intr & 0x04)
		queue_work(rga->rga_workq, &rga->runqueue_work);

	return IRQ_HANDLED;
}
774
775 static int rga_parse_dt(struct rockchip_rga *rga)
776 {
777         struct reset_control *core_rst, *axi_rst, *ahb_rst;
778
779         core_rst = devm_reset_control_get(rga->dev, "core");
780         if (IS_ERR(core_rst)) {
781                 dev_err(rga->dev, "failed to get core reset controller\n");
782                 return PTR_ERR(core_rst);
783         }
784
785         axi_rst = devm_reset_control_get(rga->dev, "axi");
786         if (IS_ERR(axi_rst)) {
787                 dev_err(rga->dev, "failed to get axi reset controller\n");
788                 return PTR_ERR(axi_rst);
789         }
790
791         ahb_rst = devm_reset_control_get(rga->dev, "ahb");
792         if (IS_ERR(ahb_rst)) {
793                 dev_err(rga->dev, "failed to get ahb reset controller\n");
794                 return PTR_ERR(ahb_rst);
795         }
796
797         reset_control_assert(core_rst);
798         udelay(1);
799         reset_control_deassert(core_rst);
800
801         reset_control_assert(axi_rst);
802         udelay(1);
803         reset_control_deassert(axi_rst);
804
805         reset_control_assert(ahb_rst);
806         udelay(1);
807         reset_control_deassert(ahb_rst);
808
809         rga->sclk = devm_clk_get(rga->dev, "sclk");
810         if (IS_ERR(rga->sclk)) {
811                 dev_err(rga->dev, "failed to get sclk clock\n");
812                 return PTR_ERR(rga->sclk);
813         }
814
815         rga->aclk = devm_clk_get(rga->dev, "aclk");
816         if (IS_ERR(rga->aclk)) {
817                 dev_err(rga->dev, "failed to get aclk clock\n");
818                 return PTR_ERR(rga->aclk);
819         }
820
821         rga->hclk = devm_clk_get(rga->dev, "hclk");
822         if (IS_ERR(rga->hclk)) {
823                 dev_err(rga->dev, "failed to get hclk clock\n");
824                 return PTR_ERR(rga->hclk);
825         }
826
827         return rga_enable_clocks(rga);
828 }
829
/* SoCs whose RGA block this driver binds to. */
static const struct of_device_id rockchip_rga_dt_ids[] = {
	{ .compatible = "rockchip,rk3288-rga", },
	{ .compatible = "rockchip,rk3228-rga", },
	{ .compatible = "rockchip,rk3399-rga", },
	{},
};
MODULE_DEVICE_TABLE(of, rockchip_rga_dt_ids);
837
838 static int rga_probe(struct platform_device *pdev)
839 {
840         struct drm_rockchip_subdrv *subdrv;
841         struct rockchip_rga *rga;
842         struct resource *iores;
843         int irq;
844         int ret;
845
846         if (!pdev->dev.of_node)
847                 return -ENODEV;
848
849         rga = devm_kzalloc(&pdev->dev, sizeof(*rga), GFP_KERNEL);
850         if (!rga)
851                 return -ENOMEM;
852
853         rga->dev = &pdev->dev;
854
855         rga->runqueue_slab = kmem_cache_create("rga_runqueue_slab",
856                                                sizeof(struct rga_runqueue_node),
857                                                0, 0, NULL);
858         if (!rga->runqueue_slab)
859                 return -ENOMEM;
860
861         rga->rga_workq = create_singlethread_workqueue("rga");
862         if (!rga->rga_workq) {
863                 dev_err(rga->dev, "failed to create workqueue\n");
864                 ret = -ENOMEM;
865                 goto err_destroy_slab;
866         }
867
868         INIT_WORK(&rga->runqueue_work, rga_runqueue_worker);
869         INIT_LIST_HEAD(&rga->runqueue_list);
870         mutex_init(&rga->runqueue_mutex);
871
872         INIT_LIST_HEAD(&rga->free_cmdlist);
873         mutex_init(&rga->cmdlist_mutex);
874
875         rga_init_cmdlist(rga);
876
877         ret = rga_parse_dt(rga);
878         if (ret) {
879                 dev_err(rga->dev, "Unable to parse OF data\n");
880                 goto err_destroy_workqueue;
881         }
882
883         pm_runtime_enable(rga->dev);
884
885         iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
886
887         rga->regs = devm_ioremap_resource(rga->dev, iores);
888         if (IS_ERR(rga->regs)) {
889                 ret = PTR_ERR(rga->regs);
890                 goto err_put_clk;
891         }
892
893         irq = platform_get_irq(pdev, 0);
894         if (irq < 0) {
895                 dev_err(rga->dev, "failed to get irq\n");
896                 ret = irq;
897                 goto err_put_clk;
898         }
899
900         ret = devm_request_irq(rga->dev, irq, rga_irq_handler, 0,
901                                dev_name(rga->dev), rga);
902         if (ret < 0) {
903                 dev_err(rga->dev, "failed to request irq\n");
904                 goto err_put_clk;
905         }
906
907         platform_set_drvdata(pdev, rga);
908
909         rga->version.major = (rga_read(rga, RGA_VERSION_INFO) >> 24) & 0xFF;
910         rga->version.minor = (rga_read(rga, RGA_VERSION_INFO) >> 20) & 0x0F;
911
912         subdrv = &rga->subdrv;
913         subdrv->dev = rga->dev;
914         subdrv->open = rockchip_rga_open;
915         subdrv->close = rockchip_rga_close;
916
917         rockchip_drm_register_subdrv(subdrv);
918
919         return 0;
920
921 err_put_clk:
922         pm_runtime_disable(rga->dev);
923 err_destroy_workqueue:
924         destroy_workqueue(rga->rga_workq);
925 err_destroy_slab:
926         kmem_cache_destroy(rga->runqueue_slab);
927
928         return ret;
929 }
930
931 static int rga_remove(struct platform_device *pdev)
932 {
933         struct rockchip_rga *rga = platform_get_drvdata(pdev);
934
935         cancel_work_sync(&rga->runqueue_work);
936
937         while (rga->runqueue_node) {
938                 rga_free_runqueue_node(rga, rga->runqueue_node);
939                 rga->runqueue_node = rga_get_runqueue(rga);
940         }
941
942         rockchip_drm_unregister_subdrv(&rga->subdrv);
943
944         pm_runtime_disable(rga->dev);
945
946         return 0;
947 }
948
/*
 * System-sleep suspend: mark the device suspended so the worker does
 * not start a new runqueue, then wait for the in-flight one to finish.
 */
static int rga_suspend(struct device *dev)
{
	struct rockchip_rga *rga = dev_get_drvdata(dev);

	mutex_lock(&rga->runqueue_mutex);
	rga->suspended = true;
	mutex_unlock(&rga->runqueue_mutex);

	flush_work(&rga->runqueue_work);

	return 0;
}
961
/*
 * System-sleep resume: clear the suspended flag and restart any queued
 * work. NOTE(review): runqueue_mutex is not taken here, unlike every
 * other rga_exec_runqueue() call site — confirm no race with the worker.
 */
static int rga_resume(struct device *dev)
{
	struct rockchip_rga *rga = dev_get_drvdata(dev);

	rga->suspended = false;
	rga_exec_runqueue(rga);

	return 0;
}
971
#ifdef CONFIG_PM
/* Runtime suspend: gate all RGA clocks while the engine is idle. */
static int rga_runtime_suspend(struct device *dev)
{
	struct rockchip_rga *rga = dev_get_drvdata(dev);

	rga_disable_clocks(rga);

	return 0;
}

/* Runtime resume: re-enable the clocks before the next job starts. */
static int rga_runtime_resume(struct device *dev)
{
	struct rockchip_rga *rga = dev_get_drvdata(dev);

	return rga_enable_clocks(rga);
}
#endif
989
/* System-sleep and runtime PM hooks; runtime callbacks gate the clocks. */
static const struct dev_pm_ops rga_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rga_suspend, rga_resume)
	SET_RUNTIME_PM_OPS(rga_runtime_suspend,
			   rga_runtime_resume, NULL)
};
995
/* Platform glue: binds by OF compatible, with sleep + runtime PM ops. */
static struct platform_driver rga_pltfm_driver = {
	.probe  = rga_probe,
	.remove = rga_remove,
	.driver = {
		.name = "rockchip-rga",
		.pm = &rga_pm,
		.of_match_table = rockchip_rga_dt_ids,
	},
};
1005
1006 module_platform_driver(rga_pltfm_driver);
1007
1008 MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
1009 MODULE_DESCRIPTION("Rockchip RGA Driver Extension");
1010 MODULE_LICENSE("GPL");
1011 MODULE_ALIAS("platform:rockchip-rga");