2 #include <linux/debugfs.h>
3 #include <linux/delay.h>
4 #include <linux/dma-buf.h>
5 #include <linux/dma-mapping.h>
6 #include <linux/interrupt.h>
8 #include <linux/of_address.h>
9 #include <linux/of_device.h>
10 #include <linux/pm_runtime.h>
11 #include <linux/reset.h>
12 #include <linux/seq_file.h>
13 #include <linux/slab.h>
14 #include <linux/uaccess.h>
16 #include <asm/cacheflush.h>
18 #include <drm/rockchip_drm.h>
20 #include "rockchip_drm_drv.h"
21 #include "rockchip_drm_rga.h"
/* RGA per-operation ("mode") registers occupy 0x0100..0x017C. */
#define RGA_MODE_BASE_REG 0x0100
#define RGA_MODE_MAX_REG 0x017C

/* Global control / status registers. */
#define RGA_SYS_CTRL 0x0000
#define RGA_CMD_CTRL 0x0004
#define RGA_CMD_BASE 0x0008
#define RGA_INT 0x0010
#define RGA_MMU_CTRL0 0x0014
#define RGA_VERSION_INFO 0x0028

/* Source / destination buffer base-address registers (mode range). */
#define RGA_SRC_Y_RGB_BASE_ADDR 0x0108
#define RGA_SRC_CB_BASE_ADDR 0x010C
#define RGA_SRC_CR_BASE_ADDR 0x0110
#define RGA_SRC1_RGB_BASE_ADDR 0x0114
#define RGA_DST_Y_RGB_BASE_ADDR 0x013C
#define RGA_DST_CB_BASE_ADDR 0x0140
#define RGA_DST_CR_BASE_ADDR 0x014C

/* MMU translation-table base registers (mode range). */
#define RGA_MMU_CTRL1 0x016C
#define RGA_MMU_SRC_BASE 0x0170
#define RGA_MMU_SRC1_BASE 0x0174
#define RGA_MMU_DST_BASE 0x0178
45 static void __user *rga_compat_ptr(u64 value)
48 return (void __user *)(value);
50 return (void __user *)((u32)(value));
54 static inline void rga_write(struct rockchip_rga *rga, u32 reg, u32 value)
56 writel(value, rga->regs + reg);
59 static inline u32 rga_read(struct rockchip_rga *rga, u32 reg)
61 return readl(rga->regs + reg);
64 static inline void rga_mod(struct rockchip_rga *rga, u32 reg, u32 val, u32 mask)
66 u32 temp = rga_read(rga, reg) & ~(mask);
69 rga_write(rga, reg, temp);
72 static int rga_enable_clocks(struct rockchip_rga *rga)
76 ret = clk_prepare_enable(rga->sclk);
78 dev_err(rga->dev, "Cannot enable rga sclk: %d\n", ret);
82 ret = clk_prepare_enable(rga->aclk);
84 dev_err(rga->dev, "Cannot enable rga aclk: %d\n", ret);
85 goto err_disable_sclk;
88 ret = clk_prepare_enable(rga->hclk);
90 dev_err(rga->dev, "Cannot enable rga hclk: %d\n", ret);
91 goto err_disable_aclk;
97 clk_disable_unprepare(rga->sclk);
99 clk_disable_unprepare(rga->aclk);
104 static void rga_disable_clocks(struct rockchip_rga *rga)
106 clk_disable_unprepare(rga->sclk);
107 clk_disable_unprepare(rga->hclk);
108 clk_disable_unprepare(rga->aclk);
111 static void rga_init_cmdlist(struct rockchip_rga *rga)
113 struct rga_cmdlist_node *node;
116 node = rga->cmdlist_node;
118 for (nr = 0; nr < ARRAY_SIZE(rga->cmdlist_node); nr++)
119 list_add_tail(&node[nr].list, &rga->free_cmdlist);
122 static int rga_alloc_dma_buf_for_cmdlist(struct rga_runqueue_node *runqueue)
124 struct list_head *run_cmdlist = &runqueue->run_cmdlist;
125 struct device *dev = runqueue->dev;
126 struct dma_attrs cmdlist_dma_attrs;
127 struct rga_cmdlist_node *node;
128 void *cmdlist_pool_virt;
129 dma_addr_t cmdlist_pool;
133 list_for_each_entry(node, run_cmdlist, list)
136 init_dma_attrs(&cmdlist_dma_attrs);
137 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &runqueue->cmdlist_dma_attrs);
139 cmdlist_pool_virt = dma_alloc_attrs(dev, cmdlist_cnt * RGA_CMDLIST_SIZE,
140 &cmdlist_pool, GFP_KERNEL,
142 if (!cmdlist_pool_virt) {
143 dev_err(dev, "failed to allocate cmdlist dma memory\n");
148 * Fill in the RGA operation registers from cmdlist command buffer,
149 * and also filled in the MMU TLB base information.
151 list_for_each_entry(node, run_cmdlist, list) {
152 struct rga_cmdlist *cmdlist = &node->cmdlist;
153 unsigned int mmu_ctrl = 0;
158 dest = cmdlist_pool_virt + RGA_CMDLIST_SIZE * 4 * count++;
160 for (i = 0; i < cmdlist->last / 2; i++) {
161 reg = (node->cmdlist.data[2 * i] - RGA_MODE_BASE_REG);
162 if (reg > RGA_MODE_BASE_REG)
164 dest[reg >> 2] = cmdlist->data[2 * i + 1];
167 if (cmdlist->src_mmu_pages) {
168 reg = RGA_MMU_SRC_BASE - RGA_MODE_BASE_REG;
169 dest[reg >> 2] = virt_to_phys(cmdlist->src_mmu_pages) >> 4;
173 if (cmdlist->dst_mmu_pages) {
174 reg = RGA_MMU_DST_BASE - RGA_MODE_BASE_REG;
175 dest[reg >> 2] = virt_to_phys(cmdlist->dst_mmu_pages) >> 4;
176 mmu_ctrl |= 0x7 << 8;
179 if (cmdlist->src1_mmu_pages) {
180 reg = RGA_MMU_SRC1_BASE - RGA_MODE_BASE_REG;
181 dest[reg >> 2] = virt_to_phys(cmdlist->src1_mmu_pages) >> 4;
182 mmu_ctrl |= 0x7 << 4;
185 reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
186 dest[reg >> 2] = mmu_ctrl;
189 dma_sync_single_for_device(runqueue->drm_dev->dev,
190 virt_to_phys(cmdlist_pool_virt),
191 PAGE_SIZE, DMA_TO_DEVICE);
193 runqueue->cmdlist_dma_attrs = cmdlist_dma_attrs;
194 runqueue->cmdlist_pool_virt = cmdlist_pool_virt;
195 runqueue->cmdlist_pool = cmdlist_pool;
196 runqueue->cmdlist_cnt = cmdlist_cnt;
201 static int rga_check_reg_offset(struct device *dev,
202 struct rga_cmdlist_node *node)
204 struct rga_cmdlist *cmdlist = &node->cmdlist;
209 for (i = 0; i < cmdlist->last / 2; i++) {
210 index = cmdlist->last - 2 * (i + 1);
211 reg = cmdlist->data[index];
214 case RGA_BUF_TYPE_GEMFD | RGA_DST_Y_RGB_BASE_ADDR:
215 case RGA_BUF_TYPE_GEMFD | RGA_SRC_Y_RGB_BASE_ADDR:
216 case RGA_BUF_TYPE_GEMFD | RGA_SRC1_RGB_BASE_ADDR:
219 case RGA_BUF_TYPE_USERPTR | RGA_DST_Y_RGB_BASE_ADDR:
220 case RGA_BUF_TYPE_USERPTR | RGA_SRC_Y_RGB_BASE_ADDR:
221 case RGA_BUF_TYPE_USERPTR | RGA_SRC1_RGB_BASE_ADDR:
225 if (reg < RGA_MODE_BASE_REG || reg > RGA_MODE_MAX_REG)
236 dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
240 static struct dma_buf_attachment *
241 rga_gem_buf_to_pages(struct rockchip_rga *rga, void **mmu_pages, int fd)
243 struct dma_buf_attachment *attach;
244 struct dma_buf *dmabuf;
245 struct sg_table *sgt;
246 struct scatterlist *sgl;
247 unsigned int mapped_size = 0;
248 unsigned int address;
254 dmabuf = dma_buf_get(fd);
255 if (IS_ERR(dmabuf)) {
256 dev_err(rga->dev, "Failed to get dma_buf with fd %d\n", fd);
257 return ERR_PTR(-EINVAL);
260 attach = dma_buf_attach(dmabuf, rga->dev);
261 if (IS_ERR(attach)) {
262 dev_err(rga->dev, "Failed to attach dma_buf\n");
263 ret = PTR_ERR(attach);
267 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
269 dev_err(rga->dev, "Failed to map dma_buf attachment\n");
275 * Alloc (2^3 * 4K) = 32K byte for storing pages, those space could
276 * cover 32K * 4K = 128M ram address.
278 pages = (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
280 for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
281 len = sg_dma_len(sgl) >> PAGE_SHIFT;
282 address = sg_phys(sgl);
284 for (p = 0; p < len; p++) {
285 dma_addr_t phys = address + (p << PAGE_SHIFT);
287 pages[mapped_size + p] = phys;
293 dma_sync_single_for_device(rga->drm_dev->dev, virt_to_phys(pages),
294 8 * PAGE_SIZE, DMA_TO_DEVICE);
298 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
303 dma_buf_detach(dmabuf, attach);
310 static int rga_map_cmdlist_gem(struct rockchip_rga *rga,
311 struct rga_cmdlist_node *node,
312 struct drm_device *drm_dev,
313 struct drm_file *file)
315 struct rga_cmdlist *cmdlist = &node->cmdlist;
316 struct dma_buf_attachment *attach;
321 for (i = 0; i < cmdlist->last / 2; i++) {
322 int index = cmdlist->last - 2 * (i + 1);
324 switch (cmdlist->data[index]) {
325 case RGA_SRC1_RGB_BASE_ADDR | RGA_BUF_TYPE_GEMFD:
326 fd = cmdlist->data[index + 1];
327 attach = rga_gem_buf_to_pages(rga, &mmu_pages, fd);
329 return PTR_ERR(attach);
331 cmdlist->src1_attach = attach;
332 cmdlist->src1_mmu_pages = mmu_pages;
335 case RGA_SRC_Y_RGB_BASE_ADDR | RGA_BUF_TYPE_GEMFD:
336 fd = cmdlist->data[index + 1];
337 attach = rga_gem_buf_to_pages(rga, &mmu_pages, fd);
339 return PTR_ERR(attach);
341 cmdlist->src_attach = attach;
342 cmdlist->src_mmu_pages = mmu_pages;
345 case RGA_DST_Y_RGB_BASE_ADDR | RGA_BUF_TYPE_GEMFD:
346 fd = cmdlist->data[index + 1];
347 attach = rga_gem_buf_to_pages(rga, &mmu_pages, fd);
349 return PTR_ERR(attach);
351 cmdlist->dst_attach = attach;
352 cmdlist->dst_mmu_pages = mmu_pages;
360 static void rga_unmap_cmdlist_gem(struct rockchip_rga *rga,
361 struct rga_cmdlist_node *node)
363 struct dma_buf_attachment *attach;
364 struct dma_buf *dma_buf;
366 attach = node->cmdlist.src_attach;
368 dma_buf = attach->dmabuf;
369 dma_buf_detach(dma_buf, attach);
370 dma_buf_put(dma_buf);
372 node->cmdlist.src_attach = NULL;
374 attach = node->cmdlist.src1_attach;
376 dma_buf = attach->dmabuf;
377 dma_buf_detach(dma_buf, attach);
378 dma_buf_put(dma_buf);
380 node->cmdlist.src1_attach = NULL;
382 attach = node->cmdlist.dst_attach;
384 dma_buf = attach->dmabuf;
385 dma_buf_detach(dma_buf, attach);
386 dma_buf_put(dma_buf);
388 node->cmdlist.dst_attach = NULL;
390 if (node->cmdlist.src_mmu_pages)
391 free_pages((unsigned long)node->cmdlist.src_mmu_pages, 3);
392 node->cmdlist.src_mmu_pages = NULL;
394 if (node->cmdlist.src1_mmu_pages)
395 free_pages((unsigned long)node->cmdlist.src1_mmu_pages, 3);
396 node->cmdlist.src1_mmu_pages = NULL;
398 if (node->cmdlist.dst_mmu_pages)
399 free_pages((unsigned long)node->cmdlist.dst_mmu_pages, 3);
400 node->cmdlist.dst_mmu_pages = NULL;
403 static void rga_cmd_start(struct rockchip_rga *rga,
404 struct rga_runqueue_node *runqueue)
408 ret = pm_runtime_get_sync(rga->dev);
412 rga_write(rga, RGA_SYS_CTRL, 0x00);
414 rga_write(rga, RGA_CMD_BASE, runqueue->cmdlist_pool);
416 rga_write(rga, RGA_SYS_CTRL, 0x22);
418 rga_write(rga, RGA_INT, 0x600);
420 rga_write(rga, RGA_CMD_CTRL, ((runqueue->cmdlist_cnt - 1) << 3) | 0x1);
423 static void rga_free_runqueue_node(struct rockchip_rga *rga,
424 struct rga_runqueue_node *runqueue)
426 struct rga_cmdlist_node *node;
431 if (runqueue->cmdlist_pool_virt && runqueue->cmdlist_pool)
432 dma_free_attrs(rga->dev, runqueue->cmdlist_cnt * RGA_CMDLIST_SIZE,
433 runqueue->cmdlist_pool_virt,
434 runqueue->cmdlist_pool,
435 &runqueue->cmdlist_dma_attrs);
437 mutex_lock(&rga->cmdlist_mutex);
439 * commands in run_cmdlist have been completed so unmap all gem
440 * objects in each command node so that they are unreferenced.
442 list_for_each_entry(node, &runqueue->run_cmdlist, list)
443 rga_unmap_cmdlist_gem(rga, node);
444 list_splice_tail_init(&runqueue->run_cmdlist, &rga->free_cmdlist);
445 mutex_unlock(&rga->cmdlist_mutex);
447 kmem_cache_free(rga->runqueue_slab, runqueue);
450 static struct rga_runqueue_node *rga_get_runqueue(struct rockchip_rga *rga)
452 struct rga_runqueue_node *runqueue;
454 if (list_empty(&rga->runqueue_list))
457 runqueue = list_first_entry(&rga->runqueue_list,
458 struct rga_runqueue_node, list);
459 list_del_init(&runqueue->list);
464 static void rga_exec_runqueue(struct rockchip_rga *rga)
466 rga->runqueue_node = rga_get_runqueue(rga);
467 if (rga->runqueue_node)
468 rga_cmd_start(rga, rga->runqueue_node);
471 static struct rga_cmdlist_node *rga_get_cmdlist(struct rockchip_rga *rga)
473 struct rga_cmdlist_node *node;
474 struct device *dev = rga->dev;
476 mutex_lock(&rga->cmdlist_mutex);
477 if (list_empty(&rga->free_cmdlist)) {
478 dev_err(dev, "there is no free cmdlist\n");
479 mutex_unlock(&rga->cmdlist_mutex);
483 node = list_first_entry(&rga->free_cmdlist,
484 struct rga_cmdlist_node, list);
485 list_del_init(&node->list);
486 mutex_unlock(&rga->cmdlist_mutex);
491 static void rga_put_cmdlist(struct rockchip_rga *rga, struct rga_cmdlist_node *node)
493 mutex_lock(&rga->cmdlist_mutex);
494 list_move_tail(&node->list, &rga->free_cmdlist);
495 mutex_unlock(&rga->cmdlist_mutex);
498 static void rga_add_cmdlist_to_inuse(struct rockchip_drm_rga_private *rga_priv,
499 struct rga_cmdlist_node *node)
501 struct rga_cmdlist_node *lnode;
503 if (list_empty(&rga_priv->inuse_cmdlist))
506 /* this links to base address of new cmdlist */
507 lnode = list_entry(rga_priv->inuse_cmdlist.prev,
508 struct rga_cmdlist_node, list);
511 list_add_tail(&node->list, &rga_priv->inuse_cmdlist);
515 * IOCRL functions for userspace to get RGA version.
517 int rockchip_rga_get_ver_ioctl(struct drm_device *drm_dev, void *data,
518 struct drm_file *file)
520 struct rockchip_drm_file_private *file_priv = file->driver_priv;
521 struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
522 struct drm_rockchip_rga_get_ver *ver = data;
523 struct rockchip_rga *rga;
533 rga = dev_get_drvdata(dev);
537 ver->major = rga->version.major;
538 ver->minor = rga->version.minor;
544 * IOCRL functions for userspace to send an RGA request.
546 int rockchip_rga_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
547 struct drm_file *file)
549 struct rockchip_drm_file_private *file_priv = file->driver_priv;
550 struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
551 struct drm_rockchip_rga_set_cmdlist *req = data;
552 struct rga_cmdlist_node *node;
553 struct rga_cmdlist *cmdlist;
554 struct rockchip_rga *rga;
563 rga = dev_get_drvdata(rga_priv->dev);
567 if (req->cmd_nr > RGA_CMDLIST_SIZE || req->cmd_buf_nr > RGA_CMDBUF_SIZE) {
568 dev_err(rga->dev, "cmdlist size is too big\n");
572 node = rga_get_cmdlist(rga);
576 cmdlist = &node->cmdlist;
580 * Copy the command / buffer registers setting from userspace, each
581 * command have two integer, one for register offset, another for
584 if (copy_from_user(cmdlist->data, rga_compat_ptr(req->cmd),
585 sizeof(struct drm_rockchip_rga_cmd) * req->cmd_nr))
587 cmdlist->last += req->cmd_nr * 2;
589 if (copy_from_user(&cmdlist->data[cmdlist->last],
590 rga_compat_ptr(req->cmd_buf),
591 sizeof(struct drm_rockchip_rga_cmd) * req->cmd_buf_nr))
593 cmdlist->last += req->cmd_buf_nr * 2;
596 * Check the userspace command registers, and mapping the framebuffer,
597 * create the RGA mmu pages or get the framebuffer dma address.
599 ret = rga_check_reg_offset(rga->dev, node);
601 dev_err(rga->dev, "Check reg offset failed\n");
602 goto err_free_cmdlist;
605 ret = rga_map_cmdlist_gem(rga, node, drm_dev, file);
607 dev_err(rga->dev, "Failed to map cmdlist\n");
608 goto err_unmap_cmdlist;
611 rga_add_cmdlist_to_inuse(rga_priv, node);
616 rga_unmap_cmdlist_gem(rga, node);
618 rga_put_cmdlist(rga, node);
624 * IOCRL functions for userspace to start RGA transform.
626 int rockchip_rga_exec_ioctl(struct drm_device *drm_dev, void *data,
627 struct drm_file *file)
629 struct rockchip_drm_file_private *file_priv = file->driver_priv;
630 struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
631 struct rga_runqueue_node *runqueue;
632 struct rockchip_rga *rga;
643 rga = dev_get_drvdata(dev);
647 runqueue = kmem_cache_alloc(rga->runqueue_slab, GFP_KERNEL);
649 dev_err(rga->dev, "failed to allocate memory\n");
653 runqueue->drm_dev = drm_dev;
654 runqueue->dev = rga->dev;
656 init_completion(&runqueue->complete);
658 INIT_LIST_HEAD(&runqueue->run_cmdlist);
660 list_splice_init(&rga_priv->inuse_cmdlist, &runqueue->run_cmdlist);
662 if (list_empty(&runqueue->run_cmdlist)) {
663 dev_err(rga->dev, "there is no inuse cmdlist\n");
664 kmem_cache_free(rga->runqueue_slab, runqueue);
668 ret = rga_alloc_dma_buf_for_cmdlist(runqueue);
670 dev_err(rga->dev, "cmdlist init failed\n");
674 mutex_lock(&rga->runqueue_mutex);
675 runqueue->pid = current->pid;
676 runqueue->file = file;
677 list_add_tail(&runqueue->list, &rga->runqueue_list);
678 if (!rga->runqueue_node)
679 rga_exec_runqueue(rga);
680 mutex_unlock(&rga->runqueue_mutex);
682 wait_for_completion(&runqueue->complete);
683 rga_free_runqueue_node(rga, runqueue);
688 static int rockchip_rga_open(struct drm_device *drm_dev, struct device *dev,
689 struct drm_file *file)
691 struct rockchip_drm_file_private *file_priv = file->driver_priv;
692 struct rockchip_drm_rga_private *rga_priv;
693 struct rockchip_rga *rga;
695 rga = dev_get_drvdata(dev);
696 rga->drm_dev = drm_dev;
698 rga_priv = kzalloc(sizeof(*rga_priv), GFP_KERNEL);
703 file_priv->rga_priv = rga_priv;
705 INIT_LIST_HEAD(&rga_priv->inuse_cmdlist);
710 static void rockchip_rga_close(struct drm_device *drm_dev, struct device *dev,
711 struct drm_file *file)
713 struct rockchip_drm_file_private *file_priv = file->driver_priv;
714 struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
715 struct rga_cmdlist_node *node, *n;
716 struct rockchip_rga *rga;
721 rga = dev_get_drvdata(dev);
725 mutex_lock(&rga->cmdlist_mutex);
726 list_for_each_entry_safe(node, n, &rga_priv->inuse_cmdlist, list) {
728 * unmap all gem objects not completed.
730 * P.S. if current process was terminated forcely then
731 * there may be some commands in inuse_cmdlist so unmap
734 rga_unmap_cmdlist_gem(rga, node);
735 list_move_tail(&node->list, &rga->free_cmdlist);
737 mutex_unlock(&rga->cmdlist_mutex);
739 kfree(file_priv->rga_priv);
742 static void rga_runqueue_worker(struct work_struct *work)
744 struct rockchip_rga *rga = container_of(work, struct rockchip_rga,
747 mutex_lock(&rga->runqueue_mutex);
748 pm_runtime_put_sync(rga->dev);
750 complete(&rga->runqueue_node->complete);
753 rga->runqueue_node = NULL;
755 rga_exec_runqueue(rga);
757 mutex_unlock(&rga->runqueue_mutex);
760 static irqreturn_t rga_irq_handler(int irq, void *dev_id)
762 struct rockchip_rga *rga = dev_id;
765 intr = rga_read(rga, RGA_INT) & 0xf;
767 rga_mod(rga, RGA_INT, intr << 4, 0xf << 4);
770 queue_work(rga->rga_workq, &rga->runqueue_work);
775 static int rga_parse_dt(struct rockchip_rga *rga)
777 struct reset_control *core_rst, *axi_rst, *ahb_rst;
779 core_rst = devm_reset_control_get(rga->dev, "core");
780 if (IS_ERR(core_rst)) {
781 dev_err(rga->dev, "failed to get core reset controller\n");
782 return PTR_ERR(core_rst);
785 axi_rst = devm_reset_control_get(rga->dev, "axi");
786 if (IS_ERR(axi_rst)) {
787 dev_err(rga->dev, "failed to get axi reset controller\n");
788 return PTR_ERR(axi_rst);
791 ahb_rst = devm_reset_control_get(rga->dev, "ahb");
792 if (IS_ERR(ahb_rst)) {
793 dev_err(rga->dev, "failed to get ahb reset controller\n");
794 return PTR_ERR(ahb_rst);
797 reset_control_assert(core_rst);
799 reset_control_deassert(core_rst);
801 reset_control_assert(axi_rst);
803 reset_control_deassert(axi_rst);
805 reset_control_assert(ahb_rst);
807 reset_control_deassert(ahb_rst);
809 rga->sclk = devm_clk_get(rga->dev, "sclk");
810 if (IS_ERR(rga->sclk)) {
811 dev_err(rga->dev, "failed to get sclk clock\n");
812 return PTR_ERR(rga->sclk);
815 rga->aclk = devm_clk_get(rga->dev, "aclk");
816 if (IS_ERR(rga->aclk)) {
817 dev_err(rga->dev, "failed to get aclk clock\n");
818 return PTR_ERR(rga->aclk);
821 rga->hclk = devm_clk_get(rga->dev, "hclk");
822 if (IS_ERR(rga->hclk)) {
823 dev_err(rga->dev, "failed to get hclk clock\n");
824 return PTR_ERR(rga->hclk);
827 return rga_enable_clocks(rga);
830 static const struct of_device_id rockchip_rga_dt_ids[] = {
831 { .compatible = "rockchip,rk3288-rga", },
832 { .compatible = "rockchip,rk3228-rga", },
833 { .compatible = "rockchip,rk3399-rga", },
836 MODULE_DEVICE_TABLE(of, rockchip_rga_dt_ids);
838 static int rga_probe(struct platform_device *pdev)
840 struct drm_rockchip_subdrv *subdrv;
841 struct rockchip_rga *rga;
842 struct resource *iores;
846 if (!pdev->dev.of_node)
849 rga = devm_kzalloc(&pdev->dev, sizeof(*rga), GFP_KERNEL);
853 rga->dev = &pdev->dev;
855 rga->runqueue_slab = kmem_cache_create("rga_runqueue_slab",
856 sizeof(struct rga_runqueue_node),
858 if (!rga->runqueue_slab)
861 rga->rga_workq = create_singlethread_workqueue("rga");
862 if (!rga->rga_workq) {
863 dev_err(rga->dev, "failed to create workqueue\n");
865 goto err_destroy_slab;
868 INIT_WORK(&rga->runqueue_work, rga_runqueue_worker);
869 INIT_LIST_HEAD(&rga->runqueue_list);
870 mutex_init(&rga->runqueue_mutex);
872 INIT_LIST_HEAD(&rga->free_cmdlist);
873 mutex_init(&rga->cmdlist_mutex);
875 rga_init_cmdlist(rga);
877 ret = rga_parse_dt(rga);
879 dev_err(rga->dev, "Unable to parse OF data\n");
880 goto err_destroy_workqueue;
883 pm_runtime_enable(rga->dev);
885 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
887 rga->regs = devm_ioremap_resource(rga->dev, iores);
888 if (IS_ERR(rga->regs)) {
889 ret = PTR_ERR(rga->regs);
893 irq = platform_get_irq(pdev, 0);
895 dev_err(rga->dev, "failed to get irq\n");
900 ret = devm_request_irq(rga->dev, irq, rga_irq_handler, 0,
901 dev_name(rga->dev), rga);
903 dev_err(rga->dev, "failed to request irq\n");
907 platform_set_drvdata(pdev, rga);
909 rga->version.major = (rga_read(rga, RGA_VERSION_INFO) >> 24) & 0xFF;
910 rga->version.minor = (rga_read(rga, RGA_VERSION_INFO) >> 20) & 0x0F;
912 subdrv = &rga->subdrv;
913 subdrv->dev = rga->dev;
914 subdrv->open = rockchip_rga_open;
915 subdrv->close = rockchip_rga_close;
917 rockchip_drm_register_subdrv(subdrv);
922 pm_runtime_disable(rga->dev);
923 err_destroy_workqueue:
924 destroy_workqueue(rga->rga_workq);
926 kmem_cache_destroy(rga->runqueue_slab);
931 static int rga_remove(struct platform_device *pdev)
933 struct rockchip_rga *rga = platform_get_drvdata(pdev);
935 cancel_work_sync(&rga->runqueue_work);
937 while (rga->runqueue_node) {
938 rga_free_runqueue_node(rga, rga->runqueue_node);
939 rga->runqueue_node = rga_get_runqueue(rga);
942 rockchip_drm_unregister_subdrv(&rga->subdrv);
944 pm_runtime_disable(rga->dev);
949 static int rga_suspend(struct device *dev)
951 struct rockchip_rga *rga = dev_get_drvdata(dev);
953 mutex_lock(&rga->runqueue_mutex);
954 rga->suspended = true;
955 mutex_unlock(&rga->runqueue_mutex);
957 flush_work(&rga->runqueue_work);
962 static int rga_resume(struct device *dev)
964 struct rockchip_rga *rga = dev_get_drvdata(dev);
966 rga->suspended = false;
967 rga_exec_runqueue(rga);
/* Runtime-PM suspend: gate the RGA clocks. */
static int rga_runtime_suspend(struct device *dev)
{
	struct rockchip_rga *rga = dev_get_drvdata(dev);

	rga_disable_clocks(rga);

	return 0;
}
/* Runtime-PM resume: re-enable the RGA clocks. */
static int rga_runtime_resume(struct device *dev)
{
	struct rockchip_rga *rga = dev_get_drvdata(dev);

	return rga_enable_clocks(rga);
}
990 static const struct dev_pm_ops rga_pm = {
991 SET_SYSTEM_SLEEP_PM_OPS(rga_suspend, rga_resume)
992 SET_RUNTIME_PM_OPS(rga_runtime_suspend,
993 rga_runtime_resume, NULL)
996 static struct platform_driver rga_pltfm_driver = {
998 .remove = rga_remove,
1000 .name = "rockchip-rga",
1002 .of_match_table = rockchip_rga_dt_ids,
1006 module_platform_driver(rga_pltfm_driver);
1008 MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
1009 MODULE_DESCRIPTION("Rockchip RGA Driver Extension");
1010 MODULE_LICENSE("GPL");
1011 MODULE_ALIAS("platform:rockchip-rga");