#include <asm/io.h>\r
#include <linux/irq.h>\r
#include <linux/interrupt.h>\r
-#include <mach/io.h>\r
-#include <mach/irqs.h>\r
#include <linux/fs.h>\r
#include <asm/uaccess.h>\r
#include <linux/miscdevice.h>\r
#include <linux/slab.h>\r
#include <linux/fb.h>\r
#include <linux/wakelock.h>\r
+#include <linux/scatterlist.h>\r
+\r
+\r
+#if defined(CONFIG_ION_ROCKCHIP)\r
+#include <linux/rockchip_ion.h>\r
+#endif\r
\r
#include "rga2.h"\r
#include "rga2_reg_info.h"\r
#include "rga2_mmu_info.h"\r
#include "RGA2_API.h"\r
\r
+/* RGA IOMMU support needs both the Rockchip IOMMU and the ION allocator. */
+#if defined(CONFIG_ROCKCHIP_IOMMU) && defined(CONFIG_ION_ROCKCHIP)
+#define CONFIG_RGA_IOMMU
+#endif
\r
#define RGA2_TEST_FLUSH_TIME 0\r
#define RGA2_INFO_BUS_ERROR 1\r
\r
#define RGA2_MAJOR 255\r
\r
-#if defined(CONFIG_ROCKCHIP_RGA2)\r
-#define RK32_RGA2_PHYS 0xFFC70000\r
-#define RK32_RGA2_SIZE 0x00001000\r
-#endif\r
#define RGA2_RESET_TIMEOUT 1000\r
\r
/* Driver information */\r
\r
#define RGA2_VERSION "2.000"\r
\r
-ktime_t rga_start;\r
-ktime_t rga_end;\r
+ktime_t rga2_start;\r
+ktime_t rga2_end;\r
\r
int rga2_flag = 0;\r
\r
+extern long (*rga_ioctl_kernel_p)(struct rga_req *);\r
+\r
rga2_session rga2_session_global;\r
\r
struct rga2_drvdata_t {\r
struct clk *hclk_rga2;\r
struct clk *pd_rga2;\r
struct clk *rga2;\r
+\r
+ #if defined(CONFIG_ION_ROCKCHIP)\r
+ struct ion_client * ion_client;\r
+ #endif\r
};\r
\r
struct rga2_drvdata_t *rga2_drvdata;\r
struct rga2_service_info rga2_service;\r
struct rga2_mmu_buf_t rga2_mmu_buf;\r
\r
+#if defined(CONFIG_ION_ROCKCHIP)\r
+extern struct ion_client *rockchip_ion_client_create(const char * name);\r
+#endif\r
+\r
static int rga2_blit_async(rga2_session *session, struct rga2_req *req);\r
static void rga2_del_running_list(void);\r
static void rga2_del_running_list_timeout(void);\r
{\r
printk("render_mode=%d bitblt_mode=%d rotate_mode=%.8x\n",\r
req->render_mode, req->bitblt_mode, req->rotate_mode);\r
- printk("src : y=%.8x uv=%.8x v=%.8x format=%d aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d \n",
+ printk("src : y=%llx uv=%llx v=%llx format=%d aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d \n",
req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr, req->src.format,\r
req->src.act_w, req->src.act_h, req->src.vir_w, req->src.vir_h,\r
req->src.x_offset, req->src.y_offset);\r
- printk("dst : y=%.8x uv=%.8x v=%.8x format=%d aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d \n",\r
+ printk("dst : y=%llx uv=%llx v=%llx format=%d aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d \n",\r
req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr, req->dst.format,\r
req->dst.act_w, req->dst.act_h, req->dst.vir_w, req->dst.vir_h,\r
req->dst.x_offset, req->dst.y_offset);\r
\r
static inline void rga2_write(u32 b, u32 r)
{
- __raw_writel(b, rga2_drvdata->rga_base + r);
+ //__raw_writel(b, rga2_drvdata->rga_base + r);
+
+ /* NOTE(review): a plain volatile store, like __raw_writel(), carries no
+  * memory barrier and no endian conversion — confirm relaxed, native-endian
+  * MMIO ordering is acceptable here (writel() would provide both). */
+ *((volatile unsigned int *)(rga2_drvdata->rga_base + r)) = b;
}
\r
static inline u32 rga2_read(u32 r)
{
- return __raw_readl(rga2_drvdata->rga_base + r);
+ //return __raw_readl(rga2_drvdata->rga_base + r);
+
+ /* NOTE(review): mirrors rga2_write() — a raw volatile load without the
+  * barrier/endian handling of readl(); verify this is intentional. */
+ return *((volatile unsigned int *)(rga2_drvdata->rga_base + r));
}
\r
static void rga2_soft_reset(void)\r
u32 i;\r
u32 reg;\r
\r
- rga2_write(1, RGA2_SYS_CTRL); //RGA_SYS_CTRL\r
+ rga2_write((1 << 3) | (1 << 4), RGA2_SYS_CTRL); //RGA_SYS_CTRL\r
\r
for(i = 0; i < RGA2_RESET_TIMEOUT; i++)\r
{\r
running = atomic_read(&rga2_service.total_running);\r
printk("rga total_running %d\n", running);\r
\r
- #if 0\r
-\r
- /* Dump waiting list info */\r
- if (!list_empty(&rga_service.waiting))\r
- {\r
- list_head *next;\r
-\r
- next = &rga_service.waiting;\r
-\r
- printk("rga_service dump waiting list\n");\r
-\r
- do\r
- {\r
- reg = list_entry(next->next, struct rga_reg, status_link);\r
- running = atomic_read(®->session->task_running);\r
- num_done = atomic_read(®->session->num_done);\r
- printk("rga session pid %d, done %d, running %d\n", reg->session->pid, num_done, running);\r
- next = next->next;\r
- }\r
- while(!list_empty(next));\r
- }\r
-\r
- /* Dump running list info */\r
- if (!list_empty(&rga_service.running))\r
- {\r
- printk("rga_service dump running list\n");\r
-\r
- list_head *next;\r
-\r
- next = &rga_service.running;\r
- do\r
- {\r
- reg = list_entry(next->next, struct rga_reg, status_link);\r
- running = atomic_read(®->session->task_running);\r
- num_done = atomic_read(®->session->num_done);\r
- printk("rga session pid %d, done %d, running %d:\n", reg->session->pid, num_done, running);\r
- next = next->next;\r
- }\r
- while(!list_empty(next));\r
- }\r
- #endif\r
-\r
list_for_each_entry_safe(session, session_tmp, &rga2_service.session, list_session)\r
{\r
printk("session pid %d:\n", session->pid);\r
printk("task_running %d\n", running);\r
list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link)
{
- printk("waiting register set 0x%.8x\n", (unsigned int)reg);
+ printk("waiting register set 0x%lx\n", (unsigned long)reg);
}
list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link)
{
- printk("running register set 0x%.8x\n", (unsigned int)reg);
+ printk("running register set 0x%lx\n", (unsigned long)reg);
}
}\r
}\r
if (rga2_service.enable)\r
return;\r
\r
- clk_enable(rga2_drvdata->rga2);\r
- clk_enable(rga2_drvdata->aclk_rga2);\r
- clk_enable(rga2_drvdata->hclk_rga2);\r
- clk_enable(rga2_drvdata->pd_rga2);\r
+ clk_prepare_enable(rga2_drvdata->rga2);\r
+ clk_prepare_enable(rga2_drvdata->aclk_rga2);\r
+ clk_prepare_enable(rga2_drvdata->hclk_rga2);\r
+ clk_prepare_enable(rga2_drvdata->pd_rga2);\r
wake_lock(&rga2_drvdata->wake_lock);\r
rga2_service.enable = true;\r
}\r
rga2_dump();\r
}\r
\r
- clk_disable(rga2_drvdata->pd_rga2);\r
- clk_disable(rga2_drvdata->rga2);\r
- clk_disable(rga2_drvdata->aclk_rga2);\r
- clk_disable(rga2_drvdata->hclk_rga2);\r
+ clk_disable_unprepare(rga2_drvdata->rga2);\r
+ clk_disable_unprepare(rga2_drvdata->pd_rga2);\r
+ clk_disable_unprepare(rga2_drvdata->aclk_rga2);\r
+ clk_disable_unprepare(rga2_drvdata->hclk_rga2);\r
wake_unlock(&rga2_drvdata->wake_lock);\r
rga2_service.enable = false;\r
}\r
uint32_t *reg_p;\r
\r
if(atomic_read(®->session->task_running) != 0)\r
- {\r
printk(KERN_ERR "task_running is no zero\n");\r
- }\r
\r
atomic_add(1, &rga2_service.cmd_num);\r
atomic_add(1, ®->session->task_running);\r
reg_p = (uint32_t *)reg->cmd_reg;\r
\r
for(i=0; i<32; i++)\r
- {\r
cmd_buf[i] = reg_p[i];\r
- }\r
-\r
- dsb();\r
}\r
\r
\r
static struct rga2_reg * rga2_reg_init(rga2_session *session, struct rga2_req *req)\r
{\r
- uint32_t ret;\r
+ int32_t ret;\r
struct rga2_reg *reg = kzalloc(sizeof(struct rga2_reg), GFP_KERNEL);\r
if (NULL == reg) {\r
pr_err("kmalloc fail in rga_reg_init\n");\r
|| (req->mmu_info.dst_mmu_flag & 1) || (req->mmu_info.els_mmu_flag & 1))\r
{\r
ret = rga2_set_mmu_info(reg, req);\r
- if(ret < 0)\r
- {\r
+ if(ret < 0) {\r
printk("%s, [%d] set mmu info error \n", __FUNCTION__, __LINE__);\r
if(reg != NULL)\r
- {\r
kfree(reg);\r
- }\r
+\r
return NULL;\r
}\r
}\r
\r
- if(RGA2_gen_reg_info((uint8_t *)reg->cmd_reg, req) == -1)\r
- {\r
+ if(RGA2_gen_reg_info((uint8_t *)reg->cmd_reg, req) == -1) {\r
printk("gen reg info error\n");\r
if(reg != NULL)\r
- {\r
kfree(reg);\r
- }\r
+\r
return NULL;\r
}\r
\r
rga2_copy_reg(reg, 0);\r
rga2_reg_from_wait_to_run(reg);\r
\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(&rga2_service.cmd_buff[0], &rga2_service.cmd_buff[32]);\r
outer_flush_range(virt_to_phys(&rga2_service.cmd_buff[0]),virt_to_phys(&rga2_service.cmd_buff[32]));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(&rga2_service.cmd_buff[0], &rga2_service.cmd_buff[32]);\r
+ #endif\r
\r
- #if defined(CONFIG_ARCH_RK30)\r
rga2_soft_reset();\r
- #endif\r
\r
rga2_write(0x0, RGA2_SYS_CTRL);\r
- //rga2_write(0, RGA_MMU_CTRL);\r
\r
/* CMD buff */\r
rga2_write(virt_to_phys(rga2_service.cmd_buff), RGA2_CMD_BASE);\r
\r
#if RGA2_TEST\r
- if(rga2_flag)\r
- {\r
- //printk(KERN_DEBUG "cmd_addr = %.8x\n", rga_read(RGA_CMD_ADDR));\r
+ if(rga2_flag) {\r
uint32_t i, *p;\r
p = rga2_service.cmd_buff;\r
printk("CMD_REG\n");\r
rga2_write(rga2_read(RGA2_INT)|(0x1<<10)|(0x1<<8), RGA2_INT);\r
\r
#if RGA2_TEST_TIME\r
- rga_start = ktime_get();\r
+ rga2_start = ktime_get();\r
#endif\r
\r
/* Start proc */\r
atomic_sub(1, ®->session->task_running);\r
atomic_sub(1, &rga2_service.total_running);\r
\r
- //printk("RGA soft reset for timeout process\n");\r
rga2_soft_reset();\r
\r
-\r
- #if 0\r
- printk("RGA_INT is %.8x\n", rga_read(RGA_INT));\r
- printk("reg->session->task_running = %d\n", atomic_read(®->session->task_running));\r
- printk("rga_service.total_running = %d\n", atomic_read(&rga_service.total_running));\r
-\r
- print_info(®->req);\r
-\r
- {\r
- uint32_t *p, i;\r
- p = reg->cmd_reg;\r
- for (i=0; i<7; i++)\r
- printk("%.8x %.8x %.8x %.8x\n", p[0 + i*4], p[1+i*4], p[2 + i*4], p[3 + i*4]);\r
-\r
- }\r
- #endif\r
-\r
if(list_empty(®->session->waiting))\r
{\r
atomic_set(®->session->done, 1);\r
}\r
\r
\r
-static void rga2_mem_addr_sel(struct rga2_req *req)
+/*
+ * Resolve the ion/dma-buf references carried in req into addresses the
+ * hardware can use: with the corresponding MMU flag set an sg_table is
+ * fetched for page-table setup, otherwise the buffer's physical address is
+ * queried directly.  Returns 0 on success, a negative ion error otherwise.
+ *
+ * NOTE(review): when yrgb_addr is non-zero it appears to hold an ion fd and
+ * uv_addr the real base/offset (yrgb_addr is overwritten from uv_addr in the
+ * MMU path) — TODO confirm against the userspace contract.
+ */
+static int rga2_convert_dma_buf(struct rga2_req *req)
{
+ struct ion_handle *hdl;
+ ion_phys_addr_t phy_addr;
+ size_t len;
+ int ret;
+
+ req->sg_src0 = NULL;
+ req->sg_src1 = NULL;
+ req->sg_dst = NULL;
+ req->sg_els = NULL;
+
+ if(req->src.yrgb_addr) {
+ hdl = ion_import_dma_buf(rga2_drvdata->ion_client, req->src.yrgb_addr);
+ if (IS_ERR(hdl)) {
+ ret = PTR_ERR(hdl);
+ printk("RGA2 ERROR ion buf handle\n");
+ return ret;
+ }
+ if (req->mmu_info.src0_mmu_flag) {
+ req->sg_src0 = ion_sg_table(rga2_drvdata->ion_client, hdl);
+ req->src.yrgb_addr = req->src.uv_addr;
+ req->src.uv_addr = req->src.yrgb_addr + (req->src.vir_w * req->src.vir_h);
+ req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h)/4;
+ }
+ else {
+ ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);
+ req->src.yrgb_addr = phy_addr;
+ req->src.uv_addr = req->src.yrgb_addr + (req->src.vir_w * req->src.vir_h);
+ req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h)/4;
+ }
+ /* NOTE(review): the handle is freed here while the sg_table / physical
+  * address obtained from it are used later — this relies on the dma-buf
+  * fd keeping the buffer alive for the job's duration; verify. */
+ ion_free(rga2_drvdata->ion_client, hdl);
+ }
+ else {
+ req->src.yrgb_addr = req->src.uv_addr;
+ req->src.uv_addr = req->src.yrgb_addr + (req->src.vir_w * req->src.vir_h);
+ req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h)/4;
+ }
+
+ if(req->dst.yrgb_addr) {
+ hdl = ion_import_dma_buf(rga2_drvdata->ion_client, req->dst.yrgb_addr);
+ if (IS_ERR(hdl)) {
+ ret = PTR_ERR(hdl);
+ printk("RGA2 ERROR ion buf handle\n");
+ return ret;
+ }
+ if (req->mmu_info.dst_mmu_flag) {
+ req->sg_dst = ion_sg_table(rga2_drvdata->ion_client, hdl);
+ req->dst.yrgb_addr = req->dst.uv_addr;
+ req->dst.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);
+ req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;
+ }
+ else {
+ ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);
+ req->dst.yrgb_addr = phy_addr;
+ req->dst.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);
+ req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;
+ }
+ ion_free(rga2_drvdata->ion_client, hdl);
+ }
+ else {
+ req->dst.yrgb_addr = req->dst.uv_addr;
+ req->dst.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);
+ req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;
+ }
+
+ if(req->src1.yrgb_addr) {
+ hdl = ion_import_dma_buf(rga2_drvdata->ion_client, req->src1.yrgb_addr);
+ if (IS_ERR(hdl)) {
+ ret = PTR_ERR(hdl);
+ printk("RGA2 ERROR ion buf handle\n");
+ return ret;
+ }
+ /* NOTE(review): this branch tests dst_mmu_flag (not a src1 flag) and
+  * derives src1 uv/v addresses from the dst buffer's dimensions and
+  * addresses — looks like copy-paste from the dst branch; confirm
+  * whether src1 genuinely shares the dst layout before changing it. */
+ if (req->mmu_info.dst_mmu_flag) {
+ req->sg_src1 = ion_sg_table(rga2_drvdata->ion_client, hdl);
+ req->src1.yrgb_addr = 0;
+ req->src1.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);
+ req->src1.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;
+ }
+ else {
+ ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);
+ req->src1.yrgb_addr = phy_addr;
+ req->src1.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);
+ req->src1.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;
+ }
+ ion_free(rga2_drvdata->ion_client, hdl);
+ }
+ else {
+ req->src1.yrgb_addr = req->dst.uv_addr;
+ req->src1.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);
+ req->src1.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;
+ }
+
+ return 0;
}
\r
\r
int num = 0;\r
struct rga2_reg *reg;\r
\r
- do\r
- {\r
+ if(rga2_convert_dma_buf(req)) {\r
+ printk("RGA2 : DMA buf copy error\n");\r
+ return -EFAULT;\r
+ }\r
+\r
+ do {\r
/* check value if legal */\r
ret = rga2_check_param(req);\r
if(ret == -EINVAL) {\r
int ret = -1;\r
\r
#if RGA2_TEST_MSG\r
- //printk("*** rga_blit_async proc ***\n");\r
- if (req->src.format >= 0x10) {\r
+ if (1) {//req->src.format >= 0x10) {\r
print_info(req);\r
rga2_flag = 1;\r
printk("*** rga_blit_async proc ***\n");\r
int ret_timeout = 0;\r
\r
#if RGA2_TEST_MSG\r
- if (req->src.format >= 0x10) {\r
+ if (1) {//req->bitblt_mode == 0x2) {\r
print_info(req);\r
rga2_flag = 1;\r
printk("*** rga2_blit_sync proc ***\n");\r
\r
ret = rga2_blit(session, req);\r
if(ret < 0)\r
- {\r
return ret;\r
- }\r
\r
ret_timeout = wait_event_timeout(session->wait, atomic_read(&session->done), RGA2_TIMEOUT_DELAY);\r
\r
}\r
\r
#if RGA2_TEST_TIME\r
- rga_end = ktime_get();\r
- rga_end = ktime_sub(rga_end, rga_start);\r
- printk("sync one cmd end time %d\n", (int)ktime_to_us(rga_end));\r
+ rga2_end = ktime_get();\r
+ rga2_end = ktime_sub(rga2_end, rga2_start);\r
+ printk("sync one cmd end time %d\n", (int)ktime_to_us(rga2_end));\r
#endif\r
\r
return ret;\r
int ret = 0;\r
rga2_session *session;\r
\r
+ memset(&req, 0x0, sizeof(req));\r
+\r
mutex_lock(&rga2_service.mutex);\r
\r
session = (rga2_session *)file->private_data;\r
switch (cmd)\r
{\r
case RGA_BLIT_SYNC:\r
-\r
if (unlikely(copy_from_user(&req_rga, (struct rga_req*)arg, sizeof(struct rga_req))))\r
{\r
ERR("copy_from_user failed\n");\r
return ret;\r
}\r
\r
+/*
+ * 32-bit-compat ioctl entry: legacy RGA_* commands arrive as struct
+ * rga_req_32 and are converted to struct rga2_req; native RGA2_* commands
+ * are copied directly.  User pointers are translated with compat_ptr().
+ */
+static long compat_rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
+{
+ struct rga2_req req;
+ struct rga_req_32 req_rga;
+ int ret = 0;
+ rga2_session *session;

-long rga_ioctl_kernel(struct rga_req *req)
+ memset(&req, 0x0, sizeof(req));
+
+ mutex_lock(&rga2_service.mutex);
+
+ session = (rga2_session *)file->private_data;
+
+ #if RGA2_TEST_MSG
+ printk("use compat_rga_ioctl\n");
+ #endif
+
+ if (NULL == session) {
+ printk("%s [%d] rga thread session is null\n",__FUNCTION__,__LINE__);
+ mutex_unlock(&rga2_service.mutex);
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case RGA_BLIT_SYNC:
+ if (unlikely(copy_from_user(&req_rga, compat_ptr((compat_uptr_t)arg), sizeof(struct rga_req_32))))
+ {
+ ERR("copy_from_user failed\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ RGA_MSG_2_RGA2_MSG_32(&req_rga, &req);
+
+ ret = rga2_blit_sync(session, &req);
+ break;
+ case RGA_BLIT_ASYNC:
+ if (unlikely(copy_from_user(&req_rga, compat_ptr((compat_uptr_t)arg), sizeof(struct rga_req_32))))
+ {
+ ERR("copy_from_user failed\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ RGA_MSG_2_RGA2_MSG_32(&req_rga, &req);
+
+ /* fall back to sync when the queue is already deep */
+ if((atomic_read(&rga2_service.total_running) > 8))
+ ret = rga2_blit_sync(session, &req);
+ else
+ ret = rga2_blit_async(session, &req);
+
+ break;
+ case RGA2_BLIT_SYNC:
+ if (unlikely(copy_from_user(&req, compat_ptr((compat_uptr_t)arg), sizeof(struct rga2_req))))
+ {
+ ERR("copy_from_user failed\n");
+ ret = -EFAULT;
+ break;
+ }
+ ret = rga2_blit_sync(session, &req);
+ break;
+ case RGA2_BLIT_ASYNC:
+ if (unlikely(copy_from_user(&req, compat_ptr((compat_uptr_t)arg), sizeof(struct rga2_req))))
+ {
+ ERR("copy_from_user failed\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ if((atomic_read(&rga2_service.total_running) > 16))
+ ret = rga2_blit_sync(session, &req);
+ else
+ ret = rga2_blit_async(session, &req);
+
+ break;
+ case RGA_FLUSH:
+ case RGA2_FLUSH:
+ ret = rga2_flush(session, arg);
+ break;
+ case RGA_GET_RESULT:
+ case RGA2_GET_RESULT:
+ ret = rga2_get_result(session, arg);
+ break;
+ case RGA_GET_VERSION:
+ case RGA2_GET_VERSION:
+ /* report -EFAULT instead of returning copy_to_user()'s byte count */
+ if (copy_to_user(compat_ptr((compat_uptr_t)arg), RGA2_VERSION, sizeof(RGA2_VERSION)))
+ ret = -EFAULT;
+ break;
+ default:
+ ERR("unknown ioctl cmd!\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&rga2_service.mutex);
+
+ return ret;
+}
+\r
+\r
+\r
+long rga2_ioctl_kernel(struct rga_req *req_rga)\r
{\r
int ret = 0;\r
rga2_session *session;\r
- struct rga2_req req_rga2;\r
+ struct rga2_req req;\r
+\r
+ memset(&req, 0x0, sizeof(req));\r
\r
mutex_lock(&rga2_service.mutex);\r
\r
return -EINVAL;\r
}\r
\r
- RGA_MSG_2_RGA2_MSG(req, &req_rga2);\r
- ret = rga2_blit_sync(session, &req_rga2);\r
+ RGA_MSG_2_RGA2_MSG(req_rga, &req);\r
+ ret = rga2_blit_sync(session, &req);\r
\r
mutex_unlock(&rga2_service.mutex);\r
\r
.open = rga2_open,\r
.release = rga2_release,\r
.unlocked_ioctl = rga_ioctl,\r
+ .compat_ioctl = compat_rga_ioctl,\r
};\r
\r
static struct miscdevice rga2_dev ={\r
.fops = &rga2_fops,\r
};\r
\r
-static int __devinit rga2_drv_probe(struct platform_device *pdev)\r
+/* Devicetree match table — probe binds via "rockchip,rk3368-rga2".
+ * NOTE(review): no MODULE_DEVICE_TABLE(of, rockchip_rga_dt_ids) is visible
+ * in this hunk; confirm it exists if module autoloading is required. */
+static const struct of_device_id rockchip_rga_dt_ids[] = {
+ { .compatible = "rockchip,rk3368-rga2", },
+ {},
+};
+\r
+static int rga2_drv_probe(struct platform_device *pdev)\r
{\r
struct rga2_drvdata_t *data;\r
+ struct resource *res;\r
int ret = 0;\r
\r
- INIT_LIST_HEAD(&rga2_service.waiting);\r
- INIT_LIST_HEAD(&rga2_service.running);\r
- INIT_LIST_HEAD(&rga2_service.done);\r
- INIT_LIST_HEAD(&rga2_service.session);\r
mutex_init(&rga2_service.lock);\r
mutex_init(&rga2_service.mutex);\r
atomic_set(&rga2_service.total_running, 0);\r
rga2_service.last_prc_src_format = 1; /* default is yuv first*/\r
rga2_service.enable = false;\r
\r
- data = kzalloc(sizeof(struct rga2_drvdata_t), GFP_KERNEL);\r
+ rga_ioctl_kernel_p = rga2_ioctl_kernel;\r
+\r
+ data = devm_kzalloc(&pdev->dev, sizeof(struct rga2_drvdata_t), GFP_KERNEL);\r
if(NULL == data)\r
{\r
ERR("failed to allocate driver data.\n");\r
INIT_DELAYED_WORK(&data->power_off_work, rga2_power_off_work);\r
wake_lock_init(&data->wake_lock, WAKE_LOCK_SUSPEND, "rga");\r
\r
- data->pd_rga2 = clk_get(NULL, "pd_rga");\r
- data->rga2 = clk_get(NULL, "rga");\r
- data->aclk_rga2 = clk_get(NULL, "aclk_rga");\r
- data->hclk_rga2 = clk_get(NULL, "hclk_rga");\r
-\r
- /* map the memory */\r
- if (!request_mem_region(RK32_RGA2_PHYS, RK32_RGA2_SIZE, "rga_io"))\r
- {\r
- pr_info("failed to reserve rga HW regs\n");\r
- return -EBUSY;\r
- }\r
+ //data->pd_rga2 = clk_get(NULL, "pd_rga");\r
+ data->rga2 = devm_clk_get(&pdev->dev, "clk_rga");\r
+ data->pd_rga2 = devm_clk_get(&pdev->dev, "pd_rga");\r
+ data->aclk_rga2 = devm_clk_get(&pdev->dev, "aclk_rga");\r
+ data->hclk_rga2 = devm_clk_get(&pdev->dev, "hclk_rga");\r
\r
- data->rga_base = (void*)ioremap_nocache(RK32_RGA2_PHYS, RK32_RGA2_SIZE);\r
- if (data->rga_base == NULL)\r
- {\r
+ /* map the registers */\r
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);\r
+ data->rga_base = devm_ioremap_resource(&pdev->dev, res);\r
+ /* devm_ioremap_resource() returns ERR_PTR() on failure, never NULL */
+ if (IS_ERR(data->rga_base)) {
ERR("rga ioremap failed\n");\r
ret = -ENOENT;\r
goto err_ioremap;\r
\r
/* get the IRQ */\r
data->irq = platform_get_irq(pdev, 0);\r
- if (data->irq <= 0)\r
- {\r
+ if (data->irq <= 0) {\r
ERR("failed to get rga irq resource (%d).\n", data->irq);\r
ret = data->irq;\r
goto err_irq;\r
}\r
\r
/* request the IRQ */\r
- ret = request_threaded_irq(data->irq, rga2_irq, rga2_irq_thread, 0, "rga", pdev);\r
+ ret = devm_request_threaded_irq(&pdev->dev, data->irq, rga2_irq, rga2_irq_thread, 0, "rga", pdev);\r
if (ret)\r
{\r
ERR("rga request_irq failed (%d).\n", ret);\r
platform_set_drvdata(pdev, data);\r
rga2_drvdata = data;\r
\r
+ #if defined(CONFIG_ION_ROCKCHIP)\r
+ data->ion_client = rockchip_ion_client_create("rga");\r
+ if (IS_ERR(data->ion_client)) {\r
+ dev_err(&pdev->dev, "failed to create ion client for rga");\r
+ return PTR_ERR(data->ion_client);\r
+ } else {\r
+ dev_info(&pdev->dev, "rga ion client create success!\n");\r
+ }\r
+ #endif\r
+\r
ret = misc_register(&rga2_dev);\r
if(ret)\r
{\r
iounmap(data->rga_base);\r
err_ioremap:\r
wake_lock_destroy(&data->wake_lock);\r
- kfree(data);\r
+ //kfree(data);\r
\r
return ret;\r
}\r
free_irq(data->irq, &data->miscdev);\r
iounmap((void __iomem *)(data->rga_base));\r
\r
- clk_put(data->pd_rga2);\r
- clk_put(data->rga2);\r
- clk_put(data->aclk_rga2);\r
- clk_put(data->hclk_rga2);\r
-\r
+ //clk_put(data->pd_rga2);\r
+ devm_clk_put(&pdev->dev, data->rga2);\r
+ devm_clk_put(&pdev->dev, data->pd_rga2);\r
+ devm_clk_put(&pdev->dev, data->aclk_rga2);\r
+ devm_clk_put(&pdev->dev, data->hclk_rga2);\r
\r
kfree(data);\r
return 0;\r
\r
+/* Platform driver: binds through the DT table above; name changed to "rga2"
+ * so the legacy platform-bus "rga" device no longer matches. */
static struct platform_driver rga2_driver = {
.probe = rga2_drv_probe,
- .remove = __devexit_p(rga2_drv_remove),
+ .remove = rga2_drv_remove,
.driver = {
.owner = THIS_MODULE,
- .name = "rga",
+ .name = "rga2",
+ .of_match_table = of_match_ptr(rockchip_rga_dt_ids),
},
};
\r
/* malloc pre scale mid buf mmu table */\r
buf_p = kmalloc(1024*256, GFP_KERNEL);\r
rga2_mmu_buf.buf_virtual = buf_p;\r
- rga2_mmu_buf.buf = (uint32_t *)virt_to_phys((void *)((uint32_t)buf_p));\r
+ rga2_mmu_buf.buf = (uint32_t *)virt_to_phys((void *)((unsigned long)buf_p));\r
rga2_mmu_buf.front = 0;\r
rga2_mmu_buf.back = 64*1024;\r
rga2_mmu_buf.size = 64*1024;\r
\r
+ rga2_mmu_buf.pages = kmalloc(32768 * sizeof(struct page *), GFP_KERNEL);\r
+\r
if ((ret = platform_driver_register(&rga2_driver)) != 0)\r
{\r
printk(KERN_ERR "Platform device register failed (%d).\n", ret);\r
INIT_LIST_HEAD(&rga2_session_global.waiting);\r
INIT_LIST_HEAD(&rga2_session_global.running);\r
INIT_LIST_HEAD(&rga2_session_global.list_session);\r
+\r
+ INIT_LIST_HEAD(&rga2_service.waiting);\r
+ INIT_LIST_HEAD(&rga2_service.running);\r
+ INIT_LIST_HEAD(&rga2_service.done);\r
+ INIT_LIST_HEAD(&rga2_service.session);\r
init_waitqueue_head(&rga2_session_global.wait);\r
//mutex_lock(&rga_service.lock);\r
list_add_tail(&rga2_session_global.list_session, &rga2_service.session);\r
\r
#if RGA2_TEST_CASE\r
\r
-extern struct fb_info * rk_get_fb(int fb_id);\r
-EXPORT_SYMBOL(rk_get_fb);\r
-\r
-extern void rk_direct_fb_show(struct fb_info * fbi);\r
-EXPORT_SYMBOL(rk_direct_fb_show);\r
-\r
-//unsigned int src_buf[1920*1080];\r
-//unsigned int dst_buf[1920*1080];\r
-//unsigned int tmp_buf[1920*1080 * 2];\r
-\r
void rga2_test_0(void)\r
{\r
struct rga2_req req;\r
atomic_set(&session.num_done, 0);\r
//file->private_data = (void *)session;\r
\r
- fb = rk_get_fb(0);\r
+ //fb = rk_get_fb(0);\r
\r
memset(&req, 0, sizeof(struct rga2_req));\r
src = kmalloc(800*480*4, GFP_KERNEL);\r
dst = kmalloc(800*480*4, GFP_KERNEL);\r
\r
+ printk("\n********************************\n");\r
+ printk("************ RGA2_TEST ************\n");\r
+ printk("********************************\n\n");\r
+\r
memset(src, 0x80, 800*480*4);\r
+ memset(dst, 0x0, 800*480*4);\r
\r
- dmac_flush_range(&src, &src[800*480*4]);\r
- outer_flush_range(virt_to_phys(&src),virt_to_phys(&src[800*480*4]));\r
+ //dmac_flush_range(&src, &src[800*480*4]);\r
+ //outer_flush_range(virt_to_phys(&src),virt_to_phys(&src[800*480*4]));\r
\r
\r
#if 0\r
\r
i = j = 0;\r
\r
- printk("\n********************************\n");\r
- printk("************ RGA2_TEST ************\n");\r
- printk("********************************\n\n");\r
+\r
+\r
+ #if 0\r
+ req.pat.act_w = 16;\r
+ req.pat.act_h = 16;\r
+ req.pat.vir_w = 16;\r
+ req.pat.vir_h = 16;\r
+ req.pat.yrgb_addr = virt_to_phys(src);\r
+ req.render_mode = 0;\r
+ rga2_blit_sync(&session, &req);\r
+ #endif\r
\r
req.src.act_w = 320;\r
req.src.act_h = 240;\r
\r
req.src.vir_w = 320;\r
req.src.vir_h = 240;\r
- req.src.yrgb_addr = (uint32_t)virt_to_phys(src);\r
- req.src.uv_addr = (uint32_t)(req.src.yrgb_addr + 800*480);\r
- req.src.v_addr = (uint32_t)virt_to_phys(src);\r
+ req.src.yrgb_addr = 0;//(uint32_t)virt_to_phys(src);\r
+ req.src.uv_addr = (unsigned long)virt_to_phys(src);\r
+ req.src.v_addr = 0;\r
req.src.format = RGA2_FORMAT_RGBA_8888;\r
\r
req.dst.act_w = 320;\r
req.dst.vir_w = 320;\r
req.dst.vir_h = 240;\r
\r
- req.dst.yrgb_addr = ((uint32_t)virt_to_phys(dst));\r
+ req.dst.yrgb_addr = 0;//((uint32_t)virt_to_phys(dst));\r
+ req.dst.uv_addr = (unsigned long)virt_to_phys(dst);\r
req.dst.format = RGA2_FORMAT_RGBA_8888;\r
\r
//dst = dst0;\r
//req.render_mode = color_fill_mode;\r
//req.fg_color = 0x80ffffff;\r
\r
- req.rotate_mode = 1;\r
+ req.rotate_mode = 0;\r
req.scale_bicu_mode = 2;\r
\r
//req.alpha_rop_flag = 0;\r
//req.alpha_rop_mode = 0x19;\r
//req.PD_mode = 3;\r
\r
- req.rotate_mode = 0;\r
-\r
//req.mmu_info.mmu_flag = 0x21;\r
//req.mmu_info.mmu_en = 1;\r
\r
\r
rga2_blit_sync(&session, &req);\r
\r
- #if 0\r
- fb->var.bits_per_pixel = 32;\r
-\r
- fb->var.xres = 1280;\r
- fb->var.yres = 800;\r
-\r
- fb->var.red.length = 8;\r
- fb->var.red.offset = 0;\r
- fb->var.red.msb_right = 0;\r
-\r
- fb->var.green.length = 8;\r
- fb->var.green.offset = 8;\r
- fb->var.green.msb_right = 0;\r
-\r
- fb->var.blue.length = 8;\r
-\r
- fb->var.blue.offset = 16;\r
- fb->var.blue.msb_right = 0;\r
-\r
- fb->var.transp.length = 8;\r
- fb->var.transp.offset = 24;\r
- fb->var.transp.msb_right = 0;\r
-\r
- fb->var.nonstd &= (~0xff);\r
- fb->var.nonstd |= 1;\r
-\r
- fb->fix.smem_start = virt_to_phys(dst);\r
-\r
- rk_direct_fb_show(fb);\r
- #endif\r
+ for(j=0; j<100; j++) {\r
+ printk("%.8x\n", dst[j]);\r
+ }\r
\r
if(src)\r
kfree(src);\r
}\r
\r
#endif\r
-module_init(rga2_init);\r
+fs_initcall(rga2_init);\r
module_exit(rga2_exit);\r
\r
/* Module information */\r