video/rockchip: rga2: use axi safe reset
diff --git a/drivers/video/rockchip/rga2/rga2_drv.c b/drivers/video/rockchip/rga2/rga2_drv.c
index a3de7916655f9b26ab91d86b0cfe1968bf5c1b88..936411d895cda3f874e71dc814b114a5f5459c1f 100644
--- a/drivers/video/rockchip/rga2/rga2_drv.c
+++ b/drivers/video/rockchip/rga2/rga2_drv.c
-/*\r
- * Copyright (C) 2012 ROCKCHIP, Inc.\r
- *\r
- * This software is licensed under the terms of the GNU General Public\r
- * License version 2, as published by the Free Software Foundation, and\r
- * may be copied, distributed, and modified under those terms.\r
- *\r
- * This program is distributed in the hope that it will be useful,\r
- * but WITHOUT ANY WARRANTY; without even the implied warranty of\r
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\r
- * GNU General Public License for more details.\r
- *\r
- */\r
-\r
-#define pr_fmt(fmt) "rga: " fmt\r
-#include <linux/kernel.h>\r
-#include <linux/init.h>\r
-#include <linux/module.h>\r
-#include <linux/platform_device.h>\r
-#include <linux/sched.h>\r
-#include <linux/mutex.h>\r
-#include <linux/err.h>\r
-#include <linux/clk.h>\r
-#include <asm/delay.h>\r
-#include <linux/dma-mapping.h>\r
-#include <linux/delay.h>\r
-#include <asm/io.h>\r
-#include <linux/irq.h>\r
-#include <linux/interrupt.h>\r
-#include <linux/fs.h>\r
-#include <asm/uaccess.h>\r
-#include <linux/miscdevice.h>\r
-#include <linux/poll.h>\r
-#include <linux/delay.h>\r
-#include <linux/wait.h>\r
-#include <linux/syscalls.h>\r
-#include <linux/timer.h>\r
-#include <linux/time.h>\r
-#include <asm/cacheflush.h>\r
-#include <linux/slab.h>\r
-#include <linux/fb.h>\r
-#include <linux/wakelock.h>\r
-#include <linux/scatterlist.h>\r
-\r
-\r
-#if defined(CONFIG_ION_ROCKCHIP)\r
-#include <linux/rockchip_ion.h>\r
-#endif\r
-\r
-#include "rga2.h"\r
-#include "rga2_reg_info.h"\r
-#include "rga2_mmu_info.h"\r
-#include "RGA2_API.h"\r
-\r
-#if defined(CONFIG_ROCKCHIP_IOMMU) & defined(CONFIG_ION_ROCKCHIP)\r
-#define CONFIG_RGA_IOMMU\r
-#endif\r
-\r
-#define RGA2_TEST_FLUSH_TIME 0\r
-#define RGA2_INFO_BUS_ERROR 1\r
-\r
-#define RGA2_POWER_OFF_DELAY   4*HZ /* 4s */\r
-#define RGA2_TIMEOUT_DELAY     2*HZ /* 2s */\r
-\r
-#define RGA2_MAJOR             255\r
-\r
-#define RGA2_RESET_TIMEOUT     1000\r
-\r
-/* Driver information */\r
-#define DRIVER_DESC            "RGA2 Device Driver"\r
-#define DRIVER_NAME            "rga2"\r
-\r
-#define RGA2_VERSION   "2.000"\r
-\r
-ktime_t rga2_start;\r
-ktime_t rga2_end;\r
-\r
-int rga2_flag = 0;\r
-int first_RGA2_proc = 0;\r
-\r
-extern long (*rga_ioctl_kernel_p)(struct rga_req *);\r
-\r
-rga2_session rga2_session_global;\r
-\r
-struct rga2_drvdata_t {\r
-       struct miscdevice miscdev;\r
-       struct device dev;\r
-       void *rga_base;\r
-       int irq;\r
-\r
-       struct delayed_work power_off_work;\r
-       void (*rga_irq_callback)(int rga_retval);   //callback function used by aync call\r
-       struct wake_lock wake_lock;\r
-\r
-       struct clk *aclk_rga2;\r
-       struct clk *hclk_rga2;\r
-       struct clk *pd_rga2;\r
-    struct clk *rga2;\r
-\r
-    #if defined(CONFIG_ION_ROCKCHIP)\r
-    struct ion_client * ion_client;\r
-    #endif\r
-};\r
-\r
-struct rga2_drvdata_t *rga2_drvdata;\r
-\r
-struct rga2_service_info rga2_service;\r
-struct rga2_mmu_buf_t rga2_mmu_buf;\r
-\r
-#if defined(CONFIG_ION_ROCKCHIP)\r
-extern struct ion_client *rockchip_ion_client_create(const char * name);\r
-#endif\r
-\r
-static int rga2_blit_async(rga2_session *session, struct rga2_req *req);\r
-static void rga2_del_running_list(void);\r
-static void rga2_del_running_list_timeout(void);\r
-static void rga2_try_set_reg(void);\r
-\r
-\r
-/* Logging */\r
-#define RGA_DEBUG 0\r
-#if RGA_DEBUG\r
-#define DBG(format, args...) printk(KERN_DEBUG "%s: " format, DRIVER_NAME, ## args)\r
-#define ERR(format, args...) printk(KERN_ERR "%s: " format, DRIVER_NAME, ## args)\r
-#define WARNING(format, args...) printk(KERN_WARN "%s: " format, DRIVER_NAME, ## args)\r
-#define INFO(format, args...) printk(KERN_INFO "%s: " format, DRIVER_NAME, ## args)\r
-#else\r
-#define DBG(format, args...)\r
-#define ERR(format, args...)\r
-#define WARNING(format, args...)\r
-#define INFO(format, args...)\r
-#endif\r
-\r
-#if RGA2_TEST_MSG\r
-static void print_info(struct rga2_req *req)\r
-{\r
-    printk("render_mode=%d bitblt_mode=%d rotate_mode=%.8x\n",\r
-            req->render_mode, req->bitblt_mode, req->rotate_mode);\r
-    printk("src : y=%.llx uv=%.llx v=%.llx format=%d aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d \n",\r
-            req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr, req->src.format,\r
-            req->src.act_w, req->src.act_h, req->src.vir_w, req->src.vir_h,\r
-            req->src.x_offset, req->src.y_offset);\r
-    printk("dst : y=%llx uv=%llx v=%llx format=%d aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d \n",\r
-            req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr, req->dst.format,\r
-            req->dst.act_w, req->dst.act_h, req->dst.vir_w, req->dst.vir_h,\r
-            req->dst.x_offset, req->dst.y_offset);\r
-    printk("mmu : src=%.2x src1=%.2x dst=%.2x els=%.2x\n",\r
-            req->mmu_info.src0_mmu_flag, req->mmu_info.src1_mmu_flag,\r
-            req->mmu_info.dst_mmu_flag,  req->mmu_info.els_mmu_flag);\r
-    printk("alpha : flag %.8x mode0=%.8x mode1=%.8x\n",\r
-            req->alpha_rop_flag, req->alpha_mode_0, req->alpha_mode_1);\r
-}\r
-#endif\r
-\r
-\r
-static inline void rga2_write(u32 b, u32 r)\r
-{\r
-       //__raw_writel(b, rga2_drvdata->rga_base + r);\r
-\r
-    *((volatile unsigned int *)(rga2_drvdata->rga_base + r)) = b;\r
-}\r
-\r
-static inline u32 rga2_read(u32 r)\r
-{\r
-       //return __raw_readl(rga2_drvdata->rga_base + r);\r
-\r
-    return *((volatile unsigned int *)(rga2_drvdata->rga_base + r));\r
-}\r
-\r
-static void rga2_soft_reset(void)\r
-{\r
-       u32 i;\r
-       u32 reg;\r
-\r
-       rga2_write((1 << 3) | (1 << 4), RGA2_SYS_CTRL); //RGA_SYS_CTRL\r
-\r
-       for(i = 0; i < RGA2_RESET_TIMEOUT; i++)\r
-       {\r
-               reg = rga2_read(RGA2_SYS_CTRL) & 1; //RGA_SYS_CTRL\r
-\r
-               if(reg == 0)\r
-                       break;\r
-\r
-               udelay(1);\r
-       }\r
-\r
-       if(i == RGA2_RESET_TIMEOUT)\r
-               ERR("soft reset timeout.\n");\r
-}\r
-\r
-static void rga2_dump(void)\r
-{\r
-       int running;\r
-    struct rga2_reg *reg, *reg_tmp;\r
-    rga2_session *session, *session_tmp;\r
-\r
-       running = atomic_read(&rga2_service.total_running);\r
-       printk("rga total_running %d\n", running);\r
-\r
-       list_for_each_entry_safe(session, session_tmp, &rga2_service.session, list_session)\r
-    {\r
-               printk("session pid %d:\n", session->pid);\r
-               running = atomic_read(&session->task_running);\r
-               printk("task_running %d\n", running);\r
-               list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link)\r
-        {\r
-                       printk("waiting register set 0x%.lu\n", (unsigned long)reg);\r
-               }\r
-               list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link)\r
-        {\r
-                       printk("running register set 0x%.lu\n", (unsigned long)reg);\r
-               }\r
-       }\r
-}\r
-\r
-static inline void rga2_queue_power_off_work(void)\r
-{\r
-       queue_delayed_work(system_nrt_wq, &rga2_drvdata->power_off_work, RGA2_POWER_OFF_DELAY);\r
-}\r
-\r
-/* Caller must hold rga_service.lock */\r
-static void rga2_power_on(void)\r
-{\r
-       static ktime_t last;\r
-       ktime_t now = ktime_get();\r
-\r
-       if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) {\r
-               cancel_delayed_work_sync(&rga2_drvdata->power_off_work);\r
-               rga2_queue_power_off_work();\r
-               last = now;\r
-       }\r
-       if (rga2_service.enable)\r
-               return;\r
-\r
-    clk_prepare_enable(rga2_drvdata->rga2);\r
-       clk_prepare_enable(rga2_drvdata->aclk_rga2);\r
-       clk_prepare_enable(rga2_drvdata->hclk_rga2);\r
-       clk_prepare_enable(rga2_drvdata->pd_rga2);\r
-       wake_lock(&rga2_drvdata->wake_lock);\r
-       rga2_service.enable = true;\r
-}\r
-\r
-/* Caller must hold rga_service.lock */\r
-static void rga2_power_off(void)\r
-{\r
-       int total_running;\r
-\r
-       if (!rga2_service.enable) {\r
-               return;\r
-       }\r
-\r
-       total_running = atomic_read(&rga2_service.total_running);\r
-       if (total_running) {\r
-               pr_err("power off when %d task running!!\n", total_running);\r
-               mdelay(50);\r
-               pr_err("delay 50 ms for running task\n");\r
-               rga2_dump();\r
-       }\r
-\r
-    clk_disable_unprepare(rga2_drvdata->rga2);\r
-    clk_disable_unprepare(rga2_drvdata->pd_rga2);\r
-       clk_disable_unprepare(rga2_drvdata->aclk_rga2);\r
-       clk_disable_unprepare(rga2_drvdata->hclk_rga2);\r
-       wake_unlock(&rga2_drvdata->wake_lock);\r
-    first_RGA2_proc = 0;\r
-       rga2_service.enable = false;\r
-}\r
-\r
-static void rga2_power_off_work(struct work_struct *work)\r
-{\r
-       if (mutex_trylock(&rga2_service.lock)) {\r
-               rga2_power_off();\r
-               mutex_unlock(&rga2_service.lock);\r
-       } else {\r
-               /* Come back later if the device is busy... */\r
-               rga2_queue_power_off_work();\r
-       }\r
-}\r
-\r
-static int rga2_flush(rga2_session *session, unsigned long arg)\r
-{\r
-    int ret = 0;\r
-    int ret_timeout;\r
-\r
-    #if RGA2_TEST_FLUSH_TIME\r
-    ktime_t start;\r
-    ktime_t end;\r
-    start = ktime_get();\r
-    #endif\r
-\r
-    ret_timeout = wait_event_timeout(session->wait, atomic_read(&session->done), RGA2_TIMEOUT_DELAY);\r
-\r
-       if (unlikely(ret_timeout < 0)) {\r
-               //pr_err("flush pid %d wait task ret %d\n", session->pid, ret);\r
-        mutex_lock(&rga2_service.lock);\r
-        rga2_del_running_list();\r
-        mutex_unlock(&rga2_service.lock);\r
-        ret = ret_timeout;\r
-       } else if (0 == ret_timeout) {\r
-               //pr_err("flush pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));\r
-        //printk("bus  = %.8x\n", rga_read(RGA_INT));\r
-        mutex_lock(&rga2_service.lock);\r
-        rga2_del_running_list_timeout();\r
-        rga2_try_set_reg();\r
-        mutex_unlock(&rga2_service.lock);\r
-               ret = -ETIMEDOUT;\r
-       }\r
-\r
-    #if RGA2_TEST_FLUSH_TIME\r
-    end = ktime_get();\r
-    end = ktime_sub(end, start);\r
-    printk("one flush wait time %d\n", (int)ktime_to_us(end));\r
-    #endif\r
-\r
-       return ret;\r
-}\r
-\r
-\r
-static int rga2_get_result(rga2_session *session, unsigned long arg)\r
-{\r
-       //printk("rga_get_result %d\n",drvdata->rga_result);\r
-\r
-    int ret = 0;\r
-\r
-    int num_done;\r
-\r
-    num_done = atomic_read(&session->num_done);\r
-\r
-       if (unlikely(copy_to_user((void __user *)arg, &num_done, sizeof(int)))) {\r
-                       printk("copy_to_user failed\n");\r
-                       ret =  -EFAULT;\r
-               }\r
-       return ret;\r
-}\r
-\r
-\r
-static int rga2_check_param(const struct rga2_req *req)\r
-{\r
-    /*RGA2 can support up to 8192*8192 resolution in RGB format,but we limit the image size to 8191*8191 here*/\r
-       //check src width and height\r
-\r
-    if(!((req->render_mode == color_fill_mode)))\r
-    {\r
-       if (unlikely((req->src.act_w <= 0) || (req->src.act_w > 8191) || (req->src.act_h <= 0) || (req->src.act_h > 8191)))\r
-        {\r
-               printk("invalid source resolution act_w = %d, act_h = %d\n", req->src.act_w, req->src.act_h);\r
-               return  -EINVAL;\r
-       }\r
-    }\r
-\r
-    if(!((req->render_mode == color_fill_mode)))\r
-    {\r
-       if (unlikely((req->src.vir_w <= 0) || (req->src.vir_w > 8191) || (req->src.vir_h <= 0) || (req->src.vir_h > 8191)))\r
-        {\r
-               printk("invalid source resolution vir_w = %d, vir_h = %d\n", req->src.vir_w, req->src.vir_h);\r
-               return  -EINVAL;\r
-       }\r
-    }\r
-\r
-       //check dst width and height\r
-       if (unlikely((req->dst.act_w <= 0) || (req->dst.act_w > 4096) || (req->dst.act_h <= 0) || (req->dst.act_h > 4096)))\r
-    {\r
-               printk("invalid destination resolution act_w = %d, act_h = %d\n", req->dst.act_w, req->dst.act_h);\r
-               return  -EINVAL;\r
-       }\r
-\r
-    if (unlikely((req->dst.vir_w <= 0) || (req->dst.vir_w > 4096) || (req->dst.vir_h <= 0) || (req->dst.vir_h > 4096)))\r
-    {\r
-               printk("invalid destination resolution vir_w = %d, vir_h = %d\n", req->dst.vir_w, req->dst.vir_h);\r
-               return  -EINVAL;\r
-       }\r
-\r
-       //check src_vir_w\r
-       if(unlikely(req->src.vir_w < req->src.act_w)){\r
-               printk("invalid src_vir_w act_w = %d, vir_w = %d\n", req->src.act_w, req->src.vir_w);\r
-               return  -EINVAL;\r
-       }\r
-\r
-       //check dst_vir_w\r
-       if(unlikely(req->dst.vir_w < req->dst.act_w)){\r
-        if(req->rotate_mode != 1)\r
-        {\r
-                   printk("invalid dst_vir_w act_h = %d, vir_h = %d\n", req->dst.act_w, req->dst.vir_w);\r
-                   return      -EINVAL;\r
-        }\r
-       }\r
-\r
-       return 0;\r
-}\r
-\r
-static void rga2_copy_reg(struct rga2_reg *reg, uint32_t offset)\r
-{\r
-    uint32_t i;\r
-    uint32_t *cmd_buf;\r
-    uint32_t *reg_p;\r
-\r
-    if(atomic_read(&reg->session->task_running) != 0)\r
-        printk(KERN_ERR "task_running is no zero\n");\r
-\r
-    atomic_add(1, &rga2_service.cmd_num);\r
-       atomic_add(1, &reg->session->task_running);\r
-\r
-    cmd_buf = (uint32_t *)rga2_service.cmd_buff + offset*32;\r
-    reg_p = (uint32_t *)reg->cmd_reg;\r
-\r
-    for(i=0; i<32; i++)\r
-        cmd_buf[i] = reg_p[i];\r
-}\r
-\r
-\r
-static struct rga2_reg * rga2_reg_init(rga2_session *session, struct rga2_req *req)\r
-{\r
-    int32_t ret;\r
-       struct rga2_reg *reg = kzalloc(sizeof(struct rga2_reg), GFP_KERNEL);\r
-       if (NULL == reg) {\r
-               pr_err("kmalloc fail in rga_reg_init\n");\r
-               return NULL;\r
-       }\r
-\r
-    reg->session = session;\r
-       INIT_LIST_HEAD(&reg->session_link);\r
-       INIT_LIST_HEAD(&reg->status_link);\r
-\r
-    reg->MMU_base = NULL;\r
-\r
-    if ((req->mmu_info.src0_mmu_flag & 1) || (req->mmu_info.src1_mmu_flag & 1)\r
-        || (req->mmu_info.dst_mmu_flag & 1) || (req->mmu_info.els_mmu_flag & 1))\r
-    {\r
-        ret = rga2_set_mmu_info(reg, req);\r
-        if(ret < 0) {\r
-            printk("%s, [%d] set mmu info error \n", __FUNCTION__, __LINE__);\r
-            if(reg != NULL)\r
-                kfree(reg);\r
-\r
-            return NULL;\r
-        }\r
-    }\r
-\r
-    if(RGA2_gen_reg_info((uint8_t *)reg->cmd_reg, req) == -1) {\r
-        printk("gen reg info error\n");\r
-        if(reg != NULL)\r
-            kfree(reg);\r
-\r
-        return NULL;\r
-    }\r
-\r
-    mutex_lock(&rga2_service.lock);\r
-       list_add_tail(&reg->status_link, &rga2_service.waiting);\r
-       list_add_tail(&reg->session_link, &session->waiting);\r
-       mutex_unlock(&rga2_service.lock);\r
-\r
-    return reg;\r
-}\r
-\r
-\r
-/* Caller must hold rga_service.lock */\r
-static void rga2_reg_deinit(struct rga2_reg *reg)\r
-{\r
-       list_del_init(&reg->session_link);\r
-       list_del_init(&reg->status_link);\r
-       kfree(reg);\r
-}\r
-\r
-/* Caller must hold rga_service.lock */\r
-static void rga2_reg_from_wait_to_run(struct rga2_reg *reg)\r
-{\r
-       list_del_init(&reg->status_link);\r
-       list_add_tail(&reg->status_link, &rga2_service.running);\r
-\r
-       list_del_init(&reg->session_link);\r
-       list_add_tail(&reg->session_link, &reg->session->running);\r
-}\r
-\r
-/* Caller must hold rga_service.lock */\r
-static void rga2_service_session_clear(rga2_session *session)\r
-{\r
-       struct rga2_reg *reg, *n;\r
-\r
-    list_for_each_entry_safe(reg, n, &session->waiting, session_link)\r
-    {\r
-               rga2_reg_deinit(reg);\r
-       }\r
-\r
-    list_for_each_entry_safe(reg, n, &session->running, session_link)\r
-    {\r
-               rga2_reg_deinit(reg);\r
-       }\r
-}\r
-\r
-/* Caller must hold rga_service.lock */\r
-static void rga2_try_set_reg(void)\r
-{\r
-    struct rga2_reg *reg ;\r
-\r
-    if (list_empty(&rga2_service.running))\r
-    {\r
-        if (!list_empty(&rga2_service.waiting))\r
-        {\r
-            /* RGA is idle */\r
-            reg = list_entry(rga2_service.waiting.next, struct rga2_reg, status_link);\r
-\r
-            rga2_power_on();\r
-            udelay(1);\r
-\r
-            rga2_copy_reg(reg, 0);\r
-            rga2_reg_from_wait_to_run(reg);\r
-\r
-            #ifdef CONFIG_ARM\r
-            dmac_flush_range(&rga2_service.cmd_buff[0], &rga2_service.cmd_buff[32]);\r
-            outer_flush_range(virt_to_phys(&rga2_service.cmd_buff[0]),virt_to_phys(&rga2_service.cmd_buff[32]));\r
-            #elif defined(CONFIG_ARM64)\r
-            __dma_flush_range(&rga2_service.cmd_buff[0], &rga2_service.cmd_buff[32]);\r
-            #endif\r
-\r
-            //rga2_soft_reset();\r
-\r
-            rga2_write(0x0, RGA2_SYS_CTRL);\r
-\r
-            /* CMD buff */\r
-            rga2_write(virt_to_phys(rga2_service.cmd_buff), RGA2_CMD_BASE);\r
-\r
-#if RGA2_TEST\r
-            if(rga2_flag) {\r
-                int32_t i, *p;\r
-                p = rga2_service.cmd_buff;\r
-                printk("CMD_REG\n");\r
-                for (i=0; i<8; i++)\r
-                    printk("%.8x %.8x %.8x %.8x\n", p[0 + i*4], p[1+i*4], p[2 + i*4], p[3 + i*4]);\r
-            }\r
-#endif\r
-\r
-            /* master mode */\r
-            rga2_write((0x1<<1)|(0x1<<2)|(0x1<<5)|(0x1<<6), RGA2_SYS_CTRL);\r
-\r
-            /* All CMD finish int */\r
-            rga2_write(rga2_read(RGA2_INT)|(0x1<<10)|(0x1<<9)|(0x1<<8), RGA2_INT);\r
-\r
-            #if RGA2_TEST_TIME\r
-            rga2_start = ktime_get();\r
-            #endif\r
-\r
-            /* Start proc */\r
-            atomic_set(&reg->session->done, 0);\r
-            rga2_write(0x1, RGA2_CMD_CTRL);\r
-#if RGA2_TEST\r
-            if(rga2_flag)\r
-            {\r
-                uint32_t i;\r
-                printk("CMD_READ_BACK_REG\n");\r
-                for (i=0; i<8; i++)\r
-                    printk("%.8x %.8x %.8x %.8x\n", rga2_read(0x100 + i*16 + 0),\r
-                            rga2_read(0x100 + i*16 + 4), rga2_read(0x100 + i*16 + 8), rga2_read(0x100 + i*16 + 12));\r
-            }\r
-#endif\r
-        }\r
-    }\r
-}\r
-\r
-\r
-\r
-\r
-/* Caller must hold rga_service.lock */\r
-static void rga2_del_running_list(void)\r
-{\r
-    struct rga2_reg *reg;\r
-\r
-    while(!list_empty(&rga2_service.running))\r
-    {\r
-        reg = list_entry(rga2_service.running.next, struct rga2_reg, status_link);\r
-\r
-        if(reg->MMU_len != 0)\r
-        {\r
-            if (rga2_mmu_buf.back + reg->MMU_len > 2*rga2_mmu_buf.size)\r
-                rga2_mmu_buf.back = reg->MMU_len + rga2_mmu_buf.size;\r
-            else\r
-                rga2_mmu_buf.back += reg->MMU_len;\r
-        }\r
-        atomic_sub(1, &reg->session->task_running);\r
-        atomic_sub(1, &rga2_service.total_running);\r
-\r
-        if(list_empty(&reg->session->waiting))\r
-        {\r
-            atomic_set(&reg->session->done, 1);\r
-            wake_up(&reg->session->wait);\r
-        }\r
-\r
-        rga2_reg_deinit(reg);\r
-    }\r
-}\r
-\r
-/* Caller must hold rga_service.lock */\r
-static void rga2_del_running_list_timeout(void)\r
-{\r
-    struct rga2_reg *reg;\r
-\r
-    while(!list_empty(&rga2_service.running))\r
-    {\r
-        reg = list_entry(rga2_service.running.next, struct rga2_reg, status_link);\r
-\r
-        if(reg->MMU_base != NULL)\r
-        {\r
-            kfree(reg->MMU_base);\r
-        }\r
-\r
-        atomic_sub(1, &reg->session->task_running);\r
-        atomic_sub(1, &rga2_service.total_running);\r
-\r
-        rga2_soft_reset();\r
-\r
-        if(list_empty(&reg->session->waiting))\r
-        {\r
-            atomic_set(&reg->session->done, 1);\r
-            wake_up(&reg->session->wait);\r
-        }\r
-\r
-        rga2_reg_deinit(reg);\r
-    }\r
-}\r
-\r
-\r
-static int rga2_convert_dma_buf(struct rga2_req *req)\r
-{\r
-       struct ion_handle *hdl;\r
-       ion_phys_addr_t phy_addr;\r
-       size_t len;\r
-    int ret;\r
-\r
-    req->sg_src0 = NULL;\r
-    req->sg_src1 = NULL;\r
-    req->sg_dst  = NULL;\r
-    req->sg_els  = NULL;\r
-\r
-    if((int)req->src.yrgb_addr > 0) {\r
-        hdl = ion_import_dma_buf(rga2_drvdata->ion_client, req->src.yrgb_addr);\r
-        if (IS_ERR(hdl)) {\r
-            ret = PTR_ERR(hdl);\r
-            printk("RGA2 SRC ERROR ion buf handle\n");\r
-            return ret;\r
-        }\r
-        if (req->mmu_info.src0_mmu_flag) {\r
-            req->sg_src0 = ion_sg_table(rga2_drvdata->ion_client, hdl);\r
-            req->src.yrgb_addr = req->src.uv_addr;\r
-            req->src.uv_addr = req->src.yrgb_addr + (req->src.vir_w * req->src.vir_h);\r
-            req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h)/4;\r
-        }\r
-        else {\r
-            ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);\r
-            req->src.yrgb_addr = phy_addr;\r
-            req->src.uv_addr = req->src.yrgb_addr + (req->src.vir_w * req->src.vir_h);\r
-            req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h)/4;\r
-        }\r
-        ion_free(rga2_drvdata->ion_client, hdl);\r
-    }\r
-    else {\r
-        req->src.yrgb_addr = req->src.uv_addr;\r
-        req->src.uv_addr = req->src.yrgb_addr + (req->src.vir_w * req->src.vir_h);\r
-        req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h)/4;\r
-    }\r
-\r
-    if((int)req->dst.yrgb_addr > 0) {\r
-        hdl = ion_import_dma_buf(rga2_drvdata->ion_client, req->dst.yrgb_addr);\r
-        if (IS_ERR(hdl)) {\r
-            ret = PTR_ERR(hdl);\r
-            printk("RGA2 DST ERROR ion buf handle\n");\r
-            return ret;\r
-        }\r
-        if (req->mmu_info.dst_mmu_flag) {\r
-            req->sg_dst = ion_sg_table(rga2_drvdata->ion_client, hdl);\r
-            req->dst.yrgb_addr = req->dst.uv_addr;\r
-            req->dst.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);\r
-            req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;\r
-        }\r
-        else {\r
-            ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);\r
-            req->dst.yrgb_addr = phy_addr;\r
-            req->dst.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);\r
-            req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;\r
-        }\r
-        ion_free(rga2_drvdata->ion_client, hdl);\r
-    }\r
-    else {\r
-        req->dst.yrgb_addr = req->dst.uv_addr;\r
-        req->dst.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);\r
-        req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;\r
-    }\r
-\r
-    if((int)req->src1.yrgb_addr > 0) {\r
-        hdl = ion_import_dma_buf(rga2_drvdata->ion_client, req->src1.yrgb_addr);\r
-        if (IS_ERR(hdl)) {\r
-            ret = PTR_ERR(hdl);\r
-            printk("RGA2 ERROR ion buf handle\n");\r
-            return ret;\r
-        }\r
-        if (req->mmu_info.dst_mmu_flag) {\r
-            req->sg_src1 = ion_sg_table(rga2_drvdata->ion_client, hdl);\r
-            req->src1.yrgb_addr = 0;\r
-            req->src1.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);\r
-            req->src1.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;\r
-        }\r
-        else {\r
-            ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);\r
-            req->src1.yrgb_addr = phy_addr;\r
-            req->src1.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);\r
-            req->src1.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;\r
-        }\r
-        ion_free(rga2_drvdata->ion_client, hdl);\r
-    }\r
-    else {\r
-        req->src1.yrgb_addr = req->dst.uv_addr;\r
-        req->src1.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);\r
-        req->src1.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;\r
-    }\r
-\r
-    return 0;\r
-}\r
-\r
-\r
-static int rga2_blit(rga2_session *session, struct rga2_req *req)\r
-{\r
-    int ret = -1;\r
-    int num = 0;\r
-    struct rga2_reg *reg;\r
-\r
-    if(rga2_convert_dma_buf(req)) {\r
-        printk("RGA2 : DMA buf copy error\n");\r
-        return -EFAULT;\r
-    }\r
-\r
-    do {\r
-        /* check value if legal */\r
-        ret = rga2_check_param(req);\r
-       if(ret == -EINVAL) {\r
-            printk("req argument is inval\n");\r
-            break;\r
-       }\r
-\r
-        reg = rga2_reg_init(session, req);\r
-        if(reg == NULL) {\r
-            break;\r
-        }\r
-        num = 1;\r
-\r
-        mutex_lock(&rga2_service.lock);\r
-        atomic_add(num, &rga2_service.total_running);\r
-        rga2_try_set_reg();\r
-        mutex_unlock(&rga2_service.lock);\r
-\r
-        return 0;\r
-    }\r
-    while(0);\r
-\r
-    return -EFAULT;\r
-}\r
-\r
-static int rga2_blit_async(rga2_session *session, struct rga2_req *req)\r
-{\r
-       int ret = -1;\r
-\r
-    #if RGA2_TEST_MSG\r
-    if (1) {//req->src.format >= 0x10) {\r
-        print_info(req);\r
-        rga2_flag = 1;\r
-        printk("*** rga_blit_async proc ***\n");\r
-    }\r
-    else\r
-        rga2_flag = 0;\r
-    #endif\r
-    atomic_set(&session->done, 0);\r
-    ret = rga2_blit(session, req);\r
-\r
-    return ret;\r
-}\r
-\r
-static int rga2_blit_sync(rga2_session *session, struct rga2_req *req)\r
-{\r
-    int ret = -1;\r
-    int ret_timeout = 0;\r
-\r
-    #if RGA2_TEST_MSG\r
-    if (1) {//req->bitblt_mode == 0x2) {\r
-        print_info(req);\r
-        rga2_flag = 1;\r
-        printk("*** rga2_blit_sync proc ***\n");\r
-    }\r
-    else\r
-        rga2_flag = 0;\r
-    #endif\r
-\r
-    atomic_set(&session->done, 0);\r
-\r
-    ret = rga2_blit(session, req);\r
-    if(ret < 0)\r
-        return ret;\r
-\r
-    ret_timeout = wait_event_timeout(session->wait, atomic_read(&session->done), RGA2_TIMEOUT_DELAY);\r
-\r
-    if (unlikely(ret_timeout< 0))\r
-    {\r
-               //pr_err("sync pid %d wait task ret %d\n", session->pid, ret_timeout);\r
-        mutex_lock(&rga2_service.lock);\r
-        rga2_del_running_list();\r
-        mutex_unlock(&rga2_service.lock);\r
-        ret = ret_timeout;\r
-       }\r
-    else if (0 == ret_timeout)\r
-    {\r
-               //pr_err("sync pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));\r
-        mutex_lock(&rga2_service.lock);\r
-        rga2_del_running_list_timeout();\r
-        rga2_try_set_reg();\r
-        mutex_unlock(&rga2_service.lock);\r
-               ret = -ETIMEDOUT;\r
-       }\r
-\r
-    #if RGA2_TEST_TIME\r
-    rga2_end = ktime_get();\r
-    rga2_end = ktime_sub(rga2_end, rga2_start);\r
-    printk("sync one cmd end time %d\n", (int)ktime_to_us(rga2_end));\r
-    #endif\r
-\r
-    return ret;\r
-}\r
-\r
-static long rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)\r
-{\r
-    struct rga2_req req, req_first;\r
-    struct rga_req req_rga;\r
-       int ret = 0;\r
-    rga2_session *session;\r
-\r
-    memset(&req, 0x0, sizeof(req));\r
-\r
-    mutex_lock(&rga2_service.mutex);\r
-\r
-    session = (rga2_session *)file->private_data;\r
-\r
-       if (NULL == session)\r
-    {\r
-        printk("%s [%d] rga thread session is null\n",__FUNCTION__,__LINE__);\r
-        mutex_unlock(&rga2_service.mutex);\r
-               return -EINVAL;\r
-       }\r
-\r
-    memset(&req, 0x0, sizeof(req));\r
-\r
-       switch (cmd)\r
-       {\r
-        case RGA_BLIT_SYNC:\r
-               if (unlikely(copy_from_user(&req_rga, (struct rga_req*)arg, sizeof(struct rga_req))))\r
-            {\r
-                       ERR("copy_from_user failed\n");\r
-                       ret = -EFAULT;\r
-                break;\r
-               }\r
-            RGA_MSG_2_RGA2_MSG(&req_rga, &req);\r
-\r
-            if (first_RGA2_proc == 0 && req.bitblt_mode == bitblt_mode && rga2_service.dev_mode == 1) {\r
-                memcpy(&req_first, &req, sizeof(struct rga2_req));\r
-                if ((req_first.src.act_w != req_first.dst.act_w)\r
-                    || (req_first.src.act_h != req_first.dst.act_h)) {\r
-                    req_first.src.act_w = MIN(320, MIN(req_first.src.act_w, req_first.dst.act_w));\r
-                    req_first.src.act_h = MIN(240, MIN(req_first.src.act_h, req_first.dst.act_h));\r
-                    req_first.dst.act_w = req_first.src.act_w;\r
-                    req_first.dst.act_h = req_first.src.act_h;\r
-                    ret = rga2_blit_async(session, &req_first);\r
-                }\r
-                ret = rga2_blit_sync(session, &req);\r
-                first_RGA2_proc = 1;\r
-            }\r
-            else {\r
-                ret = rga2_blit_sync(session, &req);\r
-            }\r
-            break;\r
-               case RGA_BLIT_ASYNC:\r
-               if (unlikely(copy_from_user(&req_rga, (struct rga_req*)arg, sizeof(struct rga_req))))\r
-            {\r
-                       ERR("copy_from_user failed\n");\r
-                       ret = -EFAULT;\r
-                break;\r
-               }\r
-\r
-            RGA_MSG_2_RGA2_MSG(&req_rga, &req);\r
-\r
-            if (first_RGA2_proc == 0 && req.bitblt_mode == bitblt_mode && rga2_service.dev_mode == 1) {\r
-                memcpy(&req_first, &req, sizeof(struct rga2_req));\r
-                if ((req_first.src.act_w != req_first.dst.act_w)\r
-                    || (req_first.src.act_h != req_first.dst.act_h)) {\r
-                    req_first.src.act_w = MIN(320, MIN(req_first.src.act_w, req_first.dst.act_w));\r
-                    req_first.src.act_h = MIN(240, MIN(req_first.src.act_h, req_first.dst.act_h));\r
-                    req_first.dst.act_w = req_first.src.act_w;\r
-                    req_first.dst.act_h = req_first.src.act_h;\r
-                    ret = rga2_blit_async(session, &req_first);\r
-                }\r
-                ret = rga2_blit_async(session, &req);\r
-                first_RGA2_proc = 1;\r
-            }\r
-            else {\r
-                ret = rga2_blit_async(session, &req);\r
-            }\r
-               break;\r
-               case RGA2_BLIT_SYNC:\r
-               if (unlikely(copy_from_user(&req, (struct rga2_req*)arg, sizeof(struct rga2_req))))\r
-            {\r
-                       ERR("copy_from_user failed\n");\r
-                       ret = -EFAULT;\r
-                break;\r
-               }\r
-            ret = rga2_blit_sync(session, &req);\r
-            break;\r
-               case RGA2_BLIT_ASYNC:\r
-               if (unlikely(copy_from_user(&req, (struct rga2_req*)arg, sizeof(struct rga2_req))))\r
-            {\r
-                       ERR("copy_from_user failed\n");\r
-                       ret = -EFAULT;\r
-                break;\r
-               }\r
-\r
-            if((atomic_read(&rga2_service.total_running) > 16))\r
-            {\r
-                           ret = rga2_blit_sync(session, &req);\r
-            }\r
-            else\r
-            {\r
-                ret = rga2_blit_async(session, &req);\r
-            }\r
-                       break;\r
-        case RGA_FLUSH:\r
-               case RGA2_FLUSH:\r
-                       ret = rga2_flush(session, arg);\r
-                       break;\r
-        case RGA_GET_RESULT:\r
-        case RGA2_GET_RESULT:\r
-            ret = rga2_get_result(session, arg);\r
-            break;\r
-        case RGA_GET_VERSION:\r
-        case RGA2_GET_VERSION:\r
-            ret = copy_to_user((void *)arg, RGA2_VERSION, sizeof(RGA2_VERSION));\r
-            //ret = 0;\r
-            break;\r
-               default:\r
-                       ERR("unknown ioctl cmd!\n");\r
-                       ret = -EINVAL;\r
-                       break;\r
-       }\r
-\r
-       mutex_unlock(&rga2_service.mutex);\r
-\r
-       return ret;\r
-}\r
-\r
-#ifdef CONFIG_COMPAT\r
-static long compat_rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)\r
-{\r
-    struct rga2_req req, req_first;\r
-    struct rga_req_32 req_rga;\r
-       int ret = 0;\r
-    rga2_session *session;\r
-\r
-    memset(&req, 0x0, sizeof(req));\r
-\r
-    mutex_lock(&rga2_service.mutex);\r
-\r
-    session = (rga2_session *)file->private_data;\r
-\r
-    #if RGA2_TEST_MSG\r
-    printk("use compat_rga_ioctl\n");\r
-    #endif\r
-\r
-       if (NULL == session) {\r
-        printk("%s [%d] rga thread session is null\n",__FUNCTION__,__LINE__);\r
-        mutex_unlock(&rga2_service.mutex);\r
-               return -EINVAL;\r
-       }\r
-\r
-    memset(&req, 0x0, sizeof(req));\r
-\r
-       switch (cmd) {\r
-        case RGA_BLIT_SYNC:\r
-               if (unlikely(copy_from_user(&req_rga, compat_ptr((compat_uptr_t)arg), sizeof(struct rga_req_32))))\r
-            {\r
-                       ERR("copy_from_user failed\n");\r
-                       ret = -EFAULT;\r
-                break;\r
-               }\r
-\r
-            RGA_MSG_2_RGA2_MSG_32(&req_rga, &req);\r
-\r
-            if (first_RGA2_proc == 0 && req.bitblt_mode == bitblt_mode && rga2_service.dev_mode == 1) {\r
-                memcpy(&req_first, &req, sizeof(struct rga2_req));\r
-                if ((req_first.src.act_w != req_first.dst.act_w)\r
-                    || (req_first.src.act_h != req_first.dst.act_h)) {\r
-                    req_first.src.act_w = MIN(320, MIN(req_first.src.act_w, req_first.dst.act_w));\r
-                    req_first.src.act_h = MIN(240, MIN(req_first.src.act_h, req_first.dst.act_h));\r
-                    req_first.dst.act_w = req_first.src.act_w;\r
-                    req_first.dst.act_h = req_first.src.act_h;\r
-                    ret = rga2_blit_async(session, &req_first);\r
-                }\r
-                ret = rga2_blit_sync(session, &req);\r
-                first_RGA2_proc = 1;\r
-            }\r
-            else {\r
-                ret = rga2_blit_sync(session, &req);\r
-            }\r
-            break;\r
-               case RGA_BLIT_ASYNC:\r
-               if (unlikely(copy_from_user(&req_rga, compat_ptr((compat_uptr_t)arg), sizeof(struct rga_req_32))))\r
-            {\r
-                       ERR("copy_from_user failed\n");\r
-                       ret = -EFAULT;\r
-                break;\r
-               }\r
-            RGA_MSG_2_RGA2_MSG_32(&req_rga, &req);\r
-\r
-            if (first_RGA2_proc == 0 && req.bitblt_mode == bitblt_mode && rga2_service.dev_mode == 1) {\r
-                memcpy(&req_first, &req, sizeof(struct rga2_req));\r
-                if ((req_first.src.act_w != req_first.dst.act_w)\r
-                    || (req_first.src.act_h != req_first.dst.act_h)) {\r
-                    req_first.src.act_w = MIN(320, MIN(req_first.src.act_w, req_first.dst.act_w));\r
-                    req_first.src.act_h = MIN(240, MIN(req_first.src.act_h, req_first.dst.act_h));\r
-                    req_first.dst.act_w = req_first.src.act_w;\r
-                    req_first.dst.act_h = req_first.src.act_h;\r
-                    ret = rga2_blit_async(session, &req_first);\r
-                }\r
-                ret = rga2_blit_sync(session, &req);\r
-                first_RGA2_proc = 1;\r
-            }\r
-            else {\r
-                ret = rga2_blit_sync(session, &req);\r
-            }\r
-\r
-            //if((atomic_read(&rga2_service.total_running) > 8))\r
-                       //    ret = rga2_blit_sync(session, &req);\r
-            //else\r
-            //    ret = rga2_blit_async(session, &req);\r
-\r
-                       break;\r
-               case RGA2_BLIT_SYNC:\r
-               if (unlikely(copy_from_user(&req, compat_ptr((compat_uptr_t)arg), sizeof(struct rga2_req))))\r
-            {\r
-                       ERR("copy_from_user failed\n");\r
-                       ret = -EFAULT;\r
-                break;\r
-               }\r
-            ret = rga2_blit_sync(session, &req);\r
-            break;\r
-               case RGA2_BLIT_ASYNC:\r
-               if (unlikely(copy_from_user(&req, compat_ptr((compat_uptr_t)arg), sizeof(struct rga2_req))))\r
-            {\r
-                       ERR("copy_from_user failed\n");\r
-                       ret = -EFAULT;\r
-                break;\r
-               }\r
-\r
-            if((atomic_read(&rga2_service.total_running) > 16))\r
-                           ret = rga2_blit_sync(session, &req);\r
-            else\r
-                ret = rga2_blit_async(session, &req);\r
-\r
-                       break;\r
-        case RGA_FLUSH:\r
-               case RGA2_FLUSH:\r
-                       ret = rga2_flush(session, arg);\r
-                       break;\r
-        case RGA_GET_RESULT:\r
-        case RGA2_GET_RESULT:\r
-            ret = rga2_get_result(session, arg);\r
-            break;\r
-        case RGA_GET_VERSION:\r
-        case RGA2_GET_VERSION:\r
-            ret = copy_to_user((void *)arg, RGA2_VERSION, sizeof(RGA2_VERSION));\r
-            //ret = 0;\r
-            break;\r
-               default:\r
-                       ERR("unknown ioctl cmd!\n");\r
-                       ret = -EINVAL;\r
-                       break;\r
-       }\r
-\r
-       mutex_unlock(&rga2_service.mutex);\r
-\r
-       return ret;\r
-}\r
-#endif\r
-\r
-\r
-long rga2_ioctl_kernel(struct rga_req *req_rga)\r
-{\r
-       int ret = 0;\r
-    rga2_session *session;\r
-    struct rga2_req req;\r
-\r
-    memset(&req, 0x0, sizeof(req));\r
-\r
-    mutex_lock(&rga2_service.mutex);\r
-\r
-    session = &rga2_session_global;\r
-\r
-       if (NULL == session)\r
-    {\r
-        printk("%s [%d] rga thread session is null\n",__FUNCTION__,__LINE__);\r
-        mutex_unlock(&rga2_service.mutex);\r
-               return -EINVAL;\r
-       }\r
-\r
-    RGA_MSG_2_RGA2_MSG(req_rga, &req);\r
-    ret = rga2_blit_sync(session, &req);\r
-\r
-       mutex_unlock(&rga2_service.mutex);\r
-\r
-       return ret;\r
-}\r
-\r
-\r
-static int rga2_open(struct inode *inode, struct file *file)\r
-{\r
-    rga2_session *session = kzalloc(sizeof(rga2_session), GFP_KERNEL);\r
-       if (NULL == session) {\r
-               pr_err("unable to allocate memory for rga_session.");\r
-               return -ENOMEM;\r
-       }\r
-\r
-       session->pid = current->pid;\r
-    //printk(KERN_DEBUG  "+");\r
-\r
-       INIT_LIST_HEAD(&session->waiting);\r
-       INIT_LIST_HEAD(&session->running);\r
-       INIT_LIST_HEAD(&session->list_session);\r
-       init_waitqueue_head(&session->wait);\r
-       mutex_lock(&rga2_service.lock);\r
-       list_add_tail(&session->list_session, &rga2_service.session);\r
-       mutex_unlock(&rga2_service.lock);\r
-       atomic_set(&session->task_running, 0);\r
-    atomic_set(&session->num_done, 0);\r
-\r
-       file->private_data = (void *)session;\r
-\r
-    //DBG("*** rga dev opened by pid %d *** \n", session->pid);\r
-       return nonseekable_open(inode, file);\r
-\r
-}\r
-\r
-static int rga2_release(struct inode *inode, struct file *file)\r
-{\r
-    int task_running;\r
-       rga2_session *session = (rga2_session *)file->private_data;\r
-       if (NULL == session)\r
-               return -EINVAL;\r
-    //printk(KERN_DEBUG  "-");\r
-       task_running = atomic_read(&session->task_running);\r
-\r
-    if (task_running)\r
-    {\r
-               pr_err("rga2_service session %d still has %d task running when closing\n", session->pid, task_running);\r
-               msleep(100);\r
-        /*ͬ²½*/\r
-       }\r
-\r
-       wake_up(&session->wait);\r
-       mutex_lock(&rga2_service.lock);\r
-       list_del(&session->list_session);\r
-       rga2_service_session_clear(session);\r
-       kfree(session);\r
-       mutex_unlock(&rga2_service.lock);\r
-\r
-    //DBG("*** rga dev close ***\n");\r
-       return 0;\r
-}\r
-\r
-static irqreturn_t rga2_irq_thread(int irq, void *dev_id)\r
-{\r
-       mutex_lock(&rga2_service.lock);\r
-       if (rga2_service.enable) {\r
-               rga2_del_running_list();\r
-               rga2_try_set_reg();\r
-       }\r
-       mutex_unlock(&rga2_service.lock);\r
-\r
-       return IRQ_HANDLED;\r
-}\r
-\r
-static irqreturn_t rga2_irq(int irq,  void *dev_id)\r
-{\r
-       /*clear INT */\r
-       rga2_write(rga2_read(RGA2_INT) | (0x1<<4) | (0x1<<5) | (0x1<<6) | (0x1<<7), RGA2_INT);\r
-\r
-       return IRQ_WAKE_THREAD;\r
-}\r
-\r
-struct file_operations rga2_fops = {\r
-       .owner          = THIS_MODULE,\r
-       .open           = rga2_open,\r
-       .release        = rga2_release,\r
-       .unlocked_ioctl         = rga_ioctl,\r
-#ifdef CONFIG_COMPAT\r
-       .compat_ioctl           = compat_rga_ioctl,\r
-#endif\r
-};\r
-\r
-static struct miscdevice rga2_dev ={\r
-    .minor = RGA2_MAJOR,\r
-    .name  = "rga",\r
-    .fops  = &rga2_fops,\r
-};\r
-\r
-static const struct of_device_id rockchip_rga_dt_ids[] = {\r
-       { .compatible = "rockchip,rga2", },\r
-       {},\r
-};\r
-\r
-static int rga2_drv_probe(struct platform_device *pdev)\r
-{\r
-       struct rga2_drvdata_t *data;\r
-    struct resource *res;\r
-       int ret = 0;\r
-       struct device_node *np = pdev->dev.of_node;\r
-\r
-       mutex_init(&rga2_service.lock);\r
-       mutex_init(&rga2_service.mutex);\r
-       atomic_set(&rga2_service.total_running, 0);\r
-       atomic_set(&rga2_service.src_format_swt, 0);\r
-       rga2_service.last_prc_src_format = 1; /* default is yuv first*/\r
-       rga2_service.enable = false;\r
-\r
-    rga_ioctl_kernel_p = rga2_ioctl_kernel;\r
-\r
-       data = devm_kzalloc(&pdev->dev, sizeof(struct rga2_drvdata_t), GFP_KERNEL);\r
-       if(NULL == data)\r
-       {\r
-               ERR("failed to allocate driver data.\n");\r
-               return -ENOMEM;\r
-       }\r
-\r
-       INIT_DELAYED_WORK(&data->power_off_work, rga2_power_off_work);\r
-       wake_lock_init(&data->wake_lock, WAKE_LOCK_SUSPEND, "rga");\r
-\r
-       //data->pd_rga2 = clk_get(NULL, "pd_rga");\r
-    data->rga2 = devm_clk_get(&pdev->dev, "clk_rga");\r
-    data->pd_rga2 = devm_clk_get(&pdev->dev, "pd_rga");\r
-       data->aclk_rga2 = devm_clk_get(&pdev->dev, "aclk_rga");\r
-    data->hclk_rga2 = devm_clk_get(&pdev->dev, "hclk_rga");\r
-\r
-       /* map the registers */\r
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);\r
-       data->rga_base = devm_ioremap_resource(&pdev->dev, res);\r
-       if (!data->rga_base) {\r
-               ERR("rga ioremap failed\n");\r
-               ret = -ENOENT;\r
-               goto err_ioremap;\r
-       }\r
-\r
-       /* get the IRQ */\r
-       data->irq = platform_get_irq(pdev, 0);\r
-       if (data->irq <= 0) {\r
-               ERR("failed to get rga irq resource (%d).\n", data->irq);\r
-               ret = data->irq;\r
-               goto err_irq;\r
-       }\r
-\r
-       /* request the IRQ */\r
-       ret = devm_request_threaded_irq(&pdev->dev, data->irq, rga2_irq, rga2_irq_thread, 0, "rga", pdev);\r
-       if (ret)\r
-       {\r
-               ERR("rga request_irq failed (%d).\n", ret);\r
-               goto err_irq;\r
-       }\r
-\r
-       platform_set_drvdata(pdev, data);\r
-       rga2_drvdata = data;\r
-       of_property_read_u32(np, "dev_mode", &rga2_service.dev_mode);\r
-\r
-    #if defined(CONFIG_ION_ROCKCHIP)\r
-       data->ion_client = rockchip_ion_client_create("rga");\r
-       if (IS_ERR(data->ion_client)) {\r
-               dev_err(&pdev->dev, "failed to create ion client for rga");\r
-               return PTR_ERR(data->ion_client);\r
-       } else {\r
-               dev_info(&pdev->dev, "rga ion client create success!\n");\r
-       }\r
-    #endif\r
-\r
-       ret = misc_register(&rga2_dev);\r
-       if(ret)\r
-       {\r
-               ERR("cannot register miscdev (%d)\n", ret);\r
-               goto err_misc_register;\r
-       }\r
-\r
-       pr_info("Driver loaded succesfully\n");\r
-\r
-       return 0;\r
-\r
-err_misc_register:\r
-       free_irq(data->irq, pdev);\r
-err_irq:\r
-       iounmap(data->rga_base);\r
-err_ioremap:\r
-       wake_lock_destroy(&data->wake_lock);\r
-       //kfree(data);\r
-\r
-       return ret;\r
-}\r
-\r
-static int rga2_drv_remove(struct platform_device *pdev)\r
-{\r
-       struct rga2_drvdata_t *data = platform_get_drvdata(pdev);\r
-       DBG("%s [%d]\n",__FUNCTION__,__LINE__);\r
-\r
-       wake_lock_destroy(&data->wake_lock);\r
-       misc_deregister(&(data->miscdev));\r
-       free_irq(data->irq, &data->miscdev);\r
-       iounmap((void __iomem *)(data->rga_base));\r
-\r
-       //clk_put(data->pd_rga2);\r
-       devm_clk_put(&pdev->dev, data->rga2);\r
-    devm_clk_put(&pdev->dev, data->pd_rga2);\r
-       devm_clk_put(&pdev->dev, data->aclk_rga2);\r
-       devm_clk_put(&pdev->dev, data->hclk_rga2);\r
-\r
-       kfree(data);\r
-       return 0;\r
-}\r
-\r
-static struct platform_driver rga2_driver = {\r
-       .probe          = rga2_drv_probe,\r
-       .remove         = rga2_drv_remove,\r
-       .driver         = {\r
-               .owner  = THIS_MODULE,\r
-               .name   = "rga2",\r
-               .of_match_table = of_match_ptr(rockchip_rga_dt_ids),\r
-       },\r
-};\r
-\r
-\r
-void rga2_test_0(void);\r
-\r
-static int __init rga2_init(void)\r
-{\r
-       int ret;\r
-    uint32_t *buf_p;\r
-\r
-    /* malloc pre scale mid buf mmu table */\r
-    buf_p = kmalloc(1024*256, GFP_KERNEL);\r
-    rga2_mmu_buf.buf_virtual = buf_p;\r
-    rga2_mmu_buf.buf = (uint32_t *)virt_to_phys((void *)((unsigned long)buf_p));\r
-    rga2_mmu_buf.front = 0;\r
-    rga2_mmu_buf.back = 64*1024;\r
-    rga2_mmu_buf.size = 64*1024;\r
-\r
-    rga2_mmu_buf.pages = kmalloc(32768 * sizeof(struct page *), GFP_KERNEL);\r
-\r
-       if ((ret = platform_driver_register(&rga2_driver)) != 0)\r
-       {\r
-        printk(KERN_ERR "Platform device register failed (%d).\n", ret);\r
-                       return ret;\r
-       }\r
-\r
-    {\r
-        rga2_session_global.pid = 0x0000ffff;\r
-        INIT_LIST_HEAD(&rga2_session_global.waiting);\r
-        INIT_LIST_HEAD(&rga2_session_global.running);\r
-        INIT_LIST_HEAD(&rga2_session_global.list_session);\r
-\r
-        INIT_LIST_HEAD(&rga2_service.waiting);\r
-           INIT_LIST_HEAD(&rga2_service.running);\r
-           INIT_LIST_HEAD(&rga2_service.done);\r
-        INIT_LIST_HEAD(&rga2_service.session);\r
-        init_waitqueue_head(&rga2_session_global.wait);\r
-        //mutex_lock(&rga_service.lock);\r
-        list_add_tail(&rga2_session_global.list_session, &rga2_service.session);\r
-        //mutex_unlock(&rga_service.lock);\r
-        atomic_set(&rga2_session_global.task_running, 0);\r
-        atomic_set(&rga2_session_global.num_done, 0);\r
-    }\r
-\r
-    #if RGA2_TEST_CASE\r
-    rga2_test_0();\r
-    #endif\r
-\r
-       INFO("Module initialized.\n");\r
-\r
-       return 0;\r
-}\r
-\r
-static void __exit rga2_exit(void)\r
-{\r
-    rga2_power_off();\r
-\r
-    if (rga2_mmu_buf.buf_virtual)\r
-        kfree(rga2_mmu_buf.buf_virtual);\r
-\r
-       platform_driver_unregister(&rga2_driver);\r
-}\r
-\r
-\r
-#if RGA2_TEST_CASE\r
-\r
-void rga2_test_0(void)\r
-{\r
-    struct rga2_req req;\r
-    rga2_session session;\r
-    unsigned int *src, *dst;\r
-    uint32_t i, j;\r
-    uint8_t *p;\r
-    uint8_t t;\r
-    uint32_t *dst0, *dst1, *dst2;\r
-\r
-    struct fb_info *fb;\r
-\r
-    session.pid        = current->pid;\r
-       INIT_LIST_HEAD(&session.waiting);\r
-       INIT_LIST_HEAD(&session.running);\r
-       INIT_LIST_HEAD(&session.list_session);\r
-       init_waitqueue_head(&session.wait);\r
-       /* no need to protect */\r
-       list_add_tail(&session.list_session, &rga2_service.session);\r
-       atomic_set(&session.task_running, 0);\r
-    atomic_set(&session.num_done, 0);\r
-       //file->private_data = (void *)session;\r
-\r
-    //fb = rk_get_fb(0);\r
-\r
-    memset(&req, 0, sizeof(struct rga2_req));\r
-    src = kmalloc(800*480*4, GFP_KERNEL);\r
-    dst = kmalloc(800*480*4, GFP_KERNEL);\r
-\r
-    printk("\n********************************\n");\r
-    printk("************ RGA2_TEST ************\n");\r
-    printk("********************************\n\n");\r
-\r
-    memset(src, 0x80, 800*480*4);\r
-    memset(dst, 0x0, 800*480*4);\r
-\r
-    //dmac_flush_range(&src, &src[800*480*4]);\r
-    //outer_flush_range(virt_to_phys(&src),virt_to_phys(&src[800*480*4]));\r
-\r
-\r
-    #if 0\r
-    memset(src_buf, 0x80, 800*480*4);\r
-    memset(dst_buf, 0xcc, 800*480*4);\r
-\r
-    dmac_flush_range(&dst_buf[0], &dst_buf[800*480]);\r
-    outer_flush_range(virt_to_phys(&dst_buf[0]),virt_to_phys(&dst_buf[800*480]));\r
-    #endif\r
-\r
-    dst0 = &dst;\r
-\r
-    i = j = 0;\r
-\r
-\r
-\r
-    #if 0\r
-    req.pat.act_w = 16;\r
-    req.pat.act_h = 16;\r
-    req.pat.vir_w = 16;\r
-    req.pat.vir_h = 16;\r
-    req.pat.yrgb_addr = virt_to_phys(src);\r
-    req.render_mode = 0;\r
-    rga2_blit_sync(&session, &req);\r
-    #endif\r
-\r
-    req.src.act_w  = 320;\r
-    req.src.act_h = 240;\r
-\r
-    req.src.vir_w  = 320;\r
-    req.src.vir_h = 240;\r
-    req.src.yrgb_addr = 0;//(uint32_t)virt_to_phys(src);\r
-    req.src.uv_addr = (unsigned long)virt_to_phys(src);\r
-    req.src.v_addr = 0;\r
-    req.src.format = RGA2_FORMAT_RGBA_8888;\r
-\r
-    req.dst.act_w  = 320;\r
-    req.dst.act_h = 240;\r
-    req.dst.x_offset = 0;\r
-    req.dst.y_offset = 0;\r
-\r
-    req.dst.vir_w = 320;\r
-    req.dst.vir_h = 240;\r
-\r
-    req.dst.yrgb_addr = 0;//((uint32_t)virt_to_phys(dst));\r
-    req.dst.uv_addr = (unsigned long)virt_to_phys(dst);\r
-    req.dst.format = RGA2_FORMAT_RGBA_8888;\r
-\r
-    //dst = dst0;\r
-\r
-    //req.render_mode = color_fill_mode;\r
-    //req.fg_color = 0x80ffffff;\r
-\r
-    req.rotate_mode = 0;\r
-    req.scale_bicu_mode = 2;\r
-\r
-    //req.alpha_rop_flag = 0;\r
-    //req.alpha_rop_mode = 0x19;\r
-    //req.PD_mode = 3;\r
-\r
-    //req.mmu_info.mmu_flag = 0x21;\r
-    //req.mmu_info.mmu_en = 1;\r
-\r
-    //printk("src = %.8x\n", req.src.yrgb_addr);\r
-    //printk("src = %.8x\n", req.src.uv_addr);\r
-    //printk("dst = %.8x\n", req.dst.yrgb_addr);\r
-\r
-    rga2_blit_sync(&session, &req);\r
-\r
-    for(j=0; j<100; j++) {\r
-        printk("%.8x\n", dst[j]);\r
-    }\r
-\r
-    if(src)\r
-        kfree(src);\r
-    if(dst)\r
-        kfree(dst);\r
-}\r
-\r
-#endif\r
-fs_initcall(rga2_init);\r
-module_exit(rga2_exit);\r
-\r
-/* Module information */\r
-MODULE_AUTHOR("zsq@rock-chips.com");\r
-MODULE_DESCRIPTION("Driver for rga device");\r
-MODULE_LICENSE("GPL");\r
+/*
+ * Copyright (C) 2012 ROCKCHIP, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "rga: " fmt
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <asm/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/syscalls.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <asm/cacheflush.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include <linux/wakelock.h>
+#include <linux/scatterlist.h>
+#include <linux/rockchip_ion.h>
+#include <linux/version.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-buf.h>
+
+#include "rga2.h"
+#include "rga2_reg_info.h"
+#include "rga2_mmu_info.h"
+#include "RGA2_API.h"
+#include "rga2_rop.h"
+
+#if defined(CONFIG_RK_IOMMU) && defined(CONFIG_ION_ROCKCHIP)
+#define CONFIG_RGA_IOMMU
+#endif
+
+#define RGA2_TEST_FLUSH_TIME 0
+#define RGA2_INFO_BUS_ERROR 1
+#define RGA2_POWER_OFF_DELAY   4*HZ /* 4s */
+#define RGA2_TIMEOUT_DELAY     (HZ / 10) /* 100ms */
+#define RGA2_MAJOR             255
+#define RGA2_RESET_TIMEOUT     1000
+
+/* Driver information */
+#define DRIVER_DESC            "RGA2 Device Driver"
+#define DRIVER_NAME            "rga2"
+#define RGA2_VERSION   "2.000"
+
+ktime_t rga2_start;
+ktime_t rga2_end;
+int rga2_flag;
+int first_RGA2_proc;
+
+rga2_session rga2_session_global;
+long (*rga_ioctl_kernel_p)(struct rga_req *);
+
+struct rga2_drvdata_t {
+       struct miscdevice miscdev;
+       struct device *dev;
+       void *rga_base;
+       int irq;
+
+       struct delayed_work power_off_work;
+       struct wake_lock wake_lock;
+       void (*rga_irq_callback)(int rga_retval);
+
+       struct clk *aclk_rga2;
+       struct clk *hclk_rga2;
+       struct clk *rga2;
+
+       struct ion_client * ion_client;
+       char version[16];
+};
+
+struct rga2_drvdata_t *rga2_drvdata;
+struct rga2_service_info rga2_service;
+struct rga2_mmu_buf_t rga2_mmu_buf;
+
+static int rga2_blit_async(rga2_session *session, struct rga2_req *req);
+static void rga2_del_running_list(void);
+static void rga2_del_running_list_timeout(void);
+static void rga2_try_set_reg(void);
+
+
+/* Logging */
+#define RGA_DEBUG 0
+#if RGA_DEBUG
+#define DBG(format, args...) printk(KERN_DEBUG "%s: " format, DRIVER_NAME, ## args)
+#define ERR(format, args...) printk(KERN_ERR "%s: " format, DRIVER_NAME, ## args)
+#define WARNING(format, args...) printk(KERN_WARN "%s: " format, DRIVER_NAME, ## args)
+#define INFO(format, args...) printk(KERN_INFO "%s: " format, DRIVER_NAME, ## args)
+#else
+#define DBG(format, args...)
+#define ERR(format, args...)
+#define WARNING(format, args...)
+#define INFO(format, args...)
+#endif
+
+#if RGA2_TEST_MSG
+static void print_info(struct rga2_req *req)
+{
+       printk("render_mode=%d bitblt_mode=%d rotate_mode=%.8x\n",
+               req->render_mode, req->bitblt_mode, req->rotate_mode);
+	printk("src : y=%lx uv=%lx v=%lx format=%d aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d \n",
+               req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr, req->src.format,
+               req->src.act_w, req->src.act_h, req->src.vir_w, req->src.vir_h,
+               req->src.x_offset, req->src.y_offset);
+       printk("dst : y=%lx uv=%lx v=%lx format=%d aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d \n",
+               req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr, req->dst.format,
+               req->dst.act_w, req->dst.act_h, req->dst.vir_w, req->dst.vir_h,
+               req->dst.x_offset, req->dst.y_offset);
+       printk("mmu : src=%.2x src1=%.2x dst=%.2x els=%.2x\n",
+               req->mmu_info.src0_mmu_flag, req->mmu_info.src1_mmu_flag,
+               req->mmu_info.dst_mmu_flag,  req->mmu_info.els_mmu_flag);
+       printk("alpha : flag %.8x mode0=%.8x mode1=%.8x\n",
+               req->alpha_rop_flag, req->alpha_mode_0, req->alpha_mode_1);
+}
+#endif
+
+static inline void rga2_write(u32 b, u32 r)
+{
+       *((volatile unsigned int *)(rga2_drvdata->rga_base + r)) = b;
+}
+
+static inline u32 rga2_read(u32 r)
+{
+       return *((volatile unsigned int *)(rga2_drvdata->rga_base + r));
+}
+
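+/* Read the hardware version register (with clocks briefly enabled) and cache it as "major.minor". */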
+static inline int rga2_init_version(void)
+{
+       struct rga2_drvdata_t *rga = rga2_drvdata;
+       u32 major_version, minor_version;
+       u32 reg_version;
+
+       if (!rga) {
+               pr_err("rga2_drvdata is null\n");
+               return -EINVAL;
+       }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+       pm_runtime_get_sync(rga->dev);
+#endif
+
+       clk_prepare_enable(rga->aclk_rga2);
+       clk_prepare_enable(rga->hclk_rga2);
+
+       reg_version = rga2_read(0x028);
+
+       clk_disable_unprepare(rga->aclk_rga2);
+       clk_disable_unprepare(rga->hclk_rga2);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+       pm_runtime_put(rga->dev);
+#endif
+
+       major_version = (reg_version & RGA2_MAJOR_VERSION_MASK) >> 24;
+       minor_version = (reg_version & RGA2_MINOR_VERSION_MASK) >> 20;
+
+       snprintf(rga->version, sizeof(rga->version), "%u.%02u",
+                major_version, minor_version);
+
+       return 0;
+}
+
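+/*
+ * Request a software reset of the RGA2 engine: assert the reset request
+ * bits in RGA2_SYS_CTRL, then poll bit 0 of the same register until the
+ * hardware reports the reset as complete, for at most RGA2_RESET_TIMEOUT
+ * iterations of udelay(1).
+ */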
+static void rga2_soft_reset(void)
+{
+       u32 i;
+       u32 reg;
+
+       rga2_write((1 << 3) | (1 << 4) | (1 << 6), RGA2_SYS_CTRL);
+
+       for (i = 0; i < RGA2_RESET_TIMEOUT; i++) {
+               reg = rga2_read(RGA2_SYS_CTRL) & 1;
+
+               if (reg == 0)
+                       break;
+
+               udelay(1);
+       }
+
+       if (i == RGA2_RESET_TIMEOUT)
+               ERR("soft reset timeout.\n");
+}
+
+static void rga2_dump(void)
+{
+       int running;
+       struct rga2_reg *reg, *reg_tmp;
+       rga2_session *session, *session_tmp;
+
+       running = atomic_read(&rga2_service.total_running);
+       printk("rga total_running %d\n", running);
+       list_for_each_entry_safe(session, session_tmp, &rga2_service.session,
+               list_session)
+       {
+               printk("session pid %d:\n", session->pid);
+               running = atomic_read(&session->task_running);
+               printk("task_running %d\n", running);
+               list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link)
+               {
+                       printk("waiting register set 0x%lx\n", (unsigned long)reg);
+               }
+               list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link)
+               {
+                       printk("running register set 0x%lx\n", (unsigned long)reg);
+               }
+       }
+}
+
+static inline void rga2_queue_power_off_work(void)
+{
+       queue_delayed_work(system_wq, &rga2_drvdata->power_off_work,
+               RGA2_POWER_OFF_DELAY);
+}
+
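+/*
+ * Power handling: rga2_power_on() re-arms the delayed power-off work at
+ * most once per second and only enables the clocks and takes the wake
+ * lock when the service is not already enabled; rga2_power_off() is the
+ * symmetric path, run directly or from the delayed work below.
+ */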
+/* Caller must hold rga_service.lock */
+static void rga2_power_on(void)
+{
+       static ktime_t last;
+       ktime_t now = ktime_get();
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+       pm_runtime_get_sync(rga2_drvdata->dev);
+#endif
+
+       if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) {
+               cancel_delayed_work_sync(&rga2_drvdata->power_off_work);
+               rga2_queue_power_off_work();
+               last = now;
+       }
+
+       if (rga2_service.enable)
+               return;
+
+       clk_prepare_enable(rga2_drvdata->rga2);
+       clk_prepare_enable(rga2_drvdata->aclk_rga2);
+       clk_prepare_enable(rga2_drvdata->hclk_rga2);
+       wake_lock(&rga2_drvdata->wake_lock);
+       rga2_service.enable = true;
+}
+
+/* Caller must hold rga_service.lock */
+static void rga2_power_off(void)
+{
+       int total_running;
+
+       if (!rga2_service.enable)
+               return;
+
+       total_running = atomic_read(&rga2_service.total_running);
+       if (total_running) {
+               pr_err("power off when %d task running!!\n", total_running);
+               mdelay(50);
+               pr_err("delay 50 ms for running task\n");
+               rga2_dump();
+       }
+
+       clk_disable_unprepare(rga2_drvdata->rga2);
+       clk_disable_unprepare(rga2_drvdata->aclk_rga2);
+       clk_disable_unprepare(rga2_drvdata->hclk_rga2);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+       pm_runtime_put(rga2_drvdata->dev);
+#endif
+
+       wake_unlock(&rga2_drvdata->wake_lock);
+       first_RGA2_proc = 0;
+       rga2_service.enable = false;
+}
+
+static void rga2_power_off_work(struct work_struct *work)
+{
+       if (mutex_trylock(&rga2_service.lock)) {
+               rga2_power_off();
+               mutex_unlock(&rga2_service.lock);
+       } else {
+               /* Come back later if the device is busy... */
+               rga2_queue_power_off_work();
+       }
+}
+
+static int rga2_flush(rga2_session *session, unsigned long arg)
+{
+       int ret = 0;
+       int ret_timeout;
+
+#if RGA2_TEST_FLUSH_TIME
+       ktime_t start;
+       ktime_t end;
+
+       start = ktime_get();
+#endif
+
+       ret_timeout = wait_event_timeout(session->wait, atomic_read(&session->done), RGA2_TIMEOUT_DELAY);
+
+       if (unlikely(ret_timeout < 0)) {
+               //pr_err("flush pid %d wait task ret %d\n", session->pid, ret);
+               mutex_lock(&rga2_service.lock);
+               rga2_del_running_list();
+               mutex_unlock(&rga2_service.lock);
+               ret = ret_timeout;
+       } else if (0 == ret_timeout) {
+               //pr_err("flush pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));
+               //printk("bus  = %.8x\n", rga_read(RGA_INT));
+               mutex_lock(&rga2_service.lock);
+               rga2_del_running_list_timeout();
+               rga2_try_set_reg();
+               mutex_unlock(&rga2_service.lock);
+               ret = -ETIMEDOUT;
+       }
+
+#if RGA2_TEST_FLUSH_TIME
+       end = ktime_get();
+       end = ktime_sub(end, start);
+       printk("one flush wait time %d\n", (int)ktime_to_us(end));
+#endif
+
+       return ret;
+}
+
+
+static int rga2_get_result(rga2_session *session, unsigned long arg)
+{
+       int ret = 0;
+       int num_done;
+
+       num_done = atomic_read(&session->num_done);
+       if (unlikely(copy_to_user((void __user *)arg, &num_done, sizeof(int)))) {
+               printk("copy_to_user failed\n");
+               ret = -EFAULT;
+       }
+
+       return ret;
+}
+
+
+static int rga2_check_param(const struct rga2_req *req)
+{
+       if (req->render_mode != color_fill_mode) {
+               if (unlikely((req->src.act_w <= 0) || (req->src.act_w > 8191) || (req->src.act_h <= 0) || (req->src.act_h > 8191))) {
+                       printk("invalid source resolution act_w = %d, act_h = %d\n", req->src.act_w, req->src.act_h);
+                       return -EINVAL;
+               }
+
+               if (unlikely((req->src.vir_w <= 0) || (req->src.vir_w > 8191) || (req->src.vir_h <= 0) || (req->src.vir_h > 8191))) {
+                       printk("invalid source resolution vir_w = %d, vir_h = %d\n", req->src.vir_w, req->src.vir_h);
+                       return -EINVAL;
+               }
+       }
+
+       /* check dst width and height */
+       if (unlikely((req->dst.act_w <= 0) || (req->dst.act_w > 4096) || (req->dst.act_h <= 0) || (req->dst.act_h > 4096))) {
+               printk("invalid destination resolution act_w = %d, act_h = %d\n", req->dst.act_w, req->dst.act_h);
+               return -EINVAL;
+       }
+
+       if (unlikely((req->dst.vir_w <= 0) || (req->dst.vir_w > 4096) || (req->dst.vir_h <= 0) || (req->dst.vir_h > 4096))) {
+               printk("invalid destination resolution vir_w = %d, vir_h = %d\n", req->dst.vir_w, req->dst.vir_h);
+               return -EINVAL;
+       }
+
+       /* check src_vir_w */
+       if (unlikely(req->src.vir_w < req->src.act_w)) {
+               printk("invalid src_vir_w act_w = %d, vir_w = %d\n", req->src.act_w, req->src.vir_w);
+               return -EINVAL;
+       }
+
+       /* check dst_vir_w */
+       if (unlikely(req->dst.vir_w < req->dst.act_w)) {
+               if (req->rotate_mode != 1) {
+                       printk("invalid dst_vir_w act_w = %d, vir_w = %d\n", req->dst.act_w, req->dst.vir_w);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static void rga2_copy_reg(struct rga2_reg *reg, uint32_t offset)
+{
+       uint32_t i;
+       uint32_t *cmd_buf;
+       uint32_t *reg_p;
+
+       if (atomic_read(&reg->session->task_running) != 0)
+               printk(KERN_ERR "task_running is not zero\n");
+
+       atomic_add(1, &rga2_service.cmd_num);
+       atomic_add(1, &reg->session->task_running);
+
+       cmd_buf = (uint32_t *)rga2_service.cmd_buff + offset * 32;
+       reg_p = (uint32_t *)reg->cmd_reg;
+
+       for (i = 0; i < 32; i++)
+               cmd_buf[i] = reg_p[i];
+}
+
+
+static struct rga2_reg *rga2_reg_init(rga2_session *session, struct rga2_req *req)
+{
+       int32_t ret;
+       struct rga2_reg *reg = kzalloc(sizeof(struct rga2_reg), GFP_KERNEL);
+
+       if (NULL == reg) {
+               pr_err("kzalloc fail in rga2_reg_init\n");
+               return NULL;
+       }
+
+       reg->session = session;
+       INIT_LIST_HEAD(&reg->session_link);
+       INIT_LIST_HEAD(&reg->status_link);
+
+       reg->MMU_base = NULL;
+
+       if ((req->mmu_info.src0_mmu_flag & 1) || (req->mmu_info.src1_mmu_flag & 1)
+           || (req->mmu_info.dst_mmu_flag & 1) || (req->mmu_info.els_mmu_flag & 1)) {
+               ret = rga2_set_mmu_info(reg, req);
+               if (ret < 0) {
+                       printk("%s, [%d] set mmu info error\n", __FUNCTION__, __LINE__);
+                       kfree(reg);
+                       return NULL;
+               }
+       }
+
+       if (RGA2_gen_reg_info((uint8_t *)reg->cmd_reg, req) == -1) {
+               printk("gen reg info error\n");
+               kfree(reg);
+               return NULL;
+       }
+
+       reg->sg_src0 = req->sg_src0;
+       reg->sg_dst = req->sg_dst;
+       reg->sg_src1 = req->sg_src1;
+       reg->attach_src0 = req->attach_src0;
+       reg->attach_dst = req->attach_dst;
+       reg->attach_src1 = req->attach_src1;
+
+       mutex_lock(&rga2_service.lock);
+       list_add_tail(&reg->status_link, &rga2_service.waiting);
+       list_add_tail(&reg->session_link, &session->waiting);
+       mutex_unlock(&rga2_service.lock);
+
+       return reg;
+}
+
+
+/* Caller must hold rga_service.lock */
+static void rga2_reg_deinit(struct rga2_reg *reg)
+{
+       list_del_init(&reg->session_link);
+       list_del_init(&reg->status_link);
+       kfree(reg);
+}
+
+/* Caller must hold rga_service.lock */
+static void rga2_reg_from_wait_to_run(struct rga2_reg *reg)
+{
+       list_del_init(&reg->status_link);
+       list_add_tail(&reg->status_link, &rga2_service.running);
+
+       list_del_init(&reg->session_link);
+       list_add_tail(&reg->session_link, &reg->session->running);
+}
+
+/* Caller must hold rga_service.lock */
+static void rga2_service_session_clear(rga2_session *session)
+{
+       struct rga2_reg *reg, *n;
+
+       list_for_each_entry_safe(reg, n, &session->waiting, session_link)
+       {
+               rga2_reg_deinit(reg);
+       }
+
+       list_for_each_entry_safe(reg, n, &session->running, session_link)
+       {
+               rga2_reg_deinit(reg);
+       }
+}
+
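+/*
+ * If the hardware is idle (nothing on the running list) and work is
+ * queued, pick the first waiting register set, power the block on, copy
+ * its 32-word command buffer, flush it to memory for the engine's DMA,
+ * program RGA2_CMD_BASE, unmask the "all commands finished" interrupts
+ * and start the engine in master mode.
+ */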
+/* Caller must hold rga_service.lock */
+static void rga2_try_set_reg(void)
+{
+       struct rga2_reg *reg;
+
+       if (list_empty(&rga2_service.running))
+       {
+               if (!list_empty(&rga2_service.waiting))
+               {
+                       /* RGA is idle */
+                       reg = list_entry(rga2_service.waiting.next, struct rga2_reg, status_link);
+
+                       rga2_power_on();
+                       udelay(1);
+
+                       rga2_copy_reg(reg, 0);
+                       rga2_reg_from_wait_to_run(reg);
+
+#ifdef CONFIG_ARM
+                       dmac_flush_range(&rga2_service.cmd_buff[0], &rga2_service.cmd_buff[32]);
+                       outer_flush_range(virt_to_phys(&rga2_service.cmd_buff[0]),virt_to_phys(&rga2_service.cmd_buff[32]));
+#elif defined(CONFIG_ARM64)
+                       __dma_flush_range(&rga2_service.cmd_buff[0], &rga2_service.cmd_buff[32]);
+#endif
+
+                       //rga2_soft_reset();
+
+                       rga2_write(0x0, RGA2_SYS_CTRL);
+
+                       /* CMD buff */
+                       rga2_write(virt_to_phys(rga2_service.cmd_buff), RGA2_CMD_BASE);
+
+#if RGA2_TEST
+                       if(rga2_flag) {
+                               int32_t i, *p;
+                               p = rga2_service.cmd_buff;
+                               printk("CMD_REG\n");
+                               for (i=0; i<8; i++)
+                                       printk("%.8x %.8x %.8x %.8x\n", p[0 + i*4], p[1+i*4], p[2 + i*4], p[3 + i*4]);
+                       }
+#endif
+
+                       /* master mode */
+                       rga2_write((0x1<<1)|(0x1<<2)|(0x1<<5)|(0x1<<6), RGA2_SYS_CTRL);
+
+                       /* All CMD finish int */
+                       rga2_write(rga2_read(RGA2_INT)|(0x1<<10)|(0x1<<9)|(0x1<<8), RGA2_INT);
+
+#if RGA2_TEST_TIME
+                       rga2_start = ktime_get();
+#endif
+
+                       /* Start proc */
+                       atomic_set(&reg->session->done, 0);
+                       rga2_write(0x1, RGA2_CMD_CTRL);
+#if RGA2_TEST
+                       if(rga2_flag)
+                       {
+                               uint32_t i;
+                               printk("CMD_READ_BACK_REG\n");
+                               for (i=0; i<8; i++)
+                                       printk("%.8x %.8x %.8x %.8x\n", rga2_read(0x100 + i*16 + 0),
+                                                       rga2_read(0x100 + i*16 + 4), rga2_read(0x100 + i*16 + 8), rga2_read(0x100 + i*16 + 12));
+                       }
+#endif
+               }
+       }
+}
+
+static int rga2_put_dma_buf(struct rga2_req *req, struct rga2_reg *reg)
+{
+       struct dma_buf_attachment *attach = NULL;
+       struct sg_table *sgt = NULL;
+       struct dma_buf *dma_buf = NULL;
+
+       if (!req && !reg)
+               return -EINVAL;
+
+       attach = (!reg) ? req->attach_src0 : reg->attach_src0;
+       sgt = (!reg) ? req->sg_src0 : reg->sg_src0;
+       if (attach && sgt)
+               dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+       if (attach) {
+               dma_buf = attach->dmabuf;
+               dma_buf_detach(dma_buf, attach);
+               dma_buf_put(dma_buf);
+       }
+
+       attach = (!reg) ? req->attach_dst : reg->attach_dst;
+       sgt = (!reg) ? req->sg_dst : reg->sg_dst;
+       if (attach && sgt)
+               dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+       if (attach) {
+               dma_buf = attach->dmabuf;
+               dma_buf_detach(dma_buf, attach);
+               dma_buf_put(dma_buf);
+       }
+
+       attach = (!reg) ? req->attach_src1 : reg->attach_src1;
+       sgt = (!reg) ? req->sg_src1 : reg->sg_src1;
+       if (attach && sgt)
+               dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+       if (attach) {
+               dma_buf = attach->dmabuf;
+               dma_buf_detach(dma_buf, attach);
+               dma_buf_put(dma_buf);
+       }
+
+       return 0;
+}
+
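+/*
+ * Retire every register set on the running list: give its MMU table
+ * space back to the rga2_mmu_buf ring (wrapping "back" when it would run
+ * past twice the buffer size), drop the dma-buf references and wake the
+ * owning session once it has nothing left waiting. The *_timeout variant
+ * below additionally frees the MMU table and soft-resets the engine
+ * because the job never signalled completion.
+ */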
+static void rga2_del_running_list(void)
+{
+       struct rga2_mmu_buf_t *tbuf = &rga2_mmu_buf;
+       struct rga2_reg *reg;
+
+       while (!list_empty(&rga2_service.running)) {
+               reg = list_entry(rga2_service.running.next, struct rga2_reg,
+                                status_link);
+               if (reg->MMU_len && tbuf) {
+                       if (tbuf->back + reg->MMU_len > 2 * tbuf->size)
+                               tbuf->back = reg->MMU_len + tbuf->size;
+                       else
+                               tbuf->back += reg->MMU_len;
+               }
+
+               rga2_put_dma_buf(NULL, reg);
+
+               atomic_sub(1, &reg->session->task_running);
+               atomic_sub(1, &rga2_service.total_running);
+
+               if(list_empty(&reg->session->waiting))
+               {
+                       atomic_set(&reg->session->done, 1);
+                       wake_up(&reg->session->wait);
+               }
+
+               rga2_reg_deinit(reg);
+       }
+}
+
+static void rga2_del_running_list_timeout(void)
+{
+       struct rga2_mmu_buf_t *tbuf = &rga2_mmu_buf;
+       struct rga2_reg *reg;
+
+       while (!list_empty(&rga2_service.running)) {
+               reg = list_entry(rga2_service.running.next, struct rga2_reg,
+                                status_link);
+               kfree(reg->MMU_base);
+               if (reg->MMU_len && tbuf) {
+                       if (tbuf->back + reg->MMU_len > 2 * tbuf->size)
+                               tbuf->back = reg->MMU_len + tbuf->size;
+                       else
+                               tbuf->back += reg->MMU_len;
+               }
+
+               rga2_put_dma_buf(NULL, reg);
+
+               atomic_sub(1, &reg->session->task_running);
+               atomic_sub(1, &rga2_service.total_running);
+               rga2_soft_reset();
+               if (list_empty(&reg->session->waiting)) {
+                       atomic_set(&reg->session->done, 1);
+                       wake_up(&reg->session->wait);
+               }
+               rga2_reg_deinit(reg);
+       }
+       return;
+}
+
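+/*
+ * Resolve one image descriptor into something the hardware can use: for
+ * dma-buf fds, attach and map the buffer to obtain an sg table; for ion
+ * handles, take either the sg table (IOMMU path) or the physical
+ * address, then rewrite yrgb/uv/v to planar offsets derived from
+ * vir_w * vir_h.
+ */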
+static int rga2_get_img_info(rga_img_info_t *img,
+                            u8 mmu_flag,
+                            u8 buf_gem_type_dma,
+                            struct sg_table **psgt,
+                            struct dma_buf_attachment **pattach)
+{
+       struct dma_buf_attachment *attach = NULL;
+       struct ion_client *ion_client = NULL;
+       struct ion_handle *hdl = NULL;
+       struct device *rga_dev = NULL;
+       struct sg_table *sgt = NULL;
+       struct dma_buf *dma_buf = NULL;
+       u32 vir_w, vir_h;
+       ion_phys_addr_t phy_addr;
+       size_t len = 0;
+       int yrgb_addr = -1;
+       int ret = 0;
+
+       ion_client = rga2_drvdata->ion_client;
+       rga_dev = rga2_drvdata->dev;
+       yrgb_addr = (int)img->yrgb_addr;
+       vir_w = img->vir_w;
+       vir_h = img->vir_h;
+
+       if (yrgb_addr > 0) {
+               if (buf_gem_type_dma) {
+                       dma_buf = dma_buf_get(img->yrgb_addr);
+                       if (IS_ERR(dma_buf)) {
+                               ret = -EINVAL;
+                               pr_err("dma_buf_get fail fd[%d]\n", yrgb_addr);
+                               return ret;
+                       }
+
+                       attach = dma_buf_attach(dma_buf, rga_dev);
+                       if (IS_ERR(attach)) {
+                               dma_buf_put(dma_buf);
+                               ret = -EINVAL;
+                               pr_err("Failed to attach dma_buf\n");
+                               return ret;
+                       }
+
+                       *pattach = attach;
+                       sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+                       if (IS_ERR(sgt)) {
+                               ret = -EINVAL;
+                               pr_err("Failed to map src attachment\n");
+                               goto err_get_sg;
+                       }
+                       if (!mmu_flag) {
+                               ret = -EINVAL;
+                               pr_err("Fix it please enable iommu flag\n");
+                               goto err_get_sg;
+                       }
+               } else {
+                       hdl = ion_import_dma_buf(ion_client, img->yrgb_addr);
+                       if (IS_ERR(hdl)) {
+                               ret = -EINVAL;
+                               pr_err("RGA2 ERROR ion buf handle\n");
+                               return ret;
+                       }
+                       if (mmu_flag) {
+                               sgt = ion_sg_table(ion_client, hdl);
+                               if (IS_ERR(sgt)) {
+                                       ret = -EINVAL;
+                                       pr_err("Fail map src attachment\n");
+                                       goto err_get_sg;
+                               }
+                       }
+               }
+
+               if (mmu_flag) {
+                       *psgt = sgt;
+                       img->yrgb_addr = img->uv_addr;
+                       img->uv_addr = img->yrgb_addr + (vir_w * vir_h);
+                       img->v_addr = img->uv_addr + (vir_w * vir_h) / 4;
+               } else {
+                       ion_phys(ion_client, hdl, &phy_addr, &len);
+                       img->yrgb_addr = phy_addr;
+                       img->uv_addr = img->yrgb_addr + (vir_w * vir_h);
+                       img->v_addr = img->uv_addr + (vir_w * vir_h) / 4;
+               }
+       } else {
+               img->yrgb_addr = img->uv_addr;
+               img->uv_addr = img->yrgb_addr + (vir_w * vir_h);
+               img->v_addr = img->uv_addr + (vir_w * vir_h) / 4;
+       }
+
+       if (hdl)
+               ion_free(ion_client, hdl);
+
+       return ret;
+
+err_get_sg:
+       if (hdl)
+               ion_free(ion_client, hdl);
+       if (sgt && buf_gem_type_dma)
+               dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+       if (attach) {
+               dma_buf = attach->dmabuf;
+               dma_buf_detach(dma_buf, attach);
+               *pattach = NULL;
+               dma_buf_put(dma_buf);
+       }
+       return ret;
+}
+
+static int rga2_get_dma_buf(struct rga2_req *req)
+{
+       struct dma_buf *dma_buf = NULL;
+       u8 buf_gem_type_dma = 0;
+       u8 mmu_flag = 0;
+       int ret = 0;
+
+       buf_gem_type_dma = req->buf_type & RGA_BUF_GEM_TYPE_DMA;
+       req->sg_src0 = NULL;
+       req->sg_src1 = NULL;
+       req->sg_dst = NULL;
+       req->sg_els = NULL;
+       req->attach_src0 = NULL;
+       req->attach_dst = NULL;
+       req->attach_src1 = NULL;
+       mmu_flag = req->mmu_info.src0_mmu_flag;
+       ret = rga2_get_img_info(&req->src, mmu_flag, buf_gem_type_dma,
+                               &req->sg_src0, &req->attach_src0);
+       if (ret) {
+               pr_err("src:rga2_get_img_info fail\n");
+               goto err_src;
+       }
+
+       mmu_flag = req->mmu_info.dst_mmu_flag;
+       ret = rga2_get_img_info(&req->dst, mmu_flag, buf_gem_type_dma,
+                               &req->sg_dst, &req->attach_dst);
+       if (ret) {
+               pr_err("dst:rga2_get_img_info fail\n");
+               goto err_dst;
+       }
+
+       mmu_flag = req->mmu_info.src1_mmu_flag;
+       ret = rga2_get_img_info(&req->src1, mmu_flag, buf_gem_type_dma,
+                               &req->sg_src1, &req->attach_src1);
+       if (ret) {
+               pr_err("src1:rga2_get_img_info fail\n");
+               goto err_src1;
+       }
+
+       return ret;
+
+err_src1:
+       if (buf_gem_type_dma && req->sg_dst && req->attach_dst) {
+               dma_buf_unmap_attachment(req->attach_dst,
+                                        req->sg_dst, DMA_BIDIRECTIONAL);
+               dma_buf = req->attach_dst->dmabuf;
+               dma_buf_detach(dma_buf, req->attach_dst);
+               dma_buf_put(dma_buf);
+       }
+err_dst:
+       if (buf_gem_type_dma && req->sg_src0 && req->attach_src0) {
+               dma_buf_unmap_attachment(req->attach_src0,
+                                        req->sg_src0, DMA_BIDIRECTIONAL);
+               dma_buf = req->attach_src0->dmabuf;
+               dma_buf_detach(dma_buf, req->attach_src0);
+               dma_buf_put(dma_buf);
+       }
+err_src:
+
+       return ret;
+}
+
+static int rga2_blit(rga2_session *session, struct rga2_req *req)
+{
+       int ret = -1;
+       int num = 0;
+       struct rga2_reg *reg;
+
+       if (rga2_get_dma_buf(req)) {
+               pr_err("RGA2 : DMA buf copy error\n");
+               return -EFAULT;
+       }
+
+       /* check whether the request arguments are legal */
+       ret = rga2_check_param(req);
+       if (ret == -EINVAL) {
+               pr_err("req argument is invalid\n");
+               goto err_put_dma_buf;
+       }
+
+       reg = rga2_reg_init(session, req);
+       if (reg == NULL) {
+               pr_err("init reg fail\n");
+               goto err_put_dma_buf;
+       }
+
+       num = 1;
+       mutex_lock(&rga2_service.lock);
+       atomic_add(num, &rga2_service.total_running);
+       rga2_try_set_reg();
+       mutex_unlock(&rga2_service.lock);
+
+       return 0;
+
+err_put_dma_buf:
+       rga2_put_dma_buf(req, NULL);
+
+       return -EFAULT;
+}
+
+static int rga2_blit_async(rga2_session *session, struct rga2_req *req)
+{
+       int ret = -1;
+
+#if RGA2_TEST_MSG
+       if (1) {//req->src.format >= 0x10) {
+               print_info(req);
+               rga2_flag = 1;
+               printk("*** rga_blit_async proc ***\n");
+       }
+       else
+               rga2_flag = 0;
+#endif
+       atomic_set(&session->done, 0);
+       ret = rga2_blit(session, req);
+
+       return ret;
+}
+
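+/*
+ * Synchronous blit: submit the request and wait up to RGA2_TIMEOUT_DELAY
+ * for completion. On a timeout the running list is torn down and the
+ * original request (saved in req_bak) is resubmitted, up to 10 retries.
+ */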
+static int rga2_blit_sync(rga2_session *session, struct rga2_req *req)
+{
+       struct rga2_req req_bak;
+       int try = 10;
+       int ret = -1;
+       int ret_timeout = 0;
+
+       memcpy(&req_bak, req, sizeof(req_bak));
+retry:
+
+#if RGA2_TEST_MSG
+       if (1) {//req->bitblt_mode == 0x2) {
+               print_info(req);
+               rga2_flag = 1;
+               printk("*** rga2_blit_sync proc ***\n");
+       }
+       else
+               rga2_flag = 0;
+#endif
+
+       atomic_set(&session->done, 0);
+
+       ret = rga2_blit(session, req);
+       if(ret < 0)
+               return ret;
+
+       ret_timeout = wait_event_timeout(session->wait, atomic_read(&session->done), RGA2_TIMEOUT_DELAY);
+
+       if (unlikely(ret_timeout < 0)) {
+               //pr_err("sync pid %d wait task ret %d\n", session->pid, ret_timeout);
+               mutex_lock(&rga2_service.lock);
+               rga2_del_running_list();
+               mutex_unlock(&rga2_service.lock);
+               ret = ret_timeout;
+       } else if (0 == ret_timeout) {
+               //pr_err("sync pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));
+               mutex_lock(&rga2_service.lock);
+               rga2_del_running_list_timeout();
+               rga2_try_set_reg();
+               mutex_unlock(&rga2_service.lock);
+               ret = -ETIMEDOUT;
+       }
+
+#if RGA2_TEST_TIME
+       rga2_end = ktime_get();
+       rga2_end = ktime_sub(rga2_end, rga2_start);
+       printk("sync one cmd end time %d\n", (int)ktime_to_us(rga2_end));
+#endif
+       if (ret == -ETIMEDOUT && try--) {
+               memcpy(req, &req_bak, sizeof(req_bak));
+               goto retry;
+       }
+
+       return ret;
+}
+
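+/*
+ * ioctl dispatcher for both the legacy RGA and native RGA2 requests. For
+ * the first bitblt after the block was last powered off
+ * (first_RGA2_proc == 0) in dev_mode, a reduced warm-up copy (at most
+ * 320x240) is queued asynchronously before the real request whenever the
+ * blit also scales; this keeps the original driver behaviour and is
+ * presumably meant to prime the engine.
+ */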
+static long rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
+{
+       struct rga2_drvdata_t *rga = rga2_drvdata;
+       struct rga2_req req, req_first;
+       struct rga_req req_rga;
+       int ret = 0;
+       rga2_session *session;
+
+       if (!rga) {
+               pr_err("rga2_drvdata is null, rga2 is not init\n");
+               return -ENODEV;
+       }
+       memset(&req, 0x0, sizeof(req));
+
+       mutex_lock(&rga2_service.mutex);
+
+       session = (rga2_session *)file->private_data;
+
+       if (NULL == session)
+       {
+               printk("%s [%d] rga thread session is null\n",__FUNCTION__,__LINE__);
+               mutex_unlock(&rga2_service.mutex);
+               return -EINVAL;
+       }
+
+       switch (cmd)
+       {
+               case RGA_BLIT_SYNC:
+                       if (unlikely(copy_from_user(&req_rga, (struct rga_req*)arg, sizeof(struct rga_req))))
+                       {
+                               ERR("copy_from_user failed\n");
+                               ret = -EFAULT;
+                               break;
+                       }
+                       RGA_MSG_2_RGA2_MSG(&req_rga, &req);
+
+                       if (first_RGA2_proc == 0 && req.bitblt_mode == bitblt_mode && rga2_service.dev_mode == 1) {
+                               memcpy(&req_first, &req, sizeof(struct rga2_req));
+                               if ((req_first.src.act_w != req_first.dst.act_w)
+                                               || (req_first.src.act_h != req_first.dst.act_h)) {
+                                       req_first.src.act_w = MIN(320, MIN(req_first.src.act_w, req_first.dst.act_w));
+                                       req_first.src.act_h = MIN(240, MIN(req_first.src.act_h, req_first.dst.act_h));
+                                       req_first.dst.act_w = req_first.src.act_w;
+                                       req_first.dst.act_h = req_first.src.act_h;
+                                       ret = rga2_blit_async(session, &req_first);
+                               }
+                               ret = rga2_blit_sync(session, &req);
+                               first_RGA2_proc = 1;
+                       }
+                       else {
+                               ret = rga2_blit_sync(session, &req);
+                       }
+                       break;
+               case RGA_BLIT_ASYNC:
+                       if (unlikely(copy_from_user(&req_rga, (struct rga_req*)arg, sizeof(struct rga_req))))
+                       {
+                               ERR("copy_from_user failed\n");
+                               ret = -EFAULT;
+                               break;
+                       }
+
+                       RGA_MSG_2_RGA2_MSG(&req_rga, &req);
+
+                       if (first_RGA2_proc == 0 && req.bitblt_mode == bitblt_mode && rga2_service.dev_mode == 1) {
+                               memcpy(&req_first, &req, sizeof(struct rga2_req));
+                               if ((req_first.src.act_w != req_first.dst.act_w)
+                                               || (req_first.src.act_h != req_first.dst.act_h)) {
+                                       req_first.src.act_w = MIN(320, MIN(req_first.src.act_w, req_first.dst.act_w));
+                                       req_first.src.act_h = MIN(240, MIN(req_first.src.act_h, req_first.dst.act_h));
+                                       req_first.dst.act_w = req_first.src.act_w;
+                                       req_first.dst.act_h = req_first.src.act_h;
+                                       ret = rga2_blit_async(session, &req_first);
+                               }
+                               ret = rga2_blit_async(session, &req);
+                               first_RGA2_proc = 1;
+                       }
+                       else {
+                               ret = rga2_blit_async(session, &req);
+                       }
+                       break;
+               case RGA2_BLIT_SYNC:
+                       if (unlikely(copy_from_user(&req, (struct rga2_req*)arg, sizeof(struct rga2_req))))
+                       {
+                               ERR("copy_from_user failed\n");
+                               ret = -EFAULT;
+                               break;
+                       }
+                       ret = rga2_blit_sync(session, &req);
+                       break;
+               case RGA2_BLIT_ASYNC:
+                       if (unlikely(copy_from_user(&req, (struct rga2_req*)arg, sizeof(struct rga2_req))))
+                       {
+                               ERR("copy_from_user failed\n");
+                               ret = -EFAULT;
+                               break;
+                       }
+
+                       if((atomic_read(&rga2_service.total_running) > 16))
+                       {
+                               ret = rga2_blit_sync(session, &req);
+                       }
+                       else
+                       {
+                               ret = rga2_blit_async(session, &req);
+                       }
+                       break;
+               case RGA_FLUSH:
+               case RGA2_FLUSH:
+                       ret = rga2_flush(session, arg);
+                       break;
+               case RGA_GET_RESULT:
+               case RGA2_GET_RESULT:
+                       ret = rga2_get_result(session, arg);
+                       break;
+               case RGA_GET_VERSION:
+               case RGA2_GET_VERSION:
+                       ret = copy_to_user((void __user *)arg, rga->version, sizeof(rga->version));
+                       break;
+               default:
+                       ERR("unknown ioctl cmd!\n");
+                       ret = -EINVAL;
+                       break;
+       }
+
+       mutex_unlock(&rga2_service.mutex);
+
+       return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long compat_rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
+{
+       struct rga2_drvdata_t *rga = rga2_drvdata;
+       struct rga2_req req, req_first;
+       struct rga_req_32 req_rga;
+       int ret = 0;
+       rga2_session *session;
+
+       if (!rga) {
+               pr_err("rga2_drvdata is null, rga2 is not init\n");
+               return -ENODEV;
+       }
+       memset(&req, 0x0, sizeof(req));
+
+       mutex_lock(&rga2_service.mutex);
+
+       session = (rga2_session *)file->private_data;
+
+#if RGA2_TEST_MSG
+       printk("use compat_rga_ioctl\n");
+#endif
+
+       if (NULL == session) {
+               printk("%s [%d] rga thread session is null\n",__FUNCTION__,__LINE__);
+               mutex_unlock(&rga2_service.mutex);
+               return -EINVAL;
+       }
+
+       switch (cmd) {
+               case RGA_BLIT_SYNC:
+                       if (unlikely(copy_from_user(&req_rga, compat_ptr((compat_uptr_t)arg), sizeof(struct rga_req_32))))
+                       {
+                               ERR("copy_from_user failed\n");
+                               ret = -EFAULT;
+                               break;
+                       }
+
+                       RGA_MSG_2_RGA2_MSG_32(&req_rga, &req);
+
+                       if (first_RGA2_proc == 0 && req.bitblt_mode == bitblt_mode && rga2_service.dev_mode == 1) {
+                               memcpy(&req_first, &req, sizeof(struct rga2_req));
+                               if ((req_first.src.act_w != req_first.dst.act_w)
+                                               || (req_first.src.act_h != req_first.dst.act_h)) {
+                                       req_first.src.act_w = MIN(320, MIN(req_first.src.act_w, req_first.dst.act_w));
+                                       req_first.src.act_h = MIN(240, MIN(req_first.src.act_h, req_first.dst.act_h));
+                                       req_first.dst.act_w = req_first.src.act_w;
+                                       req_first.dst.act_h = req_first.src.act_h;
+                                       ret = rga2_blit_async(session, &req_first);
+                               }
+                               ret = rga2_blit_sync(session, &req);
+                               first_RGA2_proc = 1;
+                       }
+                       else {
+                               ret = rga2_blit_sync(session, &req);
+                       }
+                       break;
+               case RGA_BLIT_ASYNC:
+                       if (unlikely(copy_from_user(&req_rga, compat_ptr((compat_uptr_t)arg), sizeof(struct rga_req_32))))
+                       {
+                               ERR("copy_from_user failed\n");
+                               ret = -EFAULT;
+                               break;
+                       }
+                       RGA_MSG_2_RGA2_MSG_32(&req_rga, &req);
+
+                       if (first_RGA2_proc == 0 && req.bitblt_mode == bitblt_mode && rga2_service.dev_mode == 1) {
+                               memcpy(&req_first, &req, sizeof(struct rga2_req));
+                               if ((req_first.src.act_w != req_first.dst.act_w)
+                                               || (req_first.src.act_h != req_first.dst.act_h)) {
+                                       req_first.src.act_w = MIN(320, MIN(req_first.src.act_w, req_first.dst.act_w));
+                                       req_first.src.act_h = MIN(240, MIN(req_first.src.act_h, req_first.dst.act_h));
+                                       req_first.dst.act_w = req_first.src.act_w;
+                                       req_first.dst.act_h = req_first.src.act_h;
+                                       ret = rga2_blit_async(session, &req_first);
+                               }
+                               ret = rga2_blit_sync(session, &req);
+                               first_RGA2_proc = 1;
+                       }
+                       else {
+                               ret = rga2_blit_sync(session, &req);
+                       }
+
+                       //if((atomic_read(&rga2_service.total_running) > 8))
+                       //    ret = rga2_blit_sync(session, &req);
+                       //else
+                       //    ret = rga2_blit_async(session, &req);
+
+                       break;
+               case RGA2_BLIT_SYNC:
+                       if (unlikely(copy_from_user(&req, compat_ptr((compat_uptr_t)arg), sizeof(struct rga2_req))))
+                       {
+                               ERR("copy_from_user failed\n");
+                               ret = -EFAULT;
+                               break;
+                       }
+                       ret = rga2_blit_sync(session, &req);
+                       break;
+               case RGA2_BLIT_ASYNC:
+                       if (unlikely(copy_from_user(&req, compat_ptr((compat_uptr_t)arg), sizeof(struct rga2_req))))
+                       {
+                               ERR("copy_from_user failed\n");
+                               ret = -EFAULT;
+                               break;
+                       }
+
+                       if((atomic_read(&rga2_service.total_running) > 16))
+                               ret = rga2_blit_sync(session, &req);
+                       else
+                               ret = rga2_blit_async(session, &req);
+
+                       break;
+               case RGA_FLUSH:
+               case RGA2_FLUSH:
+                       ret = rga2_flush(session, arg);
+                       break;
+               case RGA_GET_RESULT:
+               case RGA2_GET_RESULT:
+                       ret = rga2_get_result(session, arg);
+                       break;
+               case RGA_GET_VERSION:
+               case RGA2_GET_VERSION:
+                       ret = copy_to_user((void __user *)arg, rga->version, sizeof(rga->version));
+                       break;
+               default:
+                       ERR("unknown ioctl cmd!\n");
+                       ret = -EINVAL;
+                       break;
+       }
+
+       mutex_unlock(&rga2_service.mutex);
+
+       return ret;
+}
+#endif
+
+
+long rga2_ioctl_kernel(struct rga_req *req_rga)
+{
+       int ret = 0;
+       rga2_session *session;
+       struct rga2_req req;
+
+       memset(&req, 0x0, sizeof(req));
+       mutex_lock(&rga2_service.mutex);
+       session = &rga2_session_global;
+
+       RGA_MSG_2_RGA2_MSG(req_rga, &req);
+       ret = rga2_blit_sync(session, &req);
+       mutex_unlock(&rga2_service.mutex);
+
+       return ret;
+}
+
+
+static int rga2_open(struct inode *inode, struct file *file)
+{
+       rga2_session *session = kzalloc(sizeof(rga2_session), GFP_KERNEL);
+
+       if (NULL == session) {
+               pr_err("unable to allocate memory for rga_session.");
+               return -ENOMEM;
+       }
+
+       session->pid = current->pid;
+       INIT_LIST_HEAD(&session->waiting);
+       INIT_LIST_HEAD(&session->running);
+       INIT_LIST_HEAD(&session->list_session);
+       init_waitqueue_head(&session->wait);
+       mutex_lock(&rga2_service.lock);
+       list_add_tail(&session->list_session, &rga2_service.session);
+       mutex_unlock(&rga2_service.lock);
+       atomic_set(&session->task_running, 0);
+       atomic_set(&session->num_done, 0);
+       file->private_data = (void *)session;
+
+       return nonseekable_open(inode, file);
+}
+
+static int rga2_release(struct inode *inode, struct file *file)
+{
+       int task_running;
+       rga2_session *session = (rga2_session *)file->private_data;
+
+       if (NULL == session)
+               return -EINVAL;
+
+       task_running = atomic_read(&session->task_running);
+       if (task_running)
+       {
+               pr_err("rga2_service session %d still has %d task running when closing\n", session->pid, task_running);
+               msleep(100);
+       }
+
+       wake_up(&session->wait);
+       mutex_lock(&rga2_service.lock);
+       list_del(&session->list_session);
+       rga2_service_session_clear(session);
+       kfree(session);
+       mutex_unlock(&rga2_service.lock);
+
+       return 0;
+}
+
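+/*
+ * Interrupt handling is split: the hard handler only clears the RGA2_INT
+ * status bits and defers to the threaded handler, which retires the
+ * finished job and kicks off the next waiting one under the service lock.
+ */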
+static irqreturn_t rga2_irq_thread(int irq, void *dev_id)
+{
+       mutex_lock(&rga2_service.lock);
+       if (rga2_service.enable) {
+               rga2_del_running_list();
+               rga2_try_set_reg();
+       }
+       mutex_unlock(&rga2_service.lock);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t rga2_irq(int irq,  void *dev_id)
+{
+       /*clear INT */
+       rga2_write(rga2_read(RGA2_INT) | (0x1<<4) | (0x1<<5) | (0x1<<6) | (0x1<<7), RGA2_INT);
+
+       return IRQ_WAKE_THREAD;
+}
+
+static const struct file_operations rga2_fops = {
+       .owner          = THIS_MODULE,
+       .open           = rga2_open,
+       .release        = rga2_release,
+       .unlocked_ioctl = rga_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = compat_rga_ioctl,
+#endif
+};
+
+static struct miscdevice rga2_dev = {
+       .minor = RGA2_MAJOR,
+       .name  = "rga",
+       .fops  = &rga2_fops,
+};
+
+static const struct of_device_id rockchip_rga_dt_ids[] = {
+       { .compatible = "rockchip,rga2", },
+       {},
+};
+
+static int rga2_drv_probe(struct platform_device *pdev)
+{
+       struct rga2_drvdata_t *data;
+       struct resource *res;
+       int ret = 0;
+       struct device_node *np = pdev->dev.of_node;
+
+       mutex_init(&rga2_service.lock);
+       mutex_init(&rga2_service.mutex);
+       atomic_set(&rga2_service.total_running, 0);
+       atomic_set(&rga2_service.src_format_swt, 0);
+       rga2_service.last_prc_src_format = 1; /* default is yuv first*/
+       rga2_service.enable = false;
+
+       rga_ioctl_kernel_p = rga2_ioctl_kernel;
+
+       data = devm_kzalloc(&pdev->dev, sizeof(struct rga2_drvdata_t), GFP_KERNEL);
+       if(NULL == data)
+       {
+               ERR("failed to allocate driver data.\n");
+               return -ENOMEM;
+       }
+
+       INIT_DELAYED_WORK(&data->power_off_work, rga2_power_off_work);
+       wake_lock_init(&data->wake_lock, WAKE_LOCK_SUSPEND, "rga");
+
+       data->rga2 = devm_clk_get(&pdev->dev, "clk_rga");
+       data->aclk_rga2 = devm_clk_get(&pdev->dev, "aclk_rga");
+       data->hclk_rga2 = devm_clk_get(&pdev->dev, "hclk_rga");
+
+       /* map the registers */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       data->rga_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(data->rga_base)) {
+               ERR("rga ioremap failed\n");
+               ret = PTR_ERR(data->rga_base);
+               goto err_ioremap;
+       }
+
+       /* get the IRQ */
+       data->irq = platform_get_irq(pdev, 0);
+       if (data->irq <= 0) {
+               ERR("failed to get rga irq resource (%d).\n", data->irq);
+               ret = data->irq ? data->irq : -ENXIO;
+               goto err_irq;
+       }
+
+       /* request the IRQ */
+       ret = devm_request_threaded_irq(&pdev->dev, data->irq, rga2_irq, rga2_irq_thread, 0, "rga", pdev);
+       if (ret)
+       {
+               ERR("rga request_irq failed (%d).\n", ret);
+               goto err_irq;
+       }
+
+       platform_set_drvdata(pdev, data);
+       data->dev = &pdev->dev;
+       rga2_drvdata = data;
+       of_property_read_u32(np, "dev_mode", &rga2_service.dev_mode);
+
+#if defined(CONFIG_ION_ROCKCHIP)
+       data->ion_client = rockchip_ion_client_create("rga");
+       if (IS_ERR(data->ion_client)) {
+               dev_err(&pdev->dev, "failed to create ion client for rga\n");
+               ret = PTR_ERR(data->ion_client);
+               goto err_ioremap;
+       } else {
+               dev_info(&pdev->dev, "rga ion client create success!\n");
+       }
+#endif
+
+       ret = misc_register(&rga2_dev);
+       if(ret)
+       {
+               ERR("cannot register miscdev (%d)\n", ret);
+               goto err_misc_register;
+       }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+       pm_runtime_enable(&pdev->dev);
+#endif
+       rga2_init_version();
+       pr_info("Driver loaded successfully ver:%s\n", rga2_drvdata->version);
+
+       return 0;
+
+err_misc_register:
+err_irq:
+err_ioremap:
+       /* the irq and register mapping are devm-managed, nothing to undo */
+       wake_lock_destroy(&data->wake_lock);
+
+       return ret;
+}
+
+static int rga2_drv_remove(struct platform_device *pdev)
+{
+       struct rga2_drvdata_t *data = platform_get_drvdata(pdev);
+
+       DBG("%s [%d]\n", __FUNCTION__, __LINE__);
+
+       wake_lock_destroy(&data->wake_lock);
+       /* rga2_dev is the miscdevice that was registered in probe */
+       misc_deregister(&rga2_dev);
+       /*
+        * irq, register mapping and drvdata are devm-managed:
+        * no manual free_irq/iounmap/kfree here
+        */
+
+       devm_clk_put(&pdev->dev, data->rga2);
+       devm_clk_put(&pdev->dev, data->aclk_rga2);
+       devm_clk_put(&pdev->dev, data->hclk_rga2);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+       pm_runtime_disable(&pdev->dev);
+#endif
+
+       return 0;
+}
+
+static struct platform_driver rga2_driver = {
+       .probe          = rga2_drv_probe,
+       .remove         = rga2_drv_remove,
+       .driver         = {
+               .owner  = THIS_MODULE,
+               .name   = "rga2",
+               .of_match_table = of_match_ptr(rockchip_rga_dt_ids),
+       },
+};
+
+
+void rga2_test_0(void);
+
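+/*
+ * Module init: pre-allocate the MMU translation table used for the
+ * pre-scale path (a 256 KiB area managed as a ring via front/back, plus
+ * a page-pointer array), register the platform driver and set up the
+ * in-kernel global session used by rga2_ioctl_kernel().
+ */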
+static int __init rga2_init(void)
+{
+       int ret;
+       uint32_t *buf_p;
+
+       /* malloc pre scale mid buf mmu table */
+       buf_p = kmalloc(1024*256, GFP_KERNEL);
+       if (!buf_p)
+               return -ENOMEM;
+
+       rga2_mmu_buf.buf_virtual = buf_p;
+       rga2_mmu_buf.buf = (uint32_t *)virt_to_phys((void *)((unsigned long)buf_p));
+       rga2_mmu_buf.front = 0;
+       rga2_mmu_buf.back = 64*1024;
+       rga2_mmu_buf.size = 64*1024;
+
+       rga2_mmu_buf.pages = kmalloc(32768 * sizeof(struct page *), GFP_KERNEL);
+       if (!rga2_mmu_buf.pages) {
+               kfree(buf_p);
+               return -ENOMEM;
+       }
+
+       ret = platform_driver_register(&rga2_driver);
+       if (ret != 0) {
+               printk(KERN_ERR "Platform device register failed (%d).\n", ret);
+               kfree(rga2_mmu_buf.pages);
+               kfree(buf_p);
+               return ret;
+       }
+
+       rga2_session_global.pid = 0x0000ffff;
+       INIT_LIST_HEAD(&rga2_session_global.waiting);
+       INIT_LIST_HEAD(&rga2_session_global.running);
+       INIT_LIST_HEAD(&rga2_session_global.list_session);
+
+       INIT_LIST_HEAD(&rga2_service.waiting);
+       INIT_LIST_HEAD(&rga2_service.running);
+       INIT_LIST_HEAD(&rga2_service.done);
+       INIT_LIST_HEAD(&rga2_service.session);
+       init_waitqueue_head(&rga2_session_global.wait);
+       //mutex_lock(&rga_service.lock);
+       list_add_tail(&rga2_session_global.list_session, &rga2_service.session);
+       //mutex_unlock(&rga_service.lock);
+       atomic_set(&rga2_session_global.task_running, 0);
+       atomic_set(&rga2_session_global.num_done, 0);
+
+#if RGA2_TEST_CASE
+       rga2_test_0();
+#endif
+
+       INFO("Module initialized.\n");
+
+       return 0;
+}
+
+static void __exit rga2_exit(void)
+{
+       rga2_power_off();
+
+       kfree(rga2_mmu_buf.buf_virtual);
+       kfree(rga2_mmu_buf.pages);
+
+       platform_driver_unregister(&rga2_driver);
+}
+
+
+#if RGA2_TEST_CASE
+
+void rga2_test_0(void)
+{
+       struct rga2_req req;
+       rga2_session session;
+       unsigned int *src, *dst;
+
+       session.pid     = current->pid;
+       INIT_LIST_HEAD(&session.waiting);
+       INIT_LIST_HEAD(&session.running);
+       INIT_LIST_HEAD(&session.list_session);
+       init_waitqueue_head(&session.wait);
+       /* no need to protect */
+       list_add_tail(&session.list_session, &rga2_service.session);
+       atomic_set(&session.task_running, 0);
+       atomic_set(&session.num_done, 0);
+
+       memset(&req, 0, sizeof(struct rga2_req));
+       src = kmalloc(800*480*4, GFP_KERNEL);
+       dst = kmalloc(800*480*4, GFP_KERNEL);
+
+       printk("\n********************************\n");
+       printk("************ RGA2_TEST ************\n");
+       printk("********************************\n\n");
+
+#if 1
+       memset(src, 0x80, 800 * 480 * 4);
+       memset(dst, 0xcc, 800 * 480 * 4);
+#endif
+#if 0
+       dmac_flush_range(src, &src[800 * 480]);
+       outer_flush_range(virt_to_phys(src), virt_to_phys(&src[800 * 480]));
+
+       dmac_flush_range(dst, &dst[800 * 480]);
+       outer_flush_range(virt_to_phys(dst), virt_to_phys(&dst[800 * 480]));
+#endif
+
+#if 0
+       req.pat.act_w = 16;
+       req.pat.act_h = 16;
+       req.pat.vir_w = 16;
+       req.pat.vir_h = 16;
+       req.pat.yrgb_addr = virt_to_phys(src);
+       req.render_mode = 0;
+       rga2_blit_sync(&session, &req);
+#endif
+       {
+               uint32_t i, j;
+               uint8_t *sp;
+
+               sp = (uint8_t *)src;
+               for (j = 0; j < 240; j++) {
+                       sp = (uint8_t *)src + j * 320 * 10 / 8;
+                       for (i = 0; i < 320; i++) {
+                               if ((i & 3) == 0) {
+                                       sp[i * 5 / 4] = 0;
+                                       sp[i * 5 / 4+1] = 0x1;
+                               } else if ((i & 3) == 1) {
+                                       sp[i * 5 / 4+1] = 0x4;
+                               } else if ((i & 3) == 2) {
+                                       sp[i * 5 / 4+1] = 0x10;
+                               } else if ((i & 3) == 3) {
+                                       sp[i * 5 / 4+1] = 0x40;
+                           }
+                       }
+               }
+               sp = (uint8_t *)src;
+               for (j = 0; j < 100; j++)
+                       printk("src %.2x\n", sp[j]);
+       }
+       req.src.act_w = 320;
+       req.src.act_h = 240;
+
+       req.src.vir_w = 320;
+       req.src.vir_h = 240;
+       req.src.yrgb_addr = 0;//(uint32_t)virt_to_phys(src);
+       req.src.uv_addr = (unsigned long)virt_to_phys(src);
+       req.src.v_addr = 0;
+       req.src.format = RGA2_FORMAT_YCbCr_420_SP_10B;
+
+       req.dst.act_w  = 320;
+       req.dst.act_h = 240;
+       req.dst.x_offset = 0;
+       req.dst.y_offset = 0;
+
+       req.dst.vir_w = 320;
+       req.dst.vir_h = 240;
+
+       req.dst.yrgb_addr = 0;//((uint32_t)virt_to_phys(dst));
+       req.dst.uv_addr = (unsigned long)virt_to_phys(dst);
+       req.dst.format = RGA2_FORMAT_YCbCr_420_SP;
+
+       //dst = dst0;
+
+       //req.render_mode = color_fill_mode;
+       //req.fg_color = 0x80ffffff;
+
+       req.rotate_mode = 0;
+       req.scale_bicu_mode = 2;
+
+#if 0
+       //req.alpha_rop_flag = 0;
+       //req.alpha_rop_mode = 0x19;
+       //req.PD_mode = 3;
+
+       //req.mmu_info.mmu_flag = 0x21;
+       //req.mmu_info.mmu_en = 1;
+
+       //printk("src = %.8x\n", req.src.yrgb_addr);
+       //printk("src = %.8x\n", req.src.uv_addr);
+       //printk("dst = %.8x\n", req.dst.yrgb_addr);
+#endif
+
+       rga2_blit_sync(&session, &req);
+
+#if 0
+       uint32_t j;
+       for (j = 0; j < 320 * 240 * 10 / 8; j++) {
+        if (src[j] != dst[j])
+               printk("error value dst not equal src j %d, s %.2x d %.2x\n",
+                       j, src[j], dst[j]);
+       }
+#endif
+
+#if 1
+       {
+               uint32_t j;
+               uint8_t *dp = (uint8_t *)dst;
+
+               for (j = 0; j < 100; j++)
+                       printk("%d %.2x\n", j, dp[j]);
+       }
+#endif
+
+       kfree(src);
+       kfree(dst);
+}
+#endif
+
+module_init(rga2_init);
+module_exit(rga2_exit);
+
+/* Module information */
+MODULE_AUTHOR("zsq@rock-chips.com");
+MODULE_DESCRIPTION("Driver for rga device");
+MODULE_LICENSE("GPL");