#include <linux/wakelock.h>
#include <linux/scatterlist.h>
#include <linux/rockchip_ion.h>
+#include <linux/version.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-buf.h>
#include "rga2.h"
#include "rga2_reg_info.h"
#define RGA2_TEST_FLUSH_TIME 0
#define RGA2_INFO_BUS_ERROR 1
#define RGA2_POWER_OFF_DELAY 4*HZ /* 4s */
-#define RGA2_TIMEOUT_DELAY 2*HZ /* 2s */
+#define RGA2_TIMEOUT_DELAY (HZ / 10) /* 100ms */
#define RGA2_MAJOR 255
#define RGA2_RESET_TIMEOUT 1000
struct rga2_drvdata_t {
struct miscdevice miscdev;
- struct device dev;
+ struct device *dev;
void *rga_base;
int irq;
struct clk *rga2;
struct ion_client * ion_client;
+ char version[16];
};
struct rga2_drvdata_t *rga2_drvdata;
return *((volatile unsigned int *)(rga2_drvdata->rga_base + r));
}
+/*
+ * Read the RGA2 hardware version register and cache it as a
+ * "major.minor" string in rga2_drvdata->version.
+ * Returns 0 on success, -EINVAL when driver data is not initialised.
+ */
+static inline int rga2_init_version(void)
+{
+	struct rga2_drvdata_t *rga = rga2_drvdata;
+	u32 major_version, minor_version;
+	u32 reg_version;
+
+	if (!rga) {
+		pr_err("rga2_drvdata is null\n");
+		return -EINVAL;
+	}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+	pm_runtime_get_sync(rga->dev);
+#endif
+
+	/* Clocks must be running before touching the version register. */
+	clk_prepare_enable(rga->aclk_rga2);
+	clk_prepare_enable(rga->hclk_rga2);
+
+	reg_version = rga2_read(0x028);
+
+	clk_disable_unprepare(rga->aclk_rga2);
+	clk_disable_unprepare(rga->hclk_rga2);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+	pm_runtime_put(rga->dev);
+#endif
+
+	major_version = (reg_version & RGA2_MAJOR_VERSION_MASK) >> 24;
+	minor_version = (reg_version & RGA2_MINOR_VERSION_MASK) >> 20;
+
+	/* Bounded write into version[16]; %u matches the u32 operands. */
+	snprintf(rga->version, sizeof(rga->version), "%u.%02u",
+		 major_version, minor_version);
+
+	return 0;
+}
+
static void rga2_soft_reset(void)
{
u32 i;
u32 reg;
- rga2_write((1 << 3) | (1 << 4), RGA2_SYS_CTRL); //RGA_SYS_CTRL
+ rga2_write((1 << 3) | (1 << 4) | (1 << 6), RGA2_SYS_CTRL);
for(i = 0; i < RGA2_RESET_TIMEOUT; i++)
{
static ktime_t last;
ktime_t now = ktime_get();
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+ pm_runtime_get_sync(rga2_drvdata->dev);
+#endif
+
if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) {
cancel_delayed_work_sync(&rga2_drvdata->power_off_work);
rga2_queue_power_off_work();
clk_disable_unprepare(rga2_drvdata->rga2);
clk_disable_unprepare(rga2_drvdata->aclk_rga2);
clk_disable_unprepare(rga2_drvdata->hclk_rga2);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+ pm_runtime_put(rga2_drvdata->dev);
+#endif
+
wake_unlock(&rga2_drvdata->wake_lock);
first_RGA2_proc = 0;
rga2_service.enable = false;
return NULL;
}
+ reg->sg_src0 = req->sg_src0;
+ reg->sg_dst = req->sg_dst;
+ reg->sg_src1 = req->sg_src1;
+ reg->attach_src0 = req->attach_src0;
+ reg->attach_dst = req->attach_dst;
+ reg->attach_src1 = req->attach_src1;
+
mutex_lock(&rga2_service.lock);
list_add_tail(®->status_link, &rga2_service.waiting);
list_add_tail(®->session_link, &session->waiting);
}
}
-/* Caller must hold rga_service.lock */
+/* Release one image's dma-buf state: unmap, detach, drop the reference. */
+static void rga2_put_one_dma_buf(struct dma_buf_attachment *attach,
+				 struct sg_table *sgt)
+{
+	struct dma_buf *dma_buf;
+
+	if (attach && sgt)
+		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+	if (attach) {
+		dma_buf = attach->dmabuf;
+		dma_buf_detach(dma_buf, attach);
+		dma_buf_put(dma_buf);
+	}
+}
+
+/*
+ * Release the src0/dst/src1 dma-buf references held by a register
+ * context (reg != NULL) or, otherwise, by the request itself.
+ * Returns -EINVAL only when both arguments are NULL.
+ */
+static int rga2_put_dma_buf(struct rga2_req *req, struct rga2_reg *reg)
+{
+	if (!req && !reg)
+		return -EINVAL;
+
+	if (reg) {
+		rga2_put_one_dma_buf(reg->attach_src0, reg->sg_src0);
+		rga2_put_one_dma_buf(reg->attach_dst, reg->sg_dst);
+		rga2_put_one_dma_buf(reg->attach_src1, reg->sg_src1);
+	} else {
+		rga2_put_one_dma_buf(req->attach_src0, req->sg_src0);
+		rga2_put_one_dma_buf(req->attach_dst, req->sg_dst);
+		rga2_put_one_dma_buf(req->attach_src1, req->sg_src1);
+	}
+
+	return 0;
+}
+
static void rga2_del_running_list(void)
{
+	struct rga2_mmu_buf_t *tbuf = &rga2_mmu_buf;
	struct rga2_reg *reg;
-	while(!list_empty(&rga2_service.running))
-	{
-		reg = list_entry(rga2_service.running.next, struct rga2_reg, status_link);
-
-		if(reg->MMU_len != 0)
-		{
-			if (rga2_mmu_buf.back + reg->MMU_len > 2*rga2_mmu_buf.size)
-				rga2_mmu_buf.back = reg->MMU_len + rga2_mmu_buf.size;
+	while (!list_empty(&rga2_service.running)) {
+		reg = list_entry(rga2_service.running.next, struct rga2_reg,
+				 status_link);
+		/* Return the job's MMU table space to the ring buffer,
+		 * wrapping the back pointer at the ring end.
+		 */
+		if (reg->MMU_len && tbuf) {
+			if (tbuf->back + reg->MMU_len > 2 * tbuf->size)
+				tbuf->back = reg->MMU_len + tbuf->size;
			else
-				rga2_mmu_buf.back += reg->MMU_len;
+				tbuf->back += reg->MMU_len;
		}
+
+		/* Drop the dma-buf references taken when the job was queued. */
+		rga2_put_dma_buf(NULL, reg);
+
		atomic_sub(1, &reg->session->task_running);
		atomic_sub(1, &rga2_service.total_running);
	}
}
-/* Caller must hold rga_service.lock */
static void rga2_del_running_list_timeout(void)
{
-	struct rga2_reg *reg;
+	struct rga2_mmu_buf_t *tbuf = &rga2_mmu_buf;
+	struct rga2_reg *reg;
-	while(!list_empty(&rga2_service.running))
-	{
-		reg = list_entry(rga2_service.running.next, struct rga2_reg, status_link);
+	while (!list_empty(&rga2_service.running)) {
+		reg = list_entry(rga2_service.running.next, struct rga2_reg,
+				 status_link);
+		/* kfree(NULL) is a no-op, so no NULL check is needed. */
+		kfree(reg->MMU_base);
+		/* Reclaim MMU table space (same scheme as rga2_del_running_list). */
+		if (reg->MMU_len && tbuf) {
+			if (tbuf->back + reg->MMU_len > 2 * tbuf->size)
+				tbuf->back = reg->MMU_len + tbuf->size;
+			else
+				tbuf->back += reg->MMU_len;
+		}
-		if(reg->MMU_base != NULL)
-		{
-			kfree(reg->MMU_base);
-		}
+		rga2_put_dma_buf(NULL, reg);
-		atomic_sub(1, &reg->session->task_running);
-		atomic_sub(1, &rga2_service.total_running);
+		atomic_sub(1, &reg->session->task_running);
+		atomic_sub(1, &rga2_service.total_running);
+		/* The job timed out: reset the hardware before it is reused. */
+		rga2_soft_reset();
+		if (list_empty(&reg->session->waiting)) {
+			atomic_set(&reg->session->done, 1);
+			wake_up(&reg->session->wait);
+		}
+		rga2_reg_deinit(reg);
+	}
+	return;
+}
- rga2_soft_reset();
+/*
+ * Resolve one image (src0/dst/src1) of a request into usable addresses.
+ * For dma-buf fds (buf_gem_type_dma) the buffer is attached and mapped;
+ * *psgt / *pattach receive the mapping and must later be released via
+ * rga2_put_dma_buf(). For ion buffers either the sg table (IOMMU mode)
+ * or the physical address is used. Returns 0 on success, -EINVAL on error.
+ */
+static int rga2_get_img_info(rga_img_info_t *img,
+			     u8 mmu_flag,
+			     u8 buf_gem_type_dma,
+			     struct sg_table **psgt,
+			     struct dma_buf_attachment **pattach)
+{
+	struct dma_buf_attachment *attach = NULL;
+	struct ion_client *ion_client = NULL;
+	struct ion_handle *hdl = NULL;
+	struct device *rga_dev = NULL;
+	struct sg_table *sgt = NULL;
+	struct dma_buf *dma_buf = NULL;
+	u32 vir_w, vir_h;
+	ion_phys_addr_t phy_addr;
+	size_t len = 0;
+	int yrgb_addr = -1;
+	int ret = 0;
-	if(list_empty(&reg->session->waiting))
-	{
-		atomic_set(&reg->session->done, 1);
-		wake_up(&reg->session->wait);
-	}
+	ion_client = rga2_drvdata->ion_client;
+	rga_dev = rga2_drvdata->dev;
+	yrgb_addr = (int)img->yrgb_addr;
+	vir_w = img->vir_w;
+	vir_h = img->vir_h;
+
+	if (yrgb_addr > 0) {
+		if (buf_gem_type_dma) {
+			dma_buf = dma_buf_get(img->yrgb_addr);
+			if (IS_ERR(dma_buf)) {
+				ret = -EINVAL;
+				pr_err("dma_buf_get fail fd[%d]\n", yrgb_addr);
+				return ret;
+			}
-	rga2_reg_deinit(reg);
-	}
+			attach = dma_buf_attach(dma_buf, rga_dev);
+			if (IS_ERR(attach)) {
+				dma_buf_put(dma_buf);
+				ret = -EINVAL;
+				pr_err("Failed to attach dma_buf\n");
+				return ret;
+			}
+
+			*pattach = attach;
+			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+			if (IS_ERR(sgt)) {
+				ret = -EINVAL;
+				pr_err("Failed to map src attachment\n");
+				/* Clear sgt so cleanup does not unmap an ERR_PTR. */
+				sgt = NULL;
+				goto err_get_sg;
+			}
+			if (!mmu_flag) {
+				ret = -EINVAL;
+				pr_err("Fix it please enable iommu flag\n");
+				goto err_get_sg;
+			}
+		} else {
+			hdl = ion_import_dma_buf(ion_client, img->yrgb_addr);
+			if (IS_ERR(hdl)) {
+				ret = -EINVAL;
+				pr_err("RGA2 ERROR ion buf handle\n");
+				return ret;
+			}
+			if (mmu_flag) {
+				sgt = ion_sg_table(ion_client, hdl);
+				if (IS_ERR(sgt)) {
+					ret = -EINVAL;
+					pr_err("Fail map src attachment\n");
+					/* Clear the ERR_PTR before cleanup. */
+					sgt = NULL;
+					goto err_get_sg;
+				}
+			}
+		}
+
+		if (mmu_flag) {
+			*psgt = sgt;
+			/* IOMMU mode: plane offsets are NV12-style within the
+			 * mapped buffer.
+			 */
+			img->yrgb_addr = img->uv_addr;
+			img->uv_addr = img->yrgb_addr + (vir_w * vir_h);
+			img->v_addr = img->uv_addr + (vir_w * vir_h) / 4;
+		} else {
+			ion_phys(ion_client, hdl, &phy_addr, &len);
+			img->yrgb_addr = phy_addr;
+			img->uv_addr = img->yrgb_addr + (vir_w * vir_h);
+			img->v_addr = img->uv_addr + (vir_w * vir_h) / 4;
+		}
+	} else {
+		img->yrgb_addr = img->uv_addr;
+		img->uv_addr = img->yrgb_addr + (vir_w * vir_h);
+		img->v_addr = img->uv_addr + (vir_w * vir_h) / 4;
+	}
+
+	/* NOTE(review): the ion handle is freed while the sg table obtained
+	 * from it may still be referenced via *psgt — presumably the buffer
+	 * stays alive through the caller's fd; confirm ion lifetime rules.
+	 */
+	if (hdl)
+		ion_free(ion_client, hdl);
+
+	return ret;
+
+err_get_sg:
+	if (hdl)
+		ion_free(ion_client, hdl);
+	if (sgt && buf_gem_type_dma)
+		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+	if (attach) {
+		dma_buf = attach->dmabuf;
+		dma_buf_detach(dma_buf, attach);
+		*pattach = NULL;
+		dma_buf_put(dma_buf);
+	}
+	return ret;
}
-static int rga2_convert_dma_buf(struct rga2_req *req)
+/*
+ * Acquire buffer references for all three images (src0, dst, src1) of a
+ * request via rga2_get_img_info(); on failure, release the references
+ * that were already taken, in reverse order.
+ */
+static int rga2_get_dma_buf(struct rga2_req *req)
{
- struct ion_handle *hdl;
- ion_phys_addr_t phy_addr;
- size_t len;
- int ret;
+ struct dma_buf *dma_buf = NULL;
+ u8 buf_gem_type_dma = 0;
+ u8 mmu_flag = 0;
+ int ret = 0;
+ /* Non-zero when userspace passed dma-buf fds instead of ion handles. */
+ buf_gem_type_dma = req->buf_type & RGA_BUF_GEM_TYPE_DMA;
req->sg_src0 = NULL;
req->sg_src1 = NULL;
- req->sg_dst = NULL;
- req->sg_els = NULL;
-
- if((int)req->src.yrgb_addr > 0) {
- hdl = ion_import_dma_buf(rga2_drvdata->ion_client, req->src.yrgb_addr);
- if (IS_ERR(hdl)) {
- ret = PTR_ERR(hdl);
- printk("RGA2 SRC ERROR ion buf handle\n");
- return ret;
- }
- if (req->mmu_info.src0_mmu_flag) {
- req->sg_src0 = ion_sg_table(rga2_drvdata->ion_client, hdl);
- req->src.yrgb_addr = req->src.uv_addr;
- req->src.uv_addr = req->src.yrgb_addr + (req->src.vir_w * req->src.vir_h);
- req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h)/4;
- }
- else {
- ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);
- req->src.yrgb_addr = phy_addr;
- req->src.uv_addr = req->src.yrgb_addr + (req->src.vir_w * req->src.vir_h);
- req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h)/4;
- }
- ion_free(rga2_drvdata->ion_client, hdl);
- }
- else {
- req->src.yrgb_addr = req->src.uv_addr;
- req->src.uv_addr = req->src.yrgb_addr + (req->src.vir_w * req->src.vir_h);
- req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h)/4;
+ req->sg_dst = NULL;
+ req->sg_els = NULL;
+ req->attach_src0 = NULL;
+ req->attach_dst = NULL;
+ req->attach_src1 = NULL;
+ mmu_flag = req->mmu_info.src0_mmu_flag;
+ ret = rga2_get_img_info(&req->src, mmu_flag, buf_gem_type_dma,
+ &req->sg_src0, &req->attach_src0);
+ if (ret) {
+ pr_err("src:rga2_get_img_info fail\n");
+ goto err_src;
}
- if((int)req->dst.yrgb_addr > 0) {
- hdl = ion_import_dma_buf(rga2_drvdata->ion_client, req->dst.yrgb_addr);
- if (IS_ERR(hdl)) {
- ret = PTR_ERR(hdl);
- printk("RGA2 DST ERROR ion buf handle\n");
- return ret;
- }
- if (req->mmu_info.dst_mmu_flag) {
- req->sg_dst = ion_sg_table(rga2_drvdata->ion_client, hdl);
- req->dst.yrgb_addr = req->dst.uv_addr;
- req->dst.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);
- req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;
- }
- else {
- ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);
- req->dst.yrgb_addr = phy_addr;
- req->dst.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);
- req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;
- }
- ion_free(rga2_drvdata->ion_client, hdl);
+ mmu_flag = req->mmu_info.dst_mmu_flag;
+ ret = rga2_get_img_info(&req->dst, mmu_flag, buf_gem_type_dma,
+ &req->sg_dst, &req->attach_dst);
+ if (ret) {
+ pr_err("dst:rga2_get_img_info fail\n");
+ goto err_dst;
}
- else {
- req->dst.yrgb_addr = req->dst.uv_addr;
- req->dst.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);
- req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;
+
+ mmu_flag = req->mmu_info.src1_mmu_flag;
+ ret = rga2_get_img_info(&req->src1, mmu_flag, buf_gem_type_dma,
+ &req->sg_src1, &req->attach_src1);
+ if (ret) {
+ pr_err("src1:rga2_get_img_info fail\n");
+ goto err_src1;
}
- if((int)req->src1.yrgb_addr > 0) {
- hdl = ion_import_dma_buf(rga2_drvdata->ion_client, req->src1.yrgb_addr);
- if (IS_ERR(hdl)) {
- ret = PTR_ERR(hdl);
- printk("RGA2 ERROR ion buf handle\n");
- return ret;
- }
- if (req->mmu_info.dst_mmu_flag) {
- req->sg_src1 = ion_sg_table(rga2_drvdata->ion_client, hdl);
- req->src1.yrgb_addr = 0;
- req->src1.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);
- req->src1.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;
- }
- else {
- ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);
- req->src1.yrgb_addr = phy_addr;
- req->src1.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);
- req->src1.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;
- }
- ion_free(rga2_drvdata->ion_client, hdl);
+ return ret;
+
+/* Error unwind: labels fall through so each stage also releases what the
+ * earlier stages acquired (dst, then src0).
+ */
+err_src1:
+ if (buf_gem_type_dma && req->sg_dst && req->attach_dst) {
+ dma_buf_unmap_attachment(req->attach_dst,
+ req->sg_dst, DMA_BIDIRECTIONAL);
+ dma_buf = req->attach_dst->dmabuf;
+ dma_buf_detach(dma_buf, req->attach_dst);
+ dma_buf_put(dma_buf);
}
- else {
- req->src1.yrgb_addr = req->dst.uv_addr;
- req->src1.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);
- req->src1.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;
+err_dst:
+ if (buf_gem_type_dma && req->sg_src0 && req->attach_src0) {
+ dma_buf_unmap_attachment(req->attach_src0,
+ req->sg_src0, DMA_BIDIRECTIONAL);
+ dma_buf = req->attach_src0->dmabuf;
+ dma_buf_detach(dma_buf, req->attach_src0);
+ dma_buf_put(dma_buf);
}
+err_src:
- return 0;
+ return ret;
}
static int rga2_blit(rga2_session *session, struct rga2_req *req)
int num = 0;
struct rga2_reg *reg;
- if(rga2_convert_dma_buf(req)) {
- printk("RGA2 : DMA buf copy error\n");
+ if (rga2_get_dma_buf(req)) {
+ pr_err("RGA2 : DMA buf copy error\n");
return -EFAULT;
}
/* check value if legal */
ret = rga2_check_param(req);
if(ret == -EINVAL) {
- printk("req argument is inval\n");
- break;
+ pr_err("req argument is inval\n");
+ goto err_put_dma_buf;
}
reg = rga2_reg_init(session, req);
if(reg == NULL) {
- break;
+ pr_err("init reg fail\n");
+ goto err_put_dma_buf;
}
- num = 1;
+ num = 1;
mutex_lock(&rga2_service.lock);
atomic_add(num, &rga2_service.total_running);
rga2_try_set_reg();
}
while(0);
+err_put_dma_buf:
+ rga2_put_dma_buf(req, NULL);
+
return -EFAULT;
}
static int rga2_blit_sync(rga2_session *session, struct rga2_req *req)
{
+ struct rga2_req req_bak;
+ int try = 10;
int ret = -1;
int ret_timeout = 0;
+ memcpy(&req_bak, req, sizeof(req_bak));
+retry:
+
#if RGA2_TEST_MSG
if (1) {//req->bitblt_mode == 0x2) {
print_info(req);
rga2_end = ktime_sub(rga2_end, rga2_start);
printk("sync one cmd end time %d\n", (int)ktime_to_us(rga2_end));
#endif
+ if (ret == -ETIMEDOUT && try--) {
+ memcpy(req, &req_bak, sizeof(req_bak));
+ goto retry;
+ }
return ret;
}
static long rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
{
+ struct rga2_drvdata_t *rga = rga2_drvdata;
struct rga2_req req, req_first;
struct rga_req req_rga;
int ret = 0;
rga2_session *session;
+ if (!rga) {
+ pr_err("rga2_drvdata is null, rga2 is not init\n");
+ return -ENODEV;
+ }
memset(&req, 0x0, sizeof(req));
mutex_lock(&rga2_service.mutex);
break;
case RGA_GET_VERSION:
case RGA2_GET_VERSION:
- ret = copy_to_user((void *)arg, RGA2_VERSION, sizeof(RGA2_VERSION));
- //ret = 0;
+ ret = copy_to_user((void *)arg, rga->version, 16);
break;
default:
ERR("unknown ioctl cmd!\n");
#ifdef CONFIG_COMPAT
static long compat_rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
{
+ struct rga2_drvdata_t *rga = rga2_drvdata;
struct rga2_req req, req_first;
struct rga_req_32 req_rga;
int ret = 0;
rga2_session *session;
+ if (!rga) {
+ pr_err("rga2_drvdata is null, rga2 is not init\n");
+ return -ENODEV;
+ }
memset(&req, 0x0, sizeof(req));
mutex_lock(&rga2_service.mutex);
break;
case RGA_GET_VERSION:
case RGA2_GET_VERSION:
- ret = copy_to_user((void *)arg, RGA2_VERSION, sizeof(RGA2_VERSION));
- //ret = 0;
+ ret = copy_to_user((void *)arg, rga->version, 16);
break;
default:
ERR("unknown ioctl cmd!\n");
}
platform_set_drvdata(pdev, data);
+ data->dev = &pdev->dev;
rga2_drvdata = data;
of_property_read_u32(np, "dev_mode", &rga2_service.dev_mode);
ERR("cannot register miscdev (%d)\n", ret);
goto err_misc_register;
}
-
- pr_info("Driver loaded succesfully\n");
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+ pm_runtime_enable(&pdev->dev);
+#endif
+ rga2_init_version();
+ pr_info("Driver loaded successfully ver:%s\n", rga2_drvdata->version);
return 0;
devm_clk_put(&pdev->dev, data->aclk_rga2);
devm_clk_put(&pdev->dev, data->hclk_rga2);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+ pm_runtime_disable(&pdev->dev);
+#endif
+
kfree(data);
return 0;
}
rga2_mmu_buf.pages = kmalloc(32768 * sizeof(struct page *), GFP_KERNEL);
- if ((ret = platform_driver_register(&rga2_driver)) != 0)
- {
+ ret = platform_driver_register(&rga2_driver);
+ if (ret != 0) {
printk(KERN_ERR "Platform device register failed (%d).\n", ret);
return ret;
}
struct rga2_req req;
rga2_session session;
unsigned int *src, *dst;
- uint32_t i, j;
- uint32_t *dst0;
session.pid = current->pid;
INIT_LIST_HEAD(&session.waiting);
list_add_tail(&session.list_session, &rga2_service.session);
atomic_set(&session.task_running, 0);
atomic_set(&session.num_done, 0);
- //file->private_data = (void *)session;
-
- //fb = rk_get_fb(0);
memset(&req, 0, sizeof(struct rga2_req));
src = kmalloc(800*480*4, GFP_KERNEL);
printk("************ RGA2_TEST ************\n");
printk("********************************\n\n");
- memset(src, 0x80, 800*480*4);
- memset(dst, 0x0, 800*480*4);
-
- //dmac_flush_range(&src, &src[800*480*4]);
- //outer_flush_range(virt_to_phys(&src),virt_to_phys(&src[800*480*4]));
-
+#if 1
+ memset(src, 0x80, 800 * 480 * 4);
+ memset(dst, 0xcc, 800 * 480 * 4);
+#endif
#if 0
- memset(src_buf, 0x80, 800*480*4);
- memset(dst_buf, 0xcc, 800*480*4);
+ dmac_flush_range(src, &src[800 * 480]);
+ outer_flush_range(virt_to_phys(src), virt_to_phys(&src[800 * 480]));
- dmac_flush_range(&dst_buf[0], &dst_buf[800*480]);
- outer_flush_range(virt_to_phys(&dst_buf[0]),virt_to_phys(&dst_buf[800*480]));
+ dmac_flush_range(dst, &dst[800 * 480]);
+ outer_flush_range(virt_to_phys(dst), virt_to_phys(&dst[800 * 480]));
#endif
- dst0 = (uint32_t *)&dst;
- i = j = 0;
#if 0
req.pat.act_w = 16;
req.pat.act_h = 16;
req.render_mode = 0;
rga2_blit_sync(&session, &req);
#endif
-
- req.src.act_w = 320;
+ {
+ uint32_t i, j;
+ uint8_t *sp;
+
+ sp = (uint8_t *)src;
+ for (j = 0; j < 240; j++) {
+ sp = (uint8_t *)src + j * 320 * 10 / 8;
+ for (i = 0; i < 320; i++) {
+ if ((i & 3) == 0) {
+ sp[i * 5 / 4] = 0;
+ sp[i * 5 / 4+1] = 0x1;
+ } else if ((i & 3) == 1) {
+ sp[i * 5 / 4+1] = 0x4;
+ } else if ((i & 3) == 2) {
+ sp[i * 5 / 4+1] = 0x10;
+ } else if ((i & 3) == 3) {
+ sp[i * 5 / 4+1] = 0x40;
+ }
+ }
+ }
+ sp = (uint8_t *)src;
+ for (j = 0; j < 100; j++)
+ printk("src %.2x\n", sp[j]);
+ }
+ req.src.act_w = 320;
req.src.act_h = 240;
- req.src.vir_w = 320;
+ req.src.vir_w = 320;
req.src.vir_h = 240;
req.src.yrgb_addr = 0;//(uint32_t)virt_to_phys(src);
req.src.uv_addr = (unsigned long)virt_to_phys(src);
req.src.v_addr = 0;
- req.src.format = RGA2_FORMAT_RGBA_8888;
+ req.src.format = RGA2_FORMAT_YCbCr_420_SP_10B;
req.dst.act_w = 320;
req.dst.act_h = 240;
req.dst.yrgb_addr = 0;//((uint32_t)virt_to_phys(dst));
req.dst.uv_addr = (unsigned long)virt_to_phys(dst);
- req.dst.format = RGA2_FORMAT_RGBA_8888;
+ req.dst.format = RGA2_FORMAT_YCbCr_420_SP;
//dst = dst0;
req.rotate_mode = 0;
req.scale_bicu_mode = 2;
+#if 0
//req.alpha_rop_flag = 0;
//req.alpha_rop_mode = 0x19;
//req.PD_mode = 3;
//printk("src = %.8x\n", req.src.yrgb_addr);
//printk("src = %.8x\n", req.src.uv_addr);
//printk("dst = %.8x\n", req.dst.yrgb_addr);
+#endif
rga2_blit_sync(&session, &req);
- for(j=0; j<100; j++) {
- printk("%.8x\n", dst[j]);
+#if 0
+ uint32_t j;
+ for (j = 0; j < 320 * 240 * 10 / 8; j++) {
+ if (src[j] != dst[j])
+ printk("error value dst not equal src j %d, s %.2x d %.2x\n",
+ j, src[j], dst[j]);
+ }
+#endif
+
+#if 1
+ {
+ uint32_t j;
+ uint8_t *dp = (uint8_t *)dst;
+
+ for (j = 0; j < 100; j++)
+ printk("%d %.2x\n", j, dp[j]);
}
+#endif
if(src)
kfree(src);