return 0;\r
}\r
\r
-static int rga_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)\r
+static int rga_mem_size_cal(unsigned long Mem, uint32_t MemSize, unsigned long *StartAddr)\r
{\r
- uint32_t start, end;\r
+ unsigned long start, end;\r
uint32_t pageCount;\r
\r
end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;\r
return pageCount;\r
}\r
\r
-static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_addr,\r
- int format, uint32_t w, uint32_t h, uint32_t *StartAddr )\r
+static int rga_buf_size_cal(unsigned long yrgb_addr, unsigned long uv_addr, unsigned long v_addr,\r
+ int format, uint32_t w, uint32_t h, unsigned long *StartAddr )\r
{\r
uint32_t size_yrgb = 0;\r
uint32_t size_uv = 0;\r
uint32_t size_v = 0;\r
uint32_t stride = 0;\r
- uint32_t start, end;\r
+ unsigned long start, end;\r
uint32_t pageCount;\r
\r
switch(format)\r
\r
static int rga_MapUserMemory(struct page **pages,\r
uint32_t *pageTable,\r
- uint32_t Memory,\r
+ unsigned long Memory,\r
uint32_t pageCount)\r
{\r
int32_t result;\r
uint32_t i;\r
uint32_t status;\r
- uint32_t Address;\r
- //uint32_t temp;\r
+ unsigned long Address;\r
\r
status = 0;\r
Address = 0;\r
\r
- do\r
- {\r
+ do {\r
down_read(&current->mm->mmap_sem);\r
result = get_user_pages(current,\r
current->mm,\r
{\r
uint32_t i;\r
uint32_t status;\r
- uint32_t Address;\r
+ unsigned long Address;\r
uint32_t mapped_size = 0;\r
uint32_t len = 0;\r
struct scatterlist *sgl = sg->sgl;\r
static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
int SrcMemSize, DstMemSize;\r
- uint32_t SrcStart, DstStart;\r
+ unsigned long SrcStart, DstStart;\r
uint32_t i;\r
uint32_t AllSize;\r
uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;\r
else {\r
MMU_p = MMU_Base;\r
\r
- if(req->src.yrgb_addr == (uint32_t)rga_service.pre_scale_buf) {\r
+ if(req->src.yrgb_addr == (unsigned long)rga_service.pre_scale_buf) {\r
for(i=0; i<SrcMemSize; i++)\r
MMU_p[i] = rga_service.pre_scale_buf[i];\r
}\r
* change the buf address in req struct\r
*/\r
\r
- req->mmu_info.base_addr = (uint32_t)MMU_Base_phys >> 2;\r
+ req->mmu_info.base_addr = (unsigned long)MMU_Base_phys >> 2;\r
\r
uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+ #endif\r
\r
rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);\r
reg->MMU_len = AllSize + 16;\r
static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
int SrcMemSize, DstMemSize, CMDMemSize;\r
- uint32_t SrcStart, DstStart, CMDStart;\r
+ unsigned long SrcStart, DstStart, CMDStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
return -EINVAL;\r
}\r
\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
+ CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
if(CMDMemSize == 0) {\r
return -EINVAL;\r
}\r
\r
/* map CMD addr */\r
for(i=0; i<CMDMemSize; i++) {\r
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i)<<PAGE_SHIFT));\r
+ MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i)<<PAGE_SHIFT));\r
}\r
\r
/* map src addr */\r
reg->MMU_base = MMU_Base;\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+ #endif\r
\r
rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);\r
reg->MMU_len = AllSize + 16;\r
static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
int DstMemSize;\r
- uint32_t DstStart;\r
+ unsigned long DstStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
\r
MMU_Base = NULL;\r
\r
- do\r
- {\r
+ do {\r
DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
&DstStart);\r
* change the buf address in req struct\r
*/\r
\r
- req->mmu_info.base_addr = ((uint32_t)(MMU_Base_phys)>>2);\r
+ req->mmu_info.base_addr = ((unsigned long)(MMU_Base_phys)>>2);\r
req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));\r
\r
/*record the malloc buf for the cmd end to release*/\r
reg->MMU_base = MMU_Base;\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+ #endif\r
\r
rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);\r
reg->MMU_len = AllSize + 16;\r
\r
static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
- int DstMemSize;\r
- uint32_t DstStart;\r
- struct page **pages = NULL;\r
- uint32_t i;\r
- uint32_t AllSize;\r
- uint32_t *MMU_Base, *MMU_p;\r
- int ret, status;\r
-\r
- MMU_Base = NULL;\r
-\r
- do\r
- {\r
- /* cal dst buf mmu info */\r
- DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
- req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
- &DstStart);\r
- if(DstMemSize == 0) {\r
- return -EINVAL;\r
- }\r
-\r
- AllSize = DstMemSize;\r
-\r
- pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
- status = RGA_MALLOC_ERROR;\r
- break;\r
- }\r
-\r
- MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed\n");\r
- status = RGA_MALLOC_ERROR;\r
- break;\r
- }\r
-\r
- if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map dst memory failed\n");\r
- status = ret;\r
- break;\r
- }\r
- }\r
- else\r
- {\r
- MMU_p = MMU_Base;\r
-\r
- for(i=0; i<DstMemSize; i++)\r
- {\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
- }\r
- }\r
-\r
- /* zsq\r
- * change the buf address in req struct\r
- * for the reason of lie to MMU\r
- */\r
- req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);\r
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));\r
-\r
-\r
- /*record the malloc buf for the cmd end to release*/\r
- reg->MMU_base = MMU_Base;\r
-\r
- /* flush data to DDR */\r
- dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
-\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
-\r
- return 0;\r
-\r
- }\r
- while(0);\r
-\r
- if (pages != NULL)\r
- kfree(pages);\r
-\r
- if (MMU_Base != NULL)\r
- kfree(MMU_Base);\r
-\r
- return status;\r
+ return 0;\r
}\r
\r
static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
- int SrcMemSize, DstMemSize;\r
- uint32_t SrcStart, DstStart;\r
- struct page **pages = NULL;\r
- uint32_t i;\r
- uint32_t AllSize;\r
- uint32_t *MMU_Base, *MMU_p;\r
- int ret, status;\r
- uint32_t uv_size, v_size;\r
-\r
- MMU_Base = NULL;\r
-\r
- do\r
- {\r
- /* cal src buf mmu info */\r
- SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
- req->src.format, req->src.vir_w, req->src.vir_h,\r
- &SrcStart);\r
- if(SrcMemSize == 0) {\r
- return -EINVAL;\r
- }\r
-\r
- /* cal dst buf mmu info */\r
- DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
- req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
- &DstStart);\r
- if(DstMemSize == 0) {\r
- return -EINVAL;\r
- }\r
-\r
- AllSize = SrcMemSize + DstMemSize;\r
-\r
- pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
- status = RGA_MALLOC_ERROR;\r
- break;\r
- }\r
-\r
- MMU_Base = kzalloc((AllSize + 1)* sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed\n");\r
- status = RGA_MALLOC_ERROR;\r
- break;\r
- }\r
-\r
- if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
- if (ret < 0)\r
- {\r
- pr_err("rga map src memory failed\n");\r
- status = ret;\r
- break;\r
- }\r
- }\r
- else\r
- {\r
- MMU_p = MMU_Base;\r
-\r
- for(i=0; i<SrcMemSize; i++)\r
- {\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));\r
- }\r
- }\r
-\r
-\r
- if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);\r
- if (ret < 0)\r
- {\r
- pr_err("rga map dst memory failed\n");\r
- status = ret;\r
- break;\r
- }\r
- }\r
- else\r
- {\r
- MMU_p = MMU_Base + SrcMemSize;\r
-\r
- for(i=0; i<DstMemSize; i++)\r
- {\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
- }\r
- }\r
-\r
- MMU_Base[AllSize] = MMU_Base[AllSize - 1];\r
-\r
- /* zsq\r
- * change the buf address in req struct\r
- * for the reason of lie to MMU\r
- */\r
- req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);\r
-\r
- uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
- v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
-\r
- req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));\r
- req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);\r
- req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);\r
-\r
- uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
- v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
-\r
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);\r
- req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);\r
- req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);\r
-\r
-\r
- /*record the malloc buf for the cmd end to release*/\r
- reg->MMU_base = MMU_Base;\r
-\r
- /* flush data to DDR */\r
- dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
-\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
-\r
- return 0;\r
- }\r
- while(0);\r
-\r
- if (pages != NULL)\r
- kfree(pages);\r
-\r
- if (MMU_Base != NULL)\r
- kfree(MMU_Base);\r
-\r
- return status;\r
+ return 0;\r
}\r
\r
\r
static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
int SrcMemSize, DstMemSize;\r
- uint32_t SrcStart, DstStart;\r
+ unsigned long SrcStart, DstStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
/* kernel space */\r
MMU_p = MMU_Base + SrcMemSize;\r
\r
- if(req->dst.yrgb_addr == (uint32_t)rga_service.pre_scale_buf) {\r
+ if(req->dst.yrgb_addr == (unsigned long)rga_service.pre_scale_buf) {\r
for(i=0; i<DstMemSize; i++)\r
MMU_p[i] = rga_service.pre_scale_buf[i];\r
}\r
* for the reason of lie to MMU\r
*/\r
\r
- req->mmu_info.base_addr = ((uint32_t)(MMU_Base_phys)>>2);\r
+ req->mmu_info.base_addr = ((unsigned long)(MMU_Base_phys)>>2);\r
\r
uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
reg->MMU_base = MMU_Base;\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+ #endif\r
\r
rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);\r
reg->MMU_len = AllSize + 16;\r
static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
int SrcMemSize, CMDMemSize;\r
- uint32_t SrcStart, CMDStart;\r
+ unsigned long SrcStart, CMDStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
\r
MMU_Base = NULL;\r
\r
- do\r
- {\r
+ do {\r
/* cal src buf mmu info */\r
SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);\r
if(SrcMemSize == 0) {\r
}\r
\r
/* cal cmd buf mmu info */\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
+ CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
if(CMDMemSize == 0) {\r
return -EINVAL;\r
}\r
}\r
\r
for(i=0; i<CMDMemSize; i++) {\r
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));\r
+ MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));\r
}\r
\r
if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
reg->MMU_base = MMU_Base;\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ #endif\r
+\r
\r
if (pages != NULL) {\r
/* Free the page table */\r
static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
int SrcMemSize, CMDMemSize;\r
- uint32_t SrcStart, CMDStart;\r
+ unsigned long SrcStart, CMDStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
}\r
\r
/* cal cmd buf mmu info */\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
+ CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
if(CMDMemSize == 0) {\r
return -EINVAL;\r
}\r
reg->MMU_base = MMU_Base;\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ #endif\r
\r
if (pages != NULL) {\r
/* Free the page table */\r