{\r
mutex_lock(&rga2_service.lock);\r
if((t->back - t->front) > t->size) {\r
- if(t->front + size > t->back - t->size)\r
+ if(t->front + size > t->back - t->size) {\r
+ pr_info("front %d, back %d dsize %d size %d", t->front, t->back, t->size, size);\r
return -1;\r
+ }\r
}\r
else {\r
- if((t->front + size) > t->back)\r
+ if((t->front + size) > t->back) {\r
+ pr_info("front %d, back %d dsize %d size %d", t->front, t->back, t->size, size);\r
return -1;\r
+ }\r
\r
if(t->front + size > t->size) {\r
if (size > (t->back - t->size)) {\r
+ pr_info("front %d, back %d dsize %d size %d", t->front, t->back, t->size, size);\r
return -1;\r
}\r
t->front = 0;\r
return 0;\r
}\r
\r
-#if 0\r
-static int rga2_mmu_buf_cal(struct rga2_mmu_buf_t *t, uint32_t size)\r
-{\r
- if((t->front + size) > t->back) {\r
- return -1;\r
- }\r
- else {\r
- return 0;\r
- }\r
-}\r
-#endif\r
-\r
-\r
-\r
-static int rga2_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)\r
+static int rga2_mem_size_cal(unsigned long Mem, uint32_t MemSize, unsigned long *StartAddr)\r
{\r
- uint32_t start, end;\r
+ unsigned long start, end;\r
uint32_t pageCount;\r
\r
end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;\r
return pageCount;\r
}\r
\r
-static int rga2_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_addr,\r
- int format, uint32_t w, uint32_t h, uint32_t *StartAddr )\r
+static int rga2_buf_size_cal(unsigned long yrgb_addr, unsigned long uv_addr, unsigned long v_addr,\r
+ int format, uint32_t w, uint32_t h, unsigned long *StartAddr )\r
{\r
uint32_t size_yrgb = 0;\r
uint32_t size_uv = 0;\r
uint32_t size_v = 0;\r
uint32_t stride = 0;\r
- uint32_t start, end;\r
+ unsigned long start, end;\r
uint32_t pageCount;\r
\r
switch(format)\r
case RK_FORMAT_BPP8 :\r
break;\r
#endif\r
+ case RGA2_FORMAT_YCbCr_420_SP_10B:\r
+ case RGA2_FORMAT_YCrCb_420_SP_10B:\r
+ stride = (w + 3) & (~3);\r
+ stride = stride;\r
+ size_yrgb = stride * h;\r
+ size_uv = (stride * (h >> 1));\r
+ start = MIN(yrgb_addr, uv_addr);\r
+ start >>= PAGE_SHIFT;\r
+ end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));\r
+ end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;\r
+ pageCount = end - start;\r
+ break;\r
default :\r
pageCount = 0;\r
start = 0;\r
\r
static int rga2_MapUserMemory(struct page **pages,\r
uint32_t *pageTable,\r
- uint32_t Memory,\r
+ unsigned long Memory,\r
uint32_t pageCount)\r
{\r
int32_t result;\r
uint32_t i;\r
uint32_t status;\r
- uint32_t Address;\r
- //uint32_t temp;\r
+ unsigned long Address;\r
\r
status = 0;\r
Address = 0;\r
);\r
up_read(&current->mm->mmap_sem);\r
\r
- #if 0\r
- if(result <= 0 || result < pageCount)\r
- {\r
- status = 0;\r
-\r
- for(i=0; i<pageCount; i++)\r
- {\r
- temp = armv7_va_to_pa((Memory + i) << PAGE_SHIFT);\r
- if (temp == 0xffffffff)\r
- {\r
- printk("rga find mmu phy ddr error\n ");\r
- status = RGA_OUT_OF_RESOURCES;\r
- break;\r
- }\r
-\r
- pageTable[i] = temp;\r
- }\r
-\r
- return status;\r
- }\r
- #else\r
if(result <= 0 || result < pageCount)\r
{\r
struct vm_area_struct *vma;\r
\r
if (vma)//&& (vma->vm_flags & VM_PFNMAP) )\r
{\r
- #if 1\r
do\r
{\r
pte_t * pte;\r
}\r
while (0);\r
\r
- #else\r
- do\r
- {\r
- pte_t * pte;\r
- spinlock_t * ptl;\r
- unsigned long pfn;\r
- pgd_t * pgd;\r
- pud_t * pud;\r
- pmd_t * pmd;\r
-\r
- pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);\r
- pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);\r
- pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);\r
- pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);\r
-\r
- pfn = pte_pfn(*pte);\r
- Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));\r
- pte_unmap_unlock(pte, ptl);\r
- }\r
- while (0);\r
- #endif\r
-\r
- pageTable[i] = Address;\r
+ pageTable[i] = (uint32_t)Address;\r
}\r
else\r
{\r
\r
return status;\r
}\r
- #endif\r
\r
/* Fill the page table. */\r
for(i=0; i<pageCount; i++)\r
{\r
uint32_t i;\r
uint32_t status;\r
- uint32_t Address;\r
+ unsigned long Address;\r
uint32_t mapped_size = 0;\r
uint32_t len;\r
struct scatterlist *sgl = sg->sgl;\r
uint32_t sg_num = 0;\r
+ uint32_t break_flag = 0;\r
\r
status = 0;\r
Address = 0;\r
Address = sg_phys(sgl);\r
\r
for(i=0; i<len; i++) {\r
- Memory[mapped_size + i] = Address + (i << PAGE_SHIFT);\r
+ if (mapped_size + i >= pageCount) {\r
+ break_flag = 1;\r
+ break;\r
+ }\r
+ Memory[mapped_size + i] = (uint32_t)(Address + (i << PAGE_SHIFT));\r
}\r
-\r
+ if (break_flag)\r
+ break;\r
mapped_size += len;\r
sg_num += 1;\r
}\r
static int rga2_mmu_info_BitBlt_mode(struct rga2_reg *reg, struct rga2_req *req)\r
{\r
int Src0MemSize, DstMemSize, Src1MemSize;\r
- uint32_t Src0Start, Src1Start, DstStart;\r
+ unsigned long Src0Start, Src1Start, DstStart;\r
uint32_t AllSize;\r
uint32_t *MMU_Base, *MMU_Base_phys;\r
int ret;\r
Src1MemSize = 0;\r
DstMemSize = 0;\r
\r
- do\r
- {\r
+ do {\r
/* cal src0 buf mmu info */\r
if(req->mmu_info.src0_mmu_flag & 1) {\r
Src0MemSize = rga2_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
break;\r
}\r
\r
- pages = kzalloc((AllSize)* sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
- status = RGA2_MALLOC_ERROR;\r
- break;\r
- }\r
+ pages = rga2_mmu_buf.pages;\r
\r
mutex_lock(&rga2_service.lock);\r
MMU_Base = rga2_mmu_buf.buf_virtual + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));\r
\r
if (ret < 0) {\r
pr_err("rga2 map src0 memory failed\n");\r
- pr_err("RGA2 : yrgb = %.8x, uv = %.8x format = %d\n", req->src.yrgb_addr, req->src.uv_addr, req->src.format);\r
- pr_err("RGA2 : vir_w = %d, vir_h = %d\n", req->src.vir_w, req->src.vir_h);\r
status = ret;\r
break;\r
}\r
\r
/* change the buf address in req struct */\r
- req->mmu_info.src0_base_addr = (((uint32_t)MMU_Base_phys));\r
+ req->mmu_info.src0_base_addr = (((unsigned long)MMU_Base_phys));\r
uv_size = (req->src.uv_addr - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;\r
v_size = (req->src.v_addr - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;\r
\r
\r
if (ret < 0) {\r
pr_err("rga2 map src1 memory failed\n");\r
- pr_err("RGA2 : yrgb = %.8x, format = %d\n", req->src1.yrgb_addr, req->src1.format);\r
- pr_err("RGA2 : vir_w = %d, vir_h = %d\n", req->src1.vir_w, req->src1.vir_h);\r
status = ret;\r
break;\r
}\r
\r
/* change the buf address in req struct */\r
- req->mmu_info.src1_base_addr = ((uint32_t)(MMU_Base_phys + Src0MemSize));\r
+ req->mmu_info.src1_base_addr = ((unsigned long)(MMU_Base_phys + Src0MemSize));\r
req->src1.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));\r
}\r
\r
}\r
if (ret < 0) {\r
pr_err("rga2 map dst memory failed\n");\r
- pr_err("RGA2 : yrgb = %.8x, uv = %.8x\n, format = %d\n", req->dst.yrgb_addr, req->dst.uv_addr, req->dst.format);\r
- pr_err("RGA2 : vir_w = %d, vir_h = %d\n", req->dst.vir_w, req->dst.vir_h);\r
status = ret;\r
break;\r
}\r
\r
/* change the buf address in req struct */\r
- req->mmu_info.dst_base_addr = ((uint32_t)(MMU_Base_phys + Src0MemSize + Src1MemSize));\r
+ req->mmu_info.dst_base_addr = ((unsigned long)(MMU_Base_phys + Src0MemSize + Src1MemSize));\r
req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));\r
uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
}\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ #endif\r
\r
rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);\r
reg->MMU_len = AllSize;\r
\r
status = 0;\r
\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
-\r
return status;\r
}\r
while(0);\r
\r
-\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
-\r
return status;\r
}\r
\r
static int rga2_mmu_info_color_palette_mode(struct rga2_reg *reg, struct rga2_req *req)\r
{\r
int SrcMemSize, DstMemSize;\r
- uint32_t SrcStart, DstStart;\r
+ unsigned long SrcStart, DstStart;\r
struct page **pages = NULL;\r
uint32_t AllSize;\r
uint32_t *MMU_Base = NULL, *MMU_Base_phys;\r
byte_num = sw >> shift;\r
stride = (byte_num + 3) & (~3);\r
\r
+ SrcStart = 0;\r
+ DstStart = 0;\r
SrcMemSize = 0;\r
DstMemSize = 0;\r
\r
- do\r
- {\r
+ do {\r
if (req->mmu_info.src0_mmu_flag) {\r
SrcMemSize = rga2_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);\r
if(SrcMemSize == 0) {\r
break;\r
}\r
\r
- pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
+ pages = rga2_mmu_buf.pages;\r
if(pages == NULL) {\r
pr_err("RGA MMU malloc pages mem failed\n");\r
return -EINVAL;\r
}\r
\r
/* change the buf address in req struct */\r
- req->mmu_info.src0_base_addr = (((uint32_t)MMU_Base_phys));\r
+ req->mmu_info.src0_base_addr = (((unsigned long)MMU_Base_phys));\r
req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));\r
}\r
\r
}\r
\r
/* change the buf address in req struct */\r
- req->mmu_info.dst_base_addr = ((uint32_t)(MMU_Base_phys + SrcMemSize));\r
+ req->mmu_info.dst_base_addr = ((unsigned long)(MMU_Base_phys + SrcMemSize));\r
req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));\r
}\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ #endif\r
\r
rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);\r
reg->MMU_len = AllSize;\r
\r
- status = 0;\r
-\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
-\r
- return status;\r
+ return 0;\r
}\r
while(0);\r
\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
-\r
return 0;\r
}\r
\r
static int rga2_mmu_info_color_fill_mode(struct rga2_reg *reg, struct rga2_req *req)\r
{\r
int DstMemSize;\r
- uint32_t DstStart;\r
+ unsigned long DstStart;\r
struct page **pages = NULL;\r
uint32_t AllSize;\r
uint32_t *MMU_Base, *MMU_Base_phys;\r
int ret;\r
int status;\r
\r
+ DstMemSize = 0;\r
MMU_Base = NULL;\r
\r
- do\r
- {\r
+ do {\r
if(req->mmu_info.dst_mmu_flag & 1) {\r
DstMemSize = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
\r
AllSize = (DstMemSize + 15) & (~15);\r
\r
- pages = kzalloc((AllSize)* sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA2 MMU malloc pages mem failed\n");\r
- status = RGA2_MALLOC_ERROR;\r
- break;\r
- }\r
+ pages = rga2_mmu_buf.pages;\r
\r
if(rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {\r
pr_err("RGA2 Get MMU mem failed\n");\r
}\r
\r
/* change the buf address in req struct */\r
- req->mmu_info.dst_base_addr = ((uint32_t)MMU_Base_phys);\r
+ req->mmu_info.dst_base_addr = ((unsigned long)MMU_Base_phys);\r
req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));\r
}\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+ #endif\r
\r
rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);\r
\r
- /* Free the page table */\r
- if (pages != NULL)\r
- kfree(pages);\r
-\r
return 0;\r
}\r
while(0);\r
\r
- if (pages != NULL)\r
- kfree(pages);\r
-\r
return status;\r
}\r
\r
static int rga2_mmu_info_update_palette_table_mode(struct rga2_reg *reg, struct rga2_req *req)\r
{\r
int SrcMemSize;\r
- uint32_t SrcStart;\r
+ unsigned long SrcStart;\r
struct page **pages = NULL;\r
uint32_t AllSize;\r
uint32_t *MMU_Base, *MMU_Base_phys;\r
\r
MMU_Base = NULL;\r
\r
- do\r
- {\r
+ do {\r
/* cal src buf mmu info */\r
SrcMemSize = rga2_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h, &SrcStart);\r
if(SrcMemSize == 0) {\r
mutex_unlock(&rga2_service.lock);\r
\r
pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
- status = RGA2_MALLOC_ERROR;\r
- break;\r
- }\r
\r
if(SrcMemSize) {\r
ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
}\r
\r
/* change the buf address in req struct */\r
- req->mmu_info.src0_base_addr = (((uint32_t)MMU_Base_phys));\r
+ req->mmu_info.src0_base_addr = (((unsigned long)MMU_Base_phys));\r
req->pat.yrgb_addr = (req->pat.yrgb_addr & (~PAGE_MASK));\r
}\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ #endif\r
\r
rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);\r
reg->MMU_len = AllSize;\r
\r
- if (pages != NULL) {\r
- /* Free the page table */\r
- kfree(pages);\r
- }\r
-\r
return 0;\r
}\r
while(0);\r
\r
- if (pages != NULL)\r
- kfree(pages);\r
-\r
return status;\r
}\r
\r
static int rga2_mmu_info_update_patten_buff_mode(struct rga2_reg *reg, struct rga2_req *req)\r
{\r
int SrcMemSize, CMDMemSize;\r
- uint32_t SrcStart, CMDStart;\r
+ unsigned long SrcStart, CMDStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
\r
MMU_Base = MMU_p = 0;\r
\r
- do\r
- {\r
-\r
+ do {\r
/* cal src buf mmu info */\r
SrcMemSize = rga2_mem_size_cal(req->pat.yrgb_addr, req->pat.act_w * req->pat.act_h * 4, &SrcStart);\r
if(SrcMemSize == 0) {\r
}\r
\r
/* cal cmd buf mmu info */\r
- CMDMemSize = rga2_mem_size_cal((uint32_t)rga2_service.cmd_buff, RGA2_CMD_BUF_SIZE, &CMDStart);\r
+ CMDMemSize = rga2_mem_size_cal((unsigned long)rga2_service.cmd_buff, RGA2_CMD_BUF_SIZE, &CMDStart);\r
if(CMDMemSize == 0) {\r
return -EINVAL;\r
}\r
\r
AllSize = SrcMemSize + CMDMemSize;\r
\r
- pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
- status = RGA2_MALLOC_ERROR;\r
- break;\r
- }\r
+ pages = rga2_mmu_buf.pages;\r
\r
MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed\n");\r
- status = RGA2_MALLOC_ERROR;\r
- break;\r
- }\r
\r
for(i=0; i<CMDMemSize; i++) {\r
MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));\r
reg->MMU_base = MMU_Base;\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
-\r
- if (pages != NULL) {\r
- /* Free the page table */\r
- kfree(pages);\r
- }\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ #endif\r
\r
return 0;\r
\r
}\r
while(0);\r
\r
- if (pages != NULL)\r
- kfree(pages);\r
-\r
return status;\r
}\r
\r