#include <asm/atomic.h>\r
#include <asm/cacheflush.h>\r
#include "rga_mmu_info.h"\r
+#include <linux/delay.h>\r
\r
extern rga_service_info rga_service;\r
-//extern int mmu_buff_temp[1024];\r
+extern struct rga_mmu_buf_t rga_mmu_buf;\r
\r
#define KERNEL_SPACE_VALID 0xc0000000\r
\r
-#define V7_VATOPA_SUCESS_MASK (0x1)\r
-#define V7_VATOPA_GET_PADDR(X) (X & 0xFFFFF000)\r
-#define V7_VATOPA_GET_INER(X) ((X>>4) & 7)\r
-#define V7_VATOPA_GET_OUTER(X) ((X>>2) & 3)\r
-#define V7_VATOPA_GET_SH(X) ((X>>7) & 1)\r
-#define V7_VATOPA_GET_NS(X) ((X>>9) & 1)\r
-#define V7_VATOPA_GET_SS(X) ((X>>1) & 1)\r
+/*
+ * Commit `size` page-table entries from the shared MMU ring buffer by
+ * advancing the front index.  Serialized against other users with
+ * rga_service.lock.  Always succeeds (returns 0) — callers must have
+ * reserved the space first via rga_mmu_buf_get_try().
+ */
+static int rga_mmu_buf_get(struct rga_mmu_buf_t *t, uint32_t size)
+{
+ mutex_lock(&rga_service.lock);
+ t->front += size;
+ mutex_unlock(&rga_service.lock);
+
+ return 0;
+}
\r
-#if 0\r
-static unsigned int armv7_va_to_pa(unsigned int v_addr)\r
+/*
+ * Try to reserve `size` entries in the shared MMU ring buffer.
+ * Returns 0 when the reservation fits (possibly wrapping t->front back
+ * to 0 when the tail has too little room), -1 when the buffer is full.
+ *
+ * Fix: the original returned -1 on three paths while still holding
+ * rga_service.lock, leaving the mutex locked forever and deadlocking
+ * every later RGA request.  All exits now go through a single
+ * unlock-before-return path.
+ */
+static int rga_mmu_buf_get_try(struct rga_mmu_buf_t *t, uint32_t size)
{
- unsigned int p_addr;
- __asm__ volatile ( "mcr p15, 0, %1, c7, c8, 0\n"
- "isb\n"
- "dsb\n"
- "mrc p15, 0, %0, c7, c4, 0\n"
- : "=r" (p_addr)
- : "r" (v_addr)
- : "cc");
-
- if (p_addr & V7_VATOPA_SUCESS_MASK)
- return 0xFFFFFFFF;
- else
- return (V7_VATOPA_GET_SS(p_addr) ? 0xFFFFFFFF : V7_VATOPA_GET_PADDR(p_addr));
+ int ret = 0;
+
+ mutex_lock(&rga_service.lock);
+ if ((t->back - t->front) > t->size) {
+  /* back has wrapped around relative to front */
+  if (t->front + size > t->back - t->size)
+   ret = -1;
+ }
+ else {
+  if ((t->front + size) > t->back) {
+   ret = -1;
+  }
+  else if (t->front + size > t->size) {
+   /* not enough room at the tail: wrap front to the start */
+   if (size > (t->back - t->size))
+    ret = -1;
+   else
+    t->front = 0;
+  }
+ }
+ mutex_unlock(&rga_service.lock);
+
+ return ret;
}
-#endif\r
\r
static int rga_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)\r
{\r
\r
if (vma)//&& (vma->vm_flags & VM_PFNMAP) )\r
{\r
- #if 1\r
do\r
{\r
pte_t * pte;\r
}\r
while (0);\r
\r
- #else\r
- do\r
- {\r
- pte_t * pte;\r
- spinlock_t * ptl;\r
- unsigned long pfn;\r
- pgd_t * pgd;\r
- pud_t * pud;\r
- pmd_t * pmd;\r
-\r
- pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);\r
- pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);\r
- pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);\r
- pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);\r
-\r
- pfn = pte_pfn(*pte);\r
- Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));\r
- pte_unmap_unlock(pte, ptl);\r
- }\r
- while (0);\r
- #endif\r
-\r
pageTable[i] = Address;\r
}\r
else\r
\r
static int rga_MapION(struct sg_table *sg,\r
uint32_t *Memory,\r
- int32_t pageCount)\r
+ int32_t pageCount,\r
+ uint32_t offset)\r
{\r
uint32_t i;\r
uint32_t status;\r
uint32_t Address;\r
uint32_t mapped_size = 0;\r
- uint32_t len;\r
+ uint32_t len = 0;\r
struct scatterlist *sgl = sg->sgl;\r
uint32_t sg_num = 0;\r
\r
status = 0;\r
Address = 0;\r
- do {\r
+ offset = offset >> PAGE_SHIFT;\r
+ if (offset != 0) {\r
+ do {\r
+ len += (sg_dma_len(sgl) >> PAGE_SHIFT);\r
+ if (len == offset) {\r
+ sg_num += 1;\r
+ break;\r
+ }\r
+ else {\r
+ if (len > offset)\r
+ break;\r
+ }\r
+ sg_num += 1;\r
+ }\r
+ while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));\r
+\r
+ sgl = sg->sgl;\r
+ len = 0;\r
+ do {\r
+ len += (sg_dma_len(sgl) >> PAGE_SHIFT);\r
+ sgl = sg_next(sgl);\r
+ }\r
+ while(--sg_num);\r
+\r
+ offset -= len;\r
+\r
len = sg_dma_len(sgl) >> PAGE_SHIFT;\r
Address = sg_phys(sgl);\r
+ Address += offset;\r
\r
- for(i=0; i<len; i++) {\r
- Memory[mapped_size + i] = Address + (i << PAGE_SHIFT);\r
+ for(i=offset; i<len; i++) {\r
+ Memory[i - offset] = Address + (i << PAGE_SHIFT);\r
}\r
+ mapped_size += (len - offset);\r
+ sg_num = 1;\r
+ sgl = sg_next(sgl);\r
+ do {\r
+ len = sg_dma_len(sgl) >> PAGE_SHIFT;\r
+ Address = sg_phys(sgl);\r
\r
- mapped_size += len;\r
- sg_num += 1;\r
- }\r
- while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));\r
+ for(i=0; i<len; i++) {\r
+ Memory[mapped_size + i] = Address + (i << PAGE_SHIFT);\r
+ }\r
\r
+ mapped_size += len;\r
+ sg_num += 1;\r
+ }\r
+ while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));\r
+ }\r
+ else {\r
+ do {\r
+ len = sg_dma_len(sgl) >> PAGE_SHIFT;\r
+ Address = sg_phys(sgl);\r
+ for(i=0; i<len; i++) {\r
+ Memory[mapped_size + i] = Address + (i << PAGE_SHIFT);\r
+ }\r
+ mapped_size += len;\r
+ sg_num += 1;\r
+ }\r
+ while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));\r
+ }\r
return 0;\r
}\r
\r
uint32_t SrcStart, DstStart;\r
uint32_t i;\r
uint32_t AllSize;\r
- uint32_t *MMU_Base, *MMU_p;\r
+ uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;\r
int ret;\r
int status;\r
uint32_t uv_size, v_size;\r
SrcMemSize = 0;\r
DstMemSize = 0;\r
\r
- do\r
- {\r
+ do {\r
/* cal src buf mmu info */\r
-\r
SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
req->src.format, req->src.vir_w, req->src.act_h + req->src.y_offset,\r
&SrcStart);\r
return -EINVAL;\r
}\r
\r
-\r
/* cal dst buf mmu info */\r
\r
DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
&DstStart);\r
- if(DstMemSize == 0) {\r
+ if(DstMemSize == 0)\r
return -EINVAL;\r
- }\r
\r
/* Cal out the needed mem size */\r
SrcMemSize = (SrcMemSize + 15) & (~15);\r
DstMemSize = (DstMemSize + 15) & (~15);\r
AllSize = SrcMemSize + DstMemSize;\r
\r
- pages = kzalloc((AllSize + 1)* sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
+ if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {\r
+ pr_err("RGA Get MMU mem failed\n");\r
status = RGA_MALLOC_ERROR;\r
break;\r
}\r
\r
- MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
- if(MMU_Base == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed\n");\r
- status = RGA_MALLOC_ERROR;\r
- break;\r
- }\r
+ mutex_lock(&rga_service.lock);\r
+ MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));\r
+ MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));\r
+ mutex_unlock(&rga_service.lock);\r
+\r
+ pages = rga_mmu_buf.pages;\r
\r
if((req->mmu_info.mmu_flag >> 8) & 1) {\r
if (req->sg_src) {\r
- ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize);\r
+ ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize, req->line_draw_info.flag);\r
}\r
else {\r
ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
\r
if ((req->mmu_info.mmu_flag >> 10) & 1) {\r
if (req->sg_dst) {\r
- ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize);\r
+ ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize, req->line_draw_info.line_width);\r
}\r
else {\r
ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);\r
MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);\r
}\r
\r
- MMU_Base[AllSize] = MMU_Base[AllSize - 1];\r
+ MMU_Base[AllSize] = MMU_Base[AllSize-1];\r
\r
/* zsq\r
* change the buf address in req struct\r
*/\r
\r
- req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);\r
+ req->mmu_info.base_addr = (uint32_t)MMU_Base_phys >> 2;\r
\r
uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);\r
req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);\r
\r
- /*record the malloc buf for the cmd end to release*/\r
- reg->MMU_base = MMU_Base;\r
-\r
/* flush data to DDR */\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
+ outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));\r
\r
- status = 0;\r
+ rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);\r
+ reg->MMU_len = AllSize + 16;\r
\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
+ status = 0;\r
\r
return status;\r
}\r
while(0);\r
\r
-\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
-\r
- /* Free MMU table */\r
- if(MMU_Base != NULL) {\r
- kfree(MMU_Base);\r
- }\r
-\r
return status;\r
}\r
\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
- uint32_t *MMU_Base = NULL;\r
+ uint32_t *MMU_Base = NULL, *MMU_Base_phys = NULL;\r
uint32_t *MMU_p;\r
int ret, status;\r
uint32_t stride;\r
byte_num = sw >> shift;\r
stride = (byte_num + 3) & (~3);\r
\r
- do\r
- {\r
-\r
+ do {\r
SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);\r
if(SrcMemSize == 0) {\r
return -EINVAL;\r
return -EINVAL;\r
}\r
\r
- AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
+ SrcMemSize = (SrcMemSize + 15) & (~15);\r
+ DstMemSize = (DstMemSize + 15) & (~15);\r
+ CMDMemSize = (CMDMemSize + 15) & (~15);\r
\r
- pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
- return -EINVAL;\r
- }\r
+ AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
\r
- MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
- if(MMU_Base == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed\n");\r
+ if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {\r
+ pr_err("RGA Get MMU mem failed\n");\r
+ status = RGA_MALLOC_ERROR;\r
break;\r
}\r
\r
+ mutex_lock(&rga_service.lock);\r
+ MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));\r
+ MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));\r
+ mutex_unlock(&rga_service.lock);\r
+\r
+ pages = rga_mmu_buf.pages;\r
+\r
/* map CMD addr */\r
- for(i=0; i<CMDMemSize; i++)\r
- {\r
+ for(i=0; i<CMDMemSize; i++) {\r
MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i)<<PAGE_SHIFT));\r
}\r
\r
/* map src addr */\r
- if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
+ if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {\r
ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
- if (ret < 0)\r
- {\r
+ if (ret < 0) {\r
pr_err("rga map src memory failed\n");\r
status = ret;\r
break;\r
}\r
}\r
- else\r
- {\r
+ else {\r
MMU_p = MMU_Base + CMDMemSize;\r
\r
for(i=0; i<SrcMemSize; i++)\r
}\r
\r
/* map dst addr */\r
- if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
+ if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {\r
ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
- if (ret < 0)\r
- {\r
+ if (ret < 0) {\r
pr_err("rga map dst memory failed\n");\r
status = ret;\r
break;\r
}\r
}\r
- else\r
- {\r
+ else {\r
MMU_p = MMU_Base + CMDMemSize + SrcMemSize;\r
-\r
for(i=0; i<DstMemSize; i++)\r
- {\r
MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
- }\r
}\r
\r
\r
req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);\r
\r
-\r
/*record the malloc buf for the cmd end to release*/\r
reg->MMU_base = MMU_Base;\r
\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
+ rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);\r
+ reg->MMU_len = AllSize + 16;\r
\r
return status;\r
\r
}\r
while(0);\r
\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
-\r
- /* Free mmu table */\r
- if (MMU_Base != NULL) {\r
- kfree(MMU_Base);\r
- }\r
-\r
return 0;\r
}\r
\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
- uint32_t *MMU_Base, *MMU_p;\r
+ uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;\r
int ret;\r
int status;\r
\r
return -EINVAL;\r
}\r
\r
- AllSize = DstMemSize;\r
+ AllSize = (DstMemSize + 15) & (~15);\r
\r
- pages = kzalloc((AllSize + 1)* sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
- status = RGA_MALLOC_ERROR;\r
- break;\r
- }\r
+ pages = rga_mmu_buf.pages;\r
\r
- MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed\n");\r
+ if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {\r
+ pr_err("RGA Get MMU mem failed\n");\r
status = RGA_MALLOC_ERROR;\r
break;\r
}\r
\r
+ mutex_lock(&rga_service.lock);\r
+ MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));\r
+ MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));\r
+ mutex_unlock(&rga_service.lock);\r
+\r
if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {\r
if (req->sg_dst) {\r
- ret = rga_MapION(req->sg_dst, &MMU_Base[0], DstMemSize);\r
+ ret = rga_MapION(req->sg_dst, &MMU_Base[0], DstMemSize, req->line_draw_info.line_width);\r
}\r
else {\r
ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);\r
* change the buf address in req struct\r
*/\r
\r
- req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);\r
+ req->mmu_info.base_addr = ((uint32_t)(MMU_Base_phys)>>2);\r
req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));\r
\r
/*record the malloc buf for the cmd end to release*/\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
\r
- /* Free the page table */\r
- if (pages != NULL)\r
- kfree(pages);\r
+ rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);\r
+ reg->MMU_len = AllSize + 16;\r
\r
return 0;\r
}\r
while(0);\r
\r
- if (pages != NULL)\r
- kfree(pages);\r
-\r
- if (MMU_Base != NULL)\r
- kfree(MMU_Base);\r
-\r
return status;\r
}\r
\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
- uint32_t *MMU_Base, *MMU_p;\r
+ uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;\r
int ret;\r
int status;\r
uint32_t uv_size, v_size;\r
return -EINVAL;\r
}\r
\r
+ SrcMemSize = (SrcMemSize + 15) & (~15);\r
+ DstMemSize = (DstMemSize + 15) & (~15);\r
+\r
AllSize = SrcMemSize + DstMemSize;\r
\r
- pages = kzalloc((AllSize)* sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
- status = RGA_MALLOC_ERROR;\r
- break;\r
- }\r
+ pages = rga_mmu_buf.pages;\r
\r
- /*\r
- * Allocate MMU Index mem\r
- * This mem release in run_to_done fun\r
- */\r
- MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed\n");\r
+ if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {\r
+ pr_err("RGA Get MMU mem failed\n");\r
status = RGA_MALLOC_ERROR;\r
break;\r
}\r
\r
+ mutex_lock(&rga_service.lock);\r
+ MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));\r
+ MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));\r
+ mutex_unlock(&rga_service.lock);\r
+\r
/* map src pages */\r
if ((req->mmu_info.mmu_flag >> 8) & 1) {\r
if (req->sg_src) {\r
- ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize);\r
+ ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize,req->line_draw_info.flag);\r
}\r
else {\r
ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
\r
if((req->mmu_info.mmu_flag >> 10) & 1) {\r
if (req->sg_dst) {\r
- ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize);\r
+ ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize, req->line_draw_info.line_width);\r
}\r
else {\r
ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);\r
}\r
}\r
\r
- MMU_Base[AllSize] = MMU_Base[AllSize - 1];\r
+ MMU_Base[AllSize] = MMU_Base[AllSize];\r
\r
/* zsq\r
* change the buf address in req struct\r
* for the reason of lie to MMU\r
*/\r
\r
- req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);\r
+ req->mmu_info.base_addr = ((uint32_t)(MMU_Base_phys)>>2);\r
\r
uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
\r
- /* Free the page table */\r
- if (pages != NULL)\r
- {\r
- kfree(pages);\r
- }\r
+ rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);\r
+ reg->MMU_len = AllSize + 16;\r
\r
return 0;\r
}\r
while(0);\r
\r
- if (pages != NULL)\r
- kfree(pages);\r
-\r
- if (MMU_Base != NULL)\r
- kfree(MMU_Base);\r
-\r
return status;\r
}\r
\r