#include <asm/atomic.h>\r
#include <asm/cacheflush.h>\r
#include "rga_mmu_info.h"\r
+#include <linux/delay.h>\r
\r
extern rga_service_info rga_service;\r
-//extern int mmu_buff_temp[1024];\r
+extern struct rga_mmu_buf_t rga_mmu_buf;\r
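+/*
+ * Assumed shape of rga_mmu_buf_t (declared in rga_mmu_info.h), based on
+ * how it is used below: a preallocated page-table ring.
+ *   front       - index of the next free table entry
+ *   back        - limit index, advanced as the hardware retires jobs
+ *   size        - ring capacity in entries (a power of two, see below)
+ *   buf         - physical base address of the ring
+ *   buf_virtual - kernel virtual base address of the ring
+ *   pages       - scratch struct page * array for rga_MapUserMemory()
+ */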
\r
#define KERNEL_SPACE_VALID 0xc0000000\r
\r
-#define V7_VATOPA_SUCESS_MASK (0x1)\r
-#define V7_VATOPA_GET_PADDR(X) (X & 0xFFFFF000)\r
-#define V7_VATOPA_GET_INER(X) ((X>>4) & 7)\r
-#define V7_VATOPA_GET_OUTER(X) ((X>>2) & 3)\r
-#define V7_VATOPA_GET_SH(X) ((X>>7) & 1)\r
-#define V7_VATOPA_GET_NS(X) ((X>>9) & 1)\r
-#define V7_VATOPA_GET_SS(X) ((X>>1) & 1)\r
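+/*
+ * MMU page tables are carved out of the shared rga_mmu_buf ring instead
+ * of a kzalloc() per request: _get_try() checks that 'size' entries fit,
+ * _get() commits the reservation by advancing front.
+ */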
+static int rga_mmu_buf_get(struct rga_mmu_buf_t *t, uint32_t size)\r
+{\r
+ mutex_lock(&rga_service.lock);\r
+ t->front += size;\r
+ mutex_unlock(&rga_service.lock);\r
+\r
+ return 0;\r
+}\r
\r
-#if 0\r
-static unsigned int armv7_va_to_pa(unsigned int v_addr)\r
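+/*
+ * Check whether 'size' entries fit in the ring without committing them.
+ * Returns 0 when they do; front is reset to 0 when the allocation has
+ * to wrap. Returns -1 (busy) otherwise.
+ */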
+static int rga_mmu_buf_get_try(struct rga_mmu_buf_t *t, uint32_t size)\r
{\r
- unsigned int p_addr;\r
- __asm__ volatile ( "mcr p15, 0, %1, c7, c8, 0\n"\r
- "isb\n"\r
- "dsb\n"\r
- "mrc p15, 0, %0, c7, c4, 0\n"\r
- : "=r" (p_addr)\r
- : "r" (v_addr)\r
- : "cc");\r
-\r
- if (p_addr & V7_VATOPA_SUCESS_MASK)\r
- return 0xFFFFFFFF;\r
- else\r
- return (V7_VATOPA_GET_SS(p_addr) ? 0xFFFFFFFF : V7_VATOPA_GET_PADDR(p_addr));\r
+    int ret = 0;
+
+    mutex_lock(&rga_service.lock);
+    if ((t->back - t->front) > t->size) {
+        if (t->front + size > t->back - t->size)
+            ret = -1;
+    } else {
+        if ((t->front + size) > t->back) {
+            ret = -1;
+        } else if (t->front + size > t->size) {
+            if (size > (t->back - t->size))
+                ret = -1;
+            else
+                t->front = 0;
+        }
+    }
+    mutex_unlock(&rga_service.lock);
+
+    return ret;
}\r
-#endif\r
\r
-static int rga_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)\r
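+/* Number of pages spanned by [Mem, Mem + MemSize), including partially
+ * used first and last pages. */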
+static int rga_mem_size_cal(unsigned long Mem, uint32_t MemSize, unsigned long *StartAddr)\r
{\r
- uint32_t start, end;\r
+ unsigned long start, end;\r
uint32_t pageCount;\r
\r
end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;\r
return pageCount;\r
}\r
\r
-static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_addr,\r
- int format, uint32_t w, uint32_t h, uint32_t *StartAddr )\r
+static int rga_buf_size_cal(unsigned long yrgb_addr, unsigned long uv_addr, unsigned long v_addr,\r
+ int format, uint32_t w, uint32_t h, unsigned long *StartAddr )\r
{\r
uint32_t size_yrgb = 0;\r
uint32_t size_uv = 0;\r
uint32_t size_v = 0;\r
uint32_t stride = 0;\r
- uint32_t start, end;\r
+ unsigned long start, end;\r
uint32_t pageCount;\r
\r
switch(format)\r
\r
static int rga_MapUserMemory(struct page **pages,\r
uint32_t *pageTable,\r
- uint32_t Memory,\r
+ unsigned long Memory,\r
uint32_t pageCount)\r
{\r
int32_t result;\r
uint32_t i;\r
uint32_t status;\r
- uint32_t Address;\r
- //uint32_t temp;\r
+ unsigned long Address;\r
\r
status = 0;\r
Address = 0;\r
\r
- do\r
- {\r
+ do {\r
        down_read(&current->mm->mmap_sem);
result = get_user_pages(current,\r
current->mm,\r
{\r
struct vm_area_struct *vma;\r
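+            /* get_user_pages() pinned fewer pages than requested: drop
+             * any pins taken before falling back to the VMA walk below. */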
\r
+            if (result > 0) {
+                down_read(&current->mm->mmap_sem);
+                for (i = 0; i < result; i++)
+                    put_page(pages[i]);
+                up_read(&current->mm->mmap_sem);
+            }
+\r
for(i=0; i<pageCount; i++)\r
{\r
vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);\r
\r
if (vma)//&& (vma->vm_flags & VM_PFNMAP) )\r
{\r
- #if 1\r
do\r
{\r
pte_t * pte;\r
}\r
while (0);\r
\r
- #else\r
- do\r
- {\r
- pte_t * pte;\r
- spinlock_t * ptl;\r
- unsigned long pfn;\r
- pgd_t * pgd;\r
- pud_t * pud;\r
- pmd_t * pmd;\r
-\r
- pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);\r
- pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);\r
- pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);\r
- pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);\r
-\r
- pfn = pte_pfn(*pte);\r
- Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));\r
- pte_unmap_unlock(pte, ptl);\r
- }\r
- while (0);\r
- #endif\r
-\r
pageTable[i] = Address;\r
}\r
else\r
pageTable[i] = page_to_phys(pages[i]);\r
}\r
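+            /* All physical addresses are recorded in the table, so the
+             * pins taken by get_user_pages() can be dropped again. */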
\r
+            down_read(&current->mm->mmap_sem);
+            for (i = 0; i < result; i++)
+                put_page(pages[i]);
+            up_read(&current->mm->mmap_sem);
+\r
return 0;\r
}\r
while(0);\r
return status;\r
}\r
\r
+static int rga_MapION(struct sg_table *sg,\r
+ uint32_t *Memory,\r
+ int32_t pageCount,\r
+ uint32_t offset)\r
+{\r
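+    /*
+     * Flatten an ion/dma-buf scatterlist into per-page physical
+     * addresses for the RGA MMU. 'offset' selects the first byte of
+     * interest; whole sg entries that lie before it are skipped.
+     */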
+ uint32_t i;\r
+ uint32_t status;\r
+ unsigned long Address;\r
+ uint32_t mapped_size = 0;\r
+ uint32_t len = 0;\r
+ struct scatterlist *sgl = sg->sgl;\r
+ uint32_t sg_num = 0;\r
+\r
+ status = 0;\r
+ Address = 0;\r
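+    /* Callers reuse the line_draw_info fields to pass this byte offset;
+     * convert it to whole pages before walking the scatterlist. */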
+ offset = offset >> PAGE_SHIFT;\r
+ if (offset != 0) {\r
+ do {\r
+ len += (sg_dma_len(sgl) >> PAGE_SHIFT);\r
+ if (len == offset) {\r
+ sg_num += 1;\r
+ break;\r
+ }\r
+ else {\r
+ if (len > offset)\r
+ break;\r
+ }\r
+ sg_num += 1;\r
+ }\r
+ while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));\r
+\r
+ sgl = sg->sgl;\r
+ len = 0;\r
+ do {\r
+ len += (sg_dma_len(sgl) >> PAGE_SHIFT);\r
+ sgl = sg_next(sgl);\r
+ }\r
+ while(--sg_num);\r
+\r
+ offset -= len;\r
+\r
+        len = sg_dma_len(sgl) >> PAGE_SHIFT;
+        /* 'offset' is in pages and is applied through the loop index
+         * below, so it must not also be added to the byte address. */
+        Address = sg_phys(sgl);
+
+        for (i = offset; i < len; i++)
+            Memory[i - offset] = Address + (i << PAGE_SHIFT);
+ mapped_size += (len - offset);\r
+        sg_num = 1;
+        sgl = sg_next(sgl);
+        /* A single-entry scatterlist leaves sgl NULL here; check it
+         * before each pass instead of after, as a do/while would. */
+        while (sgl && (mapped_size < pageCount) && (sg_num < sg->nents)) {
+            len = sg_dma_len(sgl) >> PAGE_SHIFT;
+            Address = sg_phys(sgl);
+
+            for (i = 0; i < len; i++)
+                Memory[mapped_size + i] = Address + (i << PAGE_SHIFT);
+
+            mapped_size += len;
+            sg_num += 1;
+            sgl = sg_next(sgl);
+        }
+ }\r
+ else {\r
+ do {\r
+ len = sg_dma_len(sgl) >> PAGE_SHIFT;\r
+ Address = sg_phys(sgl);\r
+ for(i=0; i<len; i++) {\r
+ Memory[mapped_size + i] = Address + (i << PAGE_SHIFT);\r
+ }\r
+ mapped_size += len;\r
+ sg_num += 1;\r
+ }\r
+ while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));\r
+ }\r
+ return 0;\r
+}\r
+\r
+\r
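+/*
+ * BitBlt: one ring slice holds the src table followed by the dst table,
+ * each rounded up to 16 entries. Physically contiguous buffers (MMU
+ * flag clear) are identity-mapped without pinning user pages.
+ */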
static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
int SrcMemSize, DstMemSize;\r
- uint32_t SrcStart, DstStart;\r
+ unsigned long SrcStart, DstStart;\r
uint32_t i;\r
uint32_t AllSize;\r
- uint32_t *MMU_Base, *MMU_p;\r
+ uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;\r
int ret;\r
int status;\r
uint32_t uv_size, v_size;\r
\r
MMU_Base = NULL;\r
\r
- do\r
- {\r
+ SrcMemSize = 0;\r
+ DstMemSize = 0;\r
+\r
+ do {\r
/* cal src buf mmu info */\r
SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
req->src.format, req->src.vir_w, req->src.act_h + req->src.y_offset,\r
}\r
\r
/* cal dst buf mmu info */\r
+\r
DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
&DstStart);\r
- if(DstMemSize == 0) {\r
+ if(DstMemSize == 0)\r
return -EINVAL;\r
- }\r
\r
/* Cal out the needed mem size */\r
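+        /* Round each table up to 16 entries so both mappings start
+         * 16-entry aligned inside the shared ring. */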
+ SrcMemSize = (SrcMemSize + 15) & (~15);\r
+ DstMemSize = (DstMemSize + 15) & (~15);\r
AllSize = SrcMemSize + DstMemSize;\r
\r
- pages = kzalloc((AllSize + 1)* sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
+ if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {\r
+ pr_err("RGA Get MMU mem failed\n");\r
status = RGA_MALLOC_ERROR;\r
break;\r
}\r
\r
- MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
- if(MMU_Base == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed\n");\r
- status = RGA_MALLOC_ERROR;\r
- break;\r
- }\r
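+        /* front is wrapped with a power-of-two mask, so rga_mmu_buf.size
+         * must be a power of two for this indexing to be correct. */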
+ mutex_lock(&rga_service.lock);\r
+ MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));\r
+ MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));\r
+ mutex_unlock(&rga_service.lock);\r
\r
- if(req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map src memory failed\n");\r
- status = ret;\r
- break;\r
+ pages = rga_mmu_buf.pages;\r
+\r
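+        /* mmu_flag bit 8 enables MMU translation for the source; bit 10,
+         * checked below, does the same for the destination. */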
+ if((req->mmu_info.mmu_flag >> 8) & 1) {\r
+ if (req->sg_src) {\r
+ ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize, req->line_draw_info.flag);\r
+ }\r
+ else {\r
+ ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
+ if (ret < 0) {\r
+ pr_err("rga map src memory failed\n");\r
+ status = ret;\r
+ break;\r
+ }\r
}\r
}\r
- else\r
- {\r
+ else {\r
MMU_p = MMU_Base;\r
\r
- if(req->src.yrgb_addr == (uint32_t)rga_service.pre_scale_buf)\r
- {\r
- /* Down scale ratio over 2, Last prc */\r
- /* MMU table copy from pre scale table */\r
-\r
+ if(req->src.yrgb_addr == (unsigned long)rga_service.pre_scale_buf) {\r
for(i=0; i<SrcMemSize; i++)\r
- {\r
MMU_p[i] = rga_service.pre_scale_buf[i];\r
- }\r
}\r
- else\r
- {\r
+ else {\r
for(i=0; i<SrcMemSize; i++)\r
- {\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));\r
- }\r
+ MMU_p[i] = (uint32_t)((SrcStart + i) << PAGE_SHIFT);\r
}\r
}\r
\r
- if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- #if 0\r
- ktime_t start, end;\r
- start = ktime_get();\r
- #endif\r
- ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map dst memory failed\n");\r
- status = ret;\r
- break;\r
+ if ((req->mmu_info.mmu_flag >> 10) & 1) {\r
+ if (req->sg_dst) {\r
+ ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize, req->line_draw_info.line_width);\r
+ }\r
+ else {\r
+ ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);\r
+ if (ret < 0) {\r
+ pr_err("rga map dst memory failed\n");\r
+ status = ret;\r
+ break;\r
+ }\r
}\r
-\r
- #if 0\r
- end = ktime_get();\r
- end = ktime_sub(end, start);\r
- printk("dst mmu map time = %d\n", (int)ktime_to_us(end));\r
- #endif\r
}\r
- else\r
- {\r
+ else {\r
MMU_p = MMU_Base + SrcMemSize;\r
-\r
for(i=0; i<DstMemSize; i++)\r
- {\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
- }\r
+ MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);\r
}\r
\r
- MMU_Base[AllSize] = MMU_Base[AllSize - 1];\r
+ MMU_Base[AllSize] = MMU_Base[AllSize-1];\r
\r
/* zsq\r
* change the buf address in req struct\r
*/\r
\r
- req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);\r
+ req->mmu_info.base_addr = (unsigned long)MMU_Base_phys >> 2;\r
\r
uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);\r
req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);\r
\r
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);\r
+ uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
\r
- /*record the malloc buf for the cmd end to release*/\r
- reg->MMU_base = MMU_Base;\r
+ req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);\r
+ req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);\r
\r
/* flush data to DDR */\r
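+        /* dmac_flush_range()/outer_flush_range() only exist on 32-bit
+         * ARM; arm64 kernels of this era provide __dma_flush_range()
+         * for the equivalent clean+invalidate by virtual range. */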
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
+ outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+ #endif\r
\r
- status = 0;\r
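+        /* Commit the entries reserved by rga_mmu_buf_get_try(); MMU_len
+         * records how much of the ring this request consumed. */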
+ rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);\r
+ reg->MMU_len = AllSize + 16;\r
\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
+ status = 0;\r
\r
return status;\r
}\r
while(0);\r
\r
-\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
-\r
- /* Free MMU table */\r
- if(MMU_Base != NULL) {\r
- kfree(MMU_Base);\r
- }\r
-\r
return status;\r
}\r
\r
static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
int SrcMemSize, DstMemSize, CMDMemSize;\r
- uint32_t SrcStart, DstStart, CMDStart;\r
+ unsigned long SrcStart, DstStart, CMDStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
- uint32_t *MMU_Base = NULL;\r
+ uint32_t *MMU_Base = NULL, *MMU_Base_phys = NULL;\r
uint32_t *MMU_p;\r
int ret, status;\r
uint32_t stride;\r
byte_num = sw >> shift;\r
stride = (byte_num + 3) & (~3);\r
\r
- do\r
- {\r
-\r
+ do {\r
SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);\r
if(SrcMemSize == 0) {\r
return -EINVAL;\r
return -EINVAL;\r
}\r
\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
+ CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
if(CMDMemSize == 0) {\r
return -EINVAL;\r
}\r
\r
- AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
+ SrcMemSize = (SrcMemSize + 15) & (~15);\r
+ DstMemSize = (DstMemSize + 15) & (~15);\r
+ CMDMemSize = (CMDMemSize + 15) & (~15);\r
\r
- pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
- return -EINVAL;\r
- }\r
+ AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
\r
- MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
- if(MMU_Base == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed\n");\r
+ if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {\r
+ pr_err("RGA Get MMU mem failed\n");\r
+ status = RGA_MALLOC_ERROR;\r
break;\r
}\r
\r
+ mutex_lock(&rga_service.lock);\r
+ MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));\r
+ MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));\r
+ mutex_unlock(&rga_service.lock);\r
+\r
+ pages = rga_mmu_buf.pages;\r
+\r
/* map CMD addr */\r
- for(i=0; i<CMDMemSize; i++)\r
- {\r
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i)<<PAGE_SHIFT));\r
+ for(i=0; i<CMDMemSize; i++) {\r
+ MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i)<<PAGE_SHIFT));\r
}\r
\r
/* map src addr */\r
- if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
+ if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {\r
ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
- if (ret < 0)\r
- {\r
+ if (ret < 0) {\r
pr_err("rga map src memory failed\n");\r
status = ret;\r
break;\r
}\r
}\r
- else\r
- {\r
+ else {\r
MMU_p = MMU_Base + CMDMemSize;\r
\r
for(i=0; i<SrcMemSize; i++)\r
}\r
\r
/* map dst addr */\r
- if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
+        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
- if (ret < 0)\r
- {\r
+ if (ret < 0) {\r
pr_err("rga map dst memory failed\n");\r
status = ret;\r
break;\r
}\r
}\r
- else\r
- {\r
+ else {\r
MMU_p = MMU_Base + CMDMemSize + SrcMemSize;\r
-\r
for(i=0; i<DstMemSize; i++)\r
- {\r
MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
- }\r
}\r
\r
\r
req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);\r
\r
-\r
/*record the malloc buf for the cmd end to release*/\r
reg->MMU_base = MMU_Base;\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+ #endif\r
\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
+        rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
+        reg->MMU_len = AllSize + 16;
+
+        status = 0;
\r
return status;\r
\r
}\r
while(0);\r
\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
-\r
- /* Free mmu table */\r
- if (MMU_Base != NULL) {\r
- kfree(MMU_Base);\r
- }\r
-\r
-    return 0;
+    return status;
}\r
\r
static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
int DstMemSize;\r
- uint32_t DstStart;\r
+ unsigned long DstStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
- uint32_t *MMU_Base, *MMU_p;\r
+ uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;\r
int ret;\r
int status;\r
\r
MMU_Base = NULL;\r
\r
- do\r
- {\r
+ do {\r
DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
&DstStart);\r
return -EINVAL;\r
}\r
\r
- AllSize = DstMemSize;\r
+ AllSize = (DstMemSize + 15) & (~15);\r
\r
- pages = kzalloc((AllSize + 1)* sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
- status = RGA_MALLOC_ERROR;\r
- break;\r
- }\r
+ pages = rga_mmu_buf.pages;\r
\r
- MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed\n");\r
+ if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {\r
+ pr_err("RGA Get MMU mem failed\n");\r
status = RGA_MALLOC_ERROR;\r
break;\r
}\r
\r
- if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map dst memory failed\n");\r
- status = ret;\r
- break;\r
+ mutex_lock(&rga_service.lock);\r
+ MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));\r
+ MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));\r
+ mutex_unlock(&rga_service.lock);\r
+\r
+ if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {\r
+ if (req->sg_dst) {\r
+ ret = rga_MapION(req->sg_dst, &MMU_Base[0], DstMemSize, req->line_draw_info.line_width);\r
+ }\r
+ else {\r
+ ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);\r
+ if (ret < 0) {\r
+ pr_err("rga map dst memory failed\n");\r
+ status = ret;\r
+ break;\r
+ }\r
}\r
}\r
- else\r
- {\r
+ else {\r
MMU_p = MMU_Base;\r
-\r
for(i=0; i<DstMemSize; i++)\r
- {\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
- }\r
+ MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);\r
}\r
\r
MMU_Base[AllSize] = MMU_Base[AllSize - 1];\r
* change the buf address in req struct\r
*/\r
\r
- req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);\r
+ req->mmu_info.base_addr = ((unsigned long)(MMU_Base_phys)>>2);\r
req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));\r
\r
/*record the malloc buf for the cmd end to release*/\r
reg->MMU_base = MMU_Base;\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+ #endif\r
\r
- /* Free the page table */\r
- if (pages != NULL)\r
- kfree(pages);\r
+ rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);\r
+ reg->MMU_len = AllSize + 16;\r
\r
return 0;\r
}\r
while(0);\r
\r
- if (pages != NULL)\r
- kfree(pages);\r
-\r
- if (MMU_Base != NULL)\r
- kfree(MMU_Base);\r
-\r
return status;\r
}\r
\r
\r
static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
- int DstMemSize;\r
- uint32_t DstStart;\r
- struct page **pages = NULL;\r
- uint32_t i;\r
- uint32_t AllSize;\r
- uint32_t *MMU_Base, *MMU_p;\r
- int ret, status;\r
-\r
- MMU_Base = NULL;\r
-\r
- do\r
- {\r
- /* cal dst buf mmu info */\r
- DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
- req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
- &DstStart);\r
- if(DstMemSize == 0) {\r
- return -EINVAL;\r
- }\r
-\r
- AllSize = DstMemSize;\r
-\r
- pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
- status = RGA_MALLOC_ERROR;\r
- break;\r
- }\r
-\r
- MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed\n");\r
- status = RGA_MALLOC_ERROR;\r
- break;\r
- }\r
-\r
- if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map dst memory failed\n");\r
- status = ret;\r
- break;\r
- }\r
- }\r
- else\r
- {\r
- MMU_p = MMU_Base;\r
-\r
- for(i=0; i<DstMemSize; i++)\r
- {\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
- }\r
- }\r
-\r
- /* zsq\r
- * change the buf address in req struct\r
- * for the reason of lie to MMU\r
- */\r
- req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);\r
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));\r
-\r
-\r
- /*record the malloc buf for the cmd end to release*/\r
- reg->MMU_base = MMU_Base;\r
-\r
- /* flush data to DDR */\r
- dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
-\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
-\r
- return 0;\r
-\r
- }\r
- while(0);\r
-\r
- if (pages != NULL)\r
- kfree(pages);\r
-\r
- if (MMU_Base != NULL)\r
- kfree(MMU_Base);\r
-\r
- return status;\r
+ return 0;\r
}\r
\r
static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
- int SrcMemSize, DstMemSize;\r
- uint32_t SrcStart, DstStart;\r
- struct page **pages = NULL;\r
- uint32_t i;\r
- uint32_t AllSize;\r
- uint32_t *MMU_Base, *MMU_p;\r
- int ret, status;\r
- uint32_t uv_size, v_size;\r
-\r
- MMU_Base = NULL;\r
-\r
- do\r
- {\r
- /* cal src buf mmu info */\r
- SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
- req->src.format, req->src.vir_w, req->src.vir_h,\r
- &SrcStart);\r
- if(SrcMemSize == 0) {\r
- return -EINVAL;\r
- }\r
-\r
- /* cal dst buf mmu info */\r
- DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
- req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
- &DstStart);\r
- if(DstMemSize == 0) {\r
- return -EINVAL;\r
- }\r
-\r
- AllSize = SrcMemSize + DstMemSize;\r
-\r
- pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
- status = RGA_MALLOC_ERROR;\r
- break;\r
- }\r
-\r
- MMU_Base = kzalloc((AllSize + 1)* sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed\n");\r
- status = RGA_MALLOC_ERROR;\r
- break;\r
- }\r
-\r
- if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
- if (ret < 0)\r
- {\r
- pr_err("rga map src memory failed\n");\r
- status = ret;\r
- break;\r
- }\r
- }\r
- else\r
- {\r
- MMU_p = MMU_Base;\r
-\r
- for(i=0; i<SrcMemSize; i++)\r
- {\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));\r
- }\r
- }\r
-\r
-\r
- if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);\r
- if (ret < 0)\r
- {\r
- pr_err("rga map dst memory failed\n");\r
- status = ret;\r
- break;\r
- }\r
- }\r
- else\r
- {\r
- MMU_p = MMU_Base + SrcMemSize;\r
-\r
- for(i=0; i<DstMemSize; i++)\r
- {\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
- }\r
- }\r
-\r
- MMU_Base[AllSize] = MMU_Base[AllSize - 1];\r
-\r
- /* zsq\r
- * change the buf address in req struct\r
- * for the reason of lie to MMU\r
- */\r
- req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);\r
-\r
- uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
- v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
-\r
- req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));\r
- req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);\r
- req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);\r
-\r
- uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
- v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
-\r
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);\r
- req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);\r
- req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);\r
-\r
-\r
- /*record the malloc buf for the cmd end to release*/\r
- reg->MMU_base = MMU_Base;\r
-\r
- /* flush data to DDR */\r
- dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
-\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
-\r
- return 0;\r
- }\r
- while(0);\r
-\r
- if (pages != NULL)\r
- kfree(pages);\r
-\r
- if (MMU_Base != NULL)\r
- kfree(MMU_Base);\r
-\r
- return status;\r
+ return 0;\r
}\r
\r
\r
static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
int SrcMemSize, DstMemSize;\r
- uint32_t SrcStart, DstStart;\r
+ unsigned long SrcStart, DstStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
- uint32_t *MMU_Base, *MMU_p;\r
+ uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;\r
int ret;\r
int status;\r
uint32_t uv_size, v_size;\r
\r
MMU_Base = NULL;\r
\r
- do\r
- {\r
+ do {\r
/* cal src buf mmu info */\r
SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
req->src.format, req->src.vir_w, req->src.vir_h,\r
return -EINVAL;\r
}\r
\r
+ SrcMemSize = (SrcMemSize + 15) & (~15);\r
+ DstMemSize = (DstMemSize + 15) & (~15);\r
+\r
AllSize = SrcMemSize + DstMemSize;\r
\r
- pages = kzalloc((AllSize)* sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL)\r
- {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
- status = RGA_MALLOC_ERROR;\r
- break;\r
- }\r
+ pages = rga_mmu_buf.pages;\r
\r
- /*\r
- * Allocate MMU Index mem\r
- * This mem release in run_to_done fun\r
- */\r
- MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed\n");\r
+ if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {\r
+ pr_err("RGA Get MMU mem failed\n");\r
status = RGA_MALLOC_ERROR;\r
break;\r
}\r
\r
+ mutex_lock(&rga_service.lock);\r
+ MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));\r
+ MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));\r
+ mutex_unlock(&rga_service.lock);\r
+\r
/* map src pages */\r
- if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map src memory failed\n");\r
- status = ret;\r
- break;\r
+ if ((req->mmu_info.mmu_flag >> 8) & 1) {\r
+ if (req->sg_src) {\r
+                ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize, req->line_draw_info.flag);
+ }\r
+ else {\r
+ ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
+ if (ret < 0) {\r
+ pr_err("rga map src memory failed\n");\r
+ status = ret;\r
+ break;\r
+ }\r
}\r
}\r
- else\r
- {\r
+ else {\r
MMU_p = MMU_Base;\r
\r
for(i=0; i<SrcMemSize; i++)\r
- {\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));\r
- }\r
+ MMU_p[i] = (uint32_t)((SrcStart + i) << PAGE_SHIFT);\r
}\r
\r
-\r
- if(req->dst.yrgb_addr >= KERNEL_SPACE_VALID)\r
+ if((req->mmu_info.mmu_flag >> 10) & 1) {\r
+ if (req->sg_dst) {\r
+ ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize, req->line_draw_info.line_width);\r
+ }\r
+ else {\r
+ ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);\r
+ if (ret < 0) {\r
+ pr_err("rga map dst memory failed\n");\r
+ status = ret;\r
+ break;\r
+ }\r
+ }\r
+ }\r
+ else\r
{\r
/* kernel space */\r
MMU_p = MMU_Base + SrcMemSize;\r
\r
- if(req->dst.yrgb_addr == (uint32_t)rga_service.pre_scale_buf)\r
- {\r
+ if(req->dst.yrgb_addr == (unsigned long)rga_service.pre_scale_buf) {\r
for(i=0; i<DstMemSize; i++)\r
- {\r
MMU_p[i] = rga_service.pre_scale_buf[i];\r
- }\r
}\r
- else\r
- {\r
+ else {\r
for(i=0; i<DstMemSize; i++)\r
- {\r
- MMU_p[i] = virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
- }\r
- }\r
- }\r
- else\r
- {\r
- /* user space */\r
- ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);\r
- if (ret < 0)\r
- {\r
- pr_err("rga map dst memory failed\n");\r
- status = ret;\r
- break;\r
+ MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);\r
}\r
}\r
\r
- MMU_Base[AllSize] = MMU_Base[AllSize - 1];\r
+        MMU_Base[AllSize] = MMU_Base[AllSize - 1];
\r
/* zsq\r
* change the buf address in req struct\r
* for the reason of lie to MMU\r
*/\r
\r
- req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);\r
+ req->mmu_info.base_addr = ((unsigned long)(MMU_Base_phys)>>2);\r
\r
uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
reg->MMU_base = MMU_Base;\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+ #endif\r
\r
- /* Free the page table */\r
- if (pages != NULL)\r
- {\r
- kfree(pages);\r
- }\r
+ rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);\r
+ reg->MMU_len = AllSize + 16;\r
\r
return 0;\r
}\r
while(0);\r
\r
- if (pages != NULL)\r
- kfree(pages);\r
-\r
- if (MMU_Base != NULL)\r
- kfree(MMU_Base);\r
-\r
return status;\r
}\r
\r
static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
int SrcMemSize, CMDMemSize;\r
- uint32_t SrcStart, CMDStart;\r
+ unsigned long SrcStart, CMDStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
\r
MMU_Base = NULL;\r
\r
- do\r
- {\r
+ do {\r
/* cal src buf mmu info */\r
SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);\r
if(SrcMemSize == 0) {\r
}\r
\r
/* cal cmd buf mmu info */\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
+ CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
if(CMDMemSize == 0) {\r
return -EINVAL;\r
}\r
}\r
\r
for(i=0; i<CMDMemSize; i++) {\r
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));\r
+ MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));\r
}\r
\r
if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
reg->MMU_base = MMU_Base;\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ #endif\r
\r
if (pages != NULL) {\r
/* Free the page table */\r
static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
int SrcMemSize, CMDMemSize;\r
- uint32_t SrcStart, CMDStart;\r
+ unsigned long SrcStart, CMDStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
}\r
\r
/* cal cmd buf mmu info */\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
+ CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
if(CMDMemSize == 0) {\r
return -EINVAL;\r
}\r
reg->MMU_base = MMU_Base;\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ #endif\r
\r
if (pages != NULL) {\r
/* Free the page table */\r