X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=drivers%2Fvideo%2Frockchip%2Frga2%2Frga2_mmu_info.c;h=2cd40a5d040c9294213ba66593caa16df30116c5;hb=c2b7f944b6fae5282b767c3e012a011caabc4987;hp=35529aeac04676c18b17cfcdf145f4f9a77fd817;hpb=9983901d21cae4402e9c01b91ee5df1a979207b7;p=firefly-linux-kernel-4.4.55.git

diff --git a/drivers/video/rockchip/rga2/rga2_mmu_info.c b/drivers/video/rockchip/rga2/rga2_mmu_info.c
index 35529aeac046..2cd40a5d040c 100644
--- a/drivers/video/rockchip/rga2/rga2_mmu_info.c
+++ b/drivers/video/rockchip/rga2/rga2_mmu_info.c
@@ -14,6 +14,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -34,6 +35,16 @@ extern struct rga2_mmu_buf_t rga2_mmu_buf;
 #define V7_VATOPA_GET_NS(X) ((X>>9) & 1)
 #define V7_VATOPA_GET_SS(X) ((X>>1) & 1)
 
+static void rga_dma_flush_range(void *pstart, void *pend)
+{
+#ifdef CONFIG_ARM
+    dmac_flush_range(pstart, pend);
+    outer_flush_range(virt_to_phys(pstart), virt_to_phys(pend));
+#elif defined(CONFIG_ARM64)
+    __dma_flush_range(pstart, pend);
+#endif
+}
+
 #if 0
 static unsigned int armv7_va_to_pa(unsigned int v_addr)
 {
@@ -62,46 +73,44 @@ static int rga2_mmu_buf_get(struct rga2_mmu_buf_t *t, uint32_t size)
     return 0;
 }
 
-static int rga2_mmu_buf_get_try(struct rga2_mmu_buf_t *t, uint32_t size)
+static int rga2_mmu_buf_get_try(struct rga2_mmu_buf_t *t, uint32_t size)
+{
+    int ret = 0;
+
+    mutex_lock(&rga2_service.lock);
+    if ((t->back - t->front) > t->size) {
+        if (t->front + size > t->back - t->size) {
+            pr_info("front %d, back %d dsize %d size %d",
+                t->front, t->back, t->size, size);
+            ret = -ENOMEM;
+            goto out;
+        }
+    } else {
+        if ((t->front + size) > t->back) {
+            pr_info("front %d, back %d dsize %d size %d",
+                t->front, t->back, t->size, size);
+            ret = -ENOMEM;
+            goto out;
+        }
+
+        if (t->front + size > t->size) {
+            if (size > (t->back - t->size)) {
+                pr_info("front %d, back %d dsize %d size %d",
+                    t->front, t->back, t->size, size);
+                ret = -ENOMEM;
+                goto out;
+            }
+            t->front = 0;
+        }
+    }
+out:
+    mutex_unlock(&rga2_service.lock);
+    return ret;
+}
+
+static int rga2_mem_size_cal(unsigned long Mem, uint32_t MemSize, unsigned long *StartAddr)
 {
-    mutex_lock(&rga2_service.lock);
-    if((t->back - t->front) > t->size) {
-        if(t->front + size > t->back - t->size)
-            return -1;
-    }
-    else {
-        if((t->front + size) > t->back)
-            return -1;
-
-        if(t->front + size > t->size) {
-            if (size > (t->back - t->size)) {
-                return -1;
-            }
-            t->front = 0;
-        }
-    }
-    mutex_unlock(&rga2_service.lock);
-
-    return 0;
-}
-
-#if 0
-static int rga2_mmu_buf_cal(struct rga2_mmu_buf_t *t, uint32_t size)
-{
-    if((t->front + size) > t->back) {
-        return -1;
-    }
-    else {
-        return 0;
-    }
-}
-#endif
-
-
-
-static int rga2_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)
-{
-    uint32_t start, end;
+    unsigned long start, end;
     uint32_t pageCount;
 
     end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;
@@ -111,14 +120,14 @@ static int rga2_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr
     return pageCount;
 }
 
-static int rga2_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_addr,
-    int format, uint32_t w, uint32_t h, uint32_t *StartAddr )
+static int rga2_buf_size_cal(unsigned long yrgb_addr, unsigned long uv_addr, unsigned long v_addr,
+    int format, uint32_t w, uint32_t h, unsigned long *StartAddr )
 {
     uint32_t size_yrgb = 0;
     uint32_t size_uv = 0;
     uint32_t size_v = 0;
     uint32_t stride = 0;
-    uint32_t start, end;
+    unsigned long start, end;
     uint32_t pageCount;
 
     switch(format)
@@ -178,7 +187,6 @@ static int rga2_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_ad
             size_yrgb = stride * h;
             size_uv = stride * h;
             start = MIN(yrgb_addr, uv_addr);
-            start >>= PAGE_SHIFT;
             end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
             end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
@@ -229,6 +237,18 @@ static int rga2_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_ad
         case RK_FORMAT_BPP8 :
             break;
 #endif
+        case RGA2_FORMAT_YCbCr_420_SP_10B:
+        case RGA2_FORMAT_YCrCb_420_SP_10B:
+            stride = (w + 3) & (~3);
+            stride = stride;
+            size_yrgb = stride * h;
+            size_uv = (stride * (h >> 1));
+            start = MIN(yrgb_addr, uv_addr);
+            start >>= PAGE_SHIFT;
+            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
+            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+            pageCount = end - start;
+            break;
         default :
             pageCount = 0;
             start = 0;
@@ -239,206 +259,319 @@ static int rga2_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_ad
     return pageCount;
 }
 
-static int rga2_MapUserMemory(struct page **pages,
-                              uint32_t *pageTable,
-                              uint32_t Memory,
-                              uint32_t pageCount)
+static int rga2_MapUserMemory(struct page **pages, uint32_t *pageTable,
+                              unsigned long Memory, uint32_t pageCount,
+                              int writeFlag)
+{
+    struct vm_area_struct *vma;
+    int32_t result;
+    uint32_t i;
+    uint32_t status;
+    unsigned long Address;
+    unsigned long pfn;
+    void *virt;
+    spinlock_t * ptl;
+    pte_t * pte;
+    pgd_t * pgd;
+    pud_t * pud;
+    pmd_t * pmd;
+
+    status = 0;
+    Address = 0;
+    down_read(&current->mm->mmap_sem);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+    result = get_user_pages(current, current->mm, Memory << PAGE_SHIFT,
+                            pageCount, writeFlag, 0, pages, NULL);
+#else
+    result = get_user_pages_remote(current, current->mm,
+                                   Memory << PAGE_SHIFT,
+                                   pageCount, writeFlag, 0, pages, NULL);
+#endif
+    if (result > 0 && result >= pageCount) {
+        /* Fill the page table. */
+        for (i = 0; i < pageCount; i++) {
+            /* Get the physical address from page struct. */
+            pageTable[i] = page_to_phys(pages[i]);
+            virt = phys_to_virt(pageTable[i]);
+            rga_dma_flush_range(virt, virt + 4 * 1024);
+        }
+        for (i = 0; i < result; i++)
+            put_page(pages[i]);
+        up_read(&current->mm->mmap_sem);
+        return 0;
+    }
+    if (result > 0) {
+        for (i = 0; i < result; i++)
+            put_page(pages[i]);
+    }
+    for (i = 0; i < pageCount; i++) {
+        vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);
+        if (!vma) {
+            status = RGA2_OUT_OF_RESOURCES;
+            break;
+        }
+        pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
+        if (pgd_val(*pgd) == 0) {
+            status = RGA2_OUT_OF_RESOURCES;
+            break;
+        }
+        pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
+        if (!pud) {
+            status = RGA2_OUT_OF_RESOURCES;
+            break;
+        }
+        pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
+        if (!pmd) {
+            status = RGA2_OUT_OF_RESOURCES;
+            break;
+        }
+        pte = pte_offset_map_lock(current->mm, pmd,
+                                  (Memory + i) << PAGE_SHIFT,
+                                  &ptl);
+        if (!pte) {
+            pte_unmap_unlock(pte, ptl);
+            status = RGA2_OUT_OF_RESOURCES;
+            break;
+        }
+        pfn = pte_pfn(*pte);
+        Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i)
+                   << PAGE_SHIFT)) & ~PAGE_MASK));
+        pte_unmap_unlock(pte, ptl);
+        pageTable[i] = (uint32_t)Address;
+        virt = phys_to_virt(pageTable[i]);
+        rga_dma_flush_range(virt, virt + 4 * 1024);
+    }
+    up_read(&current->mm->mmap_sem);
+    return status;
+}
+
+static int rga2_MapION(struct sg_table *sg,
+                       uint32_t *Memory,
+                       int32_t pageCount)
 {
-    int32_t result;
     uint32_t i;
     uint32_t status;
-    uint32_t Address;
-    //uint32_t temp;
+    unsigned long Address;
+    uint32_t mapped_size = 0;
+    uint32_t len;
+    struct scatterlist *sgl = sg->sgl;
+    uint32_t sg_num = 0;
+    uint32_t break_flag = 0;
 
     status = 0;
     Address = 0;
+    do {
+        len = sg_dma_len(sgl) >> PAGE_SHIFT;
+        Address = sg_phys(sgl);
 
-    do
-    {
-        down_read(&current->mm->mmap_sem);
-        result = get_user_pages(current,
-                current->mm,
-                Memory << PAGE_SHIFT,
-                pageCount,
-                1,
-                0,
-                pages,
-                NULL
-                );
-        up_read(&current->mm->mmap_sem);
-
-        #if 0
-        if(result <= 0 || result < pageCount)
-        {
-            status = 0;
-
-            for(i=0; i<pageCount; i++)
-            {
-                vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);
-
-                if (vma)//&& (vma->vm_flags & VM_PFNMAP) )
-                {
-                    #if 1
-                    do
-                    {
-                        pte_t * pte;
-                        spinlock_t * ptl;
-                        unsigned long pfn;
-                        pgd_t * pgd;
-                        pud_t * pud;
-
-                        pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
-
-                        if(pgd_val(*pgd) == 0)
-                        {
-                            //printk("rga pgd value is zero \n");
-                            break;
-                        }
-
-                        pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
-                        if (pud)
-                        {
-                            pmd_t * pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
-                            if (pmd)
-                            {
-                                pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);
-                                if (!pte)
-                                {
-                                    pte_unmap_unlock(pte, ptl);
-                                    break;
-                                }
-                            }
-                            else
-                            {
-                                break;
-                            }
-                        }
-                        else
-                        {
-                            break;
-                        }
-
-                        pfn = pte_pfn(*pte);
-                        Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
-                        pte_unmap_unlock(pte, ptl);
-                    }
-                    while (0);
-
-                    #else
-                    do
-                    {
-                        pte_t * pte;
-                        spinlock_t * ptl;
-                        unsigned long pfn;
-                        pgd_t * pgd;
-                        pud_t * pud;
-                        pmd_t * pmd;
-
-                        pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
-                        pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
-                        pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
-                        pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);
-
-                        pfn = pte_pfn(*pte);
-                        Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
-                        pte_unmap_unlock(pte, ptl);
-                    }
-                    while (0);
-                    #endif
-
-                    pageTable[i] = Address;
-                }
-                else
-                {
-                    status = RGA2_OUT_OF_RESOURCES;
-                    break;
-                }
+        for(i=0; i<len; i++) {
+            if (mapped_size + i >= pageCount) {
+                break_flag = 1;
+                break;
+            }
+            Memory[mapped_size + i] = (uint32_t)(Address + (i << PAGE_SHIFT));
+        }
-            }
-
-            return status;
-        }
-        #endif
-
-        /* Fill the page table. */
-        for(i=0; i<pageCount; i++)
-        {
-            /* Get the physical address from page struct. */
-            pageTable[i] = page_to_phys(pages[i]);
-        }
+        if (break_flag)
+            break;
+        mapped_size += len;
+        sg_num++;
+    } while ((sgl = sg_next(sgl)) && (mapped_size < pageCount)
+             && (sg_num < sg->nents));
 
-        return 0;
-    }
-    while(0);
-
-    return status;
+    return 0;
 }
 
-static int rga2_mmu_info_BitBlt_mode(struct rga2_reg *reg, struct rga2_req *req)
+
+static int rga2_mmu_info_BitBlt_mode(struct rga2_reg *reg, struct rga2_req *req)
+{
+    int Src0MemSize, DstMemSize, Src1MemSize;
+    unsigned long Src0Start, Src1Start, DstStart;
+    unsigned long Src0PageCount, Src1PageCount, DstPageCount;
+    uint32_t AllSize;
+    uint32_t *MMU_Base, *MMU_Base_phys;
+    int ret;
+    int status;
+    uint32_t uv_size, v_size;
+    struct page **pages = NULL;
+    MMU_Base = NULL;
+    Src0MemSize = 0;
+    Src1MemSize = 0;
+    DstMemSize = 0;
+    Src0PageCount = 0;
+    Src1PageCount = 0;
+    DstPageCount = 0;
+
+    /* cal src0 buf mmu info */
+    if (req->mmu_info.src0_mmu_flag & 1) {
+        Src0PageCount = rga2_buf_size_cal(req->src.yrgb_addr,
+                                          req->src.uv_addr,
+                                          req->src.v_addr,
+                                          req->src.format,
+                                          req->src.vir_w,
+                                          (req->src.vir_h),
+                                          &Src0Start);
+        if (Src0PageCount == 0)
+            return -EINVAL;
+    }
+    /* cal src1 buf mmu info */
+    if (req->mmu_info.src1_mmu_flag & 1) {
+        Src1PageCount = rga2_buf_size_cal(req->src1.yrgb_addr,
+                                          req->src1.uv_addr,
+                                          req->src1.v_addr,
+                                          req->src1.format,
+                                          req->src1.vir_w,
+                                          (req->src1.vir_h),
+                                          &Src1Start);
+        Src1PageCount = (Src1PageCount + 3) & (~3);
+        if (Src1PageCount == 0)
+            return -EINVAL;
+    }
+    /* cal dst buf mmu info */
+    if (req->mmu_info.dst_mmu_flag & 1) {
+        DstPageCount = rga2_buf_size_cal(req->dst.yrgb_addr,
+                                         req->dst.uv_addr,
+                                         req->dst.v_addr,
+                                         req->dst.format,
+                                         req->dst.vir_w,
+                                         req->dst.vir_h,
+                                         &DstStart);
+        if (DstPageCount == 0)
+            return -EINVAL;
+    }
+    /* Cal out the needed mem size */
+    Src0MemSize = (Src0PageCount + 15) & (~15);
+    Src1MemSize = (Src1PageCount + 15) & (~15);
+    DstMemSize = (DstPageCount + 15) & (~15);
+    AllSize = Src0MemSize + Src1MemSize + DstMemSize;
+
+    if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
+        pr_err("RGA2 Get MMU mem failed\n");
+        status = RGA2_MALLOC_ERROR;
+        goto out;
+    }
+    pages = rga2_mmu_buf.pages;
+    mutex_lock(&rga2_service.lock);
+    MMU_Base = rga2_mmu_buf.buf_virtual +
+               (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
+    MMU_Base_phys = rga2_mmu_buf.buf +
+                    (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
+    mutex_unlock(&rga2_service.lock);
+    if (Src0MemSize) {
+        if (req->sg_src0)
+            ret = rga2_MapION(req->sg_src0,
+                              &MMU_Base[0], Src0MemSize);
+        else
+            ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0],
+                                     Src0Start, Src0PageCount, 0);
+
+        if (ret < 0) {
+            pr_err("rga2 map src0 memory failed\n");
+            status = ret;
+            goto out;
+        }
+        /* change the buf address in req struct */
+        req->mmu_info.src0_base_addr = (((unsigned long)MMU_Base_phys));
+        uv_size = (req->src.uv_addr
+                   - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;
+        v_size = (req->src.v_addr
+                  - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;
+
+        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
+        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) |
+                           (uv_size << PAGE_SHIFT);
+        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) |
+                          (v_size << PAGE_SHIFT);
+    }
+    if (Src1MemSize) {
+        if (req->sg_src1)
+            ret = rga2_MapION(req->sg_src1,
+                              MMU_Base + Src0MemSize, Src1MemSize);
+        else
+            ret = rga2_MapUserMemory(&pages[0],
+                                     MMU_Base + Src0MemSize,
+                                     Src1Start, Src1PageCount, 0);
+        if (ret < 0) {
+            pr_err("rga2 map src1 memory failed\n");
+            status = ret;
+            goto out;
+        }
+        /* change the buf address in req struct */
+        req->mmu_info.src1_base_addr = ((unsigned long)(MMU_Base_phys +
+                                        Src0MemSize));
+        req->src1.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
+    }
+    if (DstMemSize) {
+        if (req->sg_dst)
+            ret = rga2_MapION(req->sg_dst, MMU_Base + Src0MemSize +
+                              Src1MemSize, DstMemSize);
+        else
+            ret = rga2_MapUserMemory(&pages[0], MMU_Base +
+                                     Src0MemSize + Src1MemSize,
+                                     DstStart, DstPageCount, 1);
+        if (ret < 0) {
+            pr_err("rga2 map dst memory failed\n");
+            status = ret;
+            goto out;
+        }
+        /* change the buf address in req struct */
+        req->mmu_info.dst_base_addr = ((unsigned long)(MMU_Base_phys +
+                                       Src0MemSize + Src1MemSize));
+        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
+        uv_size = (req->dst.uv_addr
+                   - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
+        v_size = (req->dst.v_addr
+                  - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
+        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) |
+                           ((uv_size) << PAGE_SHIFT);
+        req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) |
+                          ((v_size) << PAGE_SHIFT);
+
+        if (((req->alpha_rop_flag & 1) == 1) && (req->bitblt_mode == 0)) {
+            req->mmu_info.src1_base_addr = req->mmu_info.dst_base_addr;
+            req->mmu_info.src1_mmu_flag = req->mmu_info.dst_mmu_flag;
+        }
+    }
+    /* flush data to DDR */
+    rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
+    rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
+    reg->MMU_len = AllSize;
+    status = 0;
+out:
+    return status;
+}
+
+static int rga2_mmu_info_color_palette_mode(struct rga2_reg *reg, struct rga2_req *req)
 {
-    int Src0MemSize, DstMemSize, Src1MemSize;
-    uint32_t Src0Start, Src1Start, DstStart;
+    int SrcMemSize, DstMemSize;
+    unsigned long SrcStart, DstStart;
+    struct page **pages = NULL;
     uint32_t AllSize;
-    uint32_t *MMU_Base, *MMU_Base_phys;
-    int ret;
-    int status;
-    uint32_t uv_size, v_size;
+    uint32_t *MMU_Base = NULL, *MMU_Base_phys;
+    int ret, status;
+    uint32_t stride;
 
-    struct page **pages = NULL;
+    uint8_t shift;
+    uint16_t sw, byte_num;
 
-    MMU_Base = NULL;
+    shift = 3 - (req->palette_mode & 3);
+    sw = req->src.vir_w*req->src.vir_h;
+    byte_num = sw >> shift;
+    stride = (byte_num + 3) & (~3);
 
-    Src0MemSize = 0;
-    Src1MemSize = 0;
-    DstMemSize = 0;
+    SrcStart = 0;
+    DstStart = 0;
+    SrcMemSize = 0;
+    DstMemSize = 0;
 
-    do
-    {
-        /* cal src0 buf mmu info */
-        if(req->mmu_info.src0_mmu_flag & 1) {
-            Src0MemSize = rga2_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
-                                            req->src.format, req->src.vir_w,
-                                            (req->src.vir_h),
-                                            &Src0Start);
-            if (Src0MemSize == 0) {
+    do {
+        if (req->mmu_info.src0_mmu_flag) {
+            SrcMemSize = rga2_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
+            if(SrcMemSize == 0) {
                 return -EINVAL;
             }
         }
 
-        /* cal src1 buf mmu info */
-        if(req->mmu_info.src1_mmu_flag & 1) {
-            Src1MemSize = rga2_buf_size_cal(req->src1.yrgb_addr, req->src1.uv_addr, req->src1.v_addr,
-                                            req->src1.format, req->src1.vir_w,
-                                            (req->src1.vir_h),
-                                            &Src1Start);
-            Src0MemSize = (Src0MemSize + 3) & (~3);
-            if (Src1MemSize == 0) {
-                return -EINVAL;
-            }
-        }
-
-
-        /* cal dst buf mmu info */
-        if(req->mmu_info.dst_mmu_flag & 1) {
+        if (req->mmu_info.dst_mmu_flag) {
             DstMemSize = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                            req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                            &DstStart);
@@ -447,15 +580,10 @@ static int rga2_mmu_info_BitBlt_mode(struct rga2_reg *reg, struct rga2_req *req)
             }
         }
 
-        /* Cal out the needed mem size */
-        AllSize = ((Src0MemSize+3)&(~3)) + ((Src1MemSize+3)&(~3)) + ((DstMemSize+3)&(~3));
+        SrcMemSize = (SrcMemSize + 15) & (~15);
+        DstMemSize = (DstMemSize + 15) & (~15);
 
-        pages = kzalloc((AllSize)* sizeof(struct page *), GFP_KERNEL);
-        if(pages == NULL) {
pr_err("RGA MMU malloc pages mem failed\n"); - status = RGA2_MALLOC_ERROR; - break; - } + AllSize = SrcMemSize + DstMemSize; if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) { pr_err("RGA2 Get MMU mem failed\n"); @@ -463,13 +591,20 @@ static int rga2_mmu_info_BitBlt_mode(struct rga2_reg *reg, struct rga2_req *req) break; } + pages = rga2_mmu_buf.pages; + if(pages == NULL) { + pr_err("RGA MMU malloc pages mem failed\n"); + return -EINVAL; + } + mutex_lock(&rga2_service.lock); MMU_Base = rga2_mmu_buf.buf_virtual + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1)); MMU_Base_phys = rga2_mmu_buf.buf + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1)); mutex_unlock(&rga2_service.lock); - if(Src0MemSize) { - ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], Src0Start, Src0MemSize); + if(SrcMemSize) { + ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], + SrcStart, SrcMemSize, 0); if (ret < 0) { pr_err("rga2 map src0 memory failed\n"); status = ret; @@ -477,34 +612,13 @@ static int rga2_mmu_info_BitBlt_mode(struct rga2_reg *reg, struct rga2_req *req) } /* change the buf address in req struct */ - req->mmu_info.src0_base_addr = (((uint32_t)MMU_Base_phys)); - uv_size = (req->src.uv_addr - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT; - v_size = (req->src.v_addr - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT; - + req->mmu_info.src0_base_addr = (((unsigned long)MMU_Base_phys)); req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)); - req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT); - req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT); } - Src0MemSize = (Src0MemSize + 3) & (~3); - - if(Src1MemSize) { - ret = rga2_MapUserMemory(&pages[0], MMU_Base + Src0MemSize, Src1Start, Src1MemSize); - if (ret < 0) { - pr_err("rga2 map src1 memory failed\n"); - status = ret; - break; - } - - /* change the buf address in req struct */ - req->mmu_info.src1_base_addr = ((uint32_t)(MMU_Base_phys + Src0MemSize)); - req->src1.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (Src1MemSize << PAGE_SHIFT); - } - - Src1MemSize = (Src1MemSize + 3) & (~3); - if(DstMemSize) { - ret = rga2_MapUserMemory(&pages[0], MMU_Base + Src0MemSize + Src1MemSize, DstStart, DstMemSize); + ret = rga2_MapUserMemory(&pages[0], MMU_Base + SrcMemSize, + DstStart, DstMemSize, 1); if (ret < 0) { pr_err("rga2 map dst memory failed\n"); status = ret; @@ -512,202 +626,36 @@ static int rga2_mmu_info_BitBlt_mode(struct rga2_reg *reg, struct rga2_req *req) } /* change the buf address in req struct */ - req->mmu_info.dst_base_addr = ((uint32_t)(MMU_Base_phys + Src0MemSize + Src1MemSize)); - req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((Src0MemSize + Src1MemSize) << PAGE_SHIFT); - uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT; - v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT; - req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((Src0MemSize + Src1MemSize + uv_size) << PAGE_SHIFT); - req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((Src0MemSize + Src1MemSize + v_size) << PAGE_SHIFT); + req->mmu_info.dst_base_addr = ((unsigned long)(MMU_Base_phys + SrcMemSize)); + req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)); } /* flush data to DDR */ - dmac_flush_range(MMU_Base, (MMU_Base + AllSize)); - outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize)); - + rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize)); rga2_mmu_buf_get(&rga2_mmu_buf, AllSize); reg->MMU_len = AllSize; - status = 0; - - /* Free the page table */ - if 
-        if (pages != NULL) {
-            kfree(pages);
-        }
-
-        return status;
-    }
-    while(0);
-
-
-    /* Free the page table */
-    if (pages != NULL) {
-        kfree(pages);
-    }
-
-    /* Free MMU table */
-    if(MMU_Base != NULL) {
-        kfree(MMU_Base);
-    }
-
-    return status;
-}
-
-static int rga2_mmu_info_color_palette_mode(struct rga2_reg *reg, struct rga2_req *req)
-{
-    int SrcMemSize, DstMemSize, CMDMemSize;
-    uint32_t SrcStart, DstStart, CMDStart;
-    struct page **pages = NULL;
-    uint32_t i;
-    uint32_t AllSize;
-    uint32_t *MMU_Base = NULL;
-    uint32_t *MMU_p;
-    int ret, status;
-    uint32_t stride;
-
-    uint8_t shift;
-    uint16_t sw, byte_num;
-
-    shift = 3 - (req->palette_mode & 3);
-    sw = req->src.vir_w;
-    byte_num = sw >> shift;
-    stride = (byte_num + 3) & (~3);
-
-    do
-    {
-
-        SrcMemSize = rga2_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
-        if(SrcMemSize == 0) {
-            return -EINVAL;
-        }
-
-        DstMemSize = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
-                                       req->dst.format, req->dst.vir_w, req->dst.vir_h,
-                                       &DstStart);
-        if(DstMemSize == 0) {
-            return -EINVAL;
-        }
-
-        CMDMemSize = rga2_mem_size_cal((uint32_t)rga2_service.cmd_buff, RGA2_CMD_BUF_SIZE, &CMDStart);
-        if(CMDMemSize == 0) {
-            return -EINVAL;
-        }
-
-        AllSize = SrcMemSize + DstMemSize + CMDMemSize;
-
-        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
-        if(pages == NULL) {
-            pr_err("RGA MMU malloc pages mem failed\n");
-            return -EINVAL;
-        }
-
-        MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
-        if(MMU_Base == NULL) {
-            pr_err("RGA MMU malloc MMU_Base point failed\n");
-            break;
-        }
-
-        /* map CMD addr */
-        for(i=0; i<CMDMemSize; i++) {
-            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
-        }
-
-        /* map src addr */
-        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
-        {
-            ret = rga2_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
-            if (ret < 0)
-            {
-                pr_err("rga map src memory failed\n");
-                status = ret;
-                break;
-            }
-        }
-        else
-        {
-            MMU_p = MMU_Base + CMDMemSize;
-
-            for(i=0; i<SrcMemSize; i++)
-            {
-                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
-            }
-        }
-
-        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
-        {
-            ret = rga2_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
-            if (ret < 0)
-            {
-                pr_err("rga map dst memory failed\n");
-                status = ret;
-                break;
-            }
-        }
-        else
-        {
-            MMU_p = MMU_Base + CMDMemSize + SrcMemSize;
-
-            for(i=0; i<DstMemSize; i++)
-            {
-                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
-            }
-        }
-
-        /* zsq
-         * change the buf address in req struct
-         * for the reason of lie to MMU
-         */
-        req->mmu_info.src0_base_addr = (virt_to_phys(MMU_Base)>>2);
-        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
-        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);
-
-
-        /*record the malloc buf for the cmd end to release*/
-        reg->MMU_base = MMU_Base;
-
-        /* flush data to DDR */
-        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
-        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));
-
-        /* Free the page table */
-        if (pages != NULL) {
-            kfree(pages);
-        }
-
-        return status;
-
+        return 0;
     }
     while(0);
 
-    /* Free the page table */
-    if (pages != NULL) {
-        kfree(pages);
-    }
-
-    /* Free mmu table */
-    if (MMU_Base != NULL) {
-        kfree(MMU_Base);
-    }
-
     return 0;
 }
 
 static int rga2_mmu_info_color_fill_mode(struct rga2_reg *reg, struct rga2_req *req)
 {
     int DstMemSize;
-    uint32_t DstStart;
+    unsigned long DstStart;
     struct page **pages = NULL;
     uint32_t AllSize;
     uint32_t *MMU_Base, *MMU_Base_phys;
     int ret;
     int status;
 
+    DstMemSize = 0;
     MMU_Base = NULL;
 
-    do
-    {
+    do {
         if(req->mmu_info.dst_mmu_flag & 1) {
             DstMemSize = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                            req->dst.format, req->dst.vir_w, req->dst.vir_h,
@@ -717,14 +665,9 @@ static int rga2_mmu_info_color_fill_mode(struct rga2_reg *reg, struct rga2_req *
             }
         }
 
-        AllSize = (DstMemSize + 3) & (~3);
+        AllSize = (DstMemSize + 15) & (~15);
 
-        pages = kzalloc((AllSize)* sizeof(struct page *), GFP_KERNEL);
-        if(pages == NULL) {
-            pr_err("RGA2 MMU malloc pages mem failed\n");
-            status = RGA2_MALLOC_ERROR;
-            break;
-        }
+        pages = rga2_mmu_buf.pages;
 
         if(rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
             pr_err("RGA2 Get MMU mem failed\n");
@@ -737,9 +680,14 @@ static int rga2_mmu_info_color_fill_mode(struct rga2_reg *reg, struct rga2_req *
         MMU_Base = rga2_mmu_buf.buf_virtual + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
         mutex_unlock(&rga2_service.lock);
 
-        if (DstMemSize)
-        {
-            ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
+        if (DstMemSize) {
+            if (req->sg_dst) {
+                ret = rga2_MapION(req->sg_dst, &MMU_Base[0], DstMemSize);
+            }
+            else {
+                ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0],
+                                         DstStart, DstMemSize, 1);
+            }
             if (ret < 0) {
                 pr_err("rga2 map dst memory failed\n");
                 status = ret;
@@ -747,135 +695,86 @@ static int rga2_mmu_info_color_fill_mode(struct rga2_reg *reg, struct rga2_req *
            }
 
            /* change the buf address in req struct */
-           req->mmu_info.src0_base_addr = (((uint32_t)MMU_Base_phys)>>4);
+           req->mmu_info.dst_base_addr = ((unsigned long)MMU_Base_phys);
            req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
        }
 
        /* flush data to DDR */
-       dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
-       outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));
-
-       rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize);
-
-       /* Free the page table */
-       if (pages != NULL)
-           kfree(pages);
+       rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
+       rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
 
        return 0;
    }
    while(0);
 
-   if (pages != NULL)
-       kfree(pages);
-
-   if (MMU_Base != NULL)
-       kfree(MMU_Base);
-
    return status;
 }
 
 static int rga2_mmu_info_update_palette_table_mode(struct rga2_reg *reg, struct rga2_req *req)
 {
-    int SrcMemSize, CMDMemSize;
-    uint32_t SrcStart, CMDStart;
+    int SrcMemSize;
+    unsigned long SrcStart;
     struct page **pages = NULL;
-    uint32_t i;
     uint32_t AllSize;
-    uint32_t *MMU_Base, *MMU_p;
+    uint32_t *MMU_Base, *MMU_Base_phys;
    int ret, status;
 
    MMU_Base = NULL;
 
-    do
-    {
+    do {
        /* cal src buf mmu info */
-        SrcMemSize = rga2_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);
+        SrcMemSize = rga2_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h, &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }
 
-        /* cal cmd buf mmu info */
-        CMDMemSize = rga2_mem_size_cal((uint32_t)rga2_service.cmd_buff, RGA2_CMD_BUF_SIZE, &CMDStart);
-        if(CMDMemSize == 0) {
-            return -EINVAL;
-        }
+        SrcMemSize = (SrcMemSize + 15) & (~15);
+        AllSize = SrcMemSize;
 
-        AllSize = SrcMemSize + CMDMemSize;
-
-        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
-        if(pages == NULL) {
-            pr_err("RGA MMU malloc pages mem failed\n");
+        if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
+            pr_err("RGA2 Get MMU mem failed\n");
            status = RGA2_MALLOC_ERROR;
            break;
        }
 
-        MMU_Base = kzalloc((AllSize + 1)* sizeof(uint32_t), GFP_KERNEL);
-        if(pages == NULL) {
-            pr_err("RGA MMU malloc MMU_Base point failed\n");
-            status = RGA2_MALLOC_ERROR;
-            break;
-        }
+        mutex_lock(&rga2_service.lock);
+        MMU_Base = rga2_mmu_buf.buf_virtual + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
+        MMU_Base_phys = rga2_mmu_buf.buf + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
+        mutex_unlock(&rga2_service.lock);
 
-        for(i=0; i<CMDMemSize; i++) {
-            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
-        }
-
-        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
-        {
-            ret = rga2_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
&MMU_Base[CMDMemSize], SrcStart, SrcMemSize); + if(SrcMemSize) { + ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], + SrcStart, SrcMemSize, 0); if (ret < 0) { - pr_err("rga map src memory failed\n"); - return -EINVAL; + pr_err("rga2 map palette memory failed\n"); + status = ret; + break; } - } - else - { - MMU_p = MMU_Base + CMDMemSize; - for(i=0; immu_info.src0_base_addr = (((unsigned long)MMU_Base_phys)); + req->pat.yrgb_addr = (req->pat.yrgb_addr & (~PAGE_MASK)); } - /* zsq - * change the buf address in req struct - * for the reason of lie to MMU - */ - req->mmu_info.src0_base_addr = (virt_to_phys(MMU_Base) >> 2); - - req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); - - /*record the malloc buf for the cmd end to release*/ - reg->MMU_base = MMU_Base; - /* flush data to DDR */ - dmac_flush_range(MMU_Base, (MMU_Base + AllSize)); - outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize)); - - if (pages != NULL) { - /* Free the page table */ - kfree(pages); - } + rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize)); + rga2_mmu_buf_get(&rga2_mmu_buf, AllSize); + reg->MMU_len = AllSize; return 0; } while(0); - if (pages != NULL) - kfree(pages); - - if (MMU_Base != NULL) - kfree(MMU_Base); - return status; } static int rga2_mmu_info_update_patten_buff_mode(struct rga2_reg *reg, struct rga2_req *req) { int SrcMemSize, CMDMemSize; - uint32_t SrcStart, CMDStart; + unsigned long SrcStart, CMDStart; struct page **pages = NULL; uint32_t i; uint32_t AllSize; @@ -884,9 +783,7 @@ static int rga2_mmu_info_update_patten_buff_mode(struct rga2_reg *reg, struct rg MMU_Base = MMU_p = 0; - do - { - + do { /* cal src buf mmu info */ SrcMemSize = rga2_mem_size_cal(req->pat.yrgb_addr, req->pat.act_w * req->pat.act_h * 4, &SrcStart); if(SrcMemSize == 0) { @@ -894,26 +791,16 @@ static int rga2_mmu_info_update_patten_buff_mode(struct rga2_reg *reg, struct rg } /* cal cmd buf mmu info */ - CMDMemSize = rga2_mem_size_cal((uint32_t)rga2_service.cmd_buff, RGA2_CMD_BUF_SIZE, &CMDStart); + CMDMemSize = rga2_mem_size_cal((unsigned long)rga2_service.cmd_buff, RGA2_CMD_BUF_SIZE, &CMDStart); if(CMDMemSize == 0) { return -EINVAL; } AllSize = SrcMemSize + CMDMemSize; - pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL); - if(pages == NULL) { - pr_err("RGA MMU malloc pages mem failed\n"); - status = RGA2_MALLOC_ERROR; - break; - } + pages = rga2_mmu_buf.pages; MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL); - if(pages == NULL) { - pr_err("RGA MMU malloc MMU_Base point failed\n"); - status = RGA2_MALLOC_ERROR; - break; - } for(i=0; isrc.yrgb_addr < KERNEL_SPACE_VALID) { - ret = rga2_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize); + ret = rga2_MapUserMemory(&pages[CMDMemSize], + &MMU_Base[CMDMemSize], + SrcStart, SrcMemSize, 1); if (ret < 0) { pr_err("rga map src memory failed\n"); status = ret; @@ -950,25 +839,12 @@ static int rga2_mmu_info_update_patten_buff_mode(struct rga2_reg *reg, struct rg reg->MMU_base = MMU_Base; /* flush data to DDR */ - dmac_flush_range(MMU_Base, (MMU_Base + AllSize)); - outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize)); - - if (pages != NULL) { - /* Free the page table */ - kfree(pages); - } - + rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize)); return 0; } while(0); - if (pages != NULL) - kfree(pages); - - if (MMU_Base != NULL) - kfree(MMU_Base); - return status; }