3 #include <linux/version.h>
\r
4 #include <linux/init.h>
\r
5 #include <linux/module.h>
\r
6 #include <linux/fs.h>
\r
7 #include <linux/sched.h>
\r
8 #include <linux/signal.h>
\r
9 #include <linux/pagemap.h>
\r
10 #include <linux/seq_file.h>
\r
11 #include <linux/mm.h>
\r
12 #include <linux/mman.h>
\r
13 #include <linux/sched.h>
\r
14 #include <linux/slab.h>
\r
15 #include <linux/memory.h>
\r
16 #include <linux/dma-mapping.h>
\r
17 #include <linux/scatterlist.h>
\r
18 #include <asm/memory.h>
\r
19 #include <asm/atomic.h>
\r
20 #include <asm/cacheflush.h>
\r
21 #include "rga2_mmu_info.h"
\r
23 extern struct rga2_service_info rga2_service;
\r
24 extern struct rga2_mmu_buf_t rga2_mmu_buf;
\r
26 //extern int mmu_buff_temp[1024];
\r
28 #define KERNEL_SPACE_VALID 0xc0000000
\r
30 #define V7_VATOPA_SUCESS_MASK (0x1)
\r
31 #define V7_VATOPA_GET_PADDR(X) (X & 0xFFFFF000)
\r
32 #define V7_VATOPA_GET_INER(X) ((X>>4) & 7)
\r
33 #define V7_VATOPA_GET_OUTER(X) ((X>>2) & 3)
\r
34 #define V7_VATOPA_GET_SH(X) ((X>>7) & 1)
\r
35 #define V7_VATOPA_GET_NS(X) ((X>>9) & 1)
\r
36 #define V7_VATOPA_GET_SS(X) ((X>>1) & 1)
\r
/*
 * armv7_va_to_pa - translate a user virtual address to a physical address
 * using the ARMv7 CP15 VA->PA translation operation (write c7,c8,0 =
 * ATS1CPR, then read the result from c7,c4,0 = PAR).
 * NOTE(review): this extraction is missing lines (asm operand list, the
 * failure branch, braces) — restore from the upstream rga2_mmu_info.c
 * before editing logic here.
 */
39 static unsigned int armv7_va_to_pa(unsigned int v_addr)
\r
41 unsigned int p_addr;
\r
42 __asm__ volatile ( "mcr p15, 0, %1, c7, c8, 0\n"
\r
45 "mrc p15, 0, %0, c7, c4, 0\n"
\r
/* PAR bit 0 set => translation aborted (V7_VATOPA_SUCESS_MASK). */
50 if (p_addr & V7_VATOPA_SUCESS_MASK)
\r
/* SS bit set means a supersection result; treated as failure (all-ones). */
53 return (V7_VATOPA_GET_SS(p_addr) ? 0xFFFFFFFF : V7_VATOPA_GET_PADDR(p_addr));
\r
/*
 * rga2_mmu_buf_get - commit @size entries of the shared MMU page-table
 * ring buffer @t under rga2_service.lock.
 * NOTE(review): the body between lock/unlock is missing from this
 * extraction — presumably it advances t->front by size; confirm against
 * the upstream source.
 */
57 static int rga2_mmu_buf_get(struct rga2_mmu_buf_t *t, uint32_t size)
\r
59 mutex_lock(&rga2_service.lock);
\r
61 mutex_unlock(&rga2_service.lock);
\r
/*
 * rga2_mmu_buf_get_try - check (without committing) whether @size entries
 * are available in the MMU ring buffer @t, under rga2_service.lock.
 * The three pr_info branches below are the distinct "no room" cases of the
 * front/back ring arithmetic; each presumably returns an error.
 * NOTE(review): returns, braces and the wrap-around assignment are missing
 * from this extraction — do not modify the ring logic without the full file.
 */
66 static int rga2_mmu_buf_get_try(struct rga2_mmu_buf_t *t, uint32_t size)
\r
68 mutex_lock(&rga2_service.lock);
\r
/* Consumer is more than one full ring behind the producer. */
69 if((t->back - t->front) > t->size) {
\r
70 if(t->front + size > t->back - t->size) {
\r
71 pr_info("front %d, back %d dsize %d size %d", t->front, t->back, t->size, size);
\r
/* Request would run past the consumer position. */
76 if((t->front + size) > t->back) {
\r
77 pr_info("front %d, back %d dsize %d size %d", t->front, t->back, t->size, size);
\r
/* Request would run past the end of the ring (wrap-around case). */
81 if(t->front + size > t->size) {
\r
82 if (size > (t->back - t->size)) {
\r
83 pr_info("front %d, back %d dsize %d size %d", t->front, t->back, t->size, size);
\r
89 mutex_unlock(&rga2_service.lock);
\r
/*
 * rga2_mem_size_cal - compute how many pages a buffer of @MemSize bytes
 * starting at virtual address @Mem spans, and return the first page frame
 * number through @StartAddr.
 * NOTE(review): the `return pageCount;` and declarations are missing from
 * this extraction.
 */
94 static int rga2_mem_size_cal(unsigned long Mem, uint32_t MemSize, unsigned long *StartAddr)
\r
96 unsigned long start, end;
\r
/* Round the end up and the start down to page boundaries. */
99 end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;
\r
100 start = Mem >> PAGE_SHIFT;
\r
101 pageCount = end - start;
\r
/* Caller receives the first page index, not the byte address. */
102 *StartAddr = start;
\r
/*
 * rga2_buf_size_cal - compute the page count and starting page index
 * (*StartAddr) covering all planes (Y/RGB, UV, V) of an image buffer,
 * per pixel @format. RGB formats use only yrgb_addr; semi-planar and
 * planar YUV formats take the min start / max end across all planes.
 * Strides are rounded up to 4-byte alignment throughout.
 * NOTE(review): this extraction is missing the switch header, `break`
 * statements, the default case and the final return — consult the
 * upstream file before changing any case.
 */
106 static int rga2_buf_size_cal(unsigned long yrgb_addr, unsigned long uv_addr, unsigned long v_addr,
\r
107 int format, uint32_t w, uint32_t h, unsigned long *StartAddr )
\r
109 uint32_t size_yrgb = 0;
\r
110 uint32_t size_uv = 0;
\r
111 uint32_t size_v = 0;
\r
112 uint32_t stride = 0;
\r
113 unsigned long start, end;
\r
114 uint32_t pageCount;
\r
/* ---- single-plane RGB formats: 4/3/2 bytes per pixel ---- */
118 case RGA2_FORMAT_RGBA_8888 :
\r
119 stride = (w * 4 + 3) & (~3);
\r
120 size_yrgb = stride*h;
\r
121 start = yrgb_addr >> PAGE_SHIFT;
\r
122 pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
\r
124 case RGA2_FORMAT_RGBX_8888 :
\r
125 stride = (w * 4 + 3) & (~3);
\r
126 size_yrgb = stride*h;
\r
127 start = yrgb_addr >> PAGE_SHIFT;
\r
128 pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
\r
130 case RGA2_FORMAT_RGB_888 :
\r
131 stride = (w * 3 + 3) & (~3);
\r
132 size_yrgb = stride*h;
\r
133 start = yrgb_addr >> PAGE_SHIFT;
\r
134 pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
\r
/* NOTE(review): BGRA_8888's stride/size lines are missing here —
 * size_yrgb is presumably set like RGBA_8888; confirm upstream. */
136 case RGA2_FORMAT_BGRA_8888 :
\r
138 start = yrgb_addr >> PAGE_SHIFT;
\r
139 pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
\r
141 case RGA2_FORMAT_RGB_565 :
\r
142 stride = (w*2 + 3) & (~3);
\r
143 size_yrgb = stride * h;
\r
144 start = yrgb_addr >> PAGE_SHIFT;
\r
145 pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
\r
147 case RGA2_FORMAT_RGBA_5551 :
\r
148 stride = (w*2 + 3) & (~3);
\r
149 size_yrgb = stride * h;
\r
150 start = yrgb_addr >> PAGE_SHIFT;
\r
151 pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
\r
153 case RGA2_FORMAT_RGBA_4444 :
\r
154 stride = (w*2 + 3) & (~3);
\r
155 size_yrgb = stride * h;
\r
156 start = yrgb_addr >> PAGE_SHIFT;
\r
157 pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
\r
159 case RGA2_FORMAT_BGR_888 :
\r
160 stride = (w*3 + 3) & (~3);
\r
161 size_yrgb = stride * h;
\r
162 start = yrgb_addr >> PAGE_SHIFT;
\r
163 pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
\r
/* ---- two-plane 4:2:2 semi-planar: full-res chroma plane ---- */
167 case RGA2_FORMAT_YCbCr_422_SP :
\r
168 case RGA2_FORMAT_YCrCb_422_SP :
\r
169 stride = (w + 3) & (~3);
\r
170 size_yrgb = stride * h;
\r
171 size_uv = stride * h;
\r
/* Plane order in memory is not guaranteed; span min..max. */
172 start = MIN(yrgb_addr, uv_addr);
\r
173 start >>= PAGE_SHIFT;
\r
174 end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
\r
175 end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
\r
176 pageCount = end - start;
\r
/* ---- three-plane 4:2:2 planar: half-width U and V planes ---- */
178 case RGA2_FORMAT_YCbCr_422_P :
\r
179 case RGA2_FORMAT_YCrCb_422_P :
\r
180 stride = (w + 3) & (~3);
\r
181 size_yrgb = stride * h;
\r
182 size_uv = ((stride >> 1) * h);
\r
183 size_v = ((stride >> 1) * h);
\r
184 start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
\r
185 start = start >> PAGE_SHIFT;
\r
186 end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
\r
187 end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
\r
188 pageCount = end - start;
\r
/* ---- two-plane 4:2:0 semi-planar: half-height chroma plane ---- */
190 case RGA2_FORMAT_YCbCr_420_SP :
\r
191 case RGA2_FORMAT_YCrCb_420_SP :
\r
192 stride = (w + 3) & (~3);
\r
193 size_yrgb = stride * h;
\r
194 size_uv = (stride * (h >> 1));
\r
195 start = MIN(yrgb_addr, uv_addr);
\r
196 start >>= PAGE_SHIFT;
\r
197 end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
\r
198 end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
\r
199 pageCount = end - start;
\r
200 //printk("yrgb_addr = %.8x\n", yrgb_addr);
\r
201 //printk("uv_addr = %.8x\n", uv_addr);
\r
/* ---- three-plane 4:2:0 planar: quarter-size U and V planes ---- */
203 case RGA2_FORMAT_YCbCr_420_P :
\r
204 case RGA2_FORMAT_YCrCb_420_P :
\r
205 stride = (w + 3) & (~3);
\r
206 size_yrgb = stride * h;
\r
207 size_uv = ((stride >> 1) * (h >> 1));
\r
208 size_v = ((stride >> 1) * (h >> 1));
\r
209 start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
\r
210 start >>= PAGE_SHIFT;
\r
211 end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
\r
212 end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
\r
213 pageCount = end - start;
\r
/* NOTE(review): the BPP1/2/4/8 bodies are missing from this extraction. */
216 case RK_FORMAT_BPP1 :
\r
218 case RK_FORMAT_BPP2 :
\r
220 case RK_FORMAT_BPP4 :
\r
222 case RK_FORMAT_BPP8 :
\r
/* 10-bit 4:2:0 semi-planar; sized here like the 8-bit SP case. */
225 case RGA2_FORMAT_YCbCr_420_SP_10B:
\r
226 case RGA2_FORMAT_YCrCb_420_SP_10B:
\r
227 stride = (w + 3) & (~3);
\r
229 size_yrgb = stride * h;
\r
230 size_uv = (stride * (h >> 1));
\r
231 start = MIN(yrgb_addr, uv_addr);
\r
232 start >>= PAGE_SHIFT;
\r
233 end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
\r
234 end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
\r
235 pageCount = end - start;
\r
/* Report the first page index of the whole span to the caller. */
243 *StartAddr = start;
\r
/*
 * rga2_MapUserMemory - pin @pageCount user pages starting at page index
 * @Memory and fill @pageTable with their physical addresses for the RGA2
 * IOMMU. Fast path: get_user_pages() pins everything, then the table is
 * filled from the page structs and the references dropped. Slow path
 * (partial pin): walk the page tables manually (pgd/pud/pmd/pte) per page
 * under pte_offset_map_lock.
 * NOTE(review): every "¤t" below is mojibake for "&current" —
 * restore "down_read(&current->mm->mmap_sem)" etc. from upstream.
 * NOTE(review): get_user_pages(current, current->mm, ...) is the pre-4.6
 * kernel signature; newer kernels drop the task/mm arguments.
 */
247 static int rga2_MapUserMemory(struct page **pages,
\r
248 uint32_t *pageTable,
\r
249 unsigned long Memory,
\r
250 uint32_t pageCount)
\r
255 unsigned long Address;
\r
262 down_read(¤t->mm->mmap_sem);
\r
263 result = get_user_pages(current,
\r
265 Memory << PAGE_SHIFT,
\r
272 up_read(¤t->mm->mmap_sem);
\r
/* Partial or failed pin: release what was pinned, then fall back
 * to a manual page-table walk. */
274 if(result <= 0 || result < pageCount)
\r
276 struct vm_area_struct *vma;
\r
279 down_read(¤t->mm->mmap_sem);
\r
280 for (i = 0; i < result; i++)
\r
281 put_page(pages[i]);
\r
282 up_read(¤t->mm->mmap_sem);
\r
285 for(i=0; i<pageCount; i++)
\r
287 vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);
\r
289 if (vma)//&& (vma->vm_flags & VM_PFNMAP) )
\r
/* Manual pgd -> pud -> pmd -> pte walk for this page. */
299 pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
\r
301 if(pgd_val(*pgd) == 0)
\r
303 //printk("rga pgd value is zero \n");
\r
307 pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
\r
310 pmd_t * pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
\r
313 pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);
\r
/* NOTE(review): this first unlock is presumably the !pte_present
 * error path; the branch lines are missing from this extraction. */
316 pte_unmap_unlock(pte, ptl);
\r
/* Physical address = frame base | offset-in-page of the VA. */
330 pfn = pte_pfn(*pte);
\r
331 Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
\r
332 pte_unmap_unlock(pte, ptl);
\r
336 pageTable[i] = (uint32_t)Address;
\r
340 status = RGA2_OUT_OF_RESOURCES;
\r
/* Fast path: all pages pinned successfully. */
348 /* Fill the page table. */
\r
349 for(i=0; i<pageCount; i++)
\r
351 /* Get the physical address from page struct. */
\r
352 pageTable[i] = page_to_phys(pages[i]);
\r
/* Drop the get_user_pages references once the table is built.
 * NOTE(review): put_page does not require mmap_sem; the lock here is
 * harmless but unnecessary. */
355 down_read(¤t->mm->mmap_sem);
\r
356 for (i = 0; i < result; i++)
\r
357 put_page(pages[i]);
\r
358 up_read(¤t->mm->mmap_sem);
\r
/*
 * rga2_MapION - fill the RGA2 page table @Memory from an ION/dma-buf
 * scatter-gather list: for each scatterlist entry, expand its physical
 * range into per-page entries, up to @pageCount entries total.
 * NOTE(review): loop header/braces and parameter lines are missing from
 * this extraction.
 */
367 static int rga2_MapION(struct sg_table *sg,
\r
373 unsigned long Address;
\r
374 uint32_t mapped_size = 0;
\r
376 struct scatterlist *sgl = sg->sgl;
\r
377 uint32_t sg_num = 0;
\r
378 uint32_t break_flag = 0;
\r
/* Number of whole pages covered by this sg entry. */
383 len = sg_dma_len(sgl) >> PAGE_SHIFT;
\r
384 Address = sg_phys(sgl);
\r
386 for(i=0; i<len; i++) {
\r
/* Stop once the destination table is full. */
387 if (mapped_size + i >= pageCount) {
\r
391 Memory[mapped_size + i] = (uint32_t)(Address + (i << PAGE_SHIFT));
\r
395 mapped_size += len;
\r
/* Advance through the chain until the table is full or entries run out. */
398 while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));
\r
/*
 * rga2_mmu_info_BitBlt_mode - build the RGA2 MMU page tables for a bitblt:
 * size each of src0/src1/dst (when its mmu_flag is set), reserve one
 * contiguous slice of the shared MMU ring buffer, map each buffer (ION
 * sg-table if present, otherwise pinned user memory), then rewrite the
 * req addresses to page-table-relative offsets and flush the tables to RAM.
 * NOTE(review): error-path lines, braces and returns are missing from this
 * extraction — do not restructure without the upstream file.
 */
404 static int rga2_mmu_info_BitBlt_mode(struct rga2_reg *reg, struct rga2_req *req)
\r
406 int Src0MemSize, DstMemSize, Src1MemSize;
\r
407 unsigned long Src0Start, Src1Start, DstStart;
\r
409 uint32_t *MMU_Base, *MMU_Base_phys;
\r
412 uint32_t uv_size, v_size;
\r
414 struct page **pages = NULL;
\r
423 /* cal src0 buf mmu info */
\r
424 if(req->mmu_info.src0_mmu_flag & 1) {
\r
425 Src0MemSize = rga2_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
\r
426 req->src.format, req->src.vir_w,
\r
429 if (Src0MemSize == 0) {
\r
434 /* cal src1 buf mmu info */
\r
435 if(req->mmu_info.src1_mmu_flag & 1) {
\r
436 Src1MemSize = rga2_buf_size_cal(req->src1.yrgb_addr, req->src1.uv_addr, req->src1.v_addr,
\r
437 req->src1.format, req->src1.vir_w,
\r
/* NOTE(review): 4-alignment of Src0MemSize inside the src1 branch is
 * superseded by the 16-alignment below; looks redundant — confirm. */
440 Src0MemSize = (Src0MemSize + 3) & (~3);
\r
441 if (Src1MemSize == 0) {
\r
447 /* cal dst buf mmu info */
\r
448 if(req->mmu_info.dst_mmu_flag & 1) {
\r
449 DstMemSize = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
\r
450 req->dst.format, req->dst.vir_w, req->dst.vir_h,
\r
452 if(DstMemSize == 0) {
\r
457 /* Cal out the needed mem size */
\r
/* Each region is 16-entry aligned so the regions pack cleanly. */
458 Src0MemSize = (Src0MemSize+15)&(~15);
\r
459 Src1MemSize = (Src1MemSize+15)&(~15);
\r
460 DstMemSize = (DstMemSize+15)&(~15);
\r
461 AllSize = Src0MemSize + Src1MemSize + DstMemSize;
\r
463 if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
\r
464 pr_err("RGA2 Get MMU mem failed\n");
\r
465 status = RGA2_MALLOC_ERROR;
\r
469 pages = rga2_mmu_buf.pages;
\r
/* Ring-buffer slice: front is masked by (size-1), so size is presumably
 * a power of two. Virtual base for CPU writes, phys base for hardware. */
471 mutex_lock(&rga2_service.lock);
\r
472 MMU_Base = rga2_mmu_buf.buf_virtual + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
\r
473 MMU_Base_phys = rga2_mmu_buf.buf + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
\r
474 mutex_unlock(&rga2_service.lock);
\r
/* src0: prefer the ION sg-table, else pin user pages. */
476 if (req->sg_src0) {
\r
477 ret = rga2_MapION(req->sg_src0, &MMU_Base[0], Src0MemSize);
\r
480 ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], Src0Start, Src0MemSize);
\r
484 pr_err("rga2 map src0 memory failed\n");
\r
489 /* change the buf address in req struct */
\r
490 req->mmu_info.src0_base_addr = (((unsigned long)MMU_Base_phys));
\r
/* Convert plane addresses to page offsets within the mapped region. */
491 uv_size = (req->src.uv_addr - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;
\r
492 v_size = (req->src.v_addr - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;
\r
494 req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
\r
495 req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
\r
496 req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);
\r
500 if (req->sg_src1) {
\r
501 ret = rga2_MapION(req->sg_src1, MMU_Base + Src0MemSize, Src1MemSize);
\r
504 ret = rga2_MapUserMemory(&pages[0], MMU_Base + Src0MemSize, Src1Start, Src1MemSize);
\r
508 pr_err("rga2 map src1 memory failed\n");
\r
513 /* change the buf address in req struct */
\r
514 req->mmu_info.src1_base_addr = ((unsigned long)(MMU_Base_phys + Src0MemSize));
\r
/* BUG(review): reads req->src.yrgb_addr, not req->src1.yrgb_addr —
 * looks like a copy-paste defect; src1's in-page offset is taken from
 * src0. Verify against upstream before fixing. */
515 req->src1.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
\r
520 ret = rga2_MapION(req->sg_dst, MMU_Base + Src0MemSize + Src1MemSize, DstMemSize);
\r
523 ret = rga2_MapUserMemory(&pages[0], MMU_Base + Src0MemSize + Src1MemSize, DstStart, DstMemSize);
\r
526 pr_err("rga2 map dst memory failed\n");
\r
531 /* change the buf address in req struct */
\r
532 req->mmu_info.dst_base_addr = ((unsigned long)(MMU_Base_phys + Src0MemSize + Src1MemSize));
\r
533 req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
\r
534 uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
\r
535 v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
\r
536 req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((uv_size) << PAGE_SHIFT);
\r
537 req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((v_size) << PAGE_SHIFT);
\r
540 /* flush data to DDR */
\r
/* ARM: L1 + outer (L2) flush; ARM64: unified flush helper. */
542 dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
\r
543 outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));
\r
544 #elif defined(CONFIG_ARM64)
\r
545 __dma_flush_range(MMU_Base, (MMU_Base + AllSize));
\r
/* Commit the ring-buffer slice now that the tables are written. */
548 rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
\r
549 reg->MMU_len = AllSize;
\r
/*
 * rga2_mmu_info_color_palette_mode - build MMU tables for palette blits:
 * the source is a packed 1/2/4/8-bpp index buffer (sized from palette_mode),
 * the destination is a normal image buffer. Same reserve/map/rewrite/flush
 * sequence as BitBlt mode, user memory only.
 * NOTE(review): error paths, braces and returns are missing from this
 * extraction.
 */
560 static int rga2_mmu_info_color_palette_mode(struct rga2_reg *reg, struct rga2_req *req)
\r
562 int SrcMemSize, DstMemSize;
\r
563 unsigned long SrcStart, DstStart;
\r
564 struct page **pages = NULL;
\r
566 uint32_t *MMU_Base = NULL, *MMU_Base_phys;
\r
571 uint16_t sw, byte_num;
\r
/* palette_mode 0..3 selects 1/2/4/8 bpp; shift converts pixels->bytes.
 * NOTE(review): sw/byte_num are uint16_t — vir_w*vir_h can overflow 16
 * bits for large surfaces; worth confirming upstream intent. */
573 shift = 3 - (req->palette_mode & 3);
\r
574 sw = req->src.vir_w*req->src.vir_h;
\r
575 byte_num = sw >> shift;
\r
576 stride = (byte_num + 3) & (~3);
\r
584 if (req->mmu_info.src0_mmu_flag) {
\r
585 SrcMemSize = rga2_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
\r
586 if(SrcMemSize == 0) {
\r
591 if (req->mmu_info.dst_mmu_flag) {
\r
592 DstMemSize = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
\r
593 req->dst.format, req->dst.vir_w, req->dst.vir_h,
\r
595 if(DstMemSize == 0) {
\r
/* 16-entry alignment so the two regions pack in the ring buffer. */
600 SrcMemSize = (SrcMemSize + 15) & (~15);
\r
601 DstMemSize = (DstMemSize + 15) & (~15);
\r
603 AllSize = SrcMemSize + DstMemSize;
\r
605 if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
\r
606 pr_err("RGA2 Get MMU mem failed\n");
\r
607 status = RGA2_MALLOC_ERROR;
\r
611 pages = rga2_mmu_buf.pages;
\r
612 if(pages == NULL) {
\r
613 pr_err("RGA MMU malloc pages mem failed\n");
\r
/* Reserve the CPU-visible and hardware-visible table bases. */
617 mutex_lock(&rga2_service.lock);
\r
618 MMU_Base = rga2_mmu_buf.buf_virtual + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
\r
619 MMU_Base_phys = rga2_mmu_buf.buf + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
\r
620 mutex_unlock(&rga2_service.lock);
\r
623 ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
\r
625 pr_err("rga2 map src0 memory failed\n");
\r
630 /* change the buf address in req struct */
\r
631 req->mmu_info.src0_base_addr = (((unsigned long)MMU_Base_phys));
\r
632 req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
\r
636 ret = rga2_MapUserMemory(&pages[0], MMU_Base + SrcMemSize, DstStart, DstMemSize);
\r
638 pr_err("rga2 map dst memory failed\n");
\r
643 /* change the buf address in req struct */
\r
644 req->mmu_info.dst_base_addr = ((unsigned long)(MMU_Base_phys + SrcMemSize));
\r
645 req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
\r
648 /* flush data to DDR */
\r
650 dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
\r
651 outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));
\r
652 #elif defined(CONFIG_ARM64)
\r
653 __dma_flush_range(MMU_Base, (MMU_Base + AllSize));
\r
/* Commit the slice and record its length for later release. */
656 rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
\r
657 reg->MMU_len = AllSize;
\r
/*
 * rga2_mmu_info_color_fill_mode - build the MMU table for a color fill:
 * only the destination buffer is mapped (ION sg-table if present, else
 * pinned user memory), then the dst address is rewritten and the table
 * flushed. Mirrors the dst leg of BitBlt mode.
 * NOTE(review): error paths, braces and returns are missing from this
 * extraction.
 */
666 static int rga2_mmu_info_color_fill_mode(struct rga2_reg *reg, struct rga2_req *req)
\r
669 unsigned long DstStart;
\r
670 struct page **pages = NULL;
\r
672 uint32_t *MMU_Base, *MMU_Base_phys;
\r
680 if(req->mmu_info.dst_mmu_flag & 1) {
\r
681 DstMemSize = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
\r
682 req->dst.format, req->dst.vir_w, req->dst.vir_h,
\r
684 if(DstMemSize == 0) {
\r
689 AllSize = (DstMemSize + 15) & (~15);
\r
691 pages = rga2_mmu_buf.pages;
\r
693 if(rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
\r
694 pr_err("RGA2 Get MMU mem failed\n");
\r
695 status = RGA2_MALLOC_ERROR;
\r
699 mutex_lock(&rga2_service.lock);
\r
700 MMU_Base_phys = rga2_mmu_buf.buf + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
\r
701 MMU_Base = rga2_mmu_buf.buf_virtual + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
\r
702 mutex_unlock(&rga2_service.lock);
\r
706 ret = rga2_MapION(req->sg_dst, &MMU_Base[0], DstMemSize);
\r
709 ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
\r
712 pr_err("rga2 map dst memory failed\n");
\r
717 /* change the buf address in req struct */
\r
718 req->mmu_info.dst_base_addr = ((unsigned long)MMU_Base_phys);
\r
719 req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
\r
722 /* flush data to DDR */
\r
/* NOTE(review): this path flushes AllSize + 1 entries, unlike the other
 * modes which flush exactly AllSize — inconsistent; confirm which is
 * intended before changing. */
724 dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
\r
725 outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));
\r
726 #elif defined(CONFIG_ARM64)
\r
727 __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
\r
730 rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
\r
/*
 * rga2_mmu_info_update_palette_table_mode - map the user-space palette
 * table (req->pat) into the RGA2 MMU: size it, reserve a ring-buffer
 * slice, pin the user pages, rewrite the pat address and flush.
 * NOTE(review): error paths, braces and returns are missing from this
 * extraction.
 */
740 static int rga2_mmu_info_update_palette_table_mode(struct rga2_reg *reg, struct rga2_req *req)
\r
743 unsigned long SrcStart;
\r
744 struct page **pages = NULL;
\r
746 uint32_t *MMU_Base, *MMU_Base_phys;
\r
752 /* cal src buf mmu info */
\r
753 SrcMemSize = rga2_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h, &SrcStart);
\r
754 if(SrcMemSize == 0) {
\r
758 SrcMemSize = (SrcMemSize + 15) & (~15);
\r
759 AllSize = SrcMemSize;
\r
761 if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
\r
762 pr_err("RGA2 Get MMU mem failed\n");
\r
763 status = RGA2_MALLOC_ERROR;
\r
767 mutex_lock(&rga2_service.lock);
\r
768 MMU_Base = rga2_mmu_buf.buf_virtual + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
\r
769 MMU_Base_phys = rga2_mmu_buf.buf + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
\r
770 mutex_unlock(&rga2_service.lock);
\r
/* NOTE(review): unlike the other modes (which reuse rga2_mmu_buf.pages),
 * this allocates a fresh pages array and no kfree is visible in this
 * extraction — check upstream for the release or a leak. Also note the
 * allocation is unchecked before use here. */
772 pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
\r
775 ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
\r
777 pr_err("rga2 map palette memory failed\n");
\r
782 /* change the buf address in req struct */
\r
783 req->mmu_info.src0_base_addr = (((unsigned long)MMU_Base_phys));
\r
784 req->pat.yrgb_addr = (req->pat.yrgb_addr & (~PAGE_MASK));
\r
787 /* flush data to DDR */
\r
789 dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
\r
790 outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));
\r
791 #elif defined(CONFIG_ARM64)
\r
792 __dma_flush_range(MMU_Base, (MMU_Base + AllSize));
\r
795 rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
\r
796 reg->MMU_len = AllSize;
\r
/*
 * rga2_mmu_info_update_patten_buff_mode - map the pattern buffer plus the
 * kernel command buffer for a pattern update. Unlike the other modes this
 * kzalloc's a private page table (recorded in reg->MMU_base for release at
 * command end), maps the CMD buffer by virt_to_phys, then the pattern by
 * user-pin or virt_to_phys depending on whether its address is below
 * KERNEL_SPACE_VALID.
 * NOTE(review): braces, returns and some error paths are missing from this
 * extraction.
 */
805 static int rga2_mmu_info_update_patten_buff_mode(struct rga2_reg *reg, struct rga2_req *req)
\r
807 int SrcMemSize, CMDMemSize;
\r
808 unsigned long SrcStart, CMDStart;
\r
809 struct page **pages = NULL;
\r
812 uint32_t *MMU_Base, *MMU_p;
\r
815 MMU_Base = MMU_p = 0;
\r
818 /* cal src buf mmu info */
\r
/* Pattern is act_w * act_h pixels at 4 bytes each. */
819 SrcMemSize = rga2_mem_size_cal(req->pat.yrgb_addr, req->pat.act_w * req->pat.act_h * 4, &SrcStart);
\r
820 if(SrcMemSize == 0) {
\r
824 /* cal cmd buf mmu info */
\r
825 CMDMemSize = rga2_mem_size_cal((unsigned long)rga2_service.cmd_buff, RGA2_CMD_BUF_SIZE, &CMDStart);
\r
826 if(CMDMemSize == 0) {
\r
830 AllSize = SrcMemSize + CMDMemSize;
\r
832 pages = rga2_mmu_buf.pages;
\r
/* NOTE(review): allocation result is used unchecked in the loop below
 * (no NULL test visible in this extraction). */
834 MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
\r
/* CMD buffer is kernel memory: translate directly page by page. */
836 for(i=0; i<CMDMemSize; i++) {
\r
837 MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
\r
/* User-space pattern: pin pages; kernel-space pattern: virt_to_phys. */
840 if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
\r
842 ret = rga2_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
\r
844 pr_err("rga map src memory failed\n");
\r
851 MMU_p = MMU_Base + CMDMemSize;
\r
853 for(i=0; i<SrcMemSize; i++)
\r
855 MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
\r
/* NOTE(review): the lines below are the interior of a block comment whose
 * opening and closing delimiters are among the missing lines. */
860 * change the buf address in req struct
\r
861 * for the reason of lie to MMU
\r
/* Hardware expects the table base pre-shifted by 2 (word index). */
863 req->mmu_info.src0_base_addr = (virt_to_phys(MMU_Base) >> 2);
\r
/* Pattern now lives after the CMD pages inside the table. */
865 req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
\r
867 /*record the malloc buf for the cmd end to release*/
\r
868 reg->MMU_base = MMU_Base;
\r
870 /* flush data to DDR */
\r
872 dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
\r
873 outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));
\r
874 #elif defined(CONFIG_ARM64)
\r
875 __dma_flush_range(MMU_Base, (MMU_Base + AllSize));
\r
886 int rga2_set_mmu_info(struct rga2_reg *reg, struct rga2_req *req)
\r
890 switch (req->render_mode) {
\r
892 ret = rga2_mmu_info_BitBlt_mode(reg, req);
\r
894 case color_palette_mode :
\r
895 ret = rga2_mmu_info_color_palette_mode(reg, req);
\r
897 case color_fill_mode :
\r
898 ret = rga2_mmu_info_color_fill_mode(reg, req);
\r
900 case update_palette_table_mode :
\r
901 ret = rga2_mmu_info_update_palette_table_mode(reg, req);
\r
903 case update_patten_buff_mode :
\r
904 ret = rga2_mmu_info_update_patten_buff_mode(reg, req);
\r