#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <asm/atomic.h>

#include "rga_mmu_info.h"

extern rga_service_info rga_service;

#define KERNEL_SPACE_VALID    0xc0000000
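/*
 * Addresses below this boundary are taken to be user-space pointers that
 * must be pinned and translated page by page. Addresses at or above it are
 * assumed to be kernel-virtual (the usual 3G/1G split on 32-bit ARM), so
 * their physical pages can be resolved directly with virt_to_phys().
 */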
static int rga_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)
{
    uint32_t start, end;
    uint32_t pageCount;

    end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;
    start = Mem >> PAGE_SHIFT;
    pageCount = end - start;
    *StartAddr = start;

    return pageCount;
}
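/*
 * Example: Mem = 0x10001234 with MemSize = 0x3000 touches pages 0x10001
 * through 0x10004: end = (0x10001234 + 0x3000 + 0xfff) >> 12 = 0x10005 and
 * start = 0x10001, so pageCount = 4. The PAGE_SIZE - 1 term rounds up so a
 * buffer that ends mid-page still counts its final page.
 */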
static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_addr,
                            int format, uint32_t w, uint32_t h, uint32_t *StartAddr)
{
    uint32_t size_yrgb = 0;
    uint32_t size_uv = 0;
    uint32_t size_v = 0;
    uint32_t stride = 0;
    uint32_t start = 0, end = 0;
    uint32_t pageCount = 0;

    switch (format) {
    case RK_FORMAT_RGBA_8888 :
        stride = (w * 4 + 3) & (~3);
        size_yrgb = stride * h;
        end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_RGBX_8888 :
        stride = (w * 4 + 3) & (~3);
        size_yrgb = stride * h;
        end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_RGB_888 :
        stride = (w * 3 + 3) & (~3);
        size_yrgb = stride * h;
        end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_BGRA_8888 :
        size_yrgb = w * h * 4;
        end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_RGB_565 :
        stride = (w * 2 + 3) & (~3);
        size_yrgb = stride * h;
        end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_RGBA_5551 :
        stride = (w * 2 + 3) & (~3);
        size_yrgb = stride * h;
        end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_RGBA_4444 :
        stride = (w * 2 + 3) & (~3);
        size_yrgb = stride * h;
        end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_BGR_888 :
        stride = (w * 3 + 3) & (~3);
        size_yrgb = stride * h;
        end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = end - start;
        break;

    /* YUV FORMAT */
    case RK_FORMAT_YCbCr_422_SP :
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = stride * h;
        start = MIN(yrgb_addr, uv_addr);
        start >>= PAGE_SHIFT;
        end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_YCbCr_422_P :
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = ((stride >> 1) * h);
        size_v = ((stride >> 1) * h);
        start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
        start >>= PAGE_SHIFT;
        end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_YCbCr_420_SP :
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = (stride * (h >> 1));
        start = MIN(yrgb_addr, uv_addr);
        start >>= PAGE_SHIFT;
        end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_YCbCr_420_P :
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = ((stride >> 1) * (h >> 1));
        size_v = ((stride >> 1) * (h >> 1));
        start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
        start >>= PAGE_SHIFT;
        end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_YCrCb_422_SP :
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = stride * h;
        start = MIN(yrgb_addr, uv_addr);
        start >>= PAGE_SHIFT;
        end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_YCrCb_422_P :
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = ((stride >> 1) * h);
        size_v = ((stride >> 1) * h);
        start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
        start >>= PAGE_SHIFT;
        end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_YCrCb_420_SP :
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = (stride * (h >> 1));
        start = MIN(yrgb_addr, uv_addr);
        start >>= PAGE_SHIFT;
        end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_YCrCb_420_P :
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = ((stride >> 1) * (h >> 1));
        size_v = ((stride >> 1) * (h >> 1));
        start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
        start >>= PAGE_SHIFT;
        end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;

    case RK_FORMAT_BPP1 :
        break;
    case RK_FORMAT_BPP2 :
        break;
    case RK_FORMAT_BPP4 :
        break;
    case RK_FORMAT_BPP8 :
        break;
    default :
        pageCount = 0;
        break;
    }

    *StartAddr = start;
    return pageCount;
}
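/*
 * Example: a 64x64 RK_FORMAT_YCbCr_420_SP surface gives size_yrgb =
 * 64 * 64 = 4096 bytes and size_uv = 64 * (64 >> 1) = 2048 bytes. Because
 * start is the MIN of the plane start pages and end the MAX of the plane
 * end pages, the returned count spans both planes even when yrgb_addr and
 * uv_addr do not sit in one contiguous allocation.
 */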
static int rga_MapUserMemory(struct page **pages,
                             uint32_t *pageTable,
                             uint32_t Memory,
                             uint32_t pageCount)
{
    int32_t result;
    uint32_t i, j;
    int32_t status = 0;
    uint32_t Address = 0;
    uint32_t t_mem;

    do {
        down_read(&current->mm->mmap_sem);
        result = get_user_pages(current, current->mm,
                                Memory << PAGE_SHIFT,
                                pageCount, 1, 0, pages, NULL);
        up_read(&current->mm->mmap_sem);

        if (result <= 0 || result < pageCount) {
            struct vm_area_struct *vma;

            /* get_user_pages() could not pin the whole range; fall back to
             * walking the page tables by hand for VM_PFNMAP mappings. */
            for (i = 0; i < pageCount; i++) {
                t_mem = Memory + i;

                vma = find_vma(current->mm, t_mem << PAGE_SHIFT);

                if (vma && (vma->vm_flags & VM_PFNMAP)) {
                    do {
                        pte_t *pte;
                        spinlock_t *ptl;
                        unsigned long pfn;
                        pmd_t *pmd;
                        pgd_t *pgd = pgd_offset(current->mm, t_mem << PAGE_SHIFT);
                        pud_t *pud = pud_offset(pgd, t_mem << PAGE_SHIFT);

                        if (!pud)
                            break;

                        pmd = pmd_offset(pud, t_mem << PAGE_SHIFT);
                        if (!pmd)
                            break;

                        pte = pte_offset_map_lock(current->mm, pmd, t_mem << PAGE_SHIFT, &ptl);
                        if (!pte)
                            break;

                        pfn = pte_pfn(*pte);
                        Address = (pfn << PAGE_SHIFT) |
                                  (((unsigned long)(t_mem << PAGE_SHIFT)) & ~PAGE_MASK);
                        pte_unmap_unlock(pte, ptl);
                    } while (0);

                    /* Release any pages that did get pinned; use a separate
                     * index so the outer loop counter is not clobbered. */
                    for (j = 0; (int32_t)j < result; j++) {
                        if (pages[j] == NULL)
                            break;

                        page_cache_release(pages[j]);
                    }

                    pageTable[i] = Address;
                } else {
                    status = RGA_OUT_OF_RESOURCES;
                    break;
                }
            }

            return status;
        }

        for (i = 0; i < pageCount; i++) {
            /* Flush the data cache so the device reads current data. */
            dma_sync_single_for_device(NULL, page_to_phys(pages[i]),
                                       PAGE_SIZE, DMA_TO_DEVICE);
            flush_dcache_page(pages[i]);
        }

        /* Fill the page table. */
        for (i = 0; i < pageCount; i++) {
            /* Get the physical address from page struct. */
            pageTable[i] = page_to_phys(pages[i]);
        }

        return 0;
    } while (0);

    if (rgaIS_ERROR(status)) {
        /* Release page array. */
        if (result > 0 && pages != NULL) {
            for (i = 0; (int32_t)i < result; i++) {
                if (pages[i] == NULL)
                    break;

                dma_sync_single_for_device(NULL, page_to_phys(pages[i]),
                                           PAGE_SIZE, DMA_FROM_DEVICE);
                page_cache_release(pages[i]);
            }
        }
    }

    return status;
}
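/*
 * Typical call from the mode handlers below: pin SrcMemSize user pages
 * starting at page index SrcStart and write their physical addresses into
 * the MMU table slice that follows the command-buffer entries:
 *
 *     ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize],
 *                             SrcStart, SrcMemSize);
 */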
static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    uint32_t SrcStart, DstStart, CMDStart;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status;
    uint32_t uv_size, v_size;

    struct page **pages = NULL;

    MMU_Base = NULL;

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w,
                                      (req->src.act_h + req->src.y_offset), &SrcStart);
        if (SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w,
                                      (req->dst.act_h + req->dst.y_offset), &DstStart);
        if (DstMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0) {
            return -EINVAL;
        }

        /* Cal out the needed mem size */
        AllSize = SrcMemSize + DstMemSize + CMDMemSize;

        pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for (i = 0; i < CMDMemSize; i++) {
            MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize;

            if (req->src.yrgb_addr == (uint32_t)rga_service.pre_scale_buf) {
                /* Down scale ratio over 2: the source is the last pre-scale
                 * output, so copy the MMU table from the pre-scale table. */
                for (i = 0; i < SrcMemSize; i++) {
                    MMU_p[i] = rga_service.pre_scale_buf[i];
                }
            } else {
                for (i = 0; i < SrcMemSize; i++) {
                    MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
                }
            }
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
            ktime_t start, end;

            start = ktime_get();
            ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize],
                                    &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
            end = ktime_get();
            end = ktime_sub(end, start);
            printk("dst mmu map time = %d\n", (int)ktime_to_us(end));
        } else {
            MMU_p = MMU_Base + CMDMemSize + SrcMemSize;

            for (i = 0; i < DstMemSize; i++) {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        /* change the buf addresses in req so they index the MMU table
         * rather than the original mappings */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + uv_size) << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | ((CMDMemSize + v_size) << PAGE_SHIFT);

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);
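        /*
         * Rebasing example: if the caller passed yrgb_addr = 0x40001234, the
         * in-page offset 0x234 is kept and the page index is replaced with
         * the page's slot in the MMU table, so the address handed to the
         * hardware becomes (CMDMemSize << PAGE_SHIFT) | 0x234. The uv/v
         * planes get the same treatment, offset by their page distance from
         * SrcStart.
         */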
        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
            pages = NULL;
        }

        return 0;
    } while (0);

    /* Free the page table */
    if (pages != NULL) {
        kfree(pages);
        pages = NULL;
    }

    /* Free MMU table */
    if (MMU_Base != NULL) {
        kfree(MMU_Base);
        MMU_Base = NULL;
    }

    return status;
}
static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    uint32_t SrcStart, DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base = NULL;
    uint32_t *MMU_p;
    int ret, status;
    uint32_t stride;

    uint32_t shift;
    uint16_t sw, byte_num;

    shift = 3 - (req->palette_mode & 3);
    sw = req->src.vir_w;
    byte_num = sw >> shift;
    stride = (byte_num + 3) & (~3);

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
        if (SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + DstMemSize + CMDMemSize;

        pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for (i = 0; i < CMDMemSize; i++) {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize;

            for (i = 0; i < SrcMemSize; i++) {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize],
                                    &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize + SrcMemSize;

            for (i = 0; i < DstMemSize; i++) {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        /* change the buf addresses in req so they index the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
            pages = NULL;
        }

        return 0;
    } while (0);

    /* Free the page table */
    if (pages != NULL) {
        kfree(pages);
        pages = NULL;
    }

    /* Free mmu table */
    if (MMU_Base != NULL) {
        kfree(MMU_Base);
        MMU_Base = NULL;
    }

    return status;
}
static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)
{
    int DstMemSize, CMDMemSize;
    uint32_t DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status;

    MMU_Base = NULL;

    do {
        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = DstMemSize + CMDMemSize;

        pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for (i = 0; i < CMDMemSize; i++) {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize;

            for (i = 0; i < DstMemSize; i++) {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        /* change the buf address in req so it indexes the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
            pages = NULL;
        }

        return 0;
    } while (0);

    if (pages != NULL) {
        kfree(pages);
    }

    if (MMU_Base != NULL) {
        kfree(MMU_Base);
    }

    return status;
}
static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_req *req)
{
    int DstMemSize, CMDMemSize;
    uint32_t DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status;

    MMU_Base = NULL;

    do {
        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = DstMemSize + CMDMemSize;

        pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for (i = 0; i < CMDMemSize; i++) {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize;

            for (i = 0; i < DstMemSize; i++) {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        /* change the buf address in req so it indexes the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
            pages = NULL;
        }

        return 0;
    } while (0);

    if (pages != NULL) {
        kfree(pages);
    }

    if (MMU_Base != NULL) {
        kfree(MMU_Base);
    }

    return status;
}
static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    uint32_t SrcStart, DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status;
    uint32_t uv_size, v_size;

    MMU_Base = NULL;

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.vir_h,
                                      &SrcStart);
        if (SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + DstMemSize + CMDMemSize;

        pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for (i = 0; i < CMDMemSize; i++) {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize;

            for (i = 0; i < SrcMemSize; i++) {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize],
                                    &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize + SrcMemSize;

            for (i = 0; i < DstMemSize; i++) {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        /* change the buf addresses in req so they index the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + uv_size) << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | ((CMDMemSize + v_size) << PAGE_SHIFT);

        uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);
        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize + uv_size) << PAGE_SHIFT);
        req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize + v_size) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
            pages = NULL;
        }

        return 0;
    } while (0);

    if (pages != NULL) {
        kfree(pages);
    }

    if (MMU_Base != NULL) {
        kfree(MMU_Base);
    }

    return status;
}
static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    uint32_t SrcStart, DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status;
    uint32_t uv_size, v_size;

    MMU_Base = NULL;

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.vir_h,
                                      &SrcStart);
        if (SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + DstMemSize + CMDMemSize;

        pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /*
         * Allocate the MMU index memory; it is released when the command
         * completes (run_to_done).
         */
        MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for (i = 0; i < CMDMemSize; i++) {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        /* map src pages */
        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize;

            for (i = 0; i < SrcMemSize; i++) {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        if (req->dst.yrgb_addr >= KERNEL_SPACE_VALID) {
            /* kernel space */
            MMU_p = MMU_Base + CMDMemSize + SrcMemSize;

            if (req->dst.yrgb_addr == (uint32_t)rga_service.pre_scale_buf) {
                for (i = 0; i < DstMemSize; i++) {
                    MMU_p[i] = rga_service.pre_scale_buf[i];
                }
            } else {
                for (i = 0; i < DstMemSize; i++) {
                    MMU_p[i] = virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
                }
            }
        } else {
            /* user space */
            ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize],
                                    &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }

        /* change the buf addresses in req so they index the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + uv_size) << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | ((CMDMemSize + v_size) << PAGE_SHIFT);

        uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);
        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize + uv_size) << PAGE_SHIFT);
        req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize + v_size) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
            pages = NULL;
        }

        return 0;
    } while (0);

    if (pages != NULL) {
        kfree(pages);
    }

    if (MMU_Base != NULL) {
        kfree(MMU_Base);
    }

    return status;
}
static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status;

    MMU_Base = NULL;

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);
        if (SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + CMDMemSize;

        pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for (i = 0; i < CMDMemSize; i++) {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize;

            for (i = 0; i < SrcMemSize; i++) {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        /* change the buf address in req so it indexes the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
            pages = NULL;
        }

        return 0;
    } while (0);

    if (pages != NULL) {
        kfree(pages);
    }

    if (MMU_Base != NULL) {
        kfree(MMU_Base);
    }

    return status;
}
static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status;

    MMU_Base = MMU_p = NULL;

    do {
        /* cal pattern buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h * 4, &SrcStart);
        if (SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + CMDMemSize;

        pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for (i = 0; i < CMDMemSize; i++) {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize;

            for (i = 0; i < SrcMemSize; i++) {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        /* change the buf address in req so it indexes the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
            pages = NULL;
        }

        return 0;
    } while (0);

    if (pages != NULL) {
        kfree(pages);
    }

    if (MMU_Base != NULL) {
        kfree(MMU_Base);
    }

    return status;
}
int rga_set_mmu_info(struct rga_reg *reg, struct rga_req *req)
{
    int ret;

    switch (req->render_mode) {
    case bitblt_mode :
        ret = rga_mmu_info_BitBlt_mode(reg, req);
        break;
    case color_palette_mode :
        ret = rga_mmu_info_color_palette_mode(reg, req);
        break;
    case color_fill_mode :
        ret = rga_mmu_info_color_fill_mode(reg, req);
        break;
    case line_point_drawing_mode :
        ret = rga_mmu_info_line_point_drawing_mode(reg, req);
        break;
    case blur_sharp_filter_mode :
        ret = rga_mmu_info_blur_sharp_filter_mode(reg, req);
        break;
    case pre_scaling_mode :
        ret = rga_mmu_info_pre_scale_mode(reg, req);
        break;
    case update_palette_table_mode :
        ret = rga_mmu_info_update_palette_table_mode(reg, req);
        break;
    case update_patten_buff_mode :
        ret = rga_mmu_info_update_patten_buff_mode(reg, req);
        break;
    default :
        ret = -1;
        break;
    }

    return ret;
}