#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/dma-mapping.h>
#include <asm/memory.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include "rga_mmu_info.h"
extern rga_service_info rga_service;

#define KERNEL_SPACE_VALID       0xc0000000

#define V7_VATOPA_SUCESS_MASK    (0x1)
#define V7_VATOPA_GET_PADDR(X)   (X & 0xFFFFF000)
#define V7_VATOPA_GET_INER(X)    ((X>>4) & 7)
#define V7_VATOPA_GET_OUTER(X)   ((X>>2) & 3)
#define V7_VATOPA_GET_SH(X)      ((X>>7) & 1)
#define V7_VATOPA_GET_NS(X)      ((X>>9) & 1)
#define V7_VATOPA_GET_SS(X)      ((X>>1) & 1)
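
/*
 * These masks decode the ARMv7 Physical Address Register (PAR) as read
 * back by armv7_va_to_pa() below: bit 0 flags a failed translation,
 * bit 1 a supersection (SS) result, bits [3:2] and [6:4] the outer and
 * inner cacheability attributes, bit 7 shareability, bit 9 the NS bit,
 * and bits [31:12] the physical page frame.
 */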
static unsigned int armv7_va_to_pa(unsigned int v_addr)
{
    unsigned int p_addr;

    /* ATS1CPR: translate v_addr as a privileged read, then fetch PAR. */
    __asm__ volatile ( "mcr p15, 0, %1, c7, c8, 0\n"
                       "isb\n"
                       "dsb\n"
                       "mrc p15, 0, %0, c7, c4, 0\n"
                       : "=r"(p_addr)
                       : "r"(v_addr)
                       : "cc");

    if (p_addr & V7_VATOPA_SUCESS_MASK)
        return 0xFFFFFFFF;

    return (V7_VATOPA_GET_SS(p_addr) ? 0xFFFFFFFF : V7_VATOPA_GET_PADDR(p_addr));
}
static int rga_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)
{
    uint32_t start, end;
    uint32_t pageCount;

    end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;
    start = Mem >> PAGE_SHIFT;
    pageCount = end - start;

    *StartAddr = start;
    return pageCount;
}
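
/*
 * Worked example with 4 KiB pages (PAGE_SHIFT = 12): for Mem = 0x12345678
 * and MemSize = 0x2000, start = 0x12345 and end = (0x12345678 + 0x2fff)
 * >> 12 = 0x12348, so pageCount = 3: the buffer is only two pages long,
 * but its in-page offset of 0x678 makes it straddle three pages.
 */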
static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_addr,
                            int format, uint32_t w, uint32_t h, uint32_t *StartAddr )
{
    uint32_t size_yrgb = 0;
    uint32_t size_uv = 0;
    uint32_t size_v = 0;
    uint32_t stride = 0;
    uint32_t start = 0, end = 0;
    uint32_t pageCount = 0;

    switch(format)
    {
        case RK_FORMAT_RGBA_8888 :
            stride = (w * 4 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGBX_8888 :
            stride = (w * 4 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGB_888 :
            stride = (w * 3 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_BGRA_8888 :
            size_yrgb = w * h * 4;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGB_565 :
            stride = (w * 2 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGBA_5551 :
            stride = (w * 2 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGBA_4444 :
            stride = (w * 2 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_BGR_888 :
            stride = (w * 3 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;

        /* YUV formats: the span runs from the lowest plane address to the
         * end of the highest plane. */
        case RK_FORMAT_YCbCr_422_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = stride * h;
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_422_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * h);
            size_v = ((stride >> 1) * h);
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_420_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = (stride * (h >> 1));
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_420_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * (h >> 1));
            size_v = ((stride >> 1) * (h >> 1));
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;

        case RK_FORMAT_YCrCb_422_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = stride * h;
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_422_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * h);
            size_v = ((stride >> 1) * h);
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_420_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = (stride * (h >> 1));
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_420_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * (h >> 1));
            size_v = ((stride >> 1) * (h >> 1));
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;

        case RK_FORMAT_BPP1 :
            break;
        case RK_FORMAT_BPP2 :
            break;
        case RK_FORMAT_BPP4 :
            break;
        case RK_FORMAT_BPP8 :
            break;
        default :
            pageCount = 0;
            start = 0;
            break;
    }

    *StartAddr = start;
    return pageCount;
}
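
/*
 * Worked example for RK_FORMAT_YCbCr_420_SP (NV12) with w = 720, h = 480
 * and a page-aligned, contiguous Y/UV layout: stride = 720,
 * size_yrgb = 720 * 480 = 345600 and size_uv = 720 * 240 = 172800, so the
 * span from MIN(yrgb_addr, uv_addr) to the end of the UV plane is
 * 518400 bytes and pageCount = 127.
 */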
static int rga_MapUserMemory(struct page **pages,
                             uint32_t *pageTable,
                             uint32_t Memory,
                             uint32_t pageCount)
{
    int32_t result;
    uint32_t i;
    int status = 0;
    uint32_t Address = 0;

    /* Pin the user pages backing the buffer; Memory is a page index. */
    down_read(&current->mm->mmap_sem);
    result = get_user_pages(current,
                            current->mm,
                            Memory << PAGE_SHIFT,
                            pageCount,
                            1, 0,
                            pages,
                            NULL);
    up_read(&current->mm->mmap_sem);

#if 0
    /* Fallback 1: resolve each page with the ARMv7 VA-to-PA coprocessor op. */
    if(result <= 0 || result < pageCount)
    {
        uint32_t temp;

        status = 0;
        for(i=0; i<pageCount; i++)
        {
            temp = armv7_va_to_pa((Memory + i) << PAGE_SHIFT);
            if (temp == 0xffffffff)
            {
                printk("rga find mmu phy ddr error\n");
                status = RGA_OUT_OF_RESOURCES;
                break;
            }
            pageTable[i] = temp;
        }
        return status;
    }
#else
    /* Fallback 2: walk the page tables by hand, e.g. for mappings that
     * get_user_pages() refuses to pin. */
    if(result <= 0 || result < pageCount)
    {
        struct vm_area_struct *vma;

        for(i=0; i<pageCount; i++)
        {
            vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);
            if (vma) /* && (vma->vm_flags & VM_PFNMAP) */
            {
                do
                {
                    pte_t *pte;
                    spinlock_t *ptl;
                    unsigned long pfn;
                    pgd_t *pgd;
                    pud_t *pud;
                    pmd_t *pmd;

                    pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
                    if(pgd_val(*pgd) == 0)
                    {
                        printk("rga pgd value is zero \n");
                        break;
                    }

                    pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
                    if (!pud)
                        break;

                    pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
                    if (!pmd)
                        break;

                    pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);
                    if (!pte)
                        break;

                    pfn = pte_pfn(*pte);
                    Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
                    pte_unmap_unlock(pte, ptl);
                }
                while (0);

                pageTable[i] = Address;
            }
            else
            {
                status = RGA_OUT_OF_RESOURCES;
                break;
            }
        }
        return status;
    }
#endif

    /* Fill the page table. */
    for(i=0; i<pageCount; i++)
    {
        /* Get the physical address from the page struct. */
        pageTable[i] = page_to_phys(pages[i]);
    }

    return 0;
}
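
/*
 * Usage sketch, as the mode handlers below do it: source pages fill the
 * head of the table and destination pages follow immediately behind:
 *
 *   ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
 *   ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize],
 *                           DstStart, DstMemSize);
 *
 * Memory is a page index (the caller's address right-shifted by
 * PAGE_SHIFT), not a byte address, hence the Memory << PAGE_SHIFT above.
 */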
static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    uint32_t SrcStart, DstStart;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status = 0;
    uint32_t uv_size, v_size;

    struct page **pages = NULL;

    MMU_Base = NULL;

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.act_h + req->src.y_offset,
                                      &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.act_h + req->dst.y_offset,
                                      &DstStart);
        if(DstMemSize == 0) {
            return -EINVAL;
        }

        /* Cal out the needed mem size */
        AllSize = SrcMemSize + DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if(req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base;

            if(req->src.yrgb_addr == (uint32_t)rga_service.pre_scale_buf)
            {
                /* Down-scale ratio over 2: this is the last pass, so copy
                 * the MMU table from the pre-scale table. */
                for(i=0; i<SrcMemSize; i++)
                {
                    MMU_p[i] = rga_service.pre_scale_buf[i];
                }
            }
            else
            {
                for(i=0; i<SrcMemSize; i++)
                {
                    MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
                }
            }
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ktime_t start, end;
            start = ktime_get();

            ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }

            end = ktime_get();
            end = ktime_sub(end, start);
            printk("dst mmu map time = %d\n", (int)ktime_to_us(end));
        }
        else
        {
            MMU_p = MMU_Base + SrcMemSize;

            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        /* Pad the table with a copy of the last entry. */
        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /*
         * Rewrite the buffer addresses in the request so they index into
         * the MMU table.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    /* Free the page table */
    if (pages != NULL) {
        kfree(pages);
    }

    /* Free MMU table */
    if(MMU_Base != NULL) {
        kfree(MMU_Base);
    }

    return status;
}
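
/*
 * Layout of the table built by rga_mmu_info_BitBlt_mode():
 *
 *   MMU_Base: | src page 0 .. SrcMemSize-1 | dst page 0 .. DstMemSize-1 | pad |
 *
 * Entries [0, SrcMemSize) hold the source page frames and entries
 * [SrcMemSize, AllSize) the destination ones, which is why the rewritten
 * dst.yrgb_addr carries SrcMemSize in its page-index bits.
 */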
static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    uint32_t SrcStart, DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base = NULL;
    uint32_t *MMU_p;
    int ret;
    int status = 0;
    uint32_t stride;

    uint8_t shift;
    uint16_t sw, byte_num;

    shift = 3 - (req->palette_mode & 3);
    sw = req->src.vir_w;
    byte_num = sw >> shift;
    stride = (byte_num + 3) & (~3);

    do
    {
        SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }

        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if(DstMemSize == 0) {
            return -EINVAL;
        }

        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if(CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + DstMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /* map the command buffer first */
        for(i=0; i<CMDMemSize; i++)
        {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        /* map src addr */
        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        /* map dst addr */
        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize + SrcMemSize;

            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        /*
         * Rewrite the buffer addresses in the request so they index into
         * the MMU table.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    /* Free the page table */
    if (pages != NULL) {
        kfree(pages);
    }

    /* Free mmu table */
    if (MMU_Base != NULL) {
        kfree(MMU_Base);
    }

    return status;
}
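
/*
 * Stride arithmetic used above, assuming palette_mode 0..3 selects the
 * BPP1/BPP2/BPP4/BPP8 source formats: shift = 3 - (palette_mode & 3)
 * converts pixels to bytes, so for a 720-pixel-wide BPP4 source
 * (palette_mode = 2) byte_num = 720 >> 1 = 360, and stride stays 360
 * after rounding up to a 4-byte multiple.
 */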
static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)
{
    int DstMemSize;
    uint32_t DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status = 0;

    MMU_Base = NULL;

    do
    {
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if(DstMemSize == 0) {
            return -EINVAL;
        }

        AllSize = DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base;

            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /*
         * Rewrite the buffer address in the request so it indexes into
         * the MMU table.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
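
/*
 * All of the handlers program req->mmu_info.base_addr with
 * virt_to_phys(MMU_Base) >> 2, i.e. the physical address of the table
 * expressed in 4-byte words, and the table itself holds one uint32_t
 * physical page address per mapped page. In a dst-only mode such as
 * color fill, table entry i therefore resolves page i of the
 * destination window.
 */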
static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_req *req)
{
    int DstMemSize;
    uint32_t DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status = 0;

    MMU_Base = NULL;

    do
    {
        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if(DstMemSize == 0) {
            return -EINVAL;
        }

        AllSize = DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base;

            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        /*
         * Rewrite the buffer address in the request so it indexes into
         * the MMU table.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    uint32_t SrcStart, DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status = 0;
    uint32_t uv_size, v_size;

    MMU_Base = NULL;

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.vir_h,
                                      &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if(DstMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + SrcMemSize;

            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /*
         * Rewrite the buffer addresses in the request so they index into
         * the MMU table.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);

        uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
        req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
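
/*
 * The address rewriting above keeps only the low PAGE_SHIFT bits of each
 * user address (the in-page offset) and replaces the page-index bits with
 * the entry's position in the MMU table. For example, with SrcMemSize = 10
 * and a dst UV plane starting 2 pages into the dst window, dst.uv_addr
 * becomes (uv_addr & ~PAGE_MASK) | ((10 + 2) << PAGE_SHIFT), i.e. table
 * entry 12.
 */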
static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    uint32_t SrcStart, DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status = 0;
    uint32_t uv_size, v_size;

    MMU_Base = NULL;

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.vir_h,
                                      &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if(DstMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + DstMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL)
        {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /*
         * Allocate the MMU index memory.
         * It is released when the command completes (run_to_done).
         */
        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /* map src pages */
        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        if(req->dst.yrgb_addr >= KERNEL_SPACE_VALID)
        {
            /* kernel space */
            MMU_p = MMU_Base + SrcMemSize;

            if(req->dst.yrgb_addr == (uint32_t)rga_service.pre_scale_buf)
            {
                for(i=0; i<DstMemSize; i++)
                {
                    MMU_p[i] = rga_service.pre_scale_buf[i];
                }
            }
            else
            {
                for(i=0; i<DstMemSize; i++)
                {
                    MMU_p[i] = virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
                }
            }
        }
        else
        {
            /* user space */
            ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /*
         * Rewrite the buffer addresses in the request so they index into
         * the MMU table.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);

        uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((SrcMemSize) << PAGE_SHIFT);
        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
        req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
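
/*
 * Pre-scale is the first pass of a steep down-scale: the intermediate
 * output lands in the kernel-side rga_service.pre_scale_buf, and that
 * buffer's page table is copied verbatim when it later feeds the BitBlt
 * pass (see the pre_scale_buf comparison in rga_mmu_info_BitBlt_mode()).
 */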
static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status = 0;

    MMU_Base = NULL;

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if(CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for(i=0; i<CMDMemSize; i++) {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        /*
         * Rewrite the buffer address in the request so it indexes into
         * the MMU table.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status = 0;

    MMU_Base = MMU_p = NULL;

    do
    {
        /* cal pat buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h * 4, &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if(CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for(i=0; i<CMDMemSize; i++) {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        if (req->pat.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map pattern memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        /*
         * Rewrite the buffer address in the request so it indexes into
         * the MMU table.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        req->pat.yrgb_addr = (req->pat.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
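
/*
 * The pattern buffer is sized as pat.vir_w * pat.vir_h * 4 bytes, i.e.
 * one 32-bit element per pixel (assuming a 32bpp pattern layout). A
 * page-aligned 32x32 pattern is exactly 4096 bytes, so SrcMemSize comes
 * out as 1; any page misalignment bumps it to 2.
 */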
int rga_set_mmu_info(struct rga_reg *reg, struct rga_req *req)
{
    int ret;

    switch (req->render_mode) {
        case bitblt_mode :
            ret = rga_mmu_info_BitBlt_mode(reg, req);
            break;
        case color_palette_mode :
            ret = rga_mmu_info_color_palette_mode(reg, req);
            break;
        case color_fill_mode :
            ret = rga_mmu_info_color_fill_mode(reg, req);
            break;
        case line_point_drawing_mode :
            ret = rga_mmu_info_line_point_drawing_mode(reg, req);
            break;
        case blur_sharp_filter_mode :
            ret = rga_mmu_info_blur_sharp_filter_mode(reg, req);
            break;
        case pre_scaling_mode :
            ret = rga_mmu_info_pre_scale_mode(reg, req);
            break;
        case update_palette_table_mode :
            ret = rga_mmu_info_update_palette_table_mode(reg, req);
            break;
        case update_patten_buff_mode :
            ret = rga_mmu_info_update_patten_buff_mode(reg, req);
            break;
        default :
            ret = -1;
            break;
    }

    return ret;
}