#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/dma-mapping.h>
#include <asm/memory.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>

#include "rga_mmu_info.h"
extern rga_service_info rga_service;

#define KERNEL_SPACE_VALID     0xc0000000
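/*
 * Addresses below KERNEL_SPACE_VALID (the 3G/1G user/kernel split on this
 * ARM configuration) are treated as user-space virtual addresses: they must
 * be pinned and translated page by page before the RGA MMU can use them.
 * Addresses at or above the split are kernel-direct-mapped and can be
 * translated with virt_to_phys().
 */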
#define V7_VATOPA_SUCESS_MASK   (0x1)
#define V7_VATOPA_GET_PADDR(X)  (X & 0xFFFFF000)
#define V7_VATOPA_GET_INER(X)   ((X>>4) & 7)
#define V7_VATOPA_GET_OUTER(X)  ((X>>2) & 3)
#define V7_VATOPA_GET_SH(X)     ((X>>7) & 1)
#define V7_VATOPA_GET_NS(X)     ((X>>9) & 1)
#define V7_VATOPA_GET_SS(X)     ((X>>1) & 1)
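/*
 * armv7_va_to_pa() translates a virtual address in the current address space
 * to a physical address using the ARMv7 ATS1CPR operation (write to CP15
 * c7,c8,0), then reads the result back from the Physical Address Register
 * (CP15 c7,c4,0). Bit 0 of the PAR flags a translation abort; the macros
 * above pick apart the remaining attribute fields of a successful result.
 */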
unsigned int armv7_va_to_pa(unsigned int v_addr)
{
    unsigned int p_addr;

    __asm__ volatile ("mcr p15, 0, %1, c7, c8, 0\n"
                      "isb\n"
                      "mrc p15, 0, %0, c7, c4, 0\n"
                      : "=r" (p_addr)
                      : "r" (v_addr)
                      : "cc");

    if (p_addr & V7_VATOPA_SUCESS_MASK)
        return 0xFFFFFFFF;   /* translation aborted */

    return (V7_VATOPA_GET_SS(p_addr) ? 0xFFFFFFFF : V7_VATOPA_GET_PADDR(p_addr));
}
static int rga_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)
{
    uint32_t start, end;
    uint32_t pageCount;

    end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;
    start = Mem >> PAGE_SHIFT;
    pageCount = end - start;
    *StartAddr = start;
    return pageCount;
}
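/*
 * Example: Mem = 0x10000800 with MemSize = 0x1000 touches two 4 KB pages
 * (0x10000000 and 0x10001000): start = 0x10000, end = 0x10002, so the
 * function stores the first page index in *StartAddr and returns 2.
 */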
static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_addr,
                            int format, uint32_t w, uint32_t h, uint32_t *StartAddr)
{
    uint32_t size_yrgb = 0;
    uint32_t size_uv = 0;
    uint32_t size_v = 0;
    uint32_t stride = 0;
    uint32_t start, end;
    uint32_t pageCount;

    switch (format)
    {
        case RK_FORMAT_RGBA_8888 :
            stride = (w * 4 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGBX_8888 :
            stride = (w * 4 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGB_888 :
            stride = (w * 3 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_BGRA_8888 :
            size_yrgb = w * h * 4;   /* assumed: packed 32-bit pixels, 4 bytes each */
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGB_565 :
            stride = (w * 2 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGBA_5551 :
            stride = (w * 2 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGBA_4444 :
            stride = (w * 2 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_BGR_888 :
            stride = (w * 3 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_422_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = stride * h;
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_422_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * h);
            size_v = ((stride >> 1) * h);
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_420_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = (stride * (h >> 1));
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_420_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * (h >> 1));
            size_v = ((stride >> 1) * (h >> 1));
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
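        /*
         * The YCrCb cases below mirror the YCbCr ones: semi-planar (SP)
         * formats carry one interleaved chroma plane (stride * h for 4:2:2,
         * stride * h/2 for 4:2:0), while planar (P) formats carry two
         * half-width chroma planes. The page range must span whichever of
         * the planes starts first and ends last, hence the MIN/MAX dance.
         */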
        case RK_FORMAT_YCrCb_422_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = stride * h;
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_422_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * h);
            size_v = ((stride >> 1) * h);
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_420_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = (stride * (h >> 1));
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_420_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * (h >> 1));
            size_v = ((stride >> 1) * (h >> 1));
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_BPP1 :
            break;
        case RK_FORMAT_BPP2 :
            break;
        case RK_FORMAT_BPP4 :
            break;
        case RK_FORMAT_BPP8 :
            break;
        default :
            /* unknown format: report an empty page range */
            pageCount = 0;
            start = 0;
            break;
    }

    *StartAddr = start;
    return pageCount;
}
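/*
 * rga_MapUserMemory() fills pageTable with the physical address of each
 * page backing a user buffer. It first tries to pin the pages with
 * get_user_pages(); if that pins fewer pages than needed, it falls back to
 * probing the live page tables with the hardware VA-to-PA translation
 * above, which only works while the pages happen to be resident.
 */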
static int rga_MapUserMemory(struct page **pages,
                             uint32_t *pageTable,
                             uint32_t Memory,   /* first page index of the user VA range */
                             uint32_t pageCount)
{
    int32_t result;
    uint32_t i;
    uint32_t status = 0;
    uint32_t temp;

    down_read(&current->mm->mmap_sem);
    /* legacy get_user_pages(): task, mm, start, nr_pages, write, force, pages, vmas */
    result = get_user_pages(current, current->mm,
                            Memory << PAGE_SHIFT,
                            pageCount, 1, 0, pages, NULL);
    up_read(&current->mm->mmap_sem);

    if (result <= 0 || result < pageCount) {
        /* Pinning failed: fall back to a hardware page-table walk. */
        status = 0;
        for (i = 0; i < pageCount; i++) {
            temp = armv7_va_to_pa((Memory + i) << PAGE_SHIFT);
            if (temp == 0xffffffff) {
                printk("rga find mmu phy ddr error\n");
                status = RGA_OUT_OF_RESOURCES;
                break;
            }
            pageTable[i] = temp;
        }
        goto out;
    }

    for (i = 0; i < pageCount; i++) {
        /* Flush the data cache. */
#ifdef ANDROID
        dma_sync_single_for_device(NULL, page_to_phys(pages[i]),
                                   PAGE_SIZE, DMA_TO_DEVICE);
#else
        flush_dcache_page(pages[i]);
#endif
    }

    /* Fill the page table. */
    for (i = 0; i < pageCount; i++) {
        /* Get the physical address from the page struct. */
        pageTable[i] = page_to_phys(pages[i]);
    }

    return 0;

out:
    if (rgaIS_ERROR(status)) {
        /* Release the partially pinned page array. */
        if (result > 0 && pages != NULL) {
            for (i = 0; i < (uint32_t)result; i++) {
                if (pages[i] == NULL)
                    break;
#ifdef ANDROID
                dma_sync_single_for_device(NULL, page_to_phys(pages[i]),
                                           PAGE_SIZE, DMA_FROM_DEVICE);
#endif
                page_cache_release(pages[i]);
            }
        }
    }

    return status;
}
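/*
 * All the mode-specific helpers below follow the same pattern: compute the
 * page span of every buffer involved, build one flat table of physical page
 * addresses, hand its physical base to the hardware (shifted right by 2 to
 * match the register format), and rewrite the request's buffer addresses as
 * (table_index << PAGE_SHIFT) | page_offset so the engine fetches through
 * the table instead of the original virtual pointers.
 */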
static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    uint32_t SrcStart, DstStart;
    uint32_t i, AllSize;
    uint32_t *MMU_Base = NULL, *MMU_p;
    int ret, status = 0;
    uint32_t uv_size, v_size;
    struct page **pages = NULL;

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w,
                                      req->src.act_h + req->src.y_offset, &SrcStart);
        if (SrcMemSize == 0)
            return -EINVAL;

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w,
                                      req->dst.act_h + req->dst.y_offset, &DstStart);
        if (DstMemSize == 0)
            return -EINVAL;

        /* cal out the needed mem size */
        AllSize = SrcMemSize + DstMemSize;

        pages = kmalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kmalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else if (req->src.yrgb_addr == (uint32_t)rga_service.pre_scale_buf) {
            /* Down-scale ratio over 2: the last pass reads the pre-scale
             * output, so copy its MMU table instead of re-translating. */
            MMU_p = MMU_Base;
            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = rga_service.pre_scale_buf[i];
        } else {
            MMU_p = MMU_Base;
            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
            ktime_t start, end;

            start = ktime_get();
            ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
            end = ktime_sub(ktime_get(), start);
            printk("dst mmu map time = %d\n", (int)ktime_to_us(end));
        } else {
            MMU_p = MMU_Base + SrcMemSize;
            for (i = 0; i < DstMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
        }

        /* duplicate the last entry so a one-past-the-end fetch stays valid */
        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /* rewrite the buffer addresses in the request so they index the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, MMU_Base + AllSize + 1);
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        kfree(pages);
        return 0;
    } while (0);

    /* Free the page table */
    if (pages != NULL)
        kfree(pages);

    /* Free MMU table */
    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
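/*
 * Color palette source buffers are 1/2/4/8 bpp indexed bitmaps:
 * palette_mode 0..3 selects bpp = 1 << palette_mode, so the byte width of
 * one line is vir_w >> (3 - palette_mode), rounded up to a 4-byte stride.
 */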
static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    uint32_t SrcStart, DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i, AllSize;
    uint32_t *MMU_Base = NULL, *MMU_p;
    int ret, status = 0;
    uint32_t stride;

    uint8_t shift;
    uint16_t sw, byte_num;

    shift = 3 - (req->palette_mode & 3);
    sw = req->src.vir_w;
    byte_num = sw >> shift;
    stride = (byte_num + 3) & (~3);

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
        if (SrcMemSize == 0)
            return -EINVAL;

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0)
            return -EINVAL;

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0)
            return -EINVAL;

        AllSize = SrcMemSize + DstMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /* the command buffer lives in kernel space: translate it directly */
        for (i = 0; i < CMDMemSize; i++)
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize;
            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize],
                                    &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize + SrcMemSize;
            for (i = 0; i < DstMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
        }

        /* rewrite the buffer addresses in the request so they index the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, MMU_Base + AllSize + 1);
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        kfree(pages);
        return 0;
    } while (0);

    /* Free the page table */
    if (pages != NULL)
        kfree(pages);

    /* Free mmu table */
    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)
{
    int DstMemSize;
    uint32_t DstStart;
    struct page **pages = NULL;
    uint32_t i, AllSize;
    uint32_t *MMU_Base = NULL, *MMU_p;
    int ret, status = 0;

    do {
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0)
            return -EINVAL;

        AllSize = DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base;
            for (i = 0; i < DstMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /* rewrite the buffer address in the request so it indexes the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, MMU_Base + AllSize + 1);
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        kfree(pages);
        return 0;
    } while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_req *req)
{
    int DstMemSize;
    uint32_t DstStart;
    struct page **pages = NULL;
    uint32_t i, AllSize;
    uint32_t *MMU_Base = NULL, *MMU_p;
    int ret, status = 0;

    do {
        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0)
            return -EINVAL;

        AllSize = DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base;
            for (i = 0; i < DstMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /* rewrite the buffer address in the request so it indexes the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, MMU_Base + AllSize + 1);
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        kfree(pages);
        return 0;
    } while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    uint32_t SrcStart, DstStart;
    struct page **pages = NULL;
    uint32_t i, AllSize;
    uint32_t *MMU_Base = NULL, *MMU_p;
    int ret, status = 0;
    uint32_t uv_size, v_size;

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.vir_h,
                                      &SrcStart);
        if (SrcMemSize == 0)
            return -EINVAL;

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0)
            return -EINVAL;

        AllSize = SrcMemSize + DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base;
            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + SrcMemSize;
            for (i = 0; i < DstMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /* rewrite the buffer addresses in the request so they index the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);

        uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
        req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, MMU_Base + AllSize + 1);
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        kfree(pages);
        return 0;
    } while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
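/*
 * Pre-scale mode writes its output into rga_service.pre_scale_buf, a
 * kernel-side table of physical pages. A follow-up bitblt that reads the
 * scaled image back (see rga_mmu_info_BitBlt_mode above) recognizes that
 * address and copies the same table entries instead of re-translating.
 */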
static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    uint32_t SrcStart, DstStart;
    struct page **pages = NULL;
    uint32_t i, AllSize;
    uint32_t *MMU_Base = NULL, *MMU_p;
    int ret, status = 0;
    uint32_t uv_size, v_size;

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.vir_h,
                                      &SrcStart);
        if (SrcMemSize == 0)
            return -EINVAL;

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0)
            return -EINVAL;

        AllSize = SrcMemSize + DstMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /*
         * Allocate the MMU index memory.
         * It is released when the cmd reaches the run_to_done state.
         */
        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /* map src pages */
        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base;
            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
        }

        if (req->dst.yrgb_addr >= KERNEL_SPACE_VALID) {
            /* kernel space */
            MMU_p = MMU_Base + SrcMemSize;

            if (req->dst.yrgb_addr == (uint32_t)rga_service.pre_scale_buf) {
                for (i = 0; i < DstMemSize; i++)
                    MMU_p[i] = rga_service.pre_scale_buf[i];
            } else {
                for (i = 0; i < DstMemSize; i++)
                    MMU_p[i] = virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        } else {
            /* user space */
            ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /* rewrite the buffer addresses in the request so they index the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);

        uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
        req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, MMU_Base + AllSize + 1);
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        kfree(pages);
        return 0;
    } while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i, AllSize;
    uint32_t *MMU_Base = NULL, *MMU_p;
    int ret, status = 0;

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);
        if (SrcMemSize == 0)
            return -EINVAL;

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0)
            return -EINVAL;

        AllSize = SrcMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for (i = 0; i < CMDMemSize; i++)
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize;
            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
        }

        /* rewrite the buffer address in the request so it indexes the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, MMU_Base + AllSize);
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        kfree(pages);
        return 0;
    } while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i, AllSize;
    uint32_t *MMU_Base = NULL, *MMU_p = NULL;
    int ret, status = 0;

    do {
        /* cal pattern buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h * 4, &SrcStart);
        if (SrcMemSize == 0)
            return -EINVAL;

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0)
            return -EINVAL;

        AllSize = SrcMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for (i = 0; i < CMDMemSize; i++)
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));

        if (req->pat.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize;
            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
        }

        /* rewrite the buffer address in the request so it indexes the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->pat.yrgb_addr = (req->pat.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, MMU_Base + AllSize);
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        kfree(pages);
        return 0;
    } while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
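/*
 * Dispatch: every render mode gets its own table builder because each mode
 * maps a different combination of source, destination, pattern and command
 * buffers into the MMU table.
 */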
int rga_set_mmu_info(struct rga_reg *reg, struct rga_req *req)
{
    int ret;

    switch (req->render_mode) {
        case bitblt_mode :
            ret = rga_mmu_info_BitBlt_mode(reg, req);
            break;
        case color_palette_mode :
            ret = rga_mmu_info_color_palette_mode(reg, req);
            break;
        case color_fill_mode :
            ret = rga_mmu_info_color_fill_mode(reg, req);
            break;
        case line_point_drawing_mode :
            ret = rga_mmu_info_line_point_drawing_mode(reg, req);
            break;
        case blur_sharp_filter_mode :
            ret = rga_mmu_info_blur_sharp_filter_mode(reg, req);
            break;
        case pre_scaling_mode :
            ret = rga_mmu_info_pre_scale_mode(reg, req);
            break;
        case update_palette_table_mode :
            ret = rga_mmu_info_update_palette_table_mode(reg, req);
            break;
        case update_patten_buff_mode :
            ret = rga_mmu_info_update_patten_buff_mode(reg, req);
            break;
        default :
            /* unknown render mode */
            ret = -EINVAL;
            break;
    }

    return ret;
}