#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/dma-mapping.h>
#include <asm/memory.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include "rga_mmu_info.h"

extern rga_service_info rga_service;

//extern int mmu_buff_temp[1024];

#define KERNEL_SPACE_VALID    0xc0000000
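
/*
 * Addresses below KERNEL_SPACE_VALID are treated as user-space virtual
 * addresses and are pinned with get_user_pages(); addresses at or above
 * it are assumed to live in the kernel linear map and are translated
 * with virt_to_phys(). The 0xc0000000 cut-off matches the default ARM
 * 3G/1G user/kernel split (PAGE_OFFSET); on a kernel built with a
 * different split this constant would have to follow PAGE_OFFSET.
 */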

#define V7_VATOPA_SUCESS_MASK   (0x1)
#define V7_VATOPA_GET_PADDR(X)  ((X) & 0xFFFFF000)
#define V7_VATOPA_GET_INER(X)   (((X) >> 4) & 7)
#define V7_VATOPA_GET_OUTER(X)  (((X) >> 2) & 3)
#define V7_VATOPA_GET_SH(X)     (((X) >> 7) & 1)
#define V7_VATOPA_GET_NS(X)     (((X) >> 9) & 1)
#define V7_VATOPA_GET_SS(X)     (((X) >> 1) & 1)
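
/*
 * The macros above decode the ARMv7 PAR (Physical Address Register)
 * produced by an ATS1CPR translation: bit 0 is the fault flag (set when
 * the translation failed), bit 1 the SuperSection flag, bits [31:12]
 * the physical page address, and the remaining fields carry the
 * inner/outer cacheability, shareability and non-secure attributes.
 */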

static unsigned int armv7_va_to_pa(unsigned int v_addr)
{
    unsigned int p_addr;

    /* ATS1CPR: translate v_addr, then read the result back from PAR. */
    __asm__ volatile ("mcr p15, 0, %1, c7, c8, 0\n"
                      "isb\n"
                      "mrc p15, 0, %0, c7, c4, 0\n"
                      : "=r" (p_addr)
                      : "r" (v_addr)
                      : "cc");

    if (p_addr & V7_VATOPA_SUCESS_MASK)
        return 0xFFFFFFFF;

    return (V7_VATOPA_GET_SS(p_addr) ? 0xFFFFFFFF : V7_VATOPA_GET_PADDR(p_addr));
}
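
/*
 * Note: armv7_va_to_pa() walks whatever mapping is current on this CPU,
 * so it is only meaningful for addresses mapped in the calling task's
 * context, and the page must stay resident while the result is in use.
 * A failed lookup (fault bit set) and a SuperSection hit both report
 * 0xFFFFFFFF, which the callers below treat as an error.
 */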

static int rga_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)
{
    uint32_t start, end;
    uint32_t pageCount;

    end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;
    start = Mem >> PAGE_SHIFT;
    pageCount = end - start;
    *StartAddr = start;

    return pageCount;
}
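
/*
 * Example: Mem = 0x10000800, MemSize = 0x1000 (4 KiB, PAGE_SIZE = 4096):
 * end = (0x10000800 + 0x1FFF) >> 12 = 0x10002, start = 0x10000, so
 * pageCount = 2: the buffer straddles a page boundary and needs two MMU
 * entries even though it is only one page long.
 */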

static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_addr,
                            int format, uint32_t w, uint32_t h, uint32_t *StartAddr)
{
    uint32_t size_yrgb = 0;
    uint32_t size_uv = 0;
    uint32_t size_v = 0;
    uint32_t stride = 0;
    uint32_t start = 0, end;
    uint32_t pageCount = 0;

    switch (format) {
    /* RGB formats: only the yrgb plane exists. */
    case RK_FORMAT_RGBA_8888 :
    case RK_FORMAT_RGBX_8888 :
    case RK_FORMAT_BGRA_8888 :
        stride = (w * 4 + 3) & (~3);
        size_yrgb = stride * h;
        end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_RGB_888 :
    case RK_FORMAT_BGR_888 :
        stride = (w * 3 + 3) & (~3);
        size_yrgb = stride * h;
        end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_RGB_565 :
    case RK_FORMAT_RGBA_5551 :
    case RK_FORMAT_RGBA_4444 :
        stride = (w * 2 + 3) & (~3);
        size_yrgb = stride * h;
        end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = end - start;
        break;

    /* YUV formats: the planes need not be contiguous, so the mapping
     * spans from the lowest plane address to the highest plane end. */
    case RK_FORMAT_YCbCr_422_SP :
    case RK_FORMAT_YCrCb_422_SP :
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = stride * h;
        start = MIN(yrgb_addr, uv_addr) >> PAGE_SHIFT;
        end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_YCbCr_422_P :
    case RK_FORMAT_YCrCb_422_P :
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = (stride >> 1) * h;
        size_v = (stride >> 1) * h;
        start = MIN(MIN(yrgb_addr, uv_addr), v_addr) >> PAGE_SHIFT;
        end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_YCbCr_420_SP :
    case RK_FORMAT_YCrCb_420_SP :
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = stride * (h >> 1);
        start = MIN(yrgb_addr, uv_addr) >> PAGE_SHIFT;
        end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_YCbCr_420_P :
    case RK_FORMAT_YCrCb_420_P :
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = (stride >> 1) * (h >> 1);
        size_v = (stride >> 1) * (h >> 1);
        start = MIN(MIN(yrgb_addr, uv_addr), v_addr) >> PAGE_SHIFT;
        end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;

    /* BPP formats are not mapped here and report zero pages. */
    case RK_FORMAT_BPP1 :
    case RK_FORMAT_BPP2 :
    case RK_FORMAT_BPP4 :
    case RK_FORMAT_BPP8 :
        break;
    default :
        break;
    }

    *StartAddr = start;
    return pageCount;
}
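
/*
 * Example: a 1280x720 RK_FORMAT_YCbCr_420_SP (NV12) surface whose UV
 * plane directly follows the Y plane at yrgb_addr = 0x20000000:
 * stride = 1280, size_yrgb = 921600, size_uv = 460800, so the mapping
 * spans 0x20000000..0x20151800 and pageCount = 0x152 (338 pages).
 */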

static int rga_MapUserMemory(struct page **pages,
                             uint32_t *pageTable,
                             uint32_t Memory,
                             uint32_t pageCount)
{
    int32_t result;
    uint32_t i;
    int32_t status = 0;
    uint32_t temp;

    /* Pin the user pages so the RGA can DMA from/to them. */
    down_read(&current->mm->mmap_sem);
    result = get_user_pages(current,
                            current->mm,
                            Memory << PAGE_SHIFT,
                            pageCount,
                            1, /* write */
                            0, /* force */
                            pages,
                            NULL);
    up_read(&current->mm->mmap_sem);

    if (result <= 0 || result < pageCount) {
        /* Pinning failed: translate each page via the CPU MMU instead. */
        for (i = 0; i < pageCount; i++) {
            temp = armv7_va_to_pa((Memory + i) << PAGE_SHIFT);
            if (temp == 0xffffffff) {
                printk("rga find mmu phy ddr error\n");
                status = RGA_OUT_OF_RESOURCES;
                break;
            }

            pageTable[i] = temp;
        }
        goto out;
    }

    for (i = 0; i < pageCount; i++) {
        /* Flush the data cache so the RGA sees up-to-date data. */
        dma_sync_single_for_device(NULL, page_to_phys(pages[i]),
                                   PAGE_SIZE, DMA_TO_DEVICE);
        flush_dcache_page(pages[i]);
    }

    /* Fill the page table. */
    for (i = 0; i < pageCount; i++) {
        /* Get the physical address from the page struct. */
        pageTable[i] = page_to_phys(pages[i]);
    }

out:
    if (rgaIS_ERROR(status)) {
        /* Release any pages that were successfully pinned. */
        if (result > 0 && pages != NULL) {
            for (i = 0; i < result; i++) {
                if (pages[i] == NULL)
                    break;

                dma_sync_single_for_device(NULL, page_to_phys(pages[i]),
                                           PAGE_SIZE, DMA_FROM_DEVICE);
                page_cache_release(pages[i]);
            }
        }
    }

    return status;
}
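
/*
 * Note on the fallback above: get_user_pages() can legitimately pin
 * fewer pages than requested (holes or protection mismatches in the
 * range), in which case the code walks the range with armv7_va_to_pa()
 * instead. That path yields usable physical addresses only while the
 * pages stay resident, so it assumes the caller's buffer is not paged
 * out while the RGA operation is in flight.
 */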

static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    uint32_t SrcStart, DstStart;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status;
    uint32_t uv_size, v_size;
    struct page **pages = NULL;

    MMU_Base = NULL;
    status = 0;

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w,
                                      req->src.act_h + req->src.y_offset, &SrcStart);
        if (SrcMemSize == 0)
            return -EINVAL;

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w,
                                      req->dst.act_h + req->dst.y_offset, &DstStart);
        if (DstMemSize == 0)
            return -EINVAL;

        /* Cal out the needed mem size */
        AllSize = SrcMemSize + DstMemSize;

        pages = kmalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kmalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base;

            if (req->src.yrgb_addr == (uint32_t)rga_service.pre_scale_buf) {
                /* Down scale ratio over 2, last pass: */
                /* MMU table copied from the pre-scale table. */
                for (i = 0; i < SrcMemSize; i++)
                    MMU_p[i] = rga_service.pre_scale_buf[i];
            } else {
                for (i = 0; i < SrcMemSize; i++)
                    MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
            ktime_t start, end;

            start = ktime_get();
            ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
            end = ktime_get();
            end = ktime_sub(end, start);
            printk("dst mmu map time = %d\n", (int)ktime_to_us(end));
        } else {
            MMU_p = MMU_Base + SrcMemSize;

            for (i = 0; i < DstMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /* rewrite the buf addresses in req so they index the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    } while (0);

    /* Free the page table */
    if (pages != NULL)
        kfree(pages);

    /* Free MMU table */
    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
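
/*
 * Resulting MMU table layout for BitBlt: SrcMemSize entries for the
 * source pages, then DstMemSize entries for the destination, plus one
 * duplicated guard entry at the end. The hardware is handed
 * virt_to_phys(MMU_Base) >> 2, and each image address is rewritten to
 * "page index << PAGE_SHIFT | in-page offset" so it indexes this table
 * rather than real memory.
 */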

static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    uint32_t SrcStart, DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base = NULL;
    uint32_t *MMU_p;
    int ret, status;
    uint32_t stride, shift;
    uint16_t sw, byte_num;

    status = 0;

    shift = 3 - (req->palette_mode & 3);
    sw = req->src.vir_w;
    byte_num = sw >> shift;
    stride = (byte_num + 3) & (~3);

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
        if (SrcMemSize == 0)
            return -EINVAL;

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0)
            return -EINVAL;

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0)
            return -EINVAL;

        AllSize = SrcMemSize + DstMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /* map the kernel command buffer first */
        for (i = 0; i < CMDMemSize; i++)
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize;

            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize + SrcMemSize;

            for (i = 0; i < DstMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
        }

        /* rewrite the buf addresses in req so they index the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    } while (0);

    /* Free the page table */
    if (pages != NULL)
        kfree(pages);

    /* Free mmu table */
    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
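
/*
 * Palette stride example: palette_mode selects 1/2/4/8 bpp, giving
 * shift = 3 - (palette_mode & 3). For a 2 bpp source (palette_mode = 1)
 * with vir_w = 100: byte_num = 100 >> 2 = 25 and stride =
 * (25 + 3) & ~3 = 28 bytes, i.e. the row is padded to a 4-byte boundary.
 */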

static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)
{
    int DstMemSize;
    uint32_t DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status;

    MMU_Base = NULL;
    status = 0;

    do {
        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0)
            return -EINVAL;

        AllSize = DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base;

            for (i = 0; i < DstMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /* rewrite the buf address in req so it indexes the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    } while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}

static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_req *req)
{
    int DstMemSize;
    uint32_t DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status;

    MMU_Base = NULL;
    status = 0;

    do {
        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0)
            return -EINVAL;

        AllSize = DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base;

            for (i = 0; i < DstMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
        }

        /* rewrite the buf address in req so it indexes the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    } while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}

static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    uint32_t SrcStart, DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status;
    uint32_t uv_size, v_size;

    MMU_Base = NULL;
    status = 0;

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.vir_h,
                                      &SrcStart);
        if (SrcMemSize == 0)
            return -EINVAL;

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0)
            return -EINVAL;

        AllSize = SrcMemSize + DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base;

            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + SrcMemSize;

            for (i = 0; i < DstMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /* rewrite the buf addresses in req so they index the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);

        uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
        req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    } while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
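
/*
 * Example of the address rewrite above for an NV12 destination with
 * yrgb_addr = 0x20000800, uv_addr = 0x200E1800 and DstStart = 0x20000:
 * uv_size = (0x200E1800 - 0x20000000) >> 12 = 0xE1 pages, so the new
 * uv_addr becomes 0x800 | ((SrcMemSize + 0xE1) << 12), a page index
 * into the MMU table combined with the original in-page offset.
 */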

static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    uint32_t SrcStart, DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status;
    uint32_t uv_size, v_size;

    MMU_Base = NULL;
    status = 0;

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.vir_h,
                                      &SrcStart);
        if (SrcMemSize == 0)
            return -EINVAL;

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0)
            return -EINVAL;

        AllSize = SrcMemSize + DstMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /*
         * Allocate the MMU index memory;
         * it is released in the run-to-done path.
         */
        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /* map src pages */
        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base;

            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
        }

        if (req->dst.yrgb_addr >= KERNEL_SPACE_VALID) {
            /* kernel space */
            MMU_p = MMU_Base + SrcMemSize;

            if (req->dst.yrgb_addr == (uint32_t)rga_service.pre_scale_buf) {
                for (i = 0; i < DstMemSize; i++)
                    MMU_p[i] = rga_service.pre_scale_buf[i];
            } else {
                for (i = 0; i < DstMemSize; i++)
                    MMU_p[i] = virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        } else {
            /* user space */
            ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /* rewrite the buf addresses in req so they index the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);

        uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
        req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    } while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
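
/*
 * Pre-scale is the intermediate pass used when downscaling by more than
 * 2x: the destination may be the driver's own pre_scale_buf, whose page
 * table entries are copied verbatim, and the following BitBlt pass (see
 * rga_mmu_info_BitBlt_mode above) recognises the same buffer on its
 * source side and reuses those entries.
 */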

static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status;

    MMU_Base = NULL;
    status = 0;

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);
        if (SrcMemSize == 0)
            return -EINVAL;

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0)
            return -EINVAL;

        AllSize = SrcMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for (i = 0; i < CMDMemSize; i++)
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize;

            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
        }

        /* rewrite the buf address in req so it indexes the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    } while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}

static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status;

    MMU_Base = MMU_p = 0;
    status = 0;

    do {
        /* cal pattern buf mmu info (32 bpp pattern) */
        SrcMemSize = rga_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h * 4, &SrcStart);
        if (SrcMemSize == 0)
            return -EINVAL;

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0)
            return -EINVAL;

        AllSize = SrcMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for (i = 0; i < CMDMemSize; i++)
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));

        if (req->pat.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map pattern memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize;

            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
        }

        /* rewrite the buf address in req so it indexes the MMU table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->pat.yrgb_addr = (req->pat.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    } while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}

int rga_set_mmu_info(struct rga_reg *reg, struct rga_req *req)
{
    int ret;

    switch (req->render_mode) {
    case bitblt_mode :
        ret = rga_mmu_info_BitBlt_mode(reg, req);
        break;
    case color_palette_mode :
        ret = rga_mmu_info_color_palette_mode(reg, req);
        break;
    case color_fill_mode :
        ret = rga_mmu_info_color_fill_mode(reg, req);
        break;
    case line_point_drawing_mode :
        ret = rga_mmu_info_line_point_drawing_mode(reg, req);
        break;
    case blur_sharp_filter_mode :
        ret = rga_mmu_info_blur_sharp_filter_mode(reg, req);
        break;
    case pre_scaling_mode :
        ret = rga_mmu_info_pre_scale_mode(reg, req);
        break;
    case update_palette_table_mode :
        ret = rga_mmu_info_update_palette_table_mode(reg, req);
        break;
    case update_patten_buff_mode :
        ret = rga_mmu_info_update_patten_buff_mode(reg, req);
        break;
    default :
        ret = -EINVAL;
        break;
    }

    return ret;
}
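
/*
 * rga_set_mmu_info() is the single entry point the request path calls
 * before a command is queued: each handler above builds the per-request
 * page table, rewrites the request's buffer addresses to index it, and
 * records the table in reg->MMU_base so it can be freed when the
 * command completes.
 */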