#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>

#include "rga_mmu_info.h"

extern rga_service_info rga_service;

#define KERNEL_SPACE_VALID    0xc0000000

/* Calculate how many pages a [Mem, Mem + MemSize) byte range touches. */
static int rga_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)
{
    uint32_t start, end;
    uint32_t pageCount;

    end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;
    start = Mem >> PAGE_SHIFT;
    pageCount = end - start;

    *StartAddr = start;
    return pageCount;
}
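
/*
 * Worked example of the rounding above, assuming 4 KiB pages
 * (PAGE_SHIFT == 12): for Mem = 0x12345678 and MemSize = 0x2000,
 * end = (0x12345678 + 0x2000 + 0xFFF) >> 12 = 0x12348 and
 * start = 0x12345678 >> 12 = 0x12345, so the function returns 3:
 * an 8 KiB span that starts mid-page touches three pages.
 */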

/* Calculate the page span of a source/destination image for the given format. */
static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_addr,
                            int format, uint32_t w, uint32_t h, uint32_t *StartAddr)
{
    uint32_t size_yrgb = 0;
    uint32_t size_uv = 0;
    uint32_t size_v = 0;
    uint32_t stride = 0;
    uint32_t start, end;
    uint32_t pageCount;

    switch (format) {
        case RK_FORMAT_RGBA_8888 :
            stride = (w * 4 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGBX_8888 :
            stride = (w * 4 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGB_888 :
            stride = (w * 3 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_BGRA_8888 :
            stride = (w * 4 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGB_565 :
            stride = (w * 2 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGBA_5551 :
            stride = (w * 2 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGBA_4444 :
            stride = (w * 2 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_BGR_888 :
            stride = (w * 3 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;

        /* YUV formats: the span runs from the lowest plane address to the
         * highest plane end, since the planes need not be contiguous. */
        case RK_FORMAT_YCbCr_422_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = stride * h;
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_422_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * h);
            size_v = ((stride >> 1) * h);
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_420_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = (stride * (h >> 1));
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_420_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * (h >> 1));
            size_v = ((stride >> 1) * (h >> 1));
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;

        case RK_FORMAT_YCrCb_422_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = stride * h;
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_422_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * h);
            size_v = ((stride >> 1) * h);
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_420_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = (stride * (h >> 1));
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_420_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * (h >> 1));
            size_v = ((stride >> 1) * (h >> 1));
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;

        case RK_FORMAT_BPP1 :
        case RK_FORMAT_BPP2 :
        case RK_FORMAT_BPP4 :
        case RK_FORMAT_BPP8 :
        default :
            pageCount = 0;
            start = 0;
            break;
    }

    *StartAddr = start;
    return pageCount;
}
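
/*
 * Worked example for RK_FORMAT_YCbCr_420_SP (NV12-style), assuming
 * w = 1280, h = 720 and 4 KiB pages: stride = 1280, size_yrgb =
 * 1280 * 720 = 921600 and size_uv = 1280 * 360 = 460800. With the Y
 * and UV planes contiguous that is 1382400 bytes, i.e. 338 pages,
 * plus one more if the buffer does not start on a page boundary.
 */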

/* Pin a user buffer's pages and record their physical addresses in pageTable. */
static int rga_MapUserMemory(struct page **pages, uint32_t *pageTable,
                             uint32_t Memory, uint32_t pageCount)
{
    int result, i;

    down_read(&current->mm->mmap_sem);
    result = get_user_pages(current, current->mm,
                            Memory << PAGE_SHIFT, pageCount,
                            1 /* write */, 0 /* no force */,
                            pages, NULL);
    up_read(&current->mm->mmap_sem);

    if (result <= 0 || result < pageCount)
        return -EINVAL;

    for (i = 0; i < pageCount; i++) {
        /* Flush the data cache so the device reads coherent data. */
        dma_sync_single_for_device(NULL, page_to_phys(pages[i]),
                                   PAGE_SIZE, DMA_TO_DEVICE);
        flush_dcache_page(pages[i]);
    }

    /* Fill the page table with the physical address of each pinned page. */
    for (i = 0; i < pageCount; i++)
        pageTable[i * (PAGE_SIZE / 4096)] = page_to_phys(pages[i]);

    return 0;
}
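
/*
 * The resulting table is flat: pageTable[i] holds the physical base
 * address of the i-th pinned user page, so (as the callers below rely
 * on) the RGA MMU can resolve (page_index << PAGE_SHIFT) | page_offset
 * with a single table lookup.
 */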

static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    uint32_t SrcStart, DstStart, CMDStart;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, i;
    struct page **pages = NULL;

    /* cal src buf mmu info */
    SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                  req->src.format, req->src.vir_w, req->src.vir_h,
                                  &SrcStart);
    if (SrcMemSize == 0)
        return -EINVAL;

    /* cal dst buf mmu info */
    DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                  req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                  &DstStart);
    if (DstMemSize == 0)
        return -EINVAL;

    /* cal cmd buf mmu info */
    CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
    if (CMDMemSize == 0)
        return -EINVAL;

    AllSize = SrcMemSize + DstMemSize + CMDMemSize;

    pages = kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
    if (pages == NULL) {
        pr_err("RGA MMU malloc pages mem failed\n");
        return -ENOMEM;
    }

    MMU_Base = kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
    if (MMU_Base == NULL) {
        pr_err("RGA MMU malloc MMU_Base point failed\n");
        kfree(pages);
        return -ENOMEM;
    }

    /* The command buffer is a kernel allocation: translate it directly. */
    for (i = 0; i < CMDMemSize; i++) {
        MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
    }

    if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
        /* User-space source: pin the pages and map them. */
        ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
        if (ret < 0) {
            pr_err("rga map src memory failed\n");
            kfree(pages);
            kfree(MMU_Base);
            return ret;
        }
    } else {
        /* Kernel-space source: translate the pages directly. */
        MMU_p = MMU_Base + CMDMemSize;
        for (i = 0; i < SrcMemSize; i++)
            MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
    }

    if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
        ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
        if (ret < 0) {
            pr_err("rga map dst memory failed\n");
            kfree(pages);
            kfree(MMU_Base);
            return ret;
        }
    } else {
        MMU_p = MMU_Base + CMDMemSize + SrcMemSize;
        for (i = 0; i < DstMemSize; i++)
            MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
    }

    /*
     * Rewrite the buffer addresses in the request so that, seen through
     * the RGA MMU, each one becomes a page index into the table built
     * above plus the original offset within the page.
     */
    req->mmu_info.base_addr = virt_to_phys(MMU_Base);

    req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
    req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
    req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

    req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);

    /* Record the allocated table so it can be released when the command completes. */
    reg->MMU_base = MMU_Base;

    if (pages != NULL) {
        /* Free the page-struct array; MMU_Base is freed after the command runs. */
        kfree(pages);
    }

    return 0;
}
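
/*
 * Layout of the table built by the function above, one 32-bit physical
 * page address per entry; the rewritten request addresses are page
 * indices into this table:
 *
 *   MMU_Base[0 .. CMDMemSize-1]                        command buffer
 *   MMU_Base[CMDMemSize .. CMDMemSize+SrcMemSize-1]    source pages
 *   MMU_Base[CMDMemSize+SrcMemSize .. AllSize-1]       destination pages
 */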

static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    uint32_t SrcStart, DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t AllSize;
    uint32_t *MMU_Base;
    uint32_t stride;
    uint32_t shift;
    uint16_t sw, byte_num;
    int ret, i;

    /* Bytes per source line: palette_mode selects 1/2/4/8 bpp. */
    shift = 3 - (req->palette_mode & 3);
    sw = req->src.vir_w;
    byte_num = sw >> shift;
    stride = (byte_num + 3) & (~3);

    SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
    if (SrcMemSize == 0)
        return -EINVAL;

    DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                  req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                  &DstStart);
    if (DstMemSize == 0)
        return -EINVAL;

    CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
    if (CMDMemSize == 0)
        return -EINVAL;

    AllSize = SrcMemSize + DstMemSize + CMDMemSize;

    pages = kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
    if (pages == NULL) {
        pr_err("RGA MMU malloc pages mem failed\n");
        return -ENOMEM;
    }

    MMU_Base = kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
    if (MMU_Base == NULL) {
        pr_err("RGA MMU malloc MMU_Base point failed\n");
        kfree(pages);
        return -ENOMEM;
    }

    for (i = 0; i < CMDMemSize; i++) {
        MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
    }

    ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
    if (ret < 0) {
        pr_err("rga map src memory failed\n");
        kfree(pages);
        kfree(MMU_Base);
        return ret;
    }

    ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
    if (ret < 0) {
        pr_err("rga map dst memory failed\n");
        kfree(pages);
        kfree(MMU_Base);
        return ret;
    }

    /* Rewrite the request addresses to index into the MMU table built above. */
    req->mmu_info.base_addr = virt_to_phys(MMU_Base);
    req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
    req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);

    /* Record the allocated table so it can be released when the command completes. */
    reg->MMU_base = MMU_Base;

    if (pages != NULL) {
        kfree(pages);
    }

    return 0;
}
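
/*
 * Worked example for the palette stride computed above: palette_mode = 0
 * selects 1 bpp, so shift = 3 and a 104-pixel-wide row packs into
 * byte_num = 104 >> 3 = 13 bytes, which the 4-byte alignment pads to
 * stride = (13 + 3) & ~3 = 16 bytes per line.
 */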

static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)
{
    int DstMemSize, CMDMemSize;
    uint32_t DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t AllSize;
    uint32_t *MMU_Base;
    int ret, i;

    DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                  req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                  &DstStart);
    if (DstMemSize == 0)
        return -EINVAL;

    CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
    if (CMDMemSize == 0)
        return -EINVAL;

    AllSize = DstMemSize + CMDMemSize;

    pages = kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
    if (pages == NULL) {
        pr_err("RGA MMU malloc pages mem failed\n");
        return -ENOMEM;
    }

    MMU_Base = kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
    if (MMU_Base == NULL) {
        pr_err("RGA MMU malloc MMU_Base point failed\n");
        kfree(pages);
        return -ENOMEM;
    }

    for (i = 0; i < CMDMemSize; i++) {
        MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
    }

    ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], DstStart, DstMemSize);
    if (ret < 0) {
        pr_err("rga map dst memory failed\n");
        kfree(pages);
        kfree(MMU_Base);
        return ret;
    }

    /* Rewrite the request addresses to index into the MMU table built above. */
    req->mmu_info.base_addr = virt_to_phys(MMU_Base);
    req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

    /* Record the allocated table so it can be released when the command completes. */
    reg->MMU_base = MMU_Base;

    if (pages != NULL) {
        kfree(pages);
    }

    return 0;
}

static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_req *req)
{
    int DstMemSize, CMDMemSize;
    uint32_t DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t AllSize;
    uint32_t *MMU_Base;
    int ret, i;

    /* cal dst buf mmu info */
    DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                  req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                  &DstStart);
    if (DstMemSize == 0)
        return -EINVAL;

    CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
    if (CMDMemSize == 0)
        return -EINVAL;

    AllSize = DstMemSize + CMDMemSize;

    pages = kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
    if (pages == NULL) {
        pr_err("RGA MMU malloc pages mem failed\n");
        return -ENOMEM;
    }

    MMU_Base = kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
    if (MMU_Base == NULL) {
        pr_err("RGA MMU malloc MMU_Base point failed\n");
        kfree(pages);
        return -ENOMEM;
    }

    for (i = 0; i < CMDMemSize; i++) {
        MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
    }

    ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], DstStart, DstMemSize);
    if (ret < 0) {
        pr_err("rga map dst memory failed\n");
        kfree(pages);
        kfree(MMU_Base);
        return ret;
    }

    /* Rewrite the request addresses to index into the MMU table built above. */
    req->mmu_info.base_addr = virt_to_phys(MMU_Base);
    req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

    /* Record the allocated table so it can be released when the command completes. */
    reg->MMU_base = MMU_Base;

    if (pages != NULL) {
        kfree(pages);
    }

    return 0;
}

static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    uint32_t SrcStart, DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t AllSize;
    uint32_t *MMU_Base;
    int ret, i;

    /* cal src buf mmu info */
    SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                  req->src.format, req->src.vir_w, req->src.vir_h,
                                  &SrcStart);
    if (SrcMemSize == 0)
        return -EINVAL;

    /* cal dst buf mmu info */
    DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                  req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                  &DstStart);
    if (DstMemSize == 0)
        return -EINVAL;

    /* cal cmd buf mmu info */
    CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
    if (CMDMemSize == 0)
        return -EINVAL;

    AllSize = SrcMemSize + DstMemSize + CMDMemSize;

    pages = kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
    if (pages == NULL) {
        pr_err("RGA MMU malloc pages mem failed\n");
        return -ENOMEM;
    }

    MMU_Base = kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
    if (MMU_Base == NULL) {
        pr_err("RGA MMU malloc MMU_Base point failed\n");
        kfree(pages);
        return -ENOMEM;
    }

    for (i = 0; i < CMDMemSize; i++) {
        MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
    }

    ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
    if (ret < 0) {
        pr_err("rga map src memory failed\n");
        kfree(pages);
        kfree(MMU_Base);
        return ret;
    }

    ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
    if (ret < 0) {
        pr_err("rga map dst memory failed\n");
        kfree(pages);
        kfree(MMU_Base);
        return ret;
    }

    /* Rewrite the request addresses to index into the MMU table built above. */
    req->mmu_info.base_addr = virt_to_phys(MMU_Base);

    req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
    req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
    req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

    req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);

    /* Record the allocated table so it can be released when the command completes. */
    reg->MMU_base = MMU_Base;

    if (pages != NULL) {
        kfree(pages);
    }

    return 0;
}

static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    uint32_t SrcStart, DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, i;

    /* cal src buf mmu info */
    SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                  req->src.format, req->src.vir_w, req->src.vir_h,
                                  &SrcStart);
    if (SrcMemSize == 0)
        return -EINVAL;

    /* cal dst buf mmu info */
    DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                  req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                  &DstStart);
    if (DstMemSize == 0)
        return -EINVAL;

    /* cal cmd buf mmu info */
    CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
    if (CMDMemSize == 0)
        return -EINVAL;

    AllSize = SrcMemSize + DstMemSize + CMDMemSize;

    pages = kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
    if (pages == NULL) {
        pr_err("RGA MMU malloc pages mem failed\n");
        return -ENOMEM;
    }

    /*
     * Allocate the MMU index memory.
     * It is released once the command has run to done.
     */
    MMU_Base = kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
    if (MMU_Base == NULL) {
        pr_err("RGA MMU malloc MMU_Base point failed\n");
        kfree(pages);
        return -ENOMEM;
    }

    for (i = 0; i < CMDMemSize; i++) {
        MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
    }

    /* map src pages */
    ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
    if (ret < 0) {
        pr_err("rga map src memory failed\n");
        kfree(pages);
        kfree(MMU_Base);
        return ret;
    }

    if (req->dst.yrgb_addr >= KERNEL_SPACE_VALID) {
        /* Kernel-space destination: translate the pages directly. */
        MMU_p = MMU_Base + CMDMemSize + SrcMemSize;
        for (i = 0; i < DstMemSize; i++)
            MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
    } else {
        /* User-space destination: pin the pages and map them. */
        ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
        if (ret < 0) {
            pr_err("rga map dst memory failed\n");
            kfree(pages);
            kfree(MMU_Base);
            return ret;
        }
    }

    /* Rewrite the request addresses to index into the MMU table built above. */
    req->mmu_info.base_addr = virt_to_phys(MMU_Base);

    req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
    req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
    req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

    req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);

    /* Record the allocated table so it can be released when the command completes. */
    reg->MMU_base = MMU_Base;

    if (pages != NULL) {
        kfree(pages);
    }

    return 0;
}
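
/*
 * Note on the destination branch above: addresses at or above
 * KERNEL_SPACE_VALID (0xc0000000) belong to a kernel allocation whose
 * physical pages virt_to_phys() can translate directly; anything below
 * is a user-space address and must be pinned through rga_MapUserMemory().
 */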

static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t AllSize;
    uint32_t *MMU_Base;
    int ret, i;

    /* cal src buf mmu info */
    SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);
    if (SrcMemSize == 0)
        return -EINVAL;

    /* cal cmd buf mmu info */
    CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
    if (CMDMemSize == 0)
        return -EINVAL;

    AllSize = SrcMemSize + CMDMemSize;

    pages = kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
    if (pages == NULL) {
        pr_err("RGA MMU malloc pages mem failed\n");
        return -ENOMEM;
    }

    MMU_Base = kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
    if (MMU_Base == NULL) {
        pr_err("RGA MMU malloc MMU_Base point failed\n");
        kfree(pages);
        return -ENOMEM;
    }

    for (i = 0; i < CMDMemSize; i++) {
        MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
    }

    ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
    if (ret < 0) {
        pr_err("rga map src memory failed\n");
        kfree(pages);
        kfree(MMU_Base);
        return ret;
    }

    /* Rewrite the request addresses to index into the MMU table built above. */
    req->mmu_info.base_addr = virt_to_phys(MMU_Base);
    req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

    /* Record the allocated table so it can be released when the command completes. */
    reg->MMU_base = MMU_Base;

    if (pages != NULL) {
        kfree(pages);
    }

    return 0;
}

static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t AllSize;
    uint32_t *MMU_Base;
    int ret, i;

    /* cal src buf mmu info */
    SrcMemSize = rga_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h * 4, &SrcStart);
    if (SrcMemSize == 0)
        return -EINVAL;

    /* cal cmd buf mmu info */
    CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
    if (CMDMemSize == 0)
        return -EINVAL;

    AllSize = SrcMemSize + CMDMemSize;

    pages = kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
    if (pages == NULL) {
        pr_err("RGA MMU malloc pages mem failed\n");
        return -ENOMEM;
    }

    MMU_Base = kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
    if (MMU_Base == NULL) {
        pr_err("RGA MMU malloc MMU_Base point failed\n");
        kfree(pages);
        return -ENOMEM;
    }

    for (i = 0; i < CMDMemSize; i++) {
        MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
    }

    ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
    if (ret < 0) {
        pr_err("rga map src memory failed\n");
        kfree(pages);
        kfree(MMU_Base);
        return ret;
    }

    /* Rewrite the request addresses to index into the MMU table built above. */
    req->mmu_info.base_addr = virt_to_phys(MMU_Base);
    req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

    /* Record the allocated table so it can be released when the command completes. */
    reg->MMU_base = MMU_Base;

    if (pages != NULL) {
        kfree(pages);
    }

    return 0;
}

int rga_set_mmu_info(struct rga_reg *reg, struct rga_req *req)
{
    int ret;

    switch (req->render_mode) {
        case bitblt_mode :
            ret = rga_mmu_info_BitBlt_mode(reg, req);
            break;
        case color_palette_mode :
            ret = rga_mmu_info_color_palette_mode(reg, req);
            break;
        case color_fill_mode :
            ret = rga_mmu_info_color_fill_mode(reg, req);
            break;
        case line_point_drawing_mode :
            ret = rga_mmu_info_line_point_drawing_mode(reg, req);
            break;
        case blur_sharp_filter_mode :
            ret = rga_mmu_info_blur_sharp_filter_mode(reg, req);
            break;
        case pre_scaling_mode :
            ret = rga_mmu_info_pre_scale_mode(reg, req);
            break;
        case update_palette_table_mode :
            ret = rga_mmu_info_update_palette_table_mode(reg, req);
            break;
        case update_patten_buff_mode :
            ret = rga_mmu_info_update_patten_buff_mode(reg, req);
            break;
        default :
            ret = -EINVAL;
            break;
    }

    return ret;
}
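
/*
 * Sketch of the intended call site (an assumption, not shown in this
 * file): the ioctl path fills a struct rga_req from user space and calls
 * rga_set_mmu_info(reg, req) before queueing the command, so every
 * buffer address in req has been rewritten to an index into the table
 * recorded in reg->MMU_base by the time the hardware fetches it.
 */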