#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <asm/memory.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>

#include "rga2_mmu_info.h"

extern struct rga2_service_info rga2_service;
extern struct rga2_mmu_buf_t rga2_mmu_buf;

#define KERNEL_SPACE_VALID	0xc0000000

#define V7_VATOPA_SUCCESS_MASK	(0x1)
#define V7_VATOPA_GET_PADDR(X)	((X) & 0xFFFFF000)
#define V7_VATOPA_GET_INER(X)	(((X) >> 4) & 7)
#define V7_VATOPA_GET_OUTER(X)	(((X) >> 2) & 3)
#define V7_VATOPA_GET_SH(X)	(((X) >> 7) & 1)
#define V7_VATOPA_GET_NS(X)	(((X) >> 9) & 1)
#define V7_VATOPA_GET_SS(X)	(((X) >> 1) & 1)

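/*
 * Flush the CPU view of [pstart, pend) out to DRAM so the RGA2 device
 * reads current data. On ARM32 both the inner cache (dmac_flush_range)
 * and the outer cache (outer_flush_range) must be cleaned; on ARM64
 * __dma_flush_range covers the whole cache hierarchy.
 */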
static void rga_dma_flush_range(void *pstart, void *pend)
{
#ifdef CONFIG_ARM
	dmac_flush_range(pstart, pend);
	outer_flush_range(virt_to_phys(pstart), virt_to_phys(pend));
#elif defined(CONFIG_ARM64)
	__dma_flush_range(pstart, pend);
#endif
}

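/*
 * Translate a virtual address with the ARMv7 ATS1CPR operation
 * (MCR p15, 0, Rt, c7, c8, 0) and read the result back from the PAR
 * register (MRC p15, 0, Rt, c7, c4, 0). The V7_VATOPA_* macros above
 * decode PAR: bit 0 is the translation-fault flag, bit 1 the
 * supersection flag, and bits [31:12] hold the physical page address.
 */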
static unsigned int armv7_va_to_pa(unsigned int v_addr)
{
	unsigned int p_addr;

	__asm__ volatile ("mcr p15, 0, %1, c7, c8, 0\n"
			  "isb\n"
			  "dsb\n"
			  "mrc p15, 0, %0, c7, c4, 0\n"
			  : "=r" (p_addr)
			  : "r" (v_addr)
			  : "cc");

	if (p_addr & V7_VATOPA_SUCCESS_MASK)
		return 0xFFFFFFFF;

	return V7_VATOPA_GET_SS(p_addr) ? 0xFFFFFFFF : V7_VATOPA_GET_PADDR(p_addr);
}

static int rga2_mmu_buf_get(struct rga2_mmu_buf_t *t, uint32_t size)
{
	mutex_lock(&rga2_service.lock);
	t->front += size;
	mutex_unlock(&rga2_service.lock);

	return 0;
}

static int rga2_mmu_buf_get_try(struct rga2_mmu_buf_t *t, uint32_t size)
{
	int ret = 0;

	mutex_lock(&rga2_service.lock);
	if ((t->back - t->front) > t->size) {
		if (t->front + size > t->back - t->size) {
			pr_info("front %d, back %d dsize %d size %d", t->front, t->back, t->size, size);
			ret = -ENOMEM;
			goto out;
		}
	} else {
		if ((t->front + size) > t->back) {
			pr_info("front %d, back %d dsize %d size %d", t->front, t->back, t->size, size);
			ret = -ENOMEM;
			goto out;
		}
		if (t->front + size > t->size) {
			if (size > (t->back - t->size)) {
				pr_info("front %d, back %d dsize %d size %d", t->front, t->back, t->size, size);
				ret = -ENOMEM;
				goto out;
			}
			t->front = 0;
		}
	}
out:
	mutex_unlock(&rga2_service.lock);
	return ret;
}

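/*
 * Count the 4 KiB pages touched by the byte range [Mem, Mem + MemSize)
 * and report the first page index through *StartAddr. For example,
 * Mem = 0x10800 and MemSize = 0x1000 give start = 0x10, end = 0x12,
 * so 2 pages.
 */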
static int rga2_mem_size_cal(unsigned long Mem, uint32_t MemSize, unsigned long *StartAddr)
{
	unsigned long start, end;
	uint32_t pageCount;

	end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;
	start = Mem >> PAGE_SHIFT;
	pageCount = end - start;
	*StartAddr = start;

	return pageCount;
}

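/*
 * Per-format page-count helper. Strides are rounded up to 4 bytes; for
 * multi-plane YUV the counted range runs from the lowest plane address
 * to the end of the highest plane. For example, a 1920x1080
 * YCbCr_420_SP (NV12) buffer with contiguous planes has
 * size_yrgb = 1920 * 1080 and size_uv = 1920 * 540 bytes, roughly
 * 760 pages.
 */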
static int rga2_buf_size_cal(unsigned long yrgb_addr, unsigned long uv_addr, unsigned long v_addr,
			     int format, uint32_t w, uint32_t h, unsigned long *StartAddr)
{
	uint32_t size_yrgb = 0;
	uint32_t size_uv = 0;
	uint32_t size_v = 0;
	uint32_t stride = 0;
	unsigned long start, end;
	uint32_t pageCount;

	switch (format) {
	case RGA2_FORMAT_RGBA_8888:
		stride = (w * 4 + 3) & (~3);
		size_yrgb = stride * h;
		start = yrgb_addr >> PAGE_SHIFT;
		pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
		break;
	case RGA2_FORMAT_RGBX_8888:
		stride = (w * 4 + 3) & (~3);
		size_yrgb = stride * h;
		start = yrgb_addr >> PAGE_SHIFT;
		pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
		break;
	case RGA2_FORMAT_RGB_888:
		stride = (w * 3 + 3) & (~3);
		size_yrgb = stride * h;
		start = yrgb_addr >> PAGE_SHIFT;
		pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
		break;
	case RGA2_FORMAT_BGRA_8888:
		size_yrgb = w * h * 4;
		start = yrgb_addr >> PAGE_SHIFT;
		pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
		break;
	case RGA2_FORMAT_RGB_565:
		stride = (w * 2 + 3) & (~3);
		size_yrgb = stride * h;
		start = yrgb_addr >> PAGE_SHIFT;
		pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
		break;
	case RGA2_FORMAT_RGBA_5551:
		stride = (w * 2 + 3) & (~3);
		size_yrgb = stride * h;
		start = yrgb_addr >> PAGE_SHIFT;
		pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
		break;
	case RGA2_FORMAT_RGBA_4444:
		stride = (w * 2 + 3) & (~3);
		size_yrgb = stride * h;
		start = yrgb_addr >> PAGE_SHIFT;
		pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
		break;
	case RGA2_FORMAT_BGR_888:
		stride = (w * 3 + 3) & (~3);
		size_yrgb = stride * h;
		start = yrgb_addr >> PAGE_SHIFT;
		pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
		break;

	/* YUV formats */
	case RGA2_FORMAT_YCbCr_422_SP:
	case RGA2_FORMAT_YCrCb_422_SP:
		stride = (w + 3) & (~3);
		size_yrgb = stride * h;
		size_uv = stride * h;
		start = MIN(yrgb_addr, uv_addr);
		start >>= PAGE_SHIFT;
		end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
		end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		pageCount = end - start;
		break;
	case RGA2_FORMAT_YCbCr_422_P:
	case RGA2_FORMAT_YCrCb_422_P:
		stride = (w + 3) & (~3);
		size_yrgb = stride * h;
		size_uv = ((stride >> 1) * h);
		size_v = ((stride >> 1) * h);
		start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
		start = start >> PAGE_SHIFT;
		end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
		end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		pageCount = end - start;
		break;
	case RGA2_FORMAT_YCbCr_420_SP:
	case RGA2_FORMAT_YCrCb_420_SP:
		stride = (w + 3) & (~3);
		size_yrgb = stride * h;
		size_uv = (stride * (h >> 1));
		start = MIN(yrgb_addr, uv_addr);
		start >>= PAGE_SHIFT;
		end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
		end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		pageCount = end - start;
		break;
	case RGA2_FORMAT_YCbCr_420_P:
	case RGA2_FORMAT_YCrCb_420_P:
		stride = (w + 3) & (~3);
		size_yrgb = stride * h;
		size_uv = ((stride >> 1) * (h >> 1));
		size_v = ((stride >> 1) * (h >> 1));
		start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
		start >>= PAGE_SHIFT;
		end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
		end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		pageCount = end - start;
		break;

	case RK_FORMAT_BPP1:
	case RK_FORMAT_BPP2:
	case RK_FORMAT_BPP4:
	case RK_FORMAT_BPP8:
		/* palette-index formats: no pixel buffer is mapped here */
		start = 0;
		pageCount = 0;
		break;

	case RGA2_FORMAT_YCbCr_420_SP_10B:
	case RGA2_FORMAT_YCrCb_420_SP_10B:
		stride = (w + 3) & (~3);
		size_yrgb = stride * h;
		size_uv = (stride * (h >> 1));
		start = MIN(yrgb_addr, uv_addr);
		start >>= PAGE_SHIFT;
		end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
		end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		pageCount = end - start;
		break;

	default:
		start = 0;
		pageCount = 0;
		break;
	}

	*StartAddr = start;
	return pageCount;
}

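/*
 * Pin a user-space buffer and write one physical address per 4 KiB
 * page into 'pageTable'. The fast path pins the whole range with
 * get_user_pages(); if that fails to cover every page, the code falls
 * back to a manual pgd/pud/pmd/pte walk. Each mapped page is flushed
 * through the kernel linear mapping so DRAM is current before the
 * hardware reads it.
 */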
static int rga2_MapUserMemory(struct page **pages, uint32_t *pageTable,
			      unsigned long Memory, uint32_t pageCount,
			      int writeFlag)
{
	struct vm_area_struct *vma;
	unsigned long Address, pfn;
	uint32_t i, status = 0;
	int32_t result;
	void *virt;
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	down_read(&current->mm->mmap_sem);
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
	result = get_user_pages(current, current->mm, Memory << PAGE_SHIFT,
				pageCount, writeFlag, 0, pages, NULL);
#else
	result = get_user_pages_remote(current, current->mm,
				       Memory << PAGE_SHIFT,
				       pageCount, writeFlag, 0, pages, NULL);
#endif
	if (result > 0 && result >= pageCount) {
		/* Fill the page table. */
		for (i = 0; i < pageCount; i++) {
			/* Get the physical address from page struct. */
			pageTable[i] = page_to_phys(pages[i]);
			virt = phys_to_virt(pageTable[i]);
			rga_dma_flush_range(virt, virt + 4 * 1024);
		}
		for (i = 0; i < result; i++)
			put_page(pages[i]);
		up_read(&current->mm->mmap_sem);
		return 0;
	}
	if (result > 0)
		for (i = 0; i < result; i++)
			put_page(pages[i]);

	/* Fall back to walking the page tables by hand. */
	for (i = 0; i < pageCount; i++) {
		vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);
		if (!vma) {
			status = RGA2_OUT_OF_RESOURCES;
			break;
		}
		pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
		if (pgd_val(*pgd) == 0) {
			status = RGA2_OUT_OF_RESOURCES;
			break;
		}
		pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
		if (pud_none(*pud)) {
			status = RGA2_OUT_OF_RESOURCES;
			break;
		}
		pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
		if (pmd_none(*pmd)) {
			status = RGA2_OUT_OF_RESOURCES;
			break;
		}
		pte = pte_offset_map_lock(current->mm, pmd,
					  (Memory + i) << PAGE_SHIFT,
					  &ptl);
		if (pte_none(*pte)) {
			pte_unmap_unlock(pte, ptl);
			status = RGA2_OUT_OF_RESOURCES;
			break;
		}
		pfn = pte_pfn(*pte);
		Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i)
			   << PAGE_SHIFT)) & ~PAGE_MASK));
		pte_unmap_unlock(pte, ptl);
		pageTable[i] = (uint32_t)Address;
		virt = phys_to_virt(pageTable[i]);
		rga_dma_flush_range(virt, virt + 4 * 1024);
	}
	up_read(&current->mm->mmap_sem);
	return status;
}

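/*
 * Fill page-table entries from an ION/dma-buf scatterlist: every
 * segment is split into 4 KiB pages and written into 'Memory' until
 * 'pageCount' entries are produced or the list is exhausted.
 */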
static int rga2_MapION(struct sg_table *sg,
		       uint32_t *Memory,
		       int32_t pageCount)
{
	uint32_t i;
	unsigned long Address;
	uint32_t mapped_size = 0;
	uint32_t len;
	struct scatterlist *sgl = sg->sgl;
	uint32_t sg_num = 0;
	uint32_t break_flag = 0;

	do {
		len = sg_dma_len(sgl) >> PAGE_SHIFT;
		Address = sg_phys(sgl);
		for (i = 0; i < len; i++) {
			if (mapped_size + i >= pageCount) {
				break_flag = 1;
				break;
			}
			Memory[mapped_size + i] = (uint32_t)(Address + (i << PAGE_SHIFT));
		}
		if (break_flag)
			break;
		mapped_size += len;
		sg_num += 1;
	} while ((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));

	return 0;
}

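/*
 * BitBlt: compute the page counts of src0, src1 and dst, reserve one
 * contiguous window in the MMU ring buffer, map each buffer into it,
 * then rewrite the addresses in 'req' as page offsets relative to the
 * new tables so the hardware MMU can resolve them.
 */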
static int rga2_mmu_info_BitBlt_mode(struct rga2_reg *reg, struct rga2_req *req)
{
	int Src0MemSize, DstMemSize, Src1MemSize;
	unsigned long Src0Start, Src1Start, DstStart;
	unsigned long Src0PageCount, Src1PageCount, DstPageCount;
	uint32_t AllSize;
	uint32_t *MMU_Base, *MMU_Base_phys;
	int ret;
	int status;
	uint32_t uv_size, v_size;
	struct page **pages = NULL;

	MMU_Base = NULL;
	Src0PageCount = 0;
	Src1PageCount = 0;
	DstPageCount = 0;

	/* cal src0 buf mmu info */
	if (req->mmu_info.src0_mmu_flag & 1) {
		Src0PageCount = rga2_buf_size_cal(req->src.yrgb_addr,
						  req->src.uv_addr, req->src.v_addr,
						  req->src.format, req->src.vir_w,
						  req->src.vir_h, &Src0Start);
		if (Src0PageCount == 0)
			return -EINVAL;
	}
	/* cal src1 buf mmu info */
	if (req->mmu_info.src1_mmu_flag & 1) {
		Src1PageCount = rga2_buf_size_cal(req->src1.yrgb_addr,
						  req->src1.uv_addr, req->src1.v_addr,
						  req->src1.format, req->src1.vir_w,
						  req->src1.vir_h, &Src1Start);
		Src1PageCount = (Src1PageCount + 3) & (~3);
		if (Src1PageCount == 0)
			return -EINVAL;
	}
	/* cal dst buf mmu info */
	if (req->mmu_info.dst_mmu_flag & 1) {
		DstPageCount = rga2_buf_size_cal(req->dst.yrgb_addr,
						 req->dst.uv_addr, req->dst.v_addr,
						 req->dst.format, req->dst.vir_w,
						 req->dst.vir_h, &DstStart);
		if (DstPageCount == 0)
			return -EINVAL;
	}
	/* Cal out the needed mem size */
	Src0MemSize = (Src0PageCount + 15) & (~15);
	Src1MemSize = (Src1PageCount + 15) & (~15);
	DstMemSize = (DstPageCount + 15) & (~15);
	AllSize = Src0MemSize + Src1MemSize + DstMemSize;

	if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
		pr_err("RGA2 Get MMU mem failed\n");
		status = RGA2_MALLOC_ERROR;
		goto out;
	}

	pages = rga2_mmu_buf.pages;
	mutex_lock(&rga2_service.lock);
	MMU_Base = rga2_mmu_buf.buf_virtual +
		   (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
	MMU_Base_phys = rga2_mmu_buf.buf +
			(rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
	mutex_unlock(&rga2_service.lock);

	if (Src0MemSize) {
		if (req->sg_src0)
			ret = rga2_MapION(req->sg_src0,
					  &MMU_Base[0], Src0MemSize);
		else
			ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0],
						 Src0Start, Src0PageCount, 0);
		if (ret < 0) {
			pr_err("rga2 map src0 memory failed\n");
			status = ret;
			goto out;
		}
		/* change the buf address in req struct */
		req->mmu_info.src0_base_addr = ((unsigned long)MMU_Base_phys);
		uv_size = (req->src.uv_addr
			   - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;
		v_size = (req->src.v_addr
			  - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;

		req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
		req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) |
				   (uv_size << PAGE_SHIFT);
		req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) |
				  (v_size << PAGE_SHIFT);
	}

	if (Src1MemSize) {
		if (req->sg_src1)
			ret = rga2_MapION(req->sg_src1,
					  MMU_Base + Src0MemSize, Src1MemSize);
		else
			ret = rga2_MapUserMemory(&pages[0],
						 MMU_Base + Src0MemSize,
						 Src1Start, Src1PageCount, 0);
		if (ret < 0) {
			pr_err("rga2 map src1 memory failed\n");
			status = ret;
			goto out;
		}
		/* change the buf address in req struct */
		req->mmu_info.src1_base_addr = ((unsigned long)(MMU_Base_phys
								+ Src0MemSize));
		req->src1.yrgb_addr = (req->src1.yrgb_addr & (~PAGE_MASK));
	}

	if (DstMemSize) {
		if (req->sg_dst)
			ret = rga2_MapION(req->sg_dst, MMU_Base + Src0MemSize
					  + Src1MemSize, DstMemSize);
		else
			ret = rga2_MapUserMemory(&pages[0], MMU_Base
						 + Src0MemSize + Src1MemSize,
						 DstStart, DstPageCount, 1);
		if (ret < 0) {
			pr_err("rga2 map dst memory failed\n");
			status = ret;
			goto out;
		}
		/* change the buf address in req struct */
		req->mmu_info.dst_base_addr = ((unsigned long)(MMU_Base_phys
					       + Src0MemSize + Src1MemSize));
		req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
		uv_size = (req->dst.uv_addr
			   - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
		v_size = (req->dst.v_addr
			  - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
		req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) |
				   (uv_size << PAGE_SHIFT);
		req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) |
				  (v_size << PAGE_SHIFT);

		if (((req->alpha_rop_flag & 1) == 1) && (req->bitblt_mode == 0)) {
			req->mmu_info.src1_base_addr = req->mmu_info.dst_base_addr;
			req->mmu_info.src1_mmu_flag = req->mmu_info.dst_mmu_flag;
		}
	}

	/* flush data to DDR */
	rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
	rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
	reg->MMU_len = AllSize;

	return 0;
out:
	return status;
}

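/*
 * Color-palette mode: the source holds palette indices, so its size in
 * bytes is vir_w * vir_h >> shift with shift = 3 - (palette_mode & 3);
 * palette_mode 0 packs eight 1-bit pixels per byte, palette_mode 3 is
 * one 8-bit pixel per byte.
 */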
static int rga2_mmu_info_color_palette_mode(struct rga2_reg *reg, struct rga2_req *req)
{
	int SrcMemSize, DstMemSize;
	unsigned long SrcStart, DstStart;
	struct page **pages = NULL;
	uint32_t AllSize;
	uint32_t *MMU_Base = NULL, *MMU_Base_phys;
	int ret, status;
	uint32_t stride;
	uint8_t shift;
	uint16_t sw, byte_num;

	shift = 3 - (req->palette_mode & 3);
	sw = req->src.vir_w * req->src.vir_h;
	byte_num = sw >> shift;
	stride = (byte_num + 3) & (~3);

	SrcMemSize = 0;
	DstMemSize = 0;
	SrcStart = 0;
	DstStart = 0;

	if (req->mmu_info.src0_mmu_flag) {
		SrcMemSize = rga2_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
		if (SrcMemSize == 0)
			return -EINVAL;
	}

	if (req->mmu_info.dst_mmu_flag) {
		DstMemSize = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
					       req->dst.format, req->dst.vir_w, req->dst.vir_h,
					       &DstStart);
		if (DstMemSize == 0)
			return -EINVAL;
	}

	SrcMemSize = (SrcMemSize + 15) & (~15);
	DstMemSize = (DstMemSize + 15) & (~15);

	AllSize = SrcMemSize + DstMemSize;

	if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
		pr_err("RGA2 Get MMU mem failed\n");
		status = RGA2_MALLOC_ERROR;
		goto out;
	}

	pages = rga2_mmu_buf.pages;
	if (pages == NULL) {
		pr_err("RGA MMU malloc pages mem failed\n");
		return -EINVAL;
	}

	mutex_lock(&rga2_service.lock);
	MMU_Base = rga2_mmu_buf.buf_virtual + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
	MMU_Base_phys = rga2_mmu_buf.buf + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
	mutex_unlock(&rga2_service.lock);

	if (SrcMemSize) {
		ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0],
					 SrcStart, SrcMemSize, 0);
		if (ret < 0) {
			pr_err("rga2 map src0 memory failed\n");
			status = ret;
			goto out;
		}

		/* change the buf address in req struct */
		req->mmu_info.src0_base_addr = ((unsigned long)MMU_Base_phys);
		req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
	}

	if (DstMemSize) {
		ret = rga2_MapUserMemory(&pages[0], MMU_Base + SrcMemSize,
					 DstStart, DstMemSize, 1);
		if (ret < 0) {
			pr_err("rga2 map dst memory failed\n");
			status = ret;
			goto out;
		}

		/* change the buf address in req struct */
		req->mmu_info.dst_base_addr = ((unsigned long)(MMU_Base_phys + SrcMemSize));
		req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
	}

	/* flush data to DDR */
	rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
	rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
	reg->MMU_len = AllSize;

	return 0;
out:
	return status;
}

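/*
 * Color-fill mode only writes to dst, so just the destination buffer
 * is mapped (with writeFlag = 1).
 */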
static int rga2_mmu_info_color_fill_mode(struct rga2_reg *reg, struct rga2_req *req)
{
	int DstMemSize;
	unsigned long DstStart;
	struct page **pages = NULL;
	uint32_t AllSize;
	uint32_t *MMU_Base, *MMU_Base_phys;
	int ret;
	int status;

	MMU_Base = NULL;
	DstMemSize = 0;
	DstStart = 0;

	if (req->mmu_info.dst_mmu_flag & 1) {
		DstMemSize = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
					       req->dst.format, req->dst.vir_w, req->dst.vir_h,
					       &DstStart);
		if (DstMemSize == 0)
			return -EINVAL;
	}

	AllSize = (DstMemSize + 15) & (~15);

	pages = rga2_mmu_buf.pages;

	if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
		pr_err("RGA2 Get MMU mem failed\n");
		status = RGA2_MALLOC_ERROR;
		goto out;
	}

	mutex_lock(&rga2_service.lock);
	MMU_Base_phys = rga2_mmu_buf.buf + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
	MMU_Base = rga2_mmu_buf.buf_virtual + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
	mutex_unlock(&rga2_service.lock);

	if (DstMemSize) {
		if (req->sg_dst)
			ret = rga2_MapION(req->sg_dst, &MMU_Base[0], DstMemSize);
		else
			ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0],
						 DstStart, DstMemSize, 1);
		if (ret < 0) {
			pr_err("rga2 map dst memory failed\n");
			status = ret;
			goto out;
		}

		/* change the buf address in req struct */
		req->mmu_info.dst_base_addr = ((unsigned long)MMU_Base_phys);
		req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
	}

	/* flush data to DDR */
	rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
	rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
	reg->MMU_len = AllSize;

	return 0;
out:
	return status;
}

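/*
 * Map the palette table (req->pat, vir_w * vir_h bytes) for reading
 * before the hardware loads it.
 */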
static int rga2_mmu_info_update_palette_table_mode(struct rga2_reg *reg, struct rga2_req *req)
{
	int SrcMemSize;
	unsigned long SrcStart;
	struct page **pages = NULL;
	uint32_t AllSize;
	uint32_t *MMU_Base, *MMU_Base_phys;
	int ret, status = 0;

	MMU_Base = NULL;

	/* cal src buf mmu info */
	SrcMemSize = rga2_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h, &SrcStart);
	if (SrcMemSize == 0)
		return -EINVAL;

	SrcMemSize = (SrcMemSize + 15) & (~15);
	AllSize = SrcMemSize;

	if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
		pr_err("RGA2 Get MMU mem failed\n");
		status = RGA2_MALLOC_ERROR;
		goto out;
	}

	mutex_lock(&rga2_service.lock);
	MMU_Base = rga2_mmu_buf.buf_virtual + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
	MMU_Base_phys = rga2_mmu_buf.buf + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
	mutex_unlock(&rga2_service.lock);

	pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL) {
		status = RGA2_MALLOC_ERROR;
		goto out;
	}

	ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0],
				 SrcStart, SrcMemSize, 0);
	if (ret < 0) {
		pr_err("rga2 map palette memory failed\n");
		status = ret;
		goto out;
	}

	/* change the buf address in req struct */
	req->mmu_info.src0_base_addr = ((unsigned long)MMU_Base_phys);
	req->pat.yrgb_addr = (req->pat.yrgb_addr & (~PAGE_MASK));

	/* flush data to DDR */
	rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
	rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
	reg->MMU_len = AllSize;

out:
	kfree(pages);
	return status;
}

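/*
 * Update-pattern-buffer mode builds a standalone table (kzalloc'd
 * instead of taken from the ring buffer): CMDMemSize entries covering
 * the kernel command buffer followed by SrcMemSize entries for the
 * pattern. The table's physical address is handed to the hardware
 * shifted right by 2, i.e. in 4-byte units.
 */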
static int rga2_mmu_info_update_patten_buff_mode(struct rga2_reg *reg, struct rga2_req *req)
{
	int SrcMemSize, CMDMemSize;
	unsigned long SrcStart, CMDStart;
	struct page **pages = NULL;
	uint32_t i;
	uint32_t AllSize;
	uint32_t *MMU_Base, *MMU_p;
	int ret, status;

	MMU_Base = MMU_p = NULL;

	/* cal src buf mmu info */
	SrcMemSize = rga2_mem_size_cal(req->pat.yrgb_addr, req->pat.act_w * req->pat.act_h * 4, &SrcStart);
	if (SrcMemSize == 0)
		return -EINVAL;

	/* cal cmd buf mmu info */
	CMDMemSize = rga2_mem_size_cal((unsigned long)rga2_service.cmd_buff, RGA2_CMD_BUF_SIZE, &CMDStart);
	if (CMDMemSize == 0)
		return -EINVAL;

	AllSize = SrcMemSize + CMDMemSize;

	pages = rga2_mmu_buf.pages;

	MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
	if (MMU_Base == NULL)
		return -ENOMEM;

	for (i = 0; i < CMDMemSize; i++)
		MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));

	if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
		ret = rga2_MapUserMemory(&pages[CMDMemSize],
					 &MMU_Base[CMDMemSize],
					 SrcStart, SrcMemSize, 1);
		if (ret < 0) {
			pr_err("rga map src memory failed\n");
			status = ret;
			goto out;
		}
	} else {
		MMU_p = MMU_Base + CMDMemSize;
		for (i = 0; i < SrcMemSize; i++)
			MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
	}

	/*
	 * change the buf address in req struct so the hardware MMU
	 * resolves addresses through the table just built
	 */
	req->mmu_info.src0_base_addr = (virt_to_phys(MMU_Base) >> 2);
	req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

	/* record the malloc buf so it can be released when the cmd ends */
	reg->MMU_base = MMU_Base;

	/* flush data to DDR */
	rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
	return 0;

out:
	kfree(MMU_Base);
	return status;
}

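/* Entry point: dispatch to the MMU setup routine for the render mode. */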
int rga2_set_mmu_info(struct rga2_reg *reg, struct rga2_req *req)
{
	int ret;

	switch (req->render_mode) {
	case bitblt_mode:
		ret = rga2_mmu_info_BitBlt_mode(reg, req);
		break;
	case color_palette_mode:
		ret = rga2_mmu_info_color_palette_mode(reg, req);
		break;
	case color_fill_mode:
		ret = rga2_mmu_info_color_fill_mode(reg, req);
		break;
	case update_palette_table_mode:
		ret = rga2_mmu_info_update_palette_table_mode(reg, req);
		break;
	case update_patten_buff_mode:
		ret = rga2_mmu_info_update_patten_buff_mode(reg, req);
		break;
	default:
		ret = -1;
		break;
	}

	return ret;
}