#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/memory.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>

#include "rga_mmu_info.h"
extern rga_service_info rga_service;
extern struct rga_mmu_buf_t rga_mmu_buf;

#define KERNEL_SPACE_VALID    0xc0000000
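
/*
 * rga_mmu_buf is a ring of page-table entries shared by every RGA job.
 * As the callers below show, a job reserves room before building its
 * table and commits it once the job is queued:
 *
 *	if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16))
 *		return RGA_MALLOC_ERROR;
 *	...fill entries at rga_mmu_buf.front...
 *	rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
 */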
static int rga_mmu_buf_get(struct rga_mmu_buf_t *t, uint32_t size)
{
    mutex_lock(&rga_service.lock);
    t->front += size;   /* commit `size` entries (body reconstructed from the callers) */
    mutex_unlock(&rga_service.lock);

    return 0;
}
static int rga_mmu_buf_get_try(struct rga_mmu_buf_t *t, uint32_t size)
{
    int ret = 0;

    mutex_lock(&rga_service.lock);
    if ((t->back - t->front) > t->size) {
        if (t->front + size > t->back - t->size)
            ret = -ENOMEM;
    } else if ((t->front + size) > t->back) {
        ret = -ENOMEM;
    } else if (t->front + size > t->size) {
        /* Not enough room before the end of the ring: wrap to the start
         * if the wrapped region is free.  The branch bodies between the
         * checks are reconstructed; a single unlock point also avoids
         * leaving the mutex held on the failure paths. */
        if (size > (t->back - t->size))
            ret = -ENOMEM;
        else
            t->front = 0;
    }
    mutex_unlock(&rga_service.lock);

    return ret;
}
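
/*
 * Page count for a flat buffer: round [Mem, Mem + MemSize) out to page
 * boundaries.  Worked example (4 KiB pages): Mem = 0x10000800,
 * MemSize = 0x2000 touches pages 0x10000..0x10002, so this returns 3
 * with *StartAddr = 0x10000.
 */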
static int rga_mem_size_cal(unsigned long Mem, uint32_t MemSize, unsigned long *StartAddr)
{
    unsigned long start, end;
    uint32_t pageCount;

    end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;
    start = Mem >> PAGE_SHIFT;
    pageCount = end - start;
    *StartAddr = start;

    return pageCount;
}
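
/*
 * Size an image by pixel format.  RGB formats span one range from
 * yrgb_addr; planar and semi-planar YUV formats take the lowest plane
 * start and the highest plane end.  For a hypothetical 1280x720
 * RK_FORMAT_YCbCr_420_SP image with contiguous page-aligned planes:
 * stride = 1280, size_yrgb = 921600, size_uv = 460800, so
 * pageCount = (921600 + 460800 + 4095) >> 12 = 338.
 */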
static int rga_buf_size_cal(unsigned long yrgb_addr, unsigned long uv_addr, unsigned long v_addr,
                            int format, uint32_t w, uint32_t h, unsigned long *StartAddr)
{
    uint32_t size_yrgb = 0;
    uint32_t size_uv = 0;
    uint32_t size_v = 0;
    uint32_t stride = 0;
    uint32_t pageCount = 0;
    unsigned long start = 0, end;

    switch (format) {
    case RK_FORMAT_RGBA_8888:
        stride = (w * 4 + 3) & (~3);
        size_yrgb = stride * h;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
        break;
    case RK_FORMAT_RGBX_8888:
        stride = (w * 4 + 3) & (~3);
        size_yrgb = stride * h;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
        break;
    case RK_FORMAT_RGB_888:
        stride = (w * 3 + 3) & (~3);
        size_yrgb = stride * h;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
        break;
    case RK_FORMAT_BGRA_8888:
        size_yrgb = w * h * 4;  /* reconstructed: this line is elided in the excerpt */
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
        break;
    case RK_FORMAT_RGB_565:
        stride = (w * 2 + 3) & (~3);
        size_yrgb = stride * h;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
        break;
    case RK_FORMAT_RGBA_5551:
        stride = (w * 2 + 3) & (~3);
        size_yrgb = stride * h;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
        break;
    case RK_FORMAT_RGBA_4444:
        stride = (w * 2 + 3) & (~3);
        size_yrgb = stride * h;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
        break;
    case RK_FORMAT_BGR_888:
        stride = (w * 3 + 3) & (~3);
        size_yrgb = stride * h;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
        break;

    /* YUV formats */
    case RK_FORMAT_YCbCr_422_SP:
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = stride * h;
        start = MIN(yrgb_addr, uv_addr);
        start >>= PAGE_SHIFT;
        end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_YCbCr_422_P:
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = (stride >> 1) * h;
        size_v = (stride >> 1) * h;
        start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
        start >>= PAGE_SHIFT;
        end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_YCbCr_420_SP:
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = stride * (h >> 1);
        start = MIN(yrgb_addr, uv_addr);
        start >>= PAGE_SHIFT;
        end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_YCbCr_420_P:
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = (stride >> 1) * (h >> 1);
        size_v = (stride >> 1) * (h >> 1);
        start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
        start >>= PAGE_SHIFT;
        end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;

    case RK_FORMAT_YCrCb_422_SP:
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = stride * h;
        start = MIN(yrgb_addr, uv_addr);
        start >>= PAGE_SHIFT;
        end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_YCrCb_422_P:
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = (stride >> 1) * h;
        size_v = (stride >> 1) * h;
        start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
        start >>= PAGE_SHIFT;
        end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_YCrCb_420_SP:
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = stride * (h >> 1);
        start = MIN(yrgb_addr, uv_addr);
        start >>= PAGE_SHIFT;
        end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RK_FORMAT_YCrCb_420_P:
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = (stride >> 1) * (h >> 1);
        size_v = (stride >> 1) * (h >> 1);
        start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
        start >>= PAGE_SHIFT;
        end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;

    /* BPP palette formats map no pages here (case bodies reconstructed
     * as empty, matching the one-line gaps in the excerpt) */
    case RK_FORMAT_BPP1:
        break;
    case RK_FORMAT_BPP2:
        break;
    case RK_FORMAT_BPP4:
        break;
    case RK_FORMAT_BPP8:
        break;
    default:
        pageCount = 0;
        start = 0;
        break;
    }

    *StartAddr = start;
    return pageCount;
}
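
/*
 * Pin the user pages backing pageCount pages starting at page frame
 * `Memory` and write their physical addresses into pageTable[].  If
 * get_user_pages() cannot pin the whole range (e.g. a VM_PFNMAP
 * mapping with no struct page behind it), fall back to walking the
 * process page tables directly.
 */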
static int rga_MapUserMemory(struct page **pages,
                             uint32_t *pageTable,
                             unsigned long Memory,
                             uint32_t pageCount)
{
    int32_t result;
    uint32_t i;
    uint32_t status = 0;
    unsigned long Address = 0;

    down_read(&current->mm->mmap_sem);
    result = get_user_pages(current, current->mm,
                            Memory << PAGE_SHIFT,
                            pageCount,
                            1, 0, pages, NULL); /* write = 1, force = 0 (args restored from the old GUP signature) */
    up_read(&current->mm->mmap_sem);

#if 0
    /* This armv7_va_to_pa() fallback appears to be compiled out in
     * favour of the page-table walk below. */
    if (result <= 0 || result < pageCount) {
        uint32_t temp;

        status = 0;
        for (i = 0; i < pageCount; i++) {
            temp = armv7_va_to_pa((Memory + i) << PAGE_SHIFT);
            if (temp == 0xffffffff) {
                printk("rga find mmu phy ddr error\n");
                status = RGA_OUT_OF_RESOURCES;
                break;
            }
            pageTable[i] = temp;
        }
        return status;
    }
#endif

    if (result <= 0 || result < pageCount) {
        struct vm_area_struct *vma;

        /* Release whatever get_user_pages() did pin. */
        if (result > 0) {
            down_read(&current->mm->mmap_sem);
            for (i = 0; i < result; i++)
                put_page(pages[i]);
            up_read(&current->mm->mmap_sem);
        }

        for (i = 0; i < pageCount; i++) {
            vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);
            if (vma) /* && (vma->vm_flags & VM_PFNMAP) */ {
                pte_t *pte;
                spinlock_t *ptl;
                unsigned long pfn;
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;

                pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
                if (pgd_val(*pgd) == 0) {
                    /* printk("rga pgd value is zero\n"); */
                    break;
                }

                pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
                if (!pud)
                    break;
                pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
                if (!pmd)
                    break;
                pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);
                if (!pte) {
                    pte_unmap_unlock(pte, ptl);
                    break;
                }

                pfn = pte_pfn(*pte);
                Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
                pte_unmap_unlock(pte, ptl);
            } else {
                status = RGA_OUT_OF_RESOURCES;
                break;
            }

            pageTable[i] = Address;
        }

        return status;
    }

    /* Fill the page table. */
    for (i = 0; i < pageCount; i++) {
        /* Get the physical address from the page struct. */
        pageTable[i] = page_to_phys(pages[i]);
    }

    down_read(&current->mm->mmap_sem);
    for (i = 0; i < result; i++)
        put_page(pages[i]);
    up_read(&current->mm->mmap_sem);

    return 0;
}
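
/*
 * Flatten an ION/dma-buf scatterlist into the RGA page table: Memory[]
 * receives one physical address per 4 KiB page, skipping `offset` bytes
 * into the buffer first.  The parameter list is restored from the call
 * sites in this excerpt.
 */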
static int rga_MapION(struct sg_table *sg,
                      uint32_t *Memory,
                      int32_t pageCount,
                      uint32_t offset)
{
    uint32_t i;
    uint32_t status = 0;
    unsigned long Address = 0;
    uint32_t mapped_size = 0;
    uint32_t len = 0;
    struct scatterlist *sgl = sg->sgl;
    uint32_t sg_num = 0;

    offset = offset >> PAGE_SHIFT;
    if (offset != 0) {
        /* Skip the scatterlist entries that lie wholly before `offset`
         * (both counted in pages).  The glue between the loops in this
         * branch is reconstructed around the statements that survive
         * in the excerpt. */
        do {
            len += (sg_dma_len(sgl) >> PAGE_SHIFT);
            if (len == offset) {
                /* offset falls exactly on an entry boundary */
                sg_num += 1;
                sgl = sg_next(sgl);
                offset = 0;
                break;
            }
            if (len > offset) {
                /* offset falls inside this entry */
                offset -= (len - (sg_dma_len(sgl) >> PAGE_SHIFT));
                break;
            }
            sg_num += 1;
        } while ((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));

        /* Map the remainder of the entry containing `offset`... */
        len = sg_dma_len(sgl) >> PAGE_SHIFT;
        Address = sg_phys(sgl);

        for (i = offset; i < len; i++)
            Memory[i - offset] = Address + (i << PAGE_SHIFT);

        mapped_size += (len - offset);
        sg_num += 1;
        sgl = sg_next(sgl);

        /* ...then the following entries whole. */
        if (sgl) {
            do {
                len = sg_dma_len(sgl) >> PAGE_SHIFT;
                Address = sg_phys(sgl);

                for (i = 0; i < len; i++)
                    Memory[mapped_size + i] = Address + (i << PAGE_SHIFT);

                mapped_size += len;
                sg_num += 1;
            } while ((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));
        }
    } else {
        do {
            len = sg_dma_len(sgl) >> PAGE_SHIFT;
            Address = sg_phys(sgl);

            for (i = 0; i < len; i++)
                Memory[mapped_size + i] = Address + (i << PAGE_SHIFT);

            mapped_size += len;
            sg_num += 1;
        } while ((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));
    }

    return status;
}
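
/*
 * Bitblt: size src and dst, reserve SrcMemSize + DstMemSize (+16)
 * entries in rga_mmu_buf, map each side from its ION sg-table, pinned
 * user pages, or kernel/pre-scale buffer, then rewrite the request's
 * plane addresses as (table index << PAGE_SHIFT) | in-page offset and
 * flush the table to DDR for the RGA MMU.  The extra entry at
 * MMU_Base[AllSize] duplicates the last page, presumably to keep a
 * hardware prefetch past the end on a valid page.
 */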
static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    unsigned long SrcStart, DstStart;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;
    int ret;
    int status = 0;
    uint32_t uv_size, v_size;

    struct page **pages = NULL;

    MMU_Base = NULL;

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w,
                                      req->src.act_h + req->src.y_offset, &SrcStart);
        if (SrcMemSize == 0)
            return -EINVAL;

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0)
            return -EINVAL;

        /* cal out the needed mem size */
        SrcMemSize = (SrcMemSize + 15) & (~15);
        DstMemSize = (DstMemSize + 15) & (~15);
        AllSize = SrcMemSize + DstMemSize;

        if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
            pr_err("RGA Get MMU mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        mutex_lock(&rga_service.lock);
        MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
        MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
        mutex_unlock(&rga_service.lock);

        pages = rga_mmu_buf.pages;

        if ((req->mmu_info.mmu_flag >> 8) & 1) {
            if (req->sg_src) {
                ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize, req->line_draw_info.flag);
            } else {
                ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
                if (ret < 0) {
                    pr_err("rga map src memory failed\n");
                    status = ret;
                    break;
                }
            }
        } else {
            MMU_p = MMU_Base;

            if (req->src.yrgb_addr == (unsigned long)rga_service.pre_scale_buf) {
                for (i = 0; i < SrcMemSize; i++)
                    MMU_p[i] = rga_service.pre_scale_buf[i];
            } else {
                for (i = 0; i < SrcMemSize; i++)
                    MMU_p[i] = (uint32_t)((SrcStart + i) << PAGE_SHIFT);
            }
        }

        if ((req->mmu_info.mmu_flag >> 10) & 1) {
            if (req->sg_dst) {
                ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize, req->line_draw_info.line_width);
            } else {
                ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
                if (ret < 0) {
                    pr_err("rga map dst memory failed\n");
                    status = ret;
                    break;
                }
            }
        } else {
            MMU_p = MMU_Base + SrcMemSize;
            for (i = 0; i < DstMemSize; i++)
                MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /* change the buf address in req struct */
        req->mmu_info.base_addr = (unsigned long)MMU_Base_phys >> 2;

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);

        uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);

        /* flush data to DDR */
#if defined(CONFIG_ARM)
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));
#elif defined(CONFIG_ARM64)
        __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
#endif

        rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
        reg->MMU_len = AllSize + 16;

        return 0;
    } while (0);

    return status;
}
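
/*
 * Color palette: the table covers the command buffer, the sub-byte
 * indexed source (stride derived from palette_mode), and the
 * destination, laid out in that order.
 */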
static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    unsigned long SrcStart, DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base = NULL, *MMU_p, *MMU_Base_phys = NULL;
    int ret;
    int status = 0;
    uint32_t stride;

    uint8_t shift;
    uint16_t sw, byte_num;

    shift = 3 - (req->palette_mode & 3);
    sw = req->src.vir_w;
    byte_num = sw >> shift;
    stride = (byte_num + 3) & (~3);

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
        if (SrcMemSize == 0)
            return -EINVAL;

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0)
            return -EINVAL;

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0)
            return -EINVAL;

        SrcMemSize = (SrcMemSize + 15) & (~15);
        DstMemSize = (DstMemSize + 15) & (~15);
        CMDMemSize = (CMDMemSize + 15) & (~15);

        AllSize = SrcMemSize + DstMemSize + CMDMemSize;

        if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
            pr_err("RGA Get MMU mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        mutex_lock(&rga_service.lock);
        MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
        MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
        mutex_unlock(&rga_service.lock);

        pages = rga_mmu_buf.pages;

        /* map cmd buffer */
        for (i = 0; i < CMDMemSize; i++)
            MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));

        /* map src buffer */
        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize;

            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
        }

        /* map dst buffer (the original guarded this on the src address,
         * apparently a copy-paste slip; the dst buffer is what is mapped) */
        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize + SrcMemSize;
            for (i = 0; i < DstMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
        }

        /*
         * change the buf address in req struct
         * for the reason of lie to MMU
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
#if defined(CONFIG_ARM)
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));
#elif defined(CONFIG_ARM64)
        __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
#endif

        rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
        reg->MMU_len = AllSize + 16;

        return 0;
    } while (0);

    return status;
}
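
/*
 * Color fill: destination-only variant of the bitblt path above.
 */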
static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)
{
    int DstMemSize;
    unsigned long DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;
    int ret;
    int status = 0;

    MMU_Base = NULL;

    do {
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0)
            return -EINVAL;

        AllSize = (DstMemSize + 15) & (~15);

        pages = rga_mmu_buf.pages;

        if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
            pr_err("RGA Get MMU mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        mutex_lock(&rga_service.lock);
        MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
        MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
        mutex_unlock(&rga_service.lock);

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
            if (req->sg_dst) {
                ret = rga_MapION(req->sg_dst, &MMU_Base[0], DstMemSize, req->line_draw_info.line_width);
            } else {
                ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
                if (ret < 0) {
                    pr_err("rga map dst memory failed\n");
                    status = ret;
                    break;
                }
            }
        } else {
            MMU_p = MMU_Base;
            for (i = 0; i < DstMemSize; i++)
                MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /* change the buf address in req struct */
        req->mmu_info.base_addr = ((unsigned long)(MMU_Base_phys) >> 2);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
#if defined(CONFIG_ARM)
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));
#elif defined(CONFIG_ARM64)
        __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
#endif

        rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
        reg->MMU_len = AllSize + 16;

        return 0;
    } while (0);

    return status;
}
static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_req *req)
{
    return 0;
}

static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req)
{
    return 0;
}
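
/*
 * Pre-scale: like bitblt, but the destination may be the driver's own
 * pre_scale_buf, whose ready-made page table is copied verbatim, and
 * all three destination planes are rewritten.
 */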
static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    unsigned long SrcStart, DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;
    int ret;
    int status = 0;
    uint32_t uv_size, v_size;

    MMU_Base = NULL;

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.vir_h,
                                      &SrcStart);
        if (SrcMemSize == 0)
            return -EINVAL;

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0)
            return -EINVAL;

        SrcMemSize = (SrcMemSize + 15) & (~15);
        DstMemSize = (DstMemSize + 15) & (~15);

        AllSize = SrcMemSize + DstMemSize;

        pages = rga_mmu_buf.pages;

        if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
            pr_err("RGA Get MMU mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        mutex_lock(&rga_service.lock);
        MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
        MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
        mutex_unlock(&rga_service.lock);

        /* map src pages */
        if ((req->mmu_info.mmu_flag >> 8) & 1) {
            if (req->sg_src) {
                ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize, req->line_draw_info.flag);
            } else {
                ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
                if (ret < 0) {
                    pr_err("rga map src memory failed\n");
                    status = ret;
                    break;
                }
            }
        } else {
            MMU_p = MMU_Base;
            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)((SrcStart + i) << PAGE_SHIFT);
        }

        /* map dst pages */
        if ((req->mmu_info.mmu_flag >> 10) & 1) {
            if (req->sg_dst) {
                ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize, req->line_draw_info.line_width);
            } else {
                ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
                if (ret < 0) {
                    pr_err("rga map dst memory failed\n");
                    status = ret;
                    break;
                }
            }
        } else {
            MMU_p = MMU_Base + SrcMemSize;

            if (req->dst.yrgb_addr == (unsigned long)rga_service.pre_scale_buf) {
                for (i = 0; i < DstMemSize; i++)
                    MMU_p[i] = rga_service.pre_scale_buf[i];
            } else {
                for (i = 0; i < DstMemSize; i++)
                    MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);
            }
        }

        /* the original self-assigned here; the sentinel should repeat the
         * last valid entry, as the other paths do */
        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /*
         * change the buf address in req struct
         * for the reason of lie to MMU
         */
        req->mmu_info.base_addr = ((unsigned long)(MMU_Base_phys) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);

        uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
        req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
#if defined(CONFIG_ARM)
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));
#elif defined(CONFIG_ARM64)
        __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
#endif

        rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
        reg->MMU_len = AllSize + 16;

        return 0;
    } while (0);

    return status;
}
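
/*
 * Palette-table update: unlike the paths above, this allocates its
 * pages[] array and MMU table with kzalloc() and frees them here or via
 * reg->MMU_base, rather than carving them out of rga_mmu_buf.
 */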
static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    unsigned long SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status = 0;

    MMU_Base = NULL;

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);
        if (SrcMemSize == 0)
            return -EINVAL;

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0)
            return -EINVAL;

        AllSize = SrcMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {   /* the original rechecked `pages` here */
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for (i = 0; i < CMDMemSize; i++)
            MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize;

            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
        }

        /*
         * change the buf address in req struct
         * for the reason of lie to MMU
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
#if defined(CONFIG_ARM)
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));
#elif defined(CONFIG_ARM64)
        __dma_flush_range(MMU_Base, (MMU_Base + AllSize));
#endif

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    } while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
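
/*
 * Pattern-buffer update: same kzalloc-based scheme as the palette path,
 * sized from req->pat (vir_w * vir_h * 4 bytes).
 */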
static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    unsigned long SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status = 0;

    MMU_Base = MMU_p = NULL;

    do {
        /* cal src buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h * 4, &SrcStart);
        if (SrcMemSize == 0)
            return -EINVAL;

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0)
            return -EINVAL;

        AllSize = SrcMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {   /* the original rechecked `pages` here */
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for (i = 0; i < CMDMemSize; i++)
            MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        } else {
            MMU_p = MMU_Base + CMDMemSize;

            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
        }

        /*
         * change the buf address in req struct
         * for the reason of lie to MMU
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
#if defined(CONFIG_ARM)
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));
#elif defined(CONFIG_ARM64)
        __dma_flush_range(MMU_Base, (MMU_Base + AllSize));
#endif

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    } while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
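
/*
 * Entry point: dispatch on req->render_mode to the per-mode page-table
 * builder above.
 */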
int rga_set_mmu_info(struct rga_reg *reg, struct rga_req *req)
{
    int ret = 0;

    switch (req->render_mode) {
    case bitblt_mode:
        ret = rga_mmu_info_BitBlt_mode(reg, req);
        break;
    case color_palette_mode:
        ret = rga_mmu_info_color_palette_mode(reg, req);
        break;
    case color_fill_mode:
        ret = rga_mmu_info_color_fill_mode(reg, req);
        break;
    case line_point_drawing_mode:
        ret = rga_mmu_info_line_point_drawing_mode(reg, req);
        break;
    case blur_sharp_filter_mode:
        ret = rga_mmu_info_blur_sharp_filter_mode(reg, req);
        break;
    case pre_scaling_mode:
        ret = rga_mmu_info_pre_scale_mode(reg, req);
        break;
    case update_palette_table_mode:
        ret = rga_mmu_info_update_palette_table_mode(reg, req);
        break;
    case update_patten_buff_mode:
        ret = rga_mmu_info_update_patten_buff_mode(reg, req);
        break;
    default:
        break;
    }

    return ret;
}