rga: flush the full MMU table range to fix stale reads by the RGA bus master
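
The RGA MMU page table and the temporary page list are sized AllSize + 1
entries, but the cache maintenance that pushes the finished table out to
DDR only covered the first AllSize entries, so the RGA bus master could
fetch a stale final entry.  Every dmac_flush_range()/outer_flush_range()
pair below is therefore widened to MMU_Base + AllSize + 1.  The BitBlt
path also switches both buffers from kzalloc() to kmalloc(), presumably
to skip the redundant zeroing, and the line/point drawing path now
allocates AllSize + 1 entries so the table matches the range it flushes.

A minimal sketch of the flush pattern, for reference only:
rga_mmu_flush_table() is a hypothetical helper and not part of this
commit; dmac_flush_range(), outer_flush_range() and virt_to_phys() are
the ARM cache-maintenance calls the driver already uses.

    #include <linux/types.h>
    #include <asm/cacheflush.h>    /* dmac_flush_range() */
    #include <asm/outercache.h>    /* outer_flush_range() */
    #include <asm/memory.h>        /* virt_to_phys() */

    /* Flush (clean + invalidate) the inner data cache and then the
     * outer cache over the whole table so the RGA bus master reads
     * up-to-date entries from DDR.  'entries' is the full element
     * count, i.e. AllSize + 1 in the paths below. */
    static void rga_mmu_flush_table(uint32_t *table, size_t entries)
    {
        dmac_flush_range(table, table + entries);
        outer_flush_range(virt_to_phys(table),
                          virt_to_phys(table + entries));
    }
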
[firefly-linux-kernel-4.4.55.git] drivers/video/rockchip/rga/rga_mmu_info.c
index 468897df6351c68a3f153908456213e3dee87346..677e6d8d0c5ba63651f93d19caae7f7981feb17f 100755
@@ -392,14 +392,14 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
         /* Cal out the needed mem size */\r
         AllSize = SrcMemSize + DstMemSize;\r
                            \r
-        pages = kzalloc((AllSize + 1)* sizeof(struct page *), GFP_KERNEL);\r
+        pages = kmalloc((AllSize + 1)* sizeof(struct page *), GFP_KERNEL);\r
         if(pages == NULL) {\r
             pr_err("RGA MMU malloc pages mem failed\n");\r
             status = RGA_MALLOC_ERROR;\r
             break;                \r
         }\r
         \r
-        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
+        MMU_Base = kmalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
         if(MMU_Base == NULL) {\r
             pr_err("RGA MMU malloc MMU_Base point failed\n");\r
             status = RGA_MALLOC_ERROR;\r
@@ -488,8 +488,8 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
         reg->MMU_base = MMU_Base;\r
         \r
         /* flush data to DDR */\r
-        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
 \r
         status = 0;\r
                \r
@@ -632,8 +632,8 @@ static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *
         reg->MMU_base = MMU_Base;\r
 \r
         /* flush data to DDR */\r
-        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
 \r
         /* Free the page table */\r
         if (pages != NULL) {            \r
@@ -728,8 +728,8 @@ static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req
         reg->MMU_base = MMU_Base;\r
 \r
         /* flush data to DDR */\r
-        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
 \r
         /* Free the page table */\r
         if (pages != NULL)             \r
@@ -773,14 +773,14 @@ static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_
 \r
         AllSize = DstMemSize;\r
                    \r
-        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
+        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);\r
         if(pages == NULL) {\r
             pr_err("RGA MMU malloc pages mem failed\n");\r
             status = RGA_MALLOC_ERROR;\r
             break;\r
         }\r
         \r
-        MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
+        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
         if(pages == NULL) {\r
             pr_err("RGA MMU malloc MMU_Base point failed\n");\r
             status = RGA_MALLOC_ERROR;\r
@@ -818,8 +818,8 @@ static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_
         reg->MMU_base = MMU_Base;\r
 \r
         /* flush data to DDR */\r
-        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
 \r
         /* Free the page table */\r
         if (pages != NULL) {            \r
@@ -955,8 +955,8 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r
         reg->MMU_base = MMU_Base;\r
 \r
         /* flush data to DDR */\r
-        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
 \r
         /* Free the page table */\r
         if (pages != NULL) {        \r
@@ -1111,8 +1111,8 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
         reg->MMU_base = MMU_Base;\r
 \r
         /* flush data to DDR */\r
-        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
 \r
         /* Free the page table */\r
         if (pages != NULL) \r
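
For reference, a matching allocation sketch: rga_mmu_alloc_table() is a
hypothetical helper and not part of this commit; it only illustrates
that the table holds AllSize + 1 uint32_t entries, the same count the
widened flushes above cover, and that kzalloc() differs from kmalloc()
only in zero-filling the buffer.

    #include <linux/slab.h>     /* kmalloc(), kzalloc() */
    #include <linux/types.h>

    /* Allocate the RGA MMU table with AllSize + 1 entries so the flush
     * range MMU_Base .. MMU_Base + AllSize + 1 is fully backed. */
    static uint32_t *rga_mmu_alloc_table(size_t all_size, bool zeroed)
    {
        size_t bytes = (all_size + 1) * sizeof(uint32_t);

        /* kzalloc(n, f) is kmalloc(n, f | __GFP_ZERO); the BitBlt path
         * in this commit drops the zeroing and uses plain kmalloc(). */
        return zeroed ? kzalloc(bytes, GFP_KERNEL)
                      : kmalloc(bytes, GFP_KERNEL);
    }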