[firefly-linux-kernel-4.4.55.git] drivers/video/rockchip/rga2/rga2_drv.c
1 /*
2  * Copyright (C) 2012 ROCKCHIP, Inc.
3  *
4  * This software is licensed under the terms of the GNU General Public
5  * License version 2, as published by the Free Software Foundation, and
6  * may be copied, distributed, and modified under those terms.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  */
14
15 #define pr_fmt(fmt) "rga: " fmt
16 #include <linux/kernel.h>
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/platform_device.h>
20 #include <linux/sched.h>
21 #include <linux/mutex.h>
22 #include <linux/err.h>
23 #include <linux/clk.h>
24 #include <asm/delay.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/delay.h>
27 #include <asm/io.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/fs.h>
31 #include <asm/uaccess.h>
32 #include <linux/miscdevice.h>
33 #include <linux/poll.h>
34 #include <linux/delay.h>
35 #include <linux/wait.h>
36 #include <linux/syscalls.h>
37 #include <linux/timer.h>
38 #include <linux/time.h>
39 #include <asm/cacheflush.h>
40 #include <linux/slab.h>
41 #include <linux/fb.h>
42 #include <linux/wakelock.h>
43 #include <linux/scatterlist.h>
44 #include <linux/rockchip_ion.h>
45 #include <linux/version.h>
46 #include <linux/pm_runtime.h>
47 #include <linux/dma-buf.h>
48
49 #include "rga2.h"
50 #include "rga2_reg_info.h"
51 #include "rga2_mmu_info.h"
52 #include "RGA2_API.h"
53 #include "rga2_rop.h"
54
55 #if defined(CONFIG_RK_IOMMU) && defined(CONFIG_ION_ROCKCHIP)
56 #define CONFIG_RGA_IOMMU
57 #endif
58
59 #define RGA2_TEST_FLUSH_TIME 0
60 #define RGA2_INFO_BUS_ERROR 1
61 #define RGA2_POWER_OFF_DELAY    (4 * HZ) /* 4s */
62 #define RGA2_TIMEOUT_DELAY      (HZ / 10) /* 100ms */
63 #define RGA2_MAJOR              255
64 #define RGA2_RESET_TIMEOUT      1000
65
66 /* Driver information */
67 #define DRIVER_DESC             "RGA2 Device Driver"
68 #define DRIVER_NAME             "rga2"
69 #define RGA2_VERSION   "2.000"
70
71 ktime_t rga2_start;
72 ktime_t rga2_end;
73 int rga2_flag;
74 int first_RGA2_proc;
75
76 rga2_session rga2_session_global;
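/*
 * Function pointer through which kernel-internal callers can submit
 * struct rga_req requests directly; it is pointed at rga2_ioctl_kernel()
 * in rga2_drv_probe().
 */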
77 long (*rga_ioctl_kernel_p)(struct rga_req *);
78
79 struct rga2_drvdata_t {
80         struct miscdevice miscdev;
81         struct device *dev;
82         void *rga_base;
83         int irq;
84
85         struct delayed_work power_off_work;
86         struct wake_lock wake_lock;
87         void (*rga_irq_callback)(int rga_retval);
88
89         struct clk *aclk_rga2;
90         struct clk *hclk_rga2;
91         struct clk *rga2;
92
93         struct ion_client * ion_client;
94         char version[16];
95 };
96
97 struct rga2_drvdata_t *rga2_drvdata;
98 struct rga2_service_info rga2_service;
99 struct rga2_mmu_buf_t rga2_mmu_buf;
100
101 static int rga2_blit_async(rga2_session *session, struct rga2_req *req);
102 static void rga2_del_running_list(void);
103 static void rga2_del_running_list_timeout(void);
104 static void rga2_try_set_reg(void);
105
106
107 /* Logging */
108 #define RGA_DEBUG 0
109 #if RGA_DEBUG
110 #define DBG(format, args...) printk(KERN_DEBUG "%s: " format, DRIVER_NAME, ## args)
111 #define ERR(format, args...) printk(KERN_ERR "%s: " format, DRIVER_NAME, ## args)
112 #define WARNING(format, args...) printk(KERN_WARNING "%s: " format, DRIVER_NAME, ## args)
113 #define INFO(format, args...) printk(KERN_INFO "%s: " format, DRIVER_NAME, ## args)
114 #else
115 #define DBG(format, args...)
116 #define ERR(format, args...)
117 #define WARNING(format, args...)
118 #define INFO(format, args...)
119 #endif
120
121 #if RGA2_TEST_MSG
122 static void print_info(struct rga2_req *req)
123 {
124         printk("render_mode=%d bitblt_mode=%d rotate_mode=%.8x\n",
125                 req->render_mode, req->bitblt_mode, req->rotate_mode);
126         printk("src : y=%lx uv=%lx v=%lx format=%d aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d \n",
127                 req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr, req->src.format,
128                 req->src.act_w, req->src.act_h, req->src.vir_w, req->src.vir_h,
129                 req->src.x_offset, req->src.y_offset);
130         printk("dst : y=%lx uv=%lx v=%lx format=%d aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d \n",
131                 req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr, req->dst.format,
132                 req->dst.act_w, req->dst.act_h, req->dst.vir_w, req->dst.vir_h,
133                 req->dst.x_offset, req->dst.y_offset);
134         printk("mmu : src=%.2x src1=%.2x dst=%.2x els=%.2x\n",
135                 req->mmu_info.src0_mmu_flag, req->mmu_info.src1_mmu_flag,
136                 req->mmu_info.dst_mmu_flag,  req->mmu_info.els_mmu_flag);
137         printk("alpha : flag %.8x mode0=%.8x mode1=%.8x\n",
138                 req->alpha_rop_flag, req->alpha_mode_0, req->alpha_mode_1);
139 }
140 #endif
141
142 static inline void rga2_write(u32 b, u32 r)
143 {
144         *((volatile unsigned int *)(rga2_drvdata->rga_base + r)) = b;
145 }
146
147 static inline u32 rga2_read(u32 r)
148 {
149         return *((volatile unsigned int *)(rga2_drvdata->rga_base + r));
150 }
151
152 static inline int rga2_init_version(void)
153 {
154         struct rga2_drvdata_t *rga = rga2_drvdata;
155         u32 major_version, minor_version;
156         u32 reg_version;
157
158         if (!rga) {
159                 pr_err("rga2_drvdata is null\n");
160                 return -EINVAL;
161         }
162 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
163         pm_runtime_get_sync(rga2_drvdata->dev);
164 #endif
165
166         clk_prepare_enable(rga2_drvdata->aclk_rga2);
167         clk_prepare_enable(rga2_drvdata->hclk_rga2);
168
169         reg_version = rga2_read(0x028);
170
171         clk_disable_unprepare(rga2_drvdata->aclk_rga2);
172         clk_disable_unprepare(rga2_drvdata->hclk_rga2);
173
174 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
175         pm_runtime_put(rga2_drvdata->dev);
176 #endif
177
178         major_version = (reg_version & RGA2_MAJOR_VERSION_MASK) >> 24;
179         minor_version = (reg_version & RGA2_MINOR_VERSION_MASK) >> 20;
180
181         /*
182          * Some old RGA IP revisions have no version register, so force the version to 2.00
183          */
184         if (!major_version && !minor_version)
185                 major_version = 2;
186         snprintf(rga->version, sizeof(rga->version), "%d.%02d", major_version, minor_version);
187
188         return 0;
189 }
190
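/*
 * Request a software reset of the RGA2 core: write the soft-reset request
 * bits to RGA2_SYS_CTRL, then poll bit 0 of RGA2_SYS_CTRL until the core
 * reports idle (or give up after roughly RGA2_RESET_TIMEOUT microseconds).
 */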
191 static void rga2_soft_reset(void)
192 {
193         u32 i;
194         u32 reg;
195
196         rga2_write((1 << 3) | (1 << 4) | (1 << 6), RGA2_SYS_CTRL);
197
198         for(i = 0; i < RGA2_RESET_TIMEOUT; i++)
199         {
200                 reg = rga2_read(RGA2_SYS_CTRL) & 1; //RGA_SYS_CTRL
201
202                 if(reg == 0)
203                         break;
204
205                 udelay(1);
206         }
207
208         if(i == RGA2_RESET_TIMEOUT)
209                 ERR("soft reset timeout.\n");
210 }
211
212 static void rga2_dump(void)
213 {
214         int running;
215         struct rga2_reg *reg, *reg_tmp;
216         rga2_session *session, *session_tmp;
217
218         running = atomic_read(&rga2_service.total_running);
219         printk("rga total_running %d\n", running);
220         list_for_each_entry_safe(session, session_tmp, &rga2_service.session,
221                 list_session)
222         {
223                 printk("session pid %d:\n", session->pid);
224                 running = atomic_read(&session->task_running);
225                 printk("task_running %d\n", running);
226                 list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link)
227                 {
228                         printk("waiting register set 0x%lx\n", (unsigned long)reg);
229                 }
230                 list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link)
231                 {
232                         printk("running register set 0x%lx\n", (unsigned long)reg);
233                 }
234         }
235 }
236
237 static inline void rga2_queue_power_off_work(void)
238 {
239         queue_delayed_work(system_wq, &rga2_drvdata->power_off_work,
240                 RGA2_POWER_OFF_DELAY);
241 }
242
243 /* Caller must hold rga_service.lock */
244 static void rga2_power_on(void)
245 {
246         static ktime_t last;
247         ktime_t now = ktime_get();
248
249 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
250         pm_runtime_get_sync(rga2_drvdata->dev);
251 #endif
252
253         if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) {
254                 cancel_delayed_work_sync(&rga2_drvdata->power_off_work);
255                 rga2_queue_power_off_work();
256                 last = now;
257         }
258
259         if (rga2_service.enable)
260                 return;
261
262         clk_prepare_enable(rga2_drvdata->rga2);
263         clk_prepare_enable(rga2_drvdata->aclk_rga2);
264         clk_prepare_enable(rga2_drvdata->hclk_rga2);
265         wake_lock(&rga2_drvdata->wake_lock);
266         rga2_service.enable = true;
267 }
268
269 /* Caller must hold rga_service.lock */
270 static void rga2_power_off(void)
271 {
272         int total_running;
273
274         if (!rga2_service.enable) {
275                 return;
276         }
277
278         total_running = atomic_read(&rga2_service.total_running);
279         if (total_running) {
280                 pr_err("power off when %d task running!!\n", total_running);
281                 mdelay(50);
282                 pr_err("delay 50 ms for running task\n");
283                 rga2_dump();
284         }
285
286         clk_disable_unprepare(rga2_drvdata->rga2);
287         clk_disable_unprepare(rga2_drvdata->aclk_rga2);
288         clk_disable_unprepare(rga2_drvdata->hclk_rga2);
289
290 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
291         pm_runtime_put(rga2_drvdata->dev);
292 #endif
293
294         wake_unlock(&rga2_drvdata->wake_lock);
295         first_RGA2_proc = 0;
296         rga2_service.enable = false;
297 }
298
299 static void rga2_power_off_work(struct work_struct *work)
300 {
301         if (mutex_trylock(&rga2_service.lock)) {
302                 rga2_power_off();
303                 mutex_unlock(&rga2_service.lock);
304         } else {
305                 /* Come back later if the device is busy... */
306                 rga2_queue_power_off_work();
307         }
308 }
309
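/*
 * Wait (bounded by RGA2_TIMEOUT_DELAY) for the work queued by this session
 * to complete.  On a wait error the running list is torn down; on a timeout
 * the running list is torn down and the next waiting job, if any, is kicked.
 */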
310 static int rga2_flush(rga2_session *session, unsigned long arg)
311 {
312     int ret = 0;
313     int ret_timeout;
314
315     #if RGA2_TEST_FLUSH_TIME
316     ktime_t start;
317     ktime_t end;
318     start = ktime_get();
319     #endif
320
321     ret_timeout = wait_event_timeout(session->wait, atomic_read(&session->done), RGA2_TIMEOUT_DELAY);
322
323         if (unlikely(ret_timeout < 0)) {
324                 //pr_err("flush pid %d wait task ret %d\n", session->pid, ret);
325         mutex_lock(&rga2_service.lock);
326         rga2_del_running_list();
327         mutex_unlock(&rga2_service.lock);
328         ret = ret_timeout;
329         } else if (0 == ret_timeout) {
330                 //pr_err("flush pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));
331         //printk("bus  = %.8x\n", rga_read(RGA_INT));
332         mutex_lock(&rga2_service.lock);
333         rga2_del_running_list_timeout();
334         rga2_try_set_reg();
335         mutex_unlock(&rga2_service.lock);
336                 ret = -ETIMEDOUT;
337         }
338
339     #if RGA2_TEST_FLUSH_TIME
340     end = ktime_get();
341     end = ktime_sub(end, start);
342     printk("one flush wait time %d\n", (int)ktime_to_us(end));
343     #endif
344
345         return ret;
346 }
347
348
349 static int rga2_get_result(rga2_session *session, unsigned long arg)
350 {
351         int ret = 0;
352         int num_done;
353
354         num_done = atomic_read(&session->num_done);
355         if (unlikely(copy_to_user((void __user *)arg, &num_done, sizeof(int)))) {
356             printk("copy_to_user failed\n");
357             ret =  -EFAULT;
358         }
359         return ret;
360 }
361
362
363 static int rga2_check_param(const struct rga2_req *req)
364 {
365         if(!((req->render_mode == color_fill_mode)))
366         {
367             if (unlikely((req->src.act_w <= 0) || (req->src.act_w > 8191) || (req->src.act_h <= 0) || (req->src.act_h > 8191)))
368             {
369                 printk("invalid source resolution act_w = %d, act_h = %d\n", req->src.act_w, req->src.act_h);
370                 return -EINVAL;
371             }
372         }
373
374         if(!((req->render_mode == color_fill_mode)))
375         {
376             if (unlikely((req->src.vir_w <= 0) || (req->src.vir_w > 8191) || (req->src.vir_h <= 0) || (req->src.vir_h > 8191)))
377             {
378                 printk("invalid source resolution vir_w = %d, vir_h = %d\n", req->src.vir_w, req->src.vir_h);
379                 return -EINVAL;
380             }
381         }
382
383         //check dst width and height
384         if (unlikely((req->dst.act_w <= 0) || (req->dst.act_w > 4096) || (req->dst.act_h <= 0) || (req->dst.act_h > 4096)))
385         {
386             printk("invalid destination resolution act_w = %d, act_h = %d\n", req->dst.act_w, req->dst.act_h);
387             return -EINVAL;
388         }
389
390         if (unlikely((req->dst.vir_w <= 0) || (req->dst.vir_w > 4096) || (req->dst.vir_h <= 0) || (req->dst.vir_h > 4096)))
391         {
392             printk("invalid destination resolution vir_w = %d, vir_h = %d\n", req->dst.vir_w, req->dst.vir_h);
393             return -EINVAL;
394         }
395
396         //check src_vir_w
397         if(unlikely(req->src.vir_w < req->src.act_w)){
398             printk("invalid src_vir_w act_w = %d, vir_w = %d\n", req->src.act_w, req->src.vir_w);
399             return -EINVAL;
400         }
401
402         //check dst_vir_w
403         if(unlikely(req->dst.vir_w < req->dst.act_w)){
404             if(req->rotate_mode != 1)
405             {
406                 printk("invalid dst_vir_w act_w = %d, vir_w = %d\n", req->dst.act_w, req->dst.vir_w);
407                 return -EINVAL;
408             }
409         }
410
411         return 0;
412 }
413
414 static void rga2_copy_reg(struct rga2_reg *reg, uint32_t offset)
415 {
416     uint32_t i;
417     uint32_t *cmd_buf;
418     uint32_t *reg_p;
419
420     if(atomic_read(&reg->session->task_running) != 0)
421     printk(KERN_ERR "task_running is not zero\n");
422
423     atomic_add(1, &rga2_service.cmd_num);
424         atomic_add(1, &reg->session->task_running);
425
426     cmd_buf = (uint32_t *)rga2_service.cmd_buff + offset*32;
427     reg_p = (uint32_t *)reg->cmd_reg;
428
429     for(i=0; i<32; i++)
430         cmd_buf[i] = reg_p[i];
431 }
432
433
434 static struct rga2_reg * rga2_reg_init(rga2_session *session, struct rga2_req *req)
435 {
436     int32_t ret;
437         struct rga2_reg *reg = kzalloc(sizeof(struct rga2_reg), GFP_KERNEL);
438         if (NULL == reg) {
439                 pr_err("kzalloc fail in rga2_reg_init\n");
440                 return NULL;
441         }
442
443     reg->session = session;
444         INIT_LIST_HEAD(&reg->session_link);
445         INIT_LIST_HEAD(&reg->status_link);
446
447     reg->MMU_base = NULL;
448
449     if ((req->mmu_info.src0_mmu_flag & 1) || (req->mmu_info.src1_mmu_flag & 1)
450         || (req->mmu_info.dst_mmu_flag & 1) || (req->mmu_info.els_mmu_flag & 1))
451     {
452         ret = rga2_set_mmu_info(reg, req);
453         if(ret < 0) {
454             printk("%s, [%d] set mmu info error \n", __FUNCTION__, __LINE__);
455             if(reg != NULL)
456                 kfree(reg);
457
458             return NULL;
459         }
460     }
461
462     if(RGA2_gen_reg_info((uint8_t *)reg->cmd_reg, req) == -1) {
463         printk("gen reg info error\n");
464         if(reg != NULL)
465             kfree(reg);
466
467         return NULL;
468     }
469
470         reg->sg_src0 = req->sg_src0;
471         reg->sg_dst = req->sg_dst;
472         reg->sg_src1 = req->sg_src1;
473         reg->attach_src0 = req->attach_src0;
474         reg->attach_dst = req->attach_dst;
475         reg->attach_src1 = req->attach_src1;
476
477     mutex_lock(&rga2_service.lock);
478         list_add_tail(&reg->status_link, &rga2_service.waiting);
479         list_add_tail(&reg->session_link, &session->waiting);
480         mutex_unlock(&rga2_service.lock);
481
482     return reg;
483 }
484
485
486 /* Caller must hold rga_service.lock */
487 static void rga2_reg_deinit(struct rga2_reg *reg)
488 {
489         list_del_init(&reg->session_link);
490         list_del_init(&reg->status_link);
491         kfree(reg);
492 }
493
494 /* Caller must hold rga_service.lock */
495 static void rga2_reg_from_wait_to_run(struct rga2_reg *reg)
496 {
497         list_del_init(&reg->status_link);
498         list_add_tail(&reg->status_link, &rga2_service.running);
499
500         list_del_init(&reg->session_link);
501         list_add_tail(&reg->session_link, &reg->session->running);
502 }
503
504 /* Caller must hold rga_service.lock */
505 static void rga2_service_session_clear(rga2_session *session)
506 {
507         struct rga2_reg *reg, *n;
508
509         list_for_each_entry_safe(reg, n, &session->waiting, session_link)
510         {
511                 rga2_reg_deinit(reg);
512         }
513
514         list_for_each_entry_safe(reg, n, &session->running, session_link)
515         {
516                 rga2_reg_deinit(reg);
517         }
518 }
519
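/*
 * If the hardware is idle and a job is waiting, copy its command buffer into
 * the shared cmd_buff, flush that buffer out of the CPU caches, point the
 * core at it and start processing in master mode with completion interrupts
 * enabled.
 */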
520 /* Caller must hold rga_service.lock */
521 static void rga2_try_set_reg(void)
522 {
523         struct rga2_reg *reg;
524
525         if (list_empty(&rga2_service.running))
526         {
527                 if (!list_empty(&rga2_service.waiting))
528                 {
529                         /* RGA is idle */
530                         reg = list_entry(rga2_service.waiting.next, struct rga2_reg, status_link);
531
532                         rga2_power_on();
533                         udelay(1);
534
535                         rga2_copy_reg(reg, 0);
536                         rga2_reg_from_wait_to_run(reg);
537
538 #ifdef CONFIG_ARM
539                         dmac_flush_range(&rga2_service.cmd_buff[0], &rga2_service.cmd_buff[32]);
540                         outer_flush_range(virt_to_phys(&rga2_service.cmd_buff[0]),virt_to_phys(&rga2_service.cmd_buff[32]));
541 #elif defined(CONFIG_ARM64)
542                         __dma_flush_range(&rga2_service.cmd_buff[0], &rga2_service.cmd_buff[32]);
543 #endif
544
545                         //rga2_soft_reset();
546
547                         rga2_write(0x0, RGA2_SYS_CTRL);
548
549                         /* CMD buff */
550                         rga2_write(virt_to_phys(rga2_service.cmd_buff), RGA2_CMD_BASE);
551
552 #if RGA2_TEST
553                         if(rga2_flag) {
554                                 int32_t i, *p;
555                                 p = rga2_service.cmd_buff;
556                                 printk("CMD_REG\n");
557                                 for (i=0; i<8; i++)
558                                         printk("%.8x %.8x %.8x %.8x\n", p[0 + i*4], p[1+i*4], p[2 + i*4], p[3 + i*4]);
559                         }
560 #endif
561
562                         /* master mode */
563                         rga2_write((0x1<<1)|(0x1<<2)|(0x1<<5)|(0x1<<6), RGA2_SYS_CTRL);
564
565                         /* All CMD finish int */
566                         rga2_write(rga2_read(RGA2_INT)|(0x1<<10)|(0x1<<9)|(0x1<<8), RGA2_INT);
567
568 #if RGA2_TEST_TIME
569                         rga2_start = ktime_get();
570 #endif
571
572                         /* Start proc */
573                         atomic_set(&reg->session->done, 0);
574                         rga2_write(0x1, RGA2_CMD_CTRL);
575 #if RGA2_TEST
576                         if(rga2_flag)
577                         {
578                                 uint32_t i;
579                                 printk("CMD_READ_BACK_REG\n");
580                                 for (i=0; i<8; i++)
581                                         printk("%.8x %.8x %.8x %.8x\n", rga2_read(0x100 + i*16 + 0),
582                                                         rga2_read(0x100 + i*16 + 4), rga2_read(0x100 + i*16 + 8), rga2_read(0x100 + i*16 + 12));
583                         }
584 #endif
585                 }
586         }
587 }
588
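/*
 * Unmap, detach and release the dma-buf attachments for src0, dst and src1.
 * The buffers are taken from @reg when it is non-NULL (job already queued),
 * otherwise from @req (job rejected before queuing).
 */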
589 static int rga2_put_dma_buf(struct rga2_req *req, struct rga2_reg *reg)
590 {
591         struct dma_buf_attachment *attach = NULL;
592         struct sg_table *sgt = NULL;
593         struct dma_buf *dma_buf = NULL;
594
595         if (!req && !reg)
596                 return -EINVAL;
597
598         attach = (!reg) ? req->attach_src0 : reg->attach_src0;
599         sgt = (!reg) ? req->sg_src0 : reg->sg_src0;
600         if (attach && sgt)
601                 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
602         if (attach) {
603                 dma_buf = attach->dmabuf;
604                 dma_buf_detach(dma_buf, attach);
605                 dma_buf_put(dma_buf);
606         }
607
608         attach = (!reg) ? req->attach_dst : reg->attach_dst;
609         sgt = (!reg) ? req->sg_dst : reg->sg_dst;
610         if (attach && sgt)
611                 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
612         if (attach) {
613                 dma_buf = attach->dmabuf;
614                 dma_buf_detach(dma_buf, attach);
615                 dma_buf_put(dma_buf);
616         }
617
618         attach = (!reg) ? req->attach_src1 : reg->attach_src1;
619         sgt = (!reg) ? req->sg_src1 : reg->sg_src1;
620         if (attach && sgt)
621                 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
622         if (attach) {
623                 dma_buf = attach->dmabuf;
624                 dma_buf_detach(dma_buf, attach);
625                 dma_buf_put(dma_buf);
626         }
627
628         return 0;
629 }
630
631 static void rga2_del_running_list(void)
632 {
633         struct rga2_mmu_buf_t *tbuf = &rga2_mmu_buf;
634         struct rga2_reg *reg;
635
636         while (!list_empty(&rga2_service.running)) {
637                 reg = list_entry(rga2_service.running.next, struct rga2_reg,
638                                  status_link);
639                 if (reg->MMU_len && tbuf) {
640                         if (tbuf->back + reg->MMU_len > 2 * tbuf->size)
641                                 tbuf->back = reg->MMU_len + tbuf->size;
642                         else
643                                 tbuf->back += reg->MMU_len;
644                 }
645
646                 rga2_put_dma_buf(NULL, reg);
647
648                 atomic_sub(1, &reg->session->task_running);
649                 atomic_sub(1, &rga2_service.total_running);
650
651                 if(list_empty(&reg->session->waiting))
652                 {
653                         atomic_set(&reg->session->done, 1);
654                         wake_up(&reg->session->wait);
655                 }
656
657                 rga2_reg_deinit(reg);
658         }
659 }
660
661 static void rga2_del_running_list_timeout(void)
662 {
663         struct rga2_mmu_buf_t *tbuf = &rga2_mmu_buf;
664         struct rga2_reg *reg;
665
666         while (!list_empty(&rga2_service.running)) {
667                 reg = list_entry(rga2_service.running.next, struct rga2_reg,
668                                  status_link);
669                 kfree(reg->MMU_base);
670                 if (reg->MMU_len && tbuf) {
671                         if (tbuf->back + reg->MMU_len > 2 * tbuf->size)
672                                 tbuf->back = reg->MMU_len + tbuf->size;
673                         else
674                                 tbuf->back += reg->MMU_len;
675                 }
676
677                 rga2_put_dma_buf(NULL, reg);
678
679                 atomic_sub(1, &reg->session->task_running);
680                 atomic_sub(1, &rga2_service.total_running);
681                 rga2_soft_reset();
682                 if (list_empty(&reg->session->waiting)) {
683                         atomic_set(&reg->session->done, 1);
684                         wake_up(&reg->session->wait);
685                 }
686                 rga2_reg_deinit(reg);
687         }
688         return;
689 }
690
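/*
 * Resolve the yrgb_addr handle of one image into something the hardware can
 * use: a dma-buf fd is attached and mapped to a sg table, while an ion
 * handle yields either its sg table (IOMMU case) or its physical address.
 * The uv/v addresses are then filled in at vir_w * vir_h and
 * vir_w * vir_h / 4 offsets, i.e. a planar YUV420-style layout.
 */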
691 static int rga2_get_img_info(rga_img_info_t *img,
692                              u8 mmu_flag,
693                              u8 buf_gem_type_dma,
694                              struct sg_table **psgt,
695                              struct dma_buf_attachment **pattach)
696 {
697         struct dma_buf_attachment *attach = NULL;
698         struct ion_client *ion_client = NULL;
699         struct ion_handle *hdl = NULL;
700         struct device *rga_dev = NULL;
701         struct sg_table *sgt = NULL;
702         struct dma_buf *dma_buf = NULL;
703         u32 vir_w, vir_h;
704         ion_phys_addr_t phy_addr;
705         size_t len = 0;
706         int yrgb_addr = -1;
707         int ret = 0;
708
709         ion_client = rga2_drvdata->ion_client;
710         rga_dev = rga2_drvdata->dev;
711         yrgb_addr = (int)img->yrgb_addr;
712         vir_w = img->vir_w;
713         vir_h = img->vir_h;
714
715         if (yrgb_addr > 0) {
716                 if (buf_gem_type_dma) {
717                         dma_buf = dma_buf_get(img->yrgb_addr);
718                         if (IS_ERR(dma_buf)) {
719                                 ret = -EINVAL;
720                                 pr_err("dma_buf_get fail fd[%d]\n", yrgb_addr);
721                                 return ret;
722                         }
723
724                         attach = dma_buf_attach(dma_buf, rga_dev);
725                         if (IS_ERR(attach)) {
726                                 dma_buf_put(dma_buf);
727                                 ret = -EINVAL;
728                                 pr_err("Failed to attach dma_buf\n");
729                                 return ret;
730                         }
731
732                         *pattach = attach;
733                         sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
734                         if (IS_ERR(sgt)) {
735                                 ret = -EINVAL;
736                                 pr_err("Failed to map dma-buf attachment\n");
737                                 goto err_get_sg;
738                         }
739                         if (!mmu_flag) {
740                                 ret = -EINVAL;
741                                 pr_err("MMU flag is not set, please enable the IOMMU flag\n");
742                                 goto err_get_sg;
743                         }
744                 } else {
745                         hdl = ion_import_dma_buf(ion_client, img->yrgb_addr);
746                         if (IS_ERR(hdl)) {
747                                 ret = -EINVAL;
748                                 pr_err("RGA2 ERROR: failed to import ion buffer\n");
749                                 return ret;
750                         }
751                         if (mmu_flag) {
752                                 sgt = ion_sg_table(ion_client, hdl);
753                                 if (IS_ERR(sgt)) {
754                                         ret = -EINVAL;
755                                         pr_err("Failed to get ion sg table\n");
756                                         goto err_get_sg;
757                                 }
758                         }
759                 }
760
761                 if (mmu_flag) {
762                         *psgt = sgt;
763                         img->yrgb_addr = img->uv_addr;
764                         img->uv_addr = img->yrgb_addr + (vir_w * vir_h);
765                         img->v_addr = img->uv_addr + (vir_w * vir_h) / 4;
766                 } else {
767                         ion_phys(ion_client, hdl, &phy_addr, &len);
768                         img->yrgb_addr = phy_addr;
769                         img->uv_addr = img->yrgb_addr + (vir_w * vir_h);
770                         img->v_addr = img->uv_addr + (vir_w * vir_h) / 4;
771                 }
772         } else {
773                 img->yrgb_addr = img->uv_addr;
774                 img->uv_addr = img->yrgb_addr + (vir_w * vir_h);
775                 img->v_addr = img->uv_addr + (vir_w * vir_h) / 4;
776         }
777
778         if (hdl)
779                 ion_free(ion_client, hdl);
780
781         return ret;
782
783 err_get_sg:
784         if (hdl)
785                 ion_free(ion_client, hdl);
786         if (sgt && buf_gem_type_dma)
787                 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
788         if (attach) {
789                 dma_buf = attach->dmabuf;
790                 dma_buf_detach(dma_buf, attach);
791                 *pattach = NULL;
792                 dma_buf_put(dma_buf);
793         }
794         return ret;
795 }
796
797 static int rga2_get_dma_buf(struct rga2_req *req)
798 {
799         struct dma_buf *dma_buf = NULL;
800         u8 buf_gem_type_dma = 0;
801         u8 mmu_flag = 0;
802         int ret = 0;
803
804         buf_gem_type_dma = req->buf_type & RGA_BUF_GEM_TYPE_DMA;
805         req->sg_src0 = NULL;
806         req->sg_src1 = NULL;
807         req->sg_dst = NULL;
808         req->sg_els = NULL;
809         req->attach_src0 = NULL;
810         req->attach_dst = NULL;
811         req->attach_src1 = NULL;
812         mmu_flag = req->mmu_info.src0_mmu_flag;
813         ret = rga2_get_img_info(&req->src, mmu_flag, buf_gem_type_dma,
814                                 &req->sg_src0, &req->attach_src0);
815         if (ret) {
816                 pr_err("src:rga2_get_img_info fail\n");
817                 goto err_src;
818         }
819
820         mmu_flag = req->mmu_info.dst_mmu_flag;
821         ret = rga2_get_img_info(&req->dst, mmu_flag, buf_gem_type_dma,
822                                 &req->sg_dst, &req->attach_dst);
823         if (ret) {
824                 pr_err("dst:rga2_get_img_info fail\n");
825                 goto err_dst;
826         }
827
828         mmu_flag = req->mmu_info.src1_mmu_flag;
829         ret = rga2_get_img_info(&req->src1, mmu_flag, buf_gem_type_dma,
830                                 &req->sg_src1, &req->attach_src1);
831         if (ret) {
832                 pr_err("src1:rga2_get_img_info fail\n");
833                 goto err_src1;
834         }
835
836         return ret;
837
838 err_src1:
839         if (buf_gem_type_dma && req->sg_dst && req->attach_dst) {
840                 dma_buf_unmap_attachment(req->attach_dst,
841                                          req->sg_dst, DMA_BIDIRECTIONAL);
842                 dma_buf = req->attach_dst->dmabuf;
843                 dma_buf_detach(dma_buf, req->attach_dst);
844                 dma_buf_put(dma_buf);
845         }
846 err_dst:
847         if (buf_gem_type_dma && req->sg_src0 && req->attach_src0) {
848                 dma_buf_unmap_attachment(req->attach_src0,
849                                          req->sg_src0, DMA_BIDIRECTIONAL);
850                 dma_buf = req->attach_src0->dmabuf;
851                 dma_buf_detach(dma_buf, req->attach_src0);
852                 dma_buf_put(dma_buf);
853         }
854 err_src:
855
856         return ret;
857 }
858
859 static int rga2_blit(rga2_session *session, struct rga2_req *req)
860 {
861         int ret = -1;
862         int num = 0;
863         struct rga2_reg *reg;
864
865         if (rga2_get_dma_buf(req)) {
866                 pr_err("RGA2 : failed to get dma buf\n");
867                 return -EFAULT;
868         }
869
870         do {
871                 /* check value if legal */
872                 ret = rga2_check_param(req);
873                 if(ret == -EINVAL) {
874                         pr_err("req argument is invalid\n");
875                         goto err_put_dma_buf;
876                 }
877
878                 reg = rga2_reg_init(session, req);
879                 if(reg == NULL) {
880                         pr_err("init reg fail\n");
881                         goto err_put_dma_buf;
882                 }
883
884                 num = 1;
885                 mutex_lock(&rga2_service.lock);
886                 atomic_add(num, &rga2_service.total_running);
887                 rga2_try_set_reg();
888                 mutex_unlock(&rga2_service.lock);
889
890                 return 0;
891         }
892         while(0);
893
894 err_put_dma_buf:
895         rga2_put_dma_buf(req, NULL);
896
897         return -EFAULT;
898 }
899
900 static int rga2_blit_async(rga2_session *session, struct rga2_req *req)
901 {
902         int ret = -1;
903
904 #if RGA2_TEST_MSG
905         if (1) {//req->src.format >= 0x10) {
906                 print_info(req);
907                 rga2_flag = 1;
908                 printk("*** rga_blit_async proc ***\n");
909         }
910         else
911                 rga2_flag = 0;
912 #endif
913         atomic_set(&session->done, 0);
914         ret = rga2_blit(session, req);
915
916         return ret;
917 }
918
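/*
 * Submit one request and wait for it to finish.  On a timeout the hardware
 * state is cleaned up and the request (restored from req_bak, since
 * rga2_blit() may rewrite its buffer fields) is retried up to 10 times.
 */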
919 static int rga2_blit_sync(rga2_session *session, struct rga2_req *req)
920 {
921         struct rga2_req req_bak;
922         int try = 10;
923         int ret = -1;
924         int ret_timeout = 0;
925
926         memcpy(&req_bak, req, sizeof(req_bak));
927 retry:
928
929 #if RGA2_TEST_MSG
930         if (1) {//req->bitblt_mode == 0x2) {
931                 print_info(req);
932                 rga2_flag = 1;
933                 printk("*** rga2_blit_sync proc ***\n");
934         }
935         else
936                 rga2_flag = 0;
937 #endif
938
939         atomic_set(&session->done, 0);
940
941         ret = rga2_blit(session, req);
942         if(ret < 0)
943                 return ret;
944
945         ret_timeout = wait_event_timeout(session->wait, atomic_read(&session->done), RGA2_TIMEOUT_DELAY);
946
947         if (unlikely(ret_timeout < 0))
948         {
949                 //pr_err("sync pid %d wait task ret %d\n", session->pid, ret_timeout);
950                 mutex_lock(&rga2_service.lock);
951                 rga2_del_running_list();
952                 mutex_unlock(&rga2_service.lock);
953                 ret = ret_timeout;
954         }
955         else if (0 == ret_timeout)
956         {
957                 //pr_err("sync pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));
958                 mutex_lock(&rga2_service.lock);
959                 rga2_del_running_list_timeout();
960                 rga2_try_set_reg();
961                 mutex_unlock(&rga2_service.lock);
962                 ret = -ETIMEDOUT;
963         }
964
965 #if RGA2_TEST_TIME
966         rga2_end = ktime_get();
967         rga2_end = ktime_sub(rga2_end, rga2_start);
968         printk("sync one cmd end time %d\n", (int)ktime_to_us(rga2_end));
969 #endif
970         if (ret == -ETIMEDOUT && try--) {
971                 memcpy(req, &req_bak, sizeof(req_bak));
972                 goto retry;
973         }
974
975         return ret;
976 }
977
978 static long rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
979 {
980         struct rga2_drvdata_t *rga = rga2_drvdata;
981         struct rga2_req req, req_first;
982         struct rga_req req_rga;
983         int ret = 0;
984         rga2_session *session;
985
986         if (!rga) {
987                 pr_err("rga2_drvdata is null, rga2 is not init\n");
988                 return -ENODEV;
989         }
990         memset(&req, 0x0, sizeof(req));
991
992         mutex_lock(&rga2_service.mutex);
993
994         session = (rga2_session *)file->private_data;
995
996         if (NULL == session)
997         {
998                 printk("%s [%d] rga thread session is null\n",__FUNCTION__,__LINE__);
999                 mutex_unlock(&rga2_service.mutex);
1000                 return -EINVAL;
1001         }
1002
1003         memset(&req, 0x0, sizeof(req));
1004
1005         switch (cmd)
1006         {
1007                 case RGA_BLIT_SYNC:
1008                         if (unlikely(copy_from_user(&req_rga, (struct rga_req*)arg, sizeof(struct rga_req))))
1009                         {
1010                                 ERR("copy_from_user failed\n");
1011                                 ret = -EFAULT;
1012                                 break;
1013                         }
1014                         RGA_MSG_2_RGA2_MSG(&req_rga, &req);
1015
1016                         if (first_RGA2_proc == 0 && req.bitblt_mode == bitblt_mode && rga2_service.dev_mode == 1) {
1017                                 memcpy(&req_first, &req, sizeof(struct rga2_req));
1018                                 if ((req_first.src.act_w != req_first.dst.act_w)
1019                                                 || (req_first.src.act_h != req_first.dst.act_h)) {
1020                                         req_first.src.act_w = MIN(320, MIN(req_first.src.act_w, req_first.dst.act_w));
1021                                         req_first.src.act_h = MIN(240, MIN(req_first.src.act_h, req_first.dst.act_h));
1022                                         req_first.dst.act_w = req_first.src.act_w;
1023                                         req_first.dst.act_h = req_first.src.act_h;
1024                                         ret = rga2_blit_async(session, &req_first);
1025                                 }
1026                                 ret = rga2_blit_sync(session, &req);
1027                                 first_RGA2_proc = 1;
1028                         }
1029                         else {
1030                                 ret = rga2_blit_sync(session, &req);
1031                         }
1032                         break;
1033                 case RGA_BLIT_ASYNC:
1034                         if (unlikely(copy_from_user(&req_rga, (struct rga_req*)arg, sizeof(struct rga_req))))
1035                         {
1036                                 ERR("copy_from_user failed\n");
1037                                 ret = -EFAULT;
1038                                 break;
1039                         }
1040
1041                         RGA_MSG_2_RGA2_MSG(&req_rga, &req);
1042
1043                         if (first_RGA2_proc == 0 && req.bitblt_mode == bitblt_mode && rga2_service.dev_mode == 1) {
1044                                 memcpy(&req_first, &req, sizeof(struct rga2_req));
1045                                 if ((req_first.src.act_w != req_first.dst.act_w)
1046                                                 || (req_first.src.act_h != req_first.dst.act_h)) {
1047                                         req_first.src.act_w = MIN(320, MIN(req_first.src.act_w, req_first.dst.act_w));
1048                                         req_first.src.act_h = MIN(240, MIN(req_first.src.act_h, req_first.dst.act_h));
1049                                         req_first.dst.act_w = req_first.src.act_w;
1050                                         req_first.dst.act_h = req_first.src.act_h;
1051                                         ret = rga2_blit_async(session, &req_first);
1052                                 }
1053                                 ret = rga2_blit_async(session, &req);
1054                                 first_RGA2_proc = 1;
1055                         }
1056                         else {
1057                                 ret = rga2_blit_async(session, &req);
1058                         }
1059                         break;
1060                 case RGA2_BLIT_SYNC:
1061                         if (unlikely(copy_from_user(&req, (struct rga2_req*)arg, sizeof(struct rga2_req))))
1062                         {
1063                                 ERR("copy_from_user failed\n");
1064                                 ret = -EFAULT;
1065                                 break;
1066                         }
1067                         ret = rga2_blit_sync(session, &req);
1068                         break;
1069                 case RGA2_BLIT_ASYNC:
1070                         if (unlikely(copy_from_user(&req, (struct rga2_req*)arg, sizeof(struct rga2_req))))
1071                         {
1072                                 ERR("copy_from_user failed\n");
1073                                 ret = -EFAULT;
1074                                 break;
1075                         }
1076
1077                         if((atomic_read(&rga2_service.total_running) > 16))
1078                         {
1079                                 ret = rga2_blit_sync(session, &req);
1080                         }
1081                         else
1082                         {
1083                                 ret = rga2_blit_async(session, &req);
1084                         }
1085                         break;
1086                 case RGA_FLUSH:
1087                 case RGA2_FLUSH:
1088                         ret = rga2_flush(session, arg);
1089                         break;
1090                 case RGA_GET_RESULT:
1091                 case RGA2_GET_RESULT:
1092                         ret = rga2_get_result(session, arg);
1093                         break;
1094                 case RGA_GET_VERSION:
1095                 case RGA2_GET_VERSION:
1096                         ret = copy_to_user((void __user *)arg, rga->version, 16) ? -EFAULT : 0;
1097                         break;
1098                 default:
1099                         ERR("unknown ioctl cmd!\n");
1100                         ret = -EINVAL;
1101                         break;
1102         }
1103
1104         mutex_unlock(&rga2_service.mutex);
1105
1106         return ret;
1107 }
1108
1109 #ifdef CONFIG_COMPAT
1110 static long compat_rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
1111 {
1112         struct rga2_drvdata_t *rga = rga2_drvdata;
1113         struct rga2_req req, req_first;
1114         struct rga_req_32 req_rga;
1115         int ret = 0;
1116         rga2_session *session;
1117
1118         if (!rga) {
1119                 pr_err("rga2_drvdata is null, rga2 is not init\n");
1120                 return -ENODEV;
1121         }
1122         memset(&req, 0x0, sizeof(req));
1123
1124         mutex_lock(&rga2_service.mutex);
1125
1126         session = (rga2_session *)file->private_data;
1127
1128 #if RGA2_TEST_MSG
1129         printk("use compat_rga_ioctl\n");
1130 #endif
1131
1132         if (NULL == session) {
1133                 printk("%s [%d] rga thread session is null\n",__FUNCTION__,__LINE__);
1134                 mutex_unlock(&rga2_service.mutex);
1135                 return -EINVAL;
1136         }
1137
1138         memset(&req, 0x0, sizeof(req));
1139
1140         switch (cmd) {
1141                 case RGA_BLIT_SYNC:
1142                         if (unlikely(copy_from_user(&req_rga, compat_ptr((compat_uptr_t)arg), sizeof(struct rga_req_32))))
1143                         {
1144                                 ERR("copy_from_user failed\n");
1145                                 ret = -EFAULT;
1146                                 break;
1147                         }
1148
1149                         RGA_MSG_2_RGA2_MSG_32(&req_rga, &req);
1150
1151                         if (first_RGA2_proc == 0 && req.bitblt_mode == bitblt_mode && rga2_service.dev_mode == 1) {
1152                                 memcpy(&req_first, &req, sizeof(struct rga2_req));
1153                                 if ((req_first.src.act_w != req_first.dst.act_w)
1154                                                 || (req_first.src.act_h != req_first.dst.act_h)) {
1155                                         req_first.src.act_w = MIN(320, MIN(req_first.src.act_w, req_first.dst.act_w));
1156                                         req_first.src.act_h = MIN(240, MIN(req_first.src.act_h, req_first.dst.act_h));
1157                                         req_first.dst.act_w = req_first.src.act_w;
1158                                         req_first.dst.act_h = req_first.src.act_h;
1159                                         ret = rga2_blit_async(session, &req_first);
1160                                 }
1161                                 ret = rga2_blit_sync(session, &req);
1162                                 first_RGA2_proc = 1;
1163                         }
1164                         else {
1165                                 ret = rga2_blit_sync(session, &req);
1166                         }
1167                         break;
1168                 case RGA_BLIT_ASYNC:
1169                         if (unlikely(copy_from_user(&req_rga, compat_ptr((compat_uptr_t)arg), sizeof(struct rga_req_32))))
1170                         {
1171                                 ERR("copy_from_user failed\n");
1172                                 ret = -EFAULT;
1173                                 break;
1174                         }
1175                         RGA_MSG_2_RGA2_MSG_32(&req_rga, &req);
1176
1177                         if (first_RGA2_proc == 0 && req.bitblt_mode == bitblt_mode && rga2_service.dev_mode == 1) {
1178                                 memcpy(&req_first, &req, sizeof(struct rga2_req));
1179                                 if ((req_first.src.act_w != req_first.dst.act_w)
1180                                                 || (req_first.src.act_h != req_first.dst.act_h)) {
1181                                         req_first.src.act_w = MIN(320, MIN(req_first.src.act_w, req_first.dst.act_w));
1182                                         req_first.src.act_h = MIN(240, MIN(req_first.src.act_h, req_first.dst.act_h));
1183                                         req_first.dst.act_w = req_first.src.act_w;
1184                                         req_first.dst.act_h = req_first.src.act_h;
1185                                         ret = rga2_blit_async(session, &req_first);
1186                                 }
1187                                 ret = rga2_blit_sync(session, &req);
1188                                 first_RGA2_proc = 1;
1189                         }
1190                         else {
1191                                 ret = rga2_blit_sync(session, &req);
1192                         }
1193
1194                         //if((atomic_read(&rga2_service.total_running) > 8))
1195                         //    ret = rga2_blit_sync(session, &req);
1196                         //else
1197                         //    ret = rga2_blit_async(session, &req);
1198
1199                         break;
1200                 case RGA2_BLIT_SYNC:
1201                         if (unlikely(copy_from_user(&req, compat_ptr((compat_uptr_t)arg), sizeof(struct rga2_req))))
1202                         {
1203                                 ERR("copy_from_user failed\n");
1204                                 ret = -EFAULT;
1205                                 break;
1206                         }
1207                         ret = rga2_blit_sync(session, &req);
1208                         break;
1209                 case RGA2_BLIT_ASYNC:
1210                         if (unlikely(copy_from_user(&req, compat_ptr((compat_uptr_t)arg), sizeof(struct rga2_req))))
1211                         {
1212                                 ERR("copy_from_user failed\n");
1213                                 ret = -EFAULT;
1214                                 break;
1215                         }
1216
1217                         if((atomic_read(&rga2_service.total_running) > 16))
1218                                 ret = rga2_blit_sync(session, &req);
1219                         else
1220                                 ret = rga2_blit_async(session, &req);
1221
1222                         break;
1223                 case RGA_FLUSH:
1224                 case RGA2_FLUSH:
1225                         ret = rga2_flush(session, arg);
1226                         break;
1227                 case RGA_GET_RESULT:
1228                 case RGA2_GET_RESULT:
1229                         ret = rga2_get_result(session, arg);
1230                         break;
1231                 case RGA_GET_VERSION:
1232                 case RGA2_GET_VERSION:
1233                         ret = copy_to_user((void __user *)arg, rga->version, 16) ? -EFAULT : 0;
1234                         break;
1235                 default:
1236                         ERR("unknown ioctl cmd!\n");
1237                         ret = -EINVAL;
1238                         break;
1239         }
1240
1241         mutex_unlock(&rga2_service.mutex);
1242
1243         return ret;
1244 }
1245 #endif
1246
1247
1248 long rga2_ioctl_kernel(struct rga_req *req_rga)
1249 {
1250         int ret = 0;
1251         rga2_session *session;
1252         struct rga2_req req;
1253
1254         memset(&req, 0x0, sizeof(req));
1255         mutex_lock(&rga2_service.mutex);
1256         session = &rga2_session_global;
1257         if (NULL == session)
1258         {
1259                 printk("%s [%d] rga thread session is null\n",__FUNCTION__,__LINE__);
1260                 mutex_unlock(&rga2_service.mutex);
1261                 return -EINVAL;
1262         }
1263
1264         RGA_MSG_2_RGA2_MSG(req_rga, &req);
1265         ret = rga2_blit_sync(session, &req);
1266         mutex_unlock(&rga2_service.mutex);
1267
1268         return ret;
1269 }
1270
1271
1272 static int rga2_open(struct inode *inode, struct file *file)
1273 {
1274         rga2_session *session = kzalloc(sizeof(rga2_session), GFP_KERNEL);
1275
1276         if (NULL == session) {
1277                 pr_err("unable to allocate memory for rga_session\n");
1278                 return -ENOMEM;
1279         }
1280
1281         session->pid = current->pid;
1282         INIT_LIST_HEAD(&session->waiting);
1283         INIT_LIST_HEAD(&session->running);
1284         INIT_LIST_HEAD(&session->list_session);
1285         init_waitqueue_head(&session->wait);
1286         mutex_lock(&rga2_service.lock);
1287         list_add_tail(&session->list_session, &rga2_service.session);
1288         mutex_unlock(&rga2_service.lock);
1289         atomic_set(&session->task_running, 0);
1290         atomic_set(&session->num_done, 0);
1291         file->private_data = (void *)session;
1292
1293         return nonseekable_open(inode, file);
1294 }
1295
1296 static int rga2_release(struct inode *inode, struct file *file)
1297 {
1298         int task_running;
1299         rga2_session *session = (rga2_session *)file->private_data;
1300
1301         if (NULL == session)
1302                 return -EINVAL;
1303
1304         task_running = atomic_read(&session->task_running);
1305         if (task_running)
1306         {
1307                 pr_err("rga2_service session %d still has %d task running when closing\n", session->pid, task_running);
1308                 msleep(100);
1309         }
1310
1311         wake_up(&session->wait);
1312         mutex_lock(&rga2_service.lock);
1313         list_del(&session->list_session);
1314         rga2_service_session_clear(session);
1315         kfree(session);
1316         mutex_unlock(&rga2_service.lock);
1317
1318         return 0;
1319 }
1320
1321 static irqreturn_t rga2_irq_thread(int irq, void *dev_id)
1322 {
1323         mutex_lock(&rga2_service.lock);
1324         if (rga2_service.enable) {
1325                 rga2_del_running_list();
1326                 rga2_try_set_reg();
1327         }
1328         mutex_unlock(&rga2_service.lock);
1329
1330         return IRQ_HANDLED;
1331 }
1332
1333 static irqreturn_t rga2_irq(int irq,  void *dev_id)
1334 {
1335         /*clear INT */
1336         rga2_write(rga2_read(RGA2_INT) | (0x1<<4) | (0x1<<5) | (0x1<<6) | (0x1<<7), RGA2_INT);
1337
1338         return IRQ_WAKE_THREAD;
1339 }
1340
1341 struct file_operations rga2_fops = {
1342         .owner          = THIS_MODULE,
1343         .open           = rga2_open,
1344         .release        = rga2_release,
1345         .unlocked_ioctl         = rga_ioctl,
1346 #ifdef CONFIG_COMPAT
1347         .compat_ioctl           = compat_rga_ioctl,
1348 #endif
1349 };
1350
1351 static struct miscdevice rga2_dev ={
1352         .minor = RGA2_MAJOR,
1353         .name  = "rga",
1354         .fops  = &rga2_fops,
1355 };
1356
1357 static const struct of_device_id rockchip_rga_dt_ids[] = {
1358         { .compatible = "rockchip,rga2", },
1359         {},
1360 };
1361
1362 static int rga2_drv_probe(struct platform_device *pdev)
1363 {
1364         struct rga2_drvdata_t *data;
1365         struct resource *res;
1366         int ret = 0;
1367         struct device_node *np = pdev->dev.of_node;
1368
1369         mutex_init(&rga2_service.lock);
1370         mutex_init(&rga2_service.mutex);
1371         atomic_set(&rga2_service.total_running, 0);
1372         atomic_set(&rga2_service.src_format_swt, 0);
1373         rga2_service.last_prc_src_format = 1; /* default is yuv first*/
1374         rga2_service.enable = false;
1375
1376         rga_ioctl_kernel_p = rga2_ioctl_kernel;
1377
1378         data = devm_kzalloc(&pdev->dev, sizeof(struct rga2_drvdata_t), GFP_KERNEL);
1379         if(NULL == data)
1380         {
1381                 ERR("failed to allocate driver data.\n");
1382                 return -ENOMEM;
1383         }
1384
1385         INIT_DELAYED_WORK(&data->power_off_work, rga2_power_off_work);
1386         wake_lock_init(&data->wake_lock, WAKE_LOCK_SUSPEND, "rga");
1387
1388         data->rga2 = devm_clk_get(&pdev->dev, "clk_rga");
1389         data->aclk_rga2 = devm_clk_get(&pdev->dev, "aclk_rga");
1390         data->hclk_rga2 = devm_clk_get(&pdev->dev, "hclk_rga");
1391
1392         /* map the registers */
1393         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1394         data->rga_base = devm_ioremap_resource(&pdev->dev, res);
1395         if (IS_ERR(data->rga_base)) {
1396                 ERR("rga ioremap failed\n");
1397                 ret = PTR_ERR(data->rga_base);
1398                 goto err_ioremap;
1399         }
1400
1401         /* get the IRQ */
1402         data->irq = platform_get_irq(pdev, 0);
1403         if (data->irq <= 0) {
1404                 ERR("failed to get rga irq resource (%d).\n", data->irq);
1405                 ret = data->irq;
1406                 goto err_irq;
1407         }
1408
1409         /* request the IRQ */
1410         ret = devm_request_threaded_irq(&pdev->dev, data->irq, rga2_irq, rga2_irq_thread, 0, "rga", pdev);
1411         if (ret)
1412         {
1413                 ERR("rga request_irq failed (%d).\n", ret);
1414                 goto err_irq;
1415         }
1416
1417         platform_set_drvdata(pdev, data);
1418         data->dev = &pdev->dev;
1419         rga2_drvdata = data;
1420         of_property_read_u32(np, "dev_mode", &rga2_service.dev_mode);
1421
1422 #if defined(CONFIG_ION_ROCKCHIP)
1423         data->ion_client = rockchip_ion_client_create("rga");
1424         if (IS_ERR(data->ion_client)) {
1425                 dev_err(&pdev->dev, "failed to create ion client for rga\n");
1426                 return PTR_ERR(data->ion_client);
1427         } else {
1428                 dev_info(&pdev->dev, "rga ion client create success!\n");
1429         }
1430 #endif
1431
1432         ret = misc_register(&rga2_dev);
1433         if(ret)
1434         {
1435                 ERR("cannot register miscdev (%d)\n", ret);
1436                 goto err_misc_register;
1437         }
1438 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
1439         pm_runtime_enable(&pdev->dev);
1440 #endif
1441         rga2_init_version();
1442         pr_info("Driver loaded successfully ver:%s\n", rga2_drvdata->version);
1443
1444         return 0;
1445
1446 err_misc_register:
1447         free_irq(data->irq, pdev);
1448 err_irq:
1449         iounmap(data->rga_base);
1450 err_ioremap:
1451         wake_lock_destroy(&data->wake_lock);
1452         //kfree(data);
1453
1454         return ret;
1455 }
1456
1457 static int rga2_drv_remove(struct platform_device *pdev)
1458 {
1459         struct rga2_drvdata_t *data = platform_get_drvdata(pdev);
        DBG("%s [%d]\n", __func__, __LINE__);

        misc_deregister(&(data->miscdev));
        wake_lock_destroy(&data->wake_lock);

        /*
         * The IRQ, register mapping, clocks and the drvdata allocation are
         * all devm-managed and are released automatically on driver detach;
         * freeing them here again would be a double free.
         */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
        pm_runtime_disable(&pdev->dev);
#endif

        return 0;
1477 }
1478
1479 static struct platform_driver rga2_driver = {
1480         .probe          = rga2_drv_probe,
1481         .remove         = rga2_drv_remove,
1482         .driver         = {
1483                 .owner  = THIS_MODULE,
1484                 .name   = "rga2",
1485                 .of_match_table = of_match_ptr(rockchip_rga_dt_ids),
1486         },
1487 };
1488
1489
1490 void rga2_test_0(void);
1491
1492 static int __init rga2_init(void)
1493 {
1494         int ret;
1495         uint32_t *buf_p;
1496
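        /*
         * The pre-scale table is 256 KiB (64K 32-bit entries), which the
         * RGA2 MMU code tracks as a circular pool via the front/back/size
         * fields; the page array below can describe up to 32768 pages
         * (128 MiB with 4 KiB pages).
         */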
        /* malloc pre scale mid buf mmu table */
        buf_p = kmalloc(1024 * 256, GFP_KERNEL);
        if (!buf_p)
                return -ENOMEM;

        rga2_mmu_buf.buf_virtual = buf_p;
        rga2_mmu_buf.buf = (uint32_t *)virt_to_phys((void *)((unsigned long)buf_p));
        rga2_mmu_buf.front = 0;
        rga2_mmu_buf.back = 64 * 1024;
        rga2_mmu_buf.size = 64 * 1024;

        rga2_mmu_buf.pages = kmalloc(32768 * sizeof(struct page *), GFP_KERNEL);
        if (!rga2_mmu_buf.pages) {
                kfree(buf_p);
                return -ENOMEM;
        }

        ret = platform_driver_register(&rga2_driver);
        if (ret != 0) {
                printk(KERN_ERR "Platform device register failed (%d).\n", ret);
                kfree(rga2_mmu_buf.pages);
                kfree(buf_p);
                return ret;
        }
1512
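        /*
         * Set up the driver-owned global session and put it on the service's
         * session list alongside the sessions created for userspace clients.
         */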
1513         rga2_session_global.pid = 0x0000ffff;
1514         INIT_LIST_HEAD(&rga2_session_global.waiting);
1515         INIT_LIST_HEAD(&rga2_session_global.running);
1516         INIT_LIST_HEAD(&rga2_session_global.list_session);
1517
1518         INIT_LIST_HEAD(&rga2_service.waiting);
1519         INIT_LIST_HEAD(&rga2_service.running);
1520         INIT_LIST_HEAD(&rga2_service.done);
1521         INIT_LIST_HEAD(&rga2_service.session);
1522         init_waitqueue_head(&rga2_session_global.wait);
1523         //mutex_lock(&rga_service.lock);
1524         list_add_tail(&rga2_session_global.list_session, &rga2_service.session);
1525         //mutex_unlock(&rga_service.lock);
1526         atomic_set(&rga2_session_global.task_running, 0);
1527         atomic_set(&rga2_session_global.num_done, 0);
1528
1529 #if RGA2_TEST_CASE
1530         rga2_test_0();
1531 #endif
1532
1533         INFO("Module initialized.\n");
1534
1535         return 0;
1536 }
1537
1538 static void __exit rga2_exit(void)
1539 {
1540         rga2_power_off();
1541
        kfree(rga2_mmu_buf.buf_virtual);
        /* also release the page array allocated in rga2_init() */
        kfree(rga2_mmu_buf.pages);
1544
1545         platform_driver_unregister(&rga2_driver);
1546 }
1547
1548
1549 #if RGA2_TEST_CASE
1550
1551 void rga2_test_0(void)
1552 {
1553         struct rga2_req req;
1554         rga2_session session;
1555         unsigned int *src, *dst;
1556
1557         session.pid     = current->pid;
1558         INIT_LIST_HEAD(&session.waiting);
1559         INIT_LIST_HEAD(&session.running);
1560         INIT_LIST_HEAD(&session.list_session);
1561         init_waitqueue_head(&session.wait);
1562         /* no need to protect */
1563         list_add_tail(&session.list_session, &rga2_service.session);
1564         atomic_set(&session.task_running, 0);
1565         atomic_set(&session.num_done, 0);
1566
1567         memset(&req, 0, sizeof(struct rga2_req));
        src = kmalloc(800 * 480 * 4, GFP_KERNEL);
        dst = kmalloc(800 * 480 * 4, GFP_KERNEL);
        if (!src || !dst) {
                kfree(src);
                kfree(dst);
                return;
        }
1570
1571         printk("\n********************************\n");
1572         printk("************ RGA2_TEST ************\n");
1573         printk("********************************\n\n");
1574
1575 #if 1
1576         memset(src, 0x80, 800 * 480 * 4);
1577         memset(dst, 0xcc, 800 * 480 * 4);
1578 #endif
1579 #if 0
1580         dmac_flush_range(src, &src[800 * 480]);
1581         outer_flush_range(virt_to_phys(src), virt_to_phys(&src[800 * 480]));
1582
1583         dmac_flush_range(dst, &dst[800 * 480]);
1584         outer_flush_range(virt_to_phys(dst), virt_to_phys(&dst[800 * 480]));
1585 #endif
1586
1587 #if 0
1588         req.pat.act_w = 16;
1589         req.pat.act_h = 16;
1590         req.pat.vir_w = 16;
1591         req.pat.vir_h = 16;
1592         req.pat.yrgb_addr = virt_to_phys(src);
1593         req.render_mode = 0;
1594         rga2_blit_sync(&session, &req);
1595 #endif
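        /*
         * Build a packed 10-bit test pattern in the source buffer: each row
         * is 320 * 10 / 8 bytes wide and every four samples share five
         * bytes, matching the RGA2_FORMAT_YCbCr_420_SP_10B source format
         * programmed below.
         */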
1596         {
1597                 uint32_t i, j;
1598                 uint8_t *sp;
1599
1600                 sp = (uint8_t *)src;
1601                 for (j = 0; j < 240; j++) {
1602                         sp = (uint8_t *)src + j * 320 * 10 / 8;
1603                         for (i = 0; i < 320; i++) {
1604                                 if ((i & 3) == 0) {
1605                                         sp[i * 5 / 4] = 0;
1606                                         sp[i * 5 / 4+1] = 0x1;
1607                                 } else if ((i & 3) == 1) {
1608                                         sp[i * 5 / 4+1] = 0x4;
1609                                 } else if ((i & 3) == 2) {
1610                                         sp[i * 5 / 4+1] = 0x10;
1611                                 } else if ((i & 3) == 3) {
1612                                         sp[i * 5 / 4+1] = 0x40;
                                }
1614                         }
1615                 }
1616                 sp = (uint8_t *)src;
1617                 for (j = 0; j < 100; j++)
1618                         printk("src %.2x\n", sp[j]);
1619         }
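        /*
         * Blit the 320x240 10-bit 4:2:0 semi-planar source into an 8-bit
         * 4:2:0 semi-planar destination; buffer addresses are passed through
         * the uv_addr fields as physical addresses with yrgb_addr left at 0.
         */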
1620         req.src.act_w = 320;
1621         req.src.act_h = 240;
1622
1623         req.src.vir_w = 320;
1624         req.src.vir_h = 240;
1625         req.src.yrgb_addr = 0;//(uint32_t)virt_to_phys(src);
1626         req.src.uv_addr = (unsigned long)virt_to_phys(src);
1627         req.src.v_addr = 0;
1628         req.src.format = RGA2_FORMAT_YCbCr_420_SP_10B;
1629
1630         req.dst.act_w  = 320;
1631         req.dst.act_h = 240;
1632         req.dst.x_offset = 0;
1633         req.dst.y_offset = 0;
1634
1635         req.dst.vir_w = 320;
1636         req.dst.vir_h = 240;
1637
1638         req.dst.yrgb_addr = 0;//((uint32_t)virt_to_phys(dst));
1639         req.dst.uv_addr = (unsigned long)virt_to_phys(dst);
1640         req.dst.format = RGA2_FORMAT_YCbCr_420_SP;
1641
1642         //dst = dst0;
1643
1644         //req.render_mode = color_fill_mode;
1645         //req.fg_color = 0x80ffffff;
1646
1647         req.rotate_mode = 0;
1648         req.scale_bicu_mode = 2;
1649
1650 #if 0
1651         //req.alpha_rop_flag = 0;
1652         //req.alpha_rop_mode = 0x19;
1653         //req.PD_mode = 3;
1654
1655         //req.mmu_info.mmu_flag = 0x21;
1656         //req.mmu_info.mmu_en = 1;
1657
1658         //printk("src = %.8x\n", req.src.yrgb_addr);
1659         //printk("src = %.8x\n", req.src.uv_addr);
1660         //printk("dst = %.8x\n", req.dst.yrgb_addr);
1661 #endif
1662
1663         rga2_blit_sync(&session, &req);
1664
1665 #if 0
1666         uint32_t j;
1667         for (j = 0; j < 320 * 240 * 10 / 8; j++) {
1668         if (src[j] != dst[j])
1669                 printk("error value dst not equal src j %d, s %.2x d %.2x\n",
1670                         j, src[j], dst[j]);
1671         }
1672 #endif
1673
1674 #if 1
1675         {
1676                 uint32_t j;
1677                 uint8_t *dp = (uint8_t *)dst;
1678
1679                 for (j = 0; j < 100; j++)
1680                         printk("%d %.2x\n", j, dp[j]);
1681         }
1682 #endif
1683
        kfree(src);
        kfree(dst);
1688 }
1689 #endif
1690
1691 module_init(rga2_init);
1692 module_exit(rga2_exit);
1693
1694 /* Module information */
1695 MODULE_AUTHOR("zsq@rock-chips.com");
1696 MODULE_DESCRIPTION("Driver for rga device");
1697 MODULE_LICENSE("GPL");