1 /**
2  * Copyright (C) 2015 Fuzhou Rockchip Electronics Co., Ltd
3  * author: chenhengming chm@rock-chips.com
4  *         Alpha Lin, alpha.lin@rock-chips.com
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include <linux/clk.h>
20 #include <linux/compat.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/module.h>
25 #include <linux/fs.h>
26 #include <linux/mm.h>
27 #include <linux/platform_device.h>
28 #include <linux/reset.h>
29 #include <linux/sched.h>
30 #include <linux/slab.h>
31 #include <linux/wakelock.h>
32 #include <linux/cdev.h>
33 #include <linux/of.h>
34 #include <linux/of_platform.h>
35 #include <linux/of_irq.h>
36 #include <linux/regmap.h>
37 #include <linux/mfd/syscon.h>
38 #include <linux/uaccess.h>
39 #include <linux/debugfs.h>
40 #include <linux/pm_runtime.h>
41
42 #include <linux/rockchip/cru.h>
43 #include <linux/rockchip/pmu.h>
44 #include <linux/rockchip/grf.h>
45
46 #if defined(CONFIG_ION_ROCKCHIP)
47 #include <linux/rockchip_ion.h>
48 #endif
49
50 #include <linux/rockchip-iovmm.h>
51 #include <linux/dma-buf.h>
52
53 #include "vcodec_hw_info.h"
54 #include "vcodec_hw_vpu.h"
55 #include "vcodec_hw_rkv.h"
56 #include "vcodec_hw_vpu2.h"
57
58 #include "vcodec_service.h"
59
60 /*
61  * debug flag usage:
62  * +------+-------------------+
63  * | 8bit |      24bit        |
64  * +------+-------------------+
 65  * bits  0~23 select the type of information to print
 66  * bits 24~31 select the print format
67  */
68
69 #define DEBUG_POWER                             0x00000001
70 #define DEBUG_CLOCK                             0x00000002
71 #define DEBUG_IRQ_STATUS                        0x00000004
72 #define DEBUG_IOMMU                             0x00000008
73 #define DEBUG_IOCTL                             0x00000010
74 #define DEBUG_FUNCTION                          0x00000020
75 #define DEBUG_REGISTER                          0x00000040
76 #define DEBUG_EXTRA_INFO                        0x00000080
77 #define DEBUG_TIMING                            0x00000100
78 #define DEBUG_TASK_INFO                         0x00000200
79
80 #define DEBUG_SET_REG                           0x00001000
81 #define DEBUG_GET_REG                           0x00002000
82 #define DEBUG_PPS_FILL                          0x00004000
83 #define DEBUG_IRQ_CHECK                         0x00008000
84 #define DEBUG_CACHE_32B                         0x00010000
85
86 #define PRINT_FUNCTION                          0x80000000
87 #define PRINT_LINE                              0x40000000
88
89 static int debug;
90 module_param(debug, int, S_IRUGO | S_IWUSR);
91 MODULE_PARM_DESC(debug, "bit switch for vcodec_service debug information");
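
/*
 * Usage note (illustrative): the DEBUG_* / PRINT_* flags above are OR-ed
 * together into the "debug" module parameter, e.g.
 *
 *   debug = DEBUG_IOMMU | DEBUG_IRQ_STATUS;   /* 0x0000000c */
 *
 * Assuming the module is built as vcodec_service, the value can also be
 * changed at runtime through /sys/module/vcodec_service/parameters/debug.
 */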
92
93 #define VCODEC_CLOCK_ENABLE     1
94
95 /*
96  * hardware information organization
97  *
 98  * To support multiple hardware blocks with different versions, the hardware
 99  * information is organized as follows:
 100  *
 101  * 1. First, index the hardware by register size / position.
 102  *    This information is fixed for each hardware block and does not relate
 103  *    to the runtime work flow; it is only used for resource allocation.
 104  *    Descriptor: struct vpu_hw_info
 105  *
 106  * 2. Then, index the hardware by runtime configuration.
 107  *    This information covers runtime control behaviour, including the enable
 108  *    register, irq register and other key control flags.
 109  *    Descriptor: struct vpu_task_info
 110  *
 111  * 3. Finally, in the iommu case the fd translation table is required.
 112  *    Descriptor: struct vpu_trans_info
113  */
114
115 enum VPU_FREQ {
116         VPU_FREQ_200M,
117         VPU_FREQ_266M,
118         VPU_FREQ_300M,
119         VPU_FREQ_400M,
120         VPU_FREQ_500M,
121         VPU_FREQ_600M,
122         VPU_FREQ_DEFAULT,
123         VPU_FREQ_BUT,
124 };
125
126 struct extra_info_elem {
127         u32 index;
128         u32 offset;
129 };
130
131 #define EXTRA_INFO_MAGIC        0x4C4A46
132
133 struct extra_info_for_iommu {
134         u32 magic;
135         u32 cnt;
136         struct extra_info_elem elem[20];
137 };
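
/*
 * Note: userspace appends this block after the register values in the payload
 * handed to reg_init() below; it is only honoured when .magic equals
 * EXTRA_INFO_MAGIC, and each element adds .offset to reg[.index] after the
 * fd -> iova translation (see vcodec_bufid_to_iova()).
 */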
138
139 #define MHZ                                     (1000*1000)
140 #define SIZE_REG(reg)                           ((reg)*4)
141
142 static struct vcodec_info vcodec_info_set[] = {
143         [0] = {
144                 .hw_id          = VPU_ID_8270,
145                 .hw_info        = &hw_vpu_8270,
146                 .task_info      = task_vpu,
147                 .trans_info     = trans_vpu,
148         },
149         [1] = {
150                 .hw_id          = VPU_ID_4831,
151                 .hw_info        = &hw_vpu_4831,
152                 .task_info      = task_vpu,
153                 .trans_info     = trans_vpu,
154         },
155         [2] = {
156                 .hw_id          = VPU_DEC_ID_9190,
157                 .hw_info        = &hw_vpu_9190,
158                 .task_info      = task_vpu,
159                 .trans_info     = trans_vpu,
160         },
161         [3] = {
162                 .hw_id          = HEVC_ID,
163                 .hw_info        = &hw_rkhevc,
164                 .task_info      = task_rkv,
165                 .trans_info     = trans_rkv,
166         },
167         [4] = {
168                 .hw_id          = RKV_DEC_ID,
169                 .hw_info        = &hw_rkvdec,
170                 .task_info      = task_rkv,
171                 .trans_info     = trans_rkv,
172         },
173         [5] = {
174                 .hw_id          = VPU2_ID,
175                 .hw_info        = &hw_vpu2,
176                 .task_info      = task_vpu2,
177                 .trans_info     = trans_vpu2,
178         },
179 };
180
181 #define DEBUG
182 #ifdef DEBUG
183 #define vpu_debug_func(type, fmt, args...)                      \
184         do {                                                    \
185                 if (unlikely(debug & type)) {                   \
186                         pr_info("%s:%d: " fmt,                  \
187                                  __func__, __LINE__, ##args);   \
188                 }                                               \
189         } while (0)
190 #define vpu_debug(type, fmt, args...)                           \
191         do {                                                    \
192                 if (unlikely(debug & type)) {                   \
193                         pr_info(fmt, ##args);                   \
194                 }                                               \
195         } while (0)
196 #else
197 #define vpu_debug_func(level, fmt, args...)
198 #define vpu_debug(level, fmt, args...)
199 #endif
200
201 #define vpu_debug_enter() vpu_debug_func(DEBUG_FUNCTION, "enter\n")
202 #define vpu_debug_leave() vpu_debug_func(DEBUG_FUNCTION, "leave\n")
203
204 #define vpu_err(fmt, args...)                           \
205                 pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
206
207 enum VPU_DEC_FMT {
208         VPU_DEC_FMT_H264,
209         VPU_DEC_FMT_MPEG4,
210         VPU_DEC_FMT_H263,
211         VPU_DEC_FMT_JPEG,
212         VPU_DEC_FMT_VC1,
213         VPU_DEC_FMT_MPEG2,
214         VPU_DEC_FMT_MPEG1,
215         VPU_DEC_FMT_VP6,
216         VPU_DEC_FMT_RESERV0,
217         VPU_DEC_FMT_VP7,
218         VPU_DEC_FMT_VP8,
219         VPU_DEC_FMT_AVS,
220         VPU_DEC_FMT_RES
221 };
222
223 /**
 224  * struct for a process session that connects to the vpu
225  *
226  * @author ChenHengming (2011-5-3)
227  */
228 struct vpu_session {
229         enum VPU_CLIENT_TYPE type;
230         /* a linked list of data so we can access them for debugging */
231         struct list_head list_session;
232         /* a linked list of register data waiting for process */
233         struct list_head waiting;
234         /* a linked list of register data in processing */
235         struct list_head running;
236         /* a linked list of register data processed */
237         struct list_head done;
238         wait_queue_head_t wait;
239         pid_t pid;
240         atomic_t task_running;
241 };
242
243 /**
 244  * struct for a register set submitted by a process session
245  *
246  * @author ChenHengming (2011-5-4)
247  */
248 struct vpu_reg {
249         enum VPU_CLIENT_TYPE type;
250         enum VPU_FREQ freq;
251         struct vpu_session *session;
252         struct vpu_subdev_data *data;
253         struct vpu_task_info *task;
254         const struct vpu_trans_info *trans;
255
256         /* link to vpu service session */
257         struct list_head session_link;
258         /* link to register set list */
259         struct list_head status_link;
260
261         unsigned long size;
262         struct list_head mem_region_list;
263         u32 dec_base;
264         u32 *reg;
265 };
266
267 struct vpu_device {
268         atomic_t irq_count_codec;
269         atomic_t irq_count_pp;
270         unsigned int iosize;
271         u32 *regs;
272 };
273
274 enum vcodec_device_id {
275         VCODEC_DEVICE_ID_VPU,
276         VCODEC_DEVICE_ID_HEVC,
277         VCODEC_DEVICE_ID_COMBO,
278         VCODEC_DEVICE_ID_RKVDEC,
279         VCODEC_DEVICE_ID_BUTT
280 };
281
282 enum VCODEC_RUNNING_MODE {
283         VCODEC_RUNNING_MODE_NONE = -1,
284         VCODEC_RUNNING_MODE_VPU,
285         VCODEC_RUNNING_MODE_HEVC,
286         VCODEC_RUNNING_MODE_RKVDEC
287 };
288
289 struct vcodec_mem_region {
290         struct list_head srv_lnk;
291         struct list_head reg_lnk;
292         struct list_head session_lnk;
293         unsigned long iova;     /* virtual address for iommu */
294         unsigned long len;
295         u32 reg_idx;
296         struct ion_handle *hdl;
297 };
298
299 enum vpu_ctx_state {
300         MMU_ACTIVATED   = BIT(0)
301 };
302
303 struct vpu_subdev_data {
304         struct cdev cdev;
305         dev_t dev_t;
306         struct class *cls;
307         struct device *child_dev;
308
309         int irq_enc;
310         int irq_dec;
311         struct vpu_service_info *pservice;
312
313         u32 *regs;
314         enum VCODEC_RUNNING_MODE mode;
315         struct list_head lnk_service;
316
317         struct device *dev;
318
319         struct vpu_device enc_dev;
320         struct vpu_device dec_dev;
321
322         enum VPU_HW_ID hw_id;
323         struct vpu_hw_info *hw_info;
324         struct vpu_task_info *task_info;
325         const struct vpu_trans_info *trans_info;
326
327         u32 reg_size;
328         unsigned long state;
329
330 #ifdef CONFIG_DEBUG_FS
331         struct dentry *debugfs_dir;
332         struct dentry *debugfs_file_regs;
333 #endif
334
335         struct device *mmu_dev;
336 };
337
338 struct vpu_service_info {
339         struct wake_lock wake_lock;
340         struct delayed_work power_off_work;
341         ktime_t last; /* record previous power-on time */
342         /* vpu service structure global lock */
343         struct mutex lock;
 344         /* link via status_link in struct vpu_reg */
 345         struct list_head waiting;
 346         /* link via status_link in struct vpu_reg */
 347         struct list_head running;
 348         /* link via status_link in struct vpu_reg */
 349         struct list_head done;
350         /* link to list_session in struct vpu_session */
351         struct list_head session;
352         atomic_t total_running;
353         atomic_t enabled;
354         atomic_t power_on_cnt;
355         atomic_t power_off_cnt;
356         atomic_t service_on;
357         struct mutex shutdown_lock;
358         struct vpu_reg *reg_codec;
359         struct vpu_reg *reg_pproc;
360         struct vpu_reg *reg_resev;
361         struct vpu_dec_config dec_config;
362         struct vpu_enc_config enc_config;
363
364         bool auto_freq;
365         bool bug_dec_addr;
366         atomic_t freq_status;
367
368         struct clk *aclk_vcodec;
369         struct clk *hclk_vcodec;
370         struct clk *clk_core;
371         struct clk *clk_cabac;
372         struct clk *pd_video;
373
374 #ifdef CONFIG_RESET_CONTROLLER
375         struct reset_control *rst_a;
376         struct reset_control *rst_h;
377         struct reset_control *rst_v;
378 #endif
379         struct device *dev;
380
381         u32 irq_status;
382         atomic_t reset_request;
383         struct ion_client *ion_client;
384         struct list_head mem_region_list;
385
386         enum vcodec_device_id dev_id;
387
388         enum VCODEC_RUNNING_MODE curr_mode;
389         u32 prev_mode;
390
391         struct delayed_work simulate_work;
392
393         u32 mode_bit;
394         u32 mode_ctrl;
395         u32 *reg_base;
396         u32 ioaddr;
397         struct regmap *grf;
398         u32 *grf_base;
399
400         char *name;
401
402         u32 subcnt;
403         struct list_head subdev_list;
404 };
405
406 struct vpu_request {
407         u32 *req;
408         u32 size;
409 };
410
411 #ifdef CONFIG_COMPAT
412 struct compat_vpu_request {
413         compat_uptr_t req;
414         u32 size;
415 };
416 #endif
417
 418 /* debugfs root directory for all devices (vpu, hevc). */
419 static struct dentry *parent;
420
421 #ifdef CONFIG_DEBUG_FS
422 static int vcodec_debugfs_init(void);
423 static void vcodec_debugfs_exit(void);
424 static struct dentry *vcodec_debugfs_create_device_dir(
425                 char *dirname, struct dentry *parent);
426 static int debug_vcodec_open(struct inode *inode, struct file *file);
427
428 static const struct file_operations debug_vcodec_fops = {
429         .open = debug_vcodec_open,
430         .read = seq_read,
431         .llseek = seq_lseek,
432         .release = single_release,
433 };
434 #endif
435
436 #define VDPU_SOFT_RESET_REG     101
437 #define VDPU_CLEAN_CACHE_REG    516
438 #define VEPU_CLEAN_CACHE_REG    772
439 #define HEVC_CLEAN_CACHE_REG    260
440
441 #define VPU_REG_ENABLE(base, reg)       writel_relaxed(1, base + reg)
442
443 #define VDPU_SOFT_RESET(base)   VPU_REG_ENABLE(base, VDPU_SOFT_RESET_REG)
444 #define VDPU_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, VDPU_CLEAN_CACHE_REG)
445 #define VEPU_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, VEPU_CLEAN_CACHE_REG)
446 #define HEVC_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, HEVC_CLEAN_CACHE_REG)
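
/*
 * Note: "base" in these helpers is a u32 *, so "base + reg" addresses whole
 * 32-bit registers; e.g. VDPU_CLEAN_CACHE() writes 1 at byte offset
 * VDPU_CLEAN_CACHE_REG * 4 from the block base (cf. SIZE_REG()).
 */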
447
448 #define VPU_POWER_OFF_DELAY             (4 * HZ) /* 4s */
449 #define VPU_TIMEOUT_DELAY               (2 * HZ) /* 2s */
450
451 static void time_record(struct vpu_task_info *task, int is_end)
452 {
453         if (unlikely(debug & DEBUG_TIMING) && task)
454                 do_gettimeofday((is_end) ? (&task->end) : (&task->start));
455 }
456
457 static void time_diff(struct vpu_task_info *task)
458 {
459         vpu_debug(DEBUG_TIMING, "%s task: %ld ms\n", task->name,
460                   (task->end.tv_sec  - task->start.tv_sec)  * 1000 +
461                   (task->end.tv_usec - task->start.tv_usec) / 1000);
462 }
463
464 static void vcodec_enter_mode(struct vpu_subdev_data *data)
465 {
466         int bits;
467         u32 raw = 0;
468         struct vpu_service_info *pservice = data->pservice;
469         struct vpu_subdev_data *subdata, *n;
470
471         if (pservice->subcnt < 2) {
472                 if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
473                         set_bit(MMU_ACTIVATED, &data->state);
474                         if (atomic_read(&pservice->enabled))
475                                 rockchip_iovmm_activate(data->dev);
476                         else
477                                 BUG_ON(!atomic_read(&pservice->enabled));
478                 }
479                 return;
480         }
481
482         if (pservice->curr_mode == data->mode)
483                 return;
484
485         vpu_debug(DEBUG_IOMMU, "vcodec enter mode %d\n", data->mode);
486         list_for_each_entry_safe(subdata, n,
487                                  &pservice->subdev_list, lnk_service) {
488                 if (data != subdata && subdata->mmu_dev &&
489                     test_bit(MMU_ACTIVATED, &subdata->state)) {
490                         clear_bit(MMU_ACTIVATED, &subdata->state);
491                         rockchip_iovmm_deactivate(subdata->dev);
492                 }
493         }
494         bits = 1 << pservice->mode_bit;
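            /*
             * Rockchip GRF control registers use their upper 16 bits as a
             * write-enable mask, so the "(bits << 16)" term below unlocks the
             * mode bit while the lower half sets (HEVC) or clears (other
             * modes) it.
             */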
495 #ifdef CONFIG_MFD_SYSCON
496         if (pservice->grf) {
497                 regmap_read(pservice->grf, pservice->mode_ctrl, &raw);
498
499                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
500                         regmap_write(pservice->grf, pservice->mode_ctrl,
501                                      raw | bits | (bits << 16));
502                 else
503                         regmap_write(pservice->grf, pservice->mode_ctrl,
504                                      (raw & (~bits)) | (bits << 16));
505         } else if (pservice->grf_base) {
506                 u32 *grf_base = pservice->grf_base;
507
508                 raw = readl_relaxed(grf_base + pservice->mode_ctrl / 4);
509                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
510                         writel_relaxed(raw | bits | (bits << 16),
511                                        grf_base + pservice->mode_ctrl / 4);
512                 else
513                         writel_relaxed((raw & (~bits)) | (bits << 16),
514                                        grf_base + pservice->mode_ctrl / 4);
515         } else {
 516                 vpu_err("no grf resource defined, switch decoder failed\n");
517                 return;
518         }
519 #else
520         if (pservice->grf_base) {
521                 u32 *grf_base = pservice->grf_base;
522
523                 raw = readl_relaxed(grf_base + pservice->mode_ctrl / 4);
524                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
525                         writel_relaxed(raw | bits | (bits << 16),
526                                        grf_base + pservice->mode_ctrl / 4);
527                 else
528                         writel_relaxed((raw & (~bits)) | (bits << 16),
529                                        grf_base + pservice->mode_ctrl / 4);
530         } else {
 531                 vpu_err("no grf resource defined, switch decoder failed\n");
532                 return;
533         }
534 #endif
535         if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
536                 set_bit(MMU_ACTIVATED, &data->state);
537                 if (atomic_read(&pservice->enabled))
538                         rockchip_iovmm_activate(data->dev);
539                 else
540                         BUG_ON(!atomic_read(&pservice->enabled));
541         }
542
543         pservice->prev_mode = pservice->curr_mode;
544         pservice->curr_mode = data->mode;
545 }
546
547 static void vcodec_exit_mode(struct vpu_subdev_data *data)
548 {
549         if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
550                 clear_bit(MMU_ACTIVATED, &data->state);
551                 rockchip_iovmm_deactivate(data->dev);
552         }
553         /*
 554          * In the VPU Combo case, the hardware must switch its running mode
 555          * before the other HW component starts work. Setting the current HW
 556          * running mode to none ensures it switches to its required mode.
557          */
558         data->pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
559 }
560
561 static int vpu_get_clk(struct vpu_service_info *pservice)
562 {
563 #if VCODEC_CLOCK_ENABLE
564         struct device *dev = pservice->dev;
565
566         switch (pservice->dev_id) {
567         case VCODEC_DEVICE_ID_HEVC:
568                 pservice->pd_video = devm_clk_get(dev, "pd_hevc");
569                 if (IS_ERR(pservice->pd_video)) {
570                         dev_err(dev, "failed on clk_get pd_hevc\n");
571                         return -1;
572                 }
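                    /* fall through: the hevc device also needs the clocks below */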
573         case VCODEC_DEVICE_ID_COMBO:
574         case VCODEC_DEVICE_ID_RKVDEC:
575                 pservice->clk_cabac = devm_clk_get(dev, "clk_cabac");
576                 if (IS_ERR(pservice->clk_cabac)) {
577                         dev_err(dev, "failed on clk_get clk_cabac\n");
578                         pservice->clk_cabac = NULL;
579                 }
580                 pservice->clk_core = devm_clk_get(dev, "clk_core");
581                 if (IS_ERR(pservice->clk_core)) {
582                         dev_err(dev, "failed on clk_get clk_core\n");
583                         return -1;
584                 }
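                    /* fall through: every device needs aclk_vcodec/hclk_vcodec */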
585         case VCODEC_DEVICE_ID_VPU:
586                 pservice->aclk_vcodec = devm_clk_get(dev, "aclk_vcodec");
587                 if (IS_ERR(pservice->aclk_vcodec)) {
588                         dev_err(dev, "failed on clk_get aclk_vcodec\n");
589                         return -1;
590                 }
591
592                 pservice->hclk_vcodec = devm_clk_get(dev, "hclk_vcodec");
593                 if (IS_ERR(pservice->hclk_vcodec)) {
594                         dev_err(dev, "failed on clk_get hclk_vcodec\n");
595                         return -1;
596                 }
597                 if (pservice->pd_video == NULL) {
598                         pservice->pd_video = devm_clk_get(dev, "pd_video");
599                         if (IS_ERR(pservice->pd_video)) {
600                                 pservice->pd_video = NULL;
601                                 dev_info(dev, "do not have pd_video\n");
602                         }
603                 }
604                 break;
605         default:
606                 break;
607         }
608
609         return 0;
610 #else
611         return 0;
612 #endif
613 }
614
615 static void vpu_put_clk(struct vpu_service_info *pservice)
616 {
617 #if VCODEC_CLOCK_ENABLE
618         if (pservice->pd_video)
619                 devm_clk_put(pservice->dev, pservice->pd_video);
620         if (pservice->aclk_vcodec)
621                 devm_clk_put(pservice->dev, pservice->aclk_vcodec);
622         if (pservice->hclk_vcodec)
623                 devm_clk_put(pservice->dev, pservice->hclk_vcodec);
624         if (pservice->clk_core)
625                 devm_clk_put(pservice->dev, pservice->clk_core);
626         if (pservice->clk_cabac)
627                 devm_clk_put(pservice->dev, pservice->clk_cabac);
628 #endif
629 }
630
631 static void vpu_reset(struct vpu_subdev_data *data)
632 {
633         struct vpu_service_info *pservice = data->pservice;
634         enum pmu_idle_req type = IDLE_REQ_VIDEO;
635
636         if (pservice->dev_id == VCODEC_DEVICE_ID_HEVC)
637                 type = IDLE_REQ_HEVC;
638
639         pr_info("%s: resetting...", dev_name(pservice->dev));
640
641 #if defined(CONFIG_ARCH_RK29)
642         clk_disable(aclk_ddr_vepu);
643         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, true);
644         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, true);
645         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, true);
646         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, true);
647         mdelay(10);
648         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, false);
649         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, false);
650         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, false);
651         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, false);
652         clk_enable(aclk_ddr_vepu);
653 #elif defined(CONFIG_ARCH_RK30)
654         pmu_set_idle_request(IDLE_REQ_VIDEO, true);
655         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
656         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, true);
657         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
658         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, true);
659         mdelay(1);
660         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, false);
661         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
662         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, false);
663         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
664         pmu_set_idle_request(IDLE_REQ_VIDEO, false);
665 #else
666 #endif
667         WARN_ON(pservice->reg_codec != NULL);
668         WARN_ON(pservice->reg_pproc != NULL);
669         WARN_ON(pservice->reg_resev != NULL);
670         pservice->reg_codec = NULL;
671         pservice->reg_pproc = NULL;
672         pservice->reg_resev = NULL;
673
674         pr_info("for 3288/3368...");
675 #ifdef CONFIG_RESET_CONTROLLER
676         if (pservice->rst_a && pservice->rst_h) {
677                 pr_info("reset in\n");
678                 if (pservice->rst_v)
679                         reset_control_assert(pservice->rst_v);
680                 reset_control_assert(pservice->rst_a);
681                 reset_control_assert(pservice->rst_h);
682                 udelay(5);
683                 reset_control_deassert(pservice->rst_h);
684                 reset_control_deassert(pservice->rst_a);
685                 if (pservice->rst_v)
686                         reset_control_deassert(pservice->rst_v);
687         }
688 #endif
689
690         if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
691                 clear_bit(MMU_ACTIVATED, &data->state);
692                 if (atomic_read(&pservice->enabled))
693                         rockchip_iovmm_deactivate(data->dev);
694                 else
695                         BUG_ON(!atomic_read(&pservice->enabled));
696         }
697
698         atomic_set(&pservice->reset_request, 0);
699         pr_info("done\n");
700 }
701
702 static void reg_deinit(struct vpu_subdev_data *data, struct vpu_reg *reg);
703 static void vpu_service_session_clear(struct vpu_subdev_data *data,
704                                       struct vpu_session *session)
705 {
706         struct vpu_reg *reg, *n;
707
708         list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
709                 reg_deinit(data, reg);
710         }
711         list_for_each_entry_safe(reg, n, &session->running, session_link) {
712                 reg_deinit(data, reg);
713         }
714         list_for_each_entry_safe(reg, n, &session->done, session_link) {
715                 reg_deinit(data, reg);
716         }
717 }
718
719 static void vpu_service_clear(struct vpu_subdev_data *data)
720 {
721         struct vpu_reg *reg, *n;
722         struct vpu_session *session, *s;
723         struct vpu_service_info *pservice = data->pservice;
724
725         list_for_each_entry_safe(reg, n, &pservice->waiting, status_link) {
726                 reg_deinit(data, reg);
727         }
728
 729         /* wake up session wait events to prevent a timeout hw reset
 730          * during the reboot procedure.
731          */
732         list_for_each_entry_safe(session, s,
733                                  &pservice->session, list_session)
734                 wake_up(&session->wait);
735 }
736
737 static void vpu_service_dump(struct vpu_service_info *pservice)
738 {
739 }
740
741
742 static void vpu_service_power_off(struct vpu_service_info *pservice)
743 {
744         int total_running;
745         struct vpu_subdev_data *data = NULL, *n;
746         int ret = atomic_add_unless(&pservice->enabled, -1, 0);
747
748         if (!ret)
749                 return;
750
751         total_running = atomic_read(&pservice->total_running);
752         if (total_running) {
753                 pr_alert("alert: power off when %d task running!!\n",
754                          total_running);
755                 mdelay(50);
756                 pr_alert("alert: delay 50 ms for running task\n");
757                 vpu_service_dump(pservice);
758         }
759
760         pr_info("%s: power off...", dev_name(pservice->dev));
761
762         udelay(5);
763
764         list_for_each_entry_safe(data, n, &pservice->subdev_list, lnk_service) {
765                 if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
766                         clear_bit(MMU_ACTIVATED, &data->state);
767                         rockchip_iovmm_deactivate(data->dev);
768                 }
769         }
770         pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
771
772 #if VCODEC_CLOCK_ENABLE
773                 if (pservice->pd_video)
774                         clk_disable_unprepare(pservice->pd_video);
775                 if (pservice->hclk_vcodec)
776                         clk_disable_unprepare(pservice->hclk_vcodec);
777                 if (pservice->aclk_vcodec)
778                         clk_disable_unprepare(pservice->aclk_vcodec);
779                 if (pservice->clk_core)
780                         clk_disable_unprepare(pservice->clk_core);
781                 if (pservice->clk_cabac)
782                         clk_disable_unprepare(pservice->clk_cabac);
783 #endif
784         pm_runtime_put(pservice->dev);
785
786         atomic_add(1, &pservice->power_off_cnt);
787         wake_unlock(&pservice->wake_lock);
788         pr_info("done\n");
789 }
790
791 static inline void vpu_queue_power_off_work(struct vpu_service_info *pservice)
792 {
793         queue_delayed_work(system_wq, &pservice->power_off_work,
794                            VPU_POWER_OFF_DELAY);
795 }
796
797 static void vpu_power_off_work(struct work_struct *work_s)
798 {
799         struct delayed_work *dlwork = container_of(work_s,
800                         struct delayed_work, work);
801         struct vpu_service_info *pservice = container_of(dlwork,
802                         struct vpu_service_info, power_off_work);
803
804         if (mutex_trylock(&pservice->lock)) {
805                 vpu_service_power_off(pservice);
806                 mutex_unlock(&pservice->lock);
807         } else {
808                 /* Come back later if the device is busy... */
809                 vpu_queue_power_off_work(pservice);
810         }
811 }
812
813 static void vpu_service_power_on(struct vpu_service_info *pservice)
814 {
815         int ret;
816         ktime_t now = ktime_get();
817
818         if (ktime_to_ns(ktime_sub(now, pservice->last)) > NSEC_PER_SEC) {
819                 cancel_delayed_work_sync(&pservice->power_off_work);
820                 vpu_queue_power_off_work(pservice);
821                 pservice->last = now;
822         }
823         ret = atomic_add_unless(&pservice->enabled, 1, 1);
824         if (!ret)
825                 return;
826
827         pr_info("%s: power on\n", dev_name(pservice->dev));
828
829 #define BIT_VCODEC_CLK_SEL      (1<<10)
830         if (of_machine_is_compatible("rockchip,rk3126"))
831                 writel_relaxed(readl_relaxed(RK_GRF_VIRT + RK312X_GRF_SOC_CON1)
832                         | BIT_VCODEC_CLK_SEL | (BIT_VCODEC_CLK_SEL << 16),
833                         RK_GRF_VIRT + RK312X_GRF_SOC_CON1);
834
835 #if VCODEC_CLOCK_ENABLE
836         if (pservice->aclk_vcodec)
837                 clk_prepare_enable(pservice->aclk_vcodec);
838         if (pservice->hclk_vcodec)
839                 clk_prepare_enable(pservice->hclk_vcodec);
840         if (pservice->clk_core)
841                 clk_prepare_enable(pservice->clk_core);
842         if (pservice->clk_cabac)
843                 clk_prepare_enable(pservice->clk_cabac);
844         if (pservice->pd_video)
845                 clk_prepare_enable(pservice->pd_video);
846 #endif
847         pm_runtime_get_sync(pservice->dev);
848
849         udelay(5);
850         atomic_add(1, &pservice->power_on_cnt);
851         wake_lock(&pservice->wake_lock);
852 }
853
854 static inline bool reg_check_interlace(struct vpu_reg *reg)
855 {
856         u32 type = (reg->reg[3] & (1 << 23));
857
858         return (type > 0);
859 }
860
861 static inline enum VPU_DEC_FMT reg_check_fmt(struct vpu_reg *reg)
862 {
863         enum VPU_DEC_FMT type = (enum VPU_DEC_FMT)((reg->reg[3] >> 28) & 0xf);
864
865         return type;
866 }
867
868 static inline int reg_probe_width(struct vpu_reg *reg)
869 {
870         int width_in_mb = reg->reg[4] >> 23;
871
872         return width_in_mb * 16;
873 }
874
875 static inline int reg_probe_hevc_y_stride(struct vpu_reg *reg)
876 {
877         int y_virstride = reg->reg[8];
878
879         return y_virstride;
880 }
881
882 static int vcodec_fd_to_iova(struct vpu_subdev_data *data,
883                              struct vpu_reg *reg, int fd)
884 {
885         struct vpu_service_info *pservice = data->pservice;
886         struct ion_handle *hdl;
887         int ret = 0;
888         struct vcodec_mem_region *mem_region;
889
890         hdl = ion_import_dma_buf(pservice->ion_client, fd);
891         if (IS_ERR(hdl)) {
892                 vpu_err("import dma-buf from fd %d failed\n", fd);
893                 return PTR_ERR(hdl);
894         }
895         mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
896
897         if (mem_region == NULL) {
898                 vpu_err("allocate memory for iommu memory region failed\n");
899                 ion_free(pservice->ion_client, hdl);
900                 return -ENOMEM;
901         }
902
903         mem_region->hdl = hdl;
904         if (data->mmu_dev)
905                 ret = ion_map_iommu(data->dev, pservice->ion_client,
906                                     mem_region->hdl, &mem_region->iova,
907                                     &mem_region->len);
908         else
909                 ret = ion_phys(pservice->ion_client,
910                                mem_region->hdl,
911                                (ion_phys_addr_t *)&mem_region->iova,
912                                (size_t *)&mem_region->len);
913
914         if (ret < 0) {
915                 vpu_err("fd %d ion map iommu failed\n", fd);
916                 kfree(mem_region);
917                 ion_free(pservice->ion_client, hdl);
918                 return -EFAULT;
919         }
920         INIT_LIST_HEAD(&mem_region->reg_lnk);
921         list_add_tail(&mem_region->reg_lnk, &reg->mem_region_list);
922         return mem_region->iova;
923 }
924
925 /*
 926  * NOTE: rkvdec/rkhevc put the scaling list address in the pps buffer; the
 927  * hardware reads it using the pps id found in the video stream data.
 928  *
 929  * So in the iommu case that address needs to be translated. The address is
 930  * encoded in the same 10bit fd + 22bit offset format.
 931  * Because the userspace decoder does not give the pps id in the register
 932  * file set, the kernel driver has to translate every scaling list address
 933  * in the pps buffer, i.e. 256 pps entries for H.264 and 64 for H.265.
 934  *
 935  * To optimize performance, the kernel driver asks the userspace decoder to
 936  * set all scaling list addresses in the pps buffer to the one that will be
 937  * used by the current decoding task. The kernel driver then only needs to
 938  * translate the first address and copy it into every pps entry.
939  */
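/*
 * Illustrative example of the encoding above: a stored value of 0x00002805
 * means fd = 0x005 (low 10 bits) and offset = 0x0a (upper 22 bits); the
 * translated address written back into every pps entry is iova(fd) + offset.
 */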
940 static int fill_scaling_list_addr_in_pps(
941                 struct vpu_subdev_data *data,
942                 struct vpu_reg *reg,
943                 char *pps,
944                 int pps_info_count,
945                 int pps_info_size,
946                 int scaling_list_addr_offset)
947 {
948         int base = scaling_list_addr_offset;
949         int scaling_fd = 0;
950         u32 scaling_offset;
951
952         scaling_offset  = (u32)pps[base + 0];
953         scaling_offset += (u32)pps[base + 1] << 8;
954         scaling_offset += (u32)pps[base + 2] << 16;
955         scaling_offset += (u32)pps[base + 3] << 24;
956
957         scaling_fd = scaling_offset & 0x3ff;
958         scaling_offset = scaling_offset >> 10;
959
960         if (scaling_fd > 0) {
961                 int i = 0;
962                 u32 tmp = vcodec_fd_to_iova(data, reg, scaling_fd);
963
964                 if (IS_ERR_VALUE(tmp))
965                         return -1;
966                 tmp += scaling_offset;
967
968                 for (i = 0; i < pps_info_count; i++, base += pps_info_size) {
969                         pps[base + 0] = (tmp >>  0) & 0xff;
970                         pps[base + 1] = (tmp >>  8) & 0xff;
971                         pps[base + 2] = (tmp >> 16) & 0xff;
972                         pps[base + 3] = (tmp >> 24) & 0xff;
973                 }
974         }
975
976         return 0;
977 }
978
979 static int vcodec_bufid_to_iova(struct vpu_subdev_data *data, const u8 *tbl,
980                                 int size, struct vpu_reg *reg,
981                                 struct extra_info_for_iommu *ext_inf)
982 {
983         struct vpu_service_info *pservice = data->pservice;
984         struct vpu_task_info *task = reg->task;
985         enum FORMAT_TYPE type;
986         struct ion_handle *hdl;
987         int ret = 0;
988         struct vcodec_mem_region *mem_region;
989         int i;
990         int offset = 0;
991
992         if (tbl == NULL || size <= 0) {
 993                 dev_err(pservice->dev, "invalid input arguments\n");
994                 return -1;
995         }
996
997         if (task->get_fmt)
998                 type = task->get_fmt(reg->reg);
999         else {
1000                 pr_err("invalid task with NULL get_fmt\n");
1001                 return -1;
1002         }
1003
1004         for (i = 0; i < size; i++) {
1005                 int usr_fd = reg->reg[tbl[i]] & 0x3FF;
1006
 1007                 /* if userspace did not set an fd in this register, skip it */
1008                 if (usr_fd == 0)
1009                         continue;
1010
1011                 /*
1012                  * special offset scale case
1013                  *
 1014                  * This handles the combined fd + offset translation.
 1015                  * A register is 32 bits wide, and both the buffer file
 1016                  * handle and the start address offset have to be passed,
 1017                  * so the two are packed together in the following format:
 1018                  *
 1019                  * bits  0~9  buffer file handle, range 0 ~ 1023
 1020                  * bits 10~31 offset, range 0 ~ 4M
 1021                  *
 1022                  * In the 4K case the offset can be larger than 4M, so for
 1023                  * the H.264 4K vpu/vpu2 decoder the offset is scaled by 16.
 1024                  * MPEG4 uses the same register for colmv and does not need
 1025                  * the scaling.
 1026                  *
 1027                  * RKVdec does not have this issue.
1028                  */
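                     /*
                      * Illustrative example: a register value of 0x00000c07
                      * holds fd = 7 and a stored offset of 3; for the
                      * H.264/VP9 colmv register below the byte offset becomes
                      * 3 * 16 = 48, for any other register it stays 3.
                      */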
1029                 if ((type == FMT_H264D || type == FMT_VP9D) &&
1030                     task->reg_dir_mv > 0 && task->reg_dir_mv == tbl[i])
1031                         offset = reg->reg[tbl[i]] >> 10 << 4;
1032                 else
1033                         offset = reg->reg[tbl[i]] >> 10;
1034
1035                 vpu_debug(DEBUG_IOMMU, "pos %3d fd %3d offset %10d\n",
1036                           tbl[i], usr_fd, offset);
1037
1038                 hdl = ion_import_dma_buf(pservice->ion_client, usr_fd);
1039                 if (IS_ERR(hdl)) {
1040                         dev_err(pservice->dev,
1041                                 "import dma-buf from fd %d failed, reg[%d]\n",
1042                                 usr_fd, tbl[i]);
1043                         return PTR_ERR(hdl);
1044                 }
1045
1046                 if (task->reg_pps > 0 && task->reg_pps == tbl[i]) {
1047                         int pps_info_offset;
1048                         int pps_info_count;
1049                         int pps_info_size;
1050                         int scaling_list_addr_offset;
1051
1052                         switch (type) {
1053                         case FMT_H264D: {
1054                                 pps_info_offset = offset;
1055                                 pps_info_count = 256;
1056                                 pps_info_size = 32;
1057                                 scaling_list_addr_offset = 23;
1058                         } break;
1059                         case FMT_H265D: {
1060                                 pps_info_offset = 0;
1061                                 pps_info_count = 64;
1062                                 pps_info_size = 80;
1063                                 scaling_list_addr_offset = 74;
1064                         } break;
1065                         default: {
1066                                 pps_info_offset = 0;
1067                                 pps_info_count = 0;
1068                                 pps_info_size = 0;
1069                                 scaling_list_addr_offset = 0;
1070                         } break;
1071                         }
1072
1073                         vpu_debug(DEBUG_PPS_FILL,
1074                                   "scaling list filling parameter:\n");
1075                         vpu_debug(DEBUG_PPS_FILL,
1076                                   "pps_info_offset %d\n", pps_info_offset);
1077                         vpu_debug(DEBUG_PPS_FILL,
1078                                   "pps_info_count  %d\n", pps_info_count);
1079                         vpu_debug(DEBUG_PPS_FILL,
1080                                   "pps_info_size   %d\n", pps_info_size);
1081                         vpu_debug(DEBUG_PPS_FILL,
1082                                   "scaling_list_addr_offset %d\n",
1083                                   scaling_list_addr_offset);
1084
1085                         if (pps_info_count) {
1086                                 char *pps = (char *)ion_map_kernel(
1087                                                 pservice->ion_client, hdl);
1088                                 vpu_debug(DEBUG_PPS_FILL,
1089                                           "scaling list setting pps %p\n", pps);
1090                                 pps += pps_info_offset;
1091
1092                                 if (fill_scaling_list_addr_in_pps(
1093                                                 data, reg, pps,
1094                                                 pps_info_count,
1095                                                 pps_info_size,
1096                                                 scaling_list_addr_offset) < 0) {
1097                                         ion_free(pservice->ion_client, hdl);
1098                                         return -1;
1099                                 }
1100                         }
1101                 }
1102
1103                 mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
1104
1105                 if (!mem_region) {
1106                         ion_free(pservice->ion_client, hdl);
1107                         return -ENOMEM;
1108                 }
1109
1110                 mem_region->hdl = hdl;
1111                 mem_region->reg_idx = tbl[i];
1112
1113                 if (data->mmu_dev)
1114                         ret = ion_map_iommu(data->dev,
1115                                             pservice->ion_client,
1116                                             mem_region->hdl,
1117                                             &mem_region->iova,
1118                                             &mem_region->len);
1119                 else
1120                         ret = ion_phys(pservice->ion_client,
1121                                        mem_region->hdl,
1122                                        (ion_phys_addr_t *)&mem_region->iova,
1123                                        (size_t *)&mem_region->len);
1124
1125                 if (ret < 0) {
1126                         dev_err(pservice->dev, "reg %d fd %d ion map iommu failed\n",
1127                                 tbl[i], usr_fd);
1128                         kfree(mem_region);
1129                         ion_free(pservice->ion_client, hdl);
1130                         return ret;
1131                 }
1132
1133                 /*
 1134                  * special case for vpu dec register 12: record the decoded
 1135                  * length (a hack for reporting the decoded length)
 1136                  * NOTE: not a perfect fix, the fd is not recorded
1137                  */
1138                 if (task->reg_len > 0 && task->reg_len == tbl[i]) {
1139                         reg->dec_base = mem_region->iova + offset;
1140                         vpu_debug(DEBUG_REGISTER, "dec_set %08x\n",
1141                                   reg->dec_base);
1142                 }
1143
1144                 reg->reg[tbl[i]] = mem_region->iova + offset;
1145                 INIT_LIST_HEAD(&mem_region->reg_lnk);
1146                 list_add_tail(&mem_region->reg_lnk, &reg->mem_region_list);
1147         }
1148
1149         if (ext_inf != NULL && ext_inf->magic == EXTRA_INFO_MAGIC) {
1150                 for (i = 0; i < ext_inf->cnt; i++) {
1151                         vpu_debug(DEBUG_IOMMU, "reg[%d] + offset %d\n",
1152                                   ext_inf->elem[i].index,
1153                                   ext_inf->elem[i].offset);
1154                         reg->reg[ext_inf->elem[i].index] +=
1155                                 ext_inf->elem[i].offset;
1156                 }
1157         }
1158
1159         return 0;
1160 }
1161
1162 static int vcodec_reg_address_translate(struct vpu_subdev_data *data,
1163                                         struct vpu_reg *reg,
1164                                         struct extra_info_for_iommu *ext_inf)
1165 {
1166         enum FORMAT_TYPE type = reg->task->get_fmt(reg->reg);
1167
1168         if (type < FMT_TYPE_BUTT) {
1169                 const struct vpu_trans_info *info = &reg->trans[type];
1170                 const u8 *tbl = info->table;
1171                 int size = info->count;
1172
1173                 return vcodec_bufid_to_iova(data, tbl, size, reg, ext_inf);
1174         }
1175         pr_err("found invalid format type!\n");
1176         return -1;
1177 }
1178
1179 static void get_reg_freq(struct vpu_subdev_data *data, struct vpu_reg *reg)
1180 {
1181
1182         if (!of_machine_is_compatible("rockchip,rk2928g")) {
1183                 if (reg->type == VPU_DEC || reg->type == VPU_DEC_PP) {
1184                         if (reg_check_fmt(reg) == VPU_DEC_FMT_H264) {
1185                                 if (reg_probe_width(reg) > 3200) {
 1186                                         /* raise frequency for 4K AVC */
1187                                         reg->freq = VPU_FREQ_600M;
1188                                 }
1189                         } else {
1190                                 if (reg_check_interlace(reg))
1191                                         reg->freq = VPU_FREQ_400M;
1192                         }
1193                 }
1194                 if (data->hw_id == HEVC_ID) {
1195                         if (reg_probe_hevc_y_stride(reg) > 60000)
1196                                 reg->freq = VPU_FREQ_400M;
1197                 }
1198                 if (reg->type == VPU_PP)
1199                         reg->freq = VPU_FREQ_400M;
1200         }
1201 }
1202
1203 static struct vpu_reg *reg_init(struct vpu_subdev_data *data,
1204                                 struct vpu_session *session,
1205                                 void __user *src, u32 size)
1206 {
1207         struct vpu_service_info *pservice = data->pservice;
1208         int extra_size = 0;
1209         struct extra_info_for_iommu extra_info;
1210         struct vpu_reg *reg = kzalloc(sizeof(*reg) + data->reg_size,
1211                                       GFP_KERNEL);
1212
1213         vpu_debug_enter();
1214
1215         if (NULL == reg) {
 1216                 vpu_err("error: kzalloc failed\n");
1217                 return NULL;
1218         }
1219
1220         if (size > data->reg_size) {
1221                 extra_size = size - data->reg_size;
1222                 size = data->reg_size;
1223         }
1224         reg->session = session;
1225         reg->data = data;
1226         reg->type = session->type;
1227         reg->size = size;
1228         reg->freq = VPU_FREQ_DEFAULT;
1229         reg->task = &data->task_info[session->type];
1230         reg->trans = data->trans_info;
1231         reg->reg = (u32 *)&reg[1];
1232         INIT_LIST_HEAD(&reg->session_link);
1233         INIT_LIST_HEAD(&reg->status_link);
1234
1235         INIT_LIST_HEAD(&reg->mem_region_list);
1236
1237         if (copy_from_user(&reg->reg[0], (void __user *)src, size)) {
1238                 vpu_err("error: copy_from_user failed\n");
1239                 kfree(reg);
1240                 return NULL;
1241         }
1242
             /* never copy more than the extra_info struct can hold */
             if (extra_size > sizeof(extra_info))
                     extra_size = sizeof(extra_info);
 1243         if (copy_from_user(&extra_info, (u8 *)src + size, extra_size)) {
1244                 vpu_err("error: copy_from_user failed\n");
1245                 kfree(reg);
1246                 return NULL;
1247         }
1248
1249         if (0 > vcodec_reg_address_translate(data, reg, &extra_info)) {
1250                 int i = 0;
1251
1252                 vpu_err("error: translate reg address failed, dumping regs\n");
1253                 for (i = 0; i < size >> 2; i++)
1254                         pr_err("reg[%02d]: %08x\n", i, *((u32 *)src + i));
1255
1256                 kfree(reg);
1257                 return NULL;
1258         }
1259
1260         mutex_lock(&pservice->lock);
1261         list_add_tail(&reg->status_link, &pservice->waiting);
1262         list_add_tail(&reg->session_link, &session->waiting);
1263         mutex_unlock(&pservice->lock);
1264
1265         if (pservice->auto_freq)
1266                 get_reg_freq(data, reg);
1267
1268         vpu_debug_leave();
1269         return reg;
1270 }
1271
1272 static void reg_deinit(struct vpu_subdev_data *data, struct vpu_reg *reg)
1273 {
1274         struct vpu_service_info *pservice = data->pservice;
1275         struct vcodec_mem_region *mem_region = NULL, *n;
1276
1277         list_del_init(&reg->session_link);
1278         list_del_init(&reg->status_link);
1279         if (reg == pservice->reg_codec)
1280                 pservice->reg_codec = NULL;
1281         if (reg == pservice->reg_pproc)
1282                 pservice->reg_pproc = NULL;
1283
1284         /* release memory region attach to this registers table. */
1285         list_for_each_entry_safe(mem_region, n,
1286                         &reg->mem_region_list, reg_lnk) {
1287                 ion_free(pservice->ion_client, mem_region->hdl);
1288                 list_del_init(&mem_region->reg_lnk);
1289                 kfree(mem_region);
1290         }
1291
1292         kfree(reg);
1293 }
1294
1295 static void reg_from_wait_to_run(struct vpu_service_info *pservice,
1296                                  struct vpu_reg *reg)
1297 {
1298         vpu_debug_enter();
1299         list_del_init(&reg->status_link);
1300         list_add_tail(&reg->status_link, &pservice->running);
1301
1302         list_del_init(&reg->session_link);
1303         list_add_tail(&reg->session_link, &reg->session->running);
1304         vpu_debug_leave();
1305 }
1306
1307 static void reg_copy_from_hw(struct vpu_reg *reg, u32 *src, u32 count)
1308 {
1309         int i;
1310         u32 *dst = reg->reg;
1311
1312         vpu_debug_enter();
1313         for (i = 0; i < count; i++, src++)
1314                 *dst++ = readl_relaxed(src);
1315
1316         dst = (u32 *)&reg->reg[0];
1317         for (i = 0; i < count; i++)
1318                 vpu_debug(DEBUG_GET_REG, "get reg[%02d] %08x\n", i, dst[i]);
1319
1320         vpu_debug_leave();
1321 }
1322
1323 static void reg_from_run_to_done(struct vpu_subdev_data *data,
1324                                  struct vpu_reg *reg)
1325 {
1326         struct vpu_service_info *pservice = data->pservice;
1327         struct vpu_hw_info *hw_info = data->hw_info;
1328         struct vpu_task_info *task = reg->task;
1329
1330         vpu_debug_enter();
1331
1332         list_del_init(&reg->status_link);
1333         list_add_tail(&reg->status_link, &pservice->done);
1334
1335         list_del_init(&reg->session_link);
1336         list_add_tail(&reg->session_link, &reg->session->done);
1337
1338         switch (reg->type) {
1339         case VPU_ENC: {
1340                 pservice->reg_codec = NULL;
1341                 reg_copy_from_hw(reg, data->enc_dev.regs, hw_info->enc_reg_num);
1342                 reg->reg[task->reg_irq] = pservice->irq_status;
1343         } break;
1344         case VPU_DEC: {
1345                 pservice->reg_codec = NULL;
1346                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1347
1348                 /* revert hack for decoded length */
1349                 if (task->reg_len > 0) {
1350                         int reg_len = task->reg_len;
1351                         u32 dec_get = reg->reg[reg_len];
1352                         s32 dec_length = dec_get - reg->dec_base;
1353
1354                         vpu_debug(DEBUG_REGISTER,
1355                                   "dec_get %08x dec_length %d\n",
1356                                   dec_get, dec_length);
1357                         reg->reg[reg_len] = dec_length << 10;
1358                 }
1359
1360                 reg->reg[task->reg_irq] = pservice->irq_status;
1361         } break;
1362         case VPU_PP: {
1363                 pservice->reg_pproc = NULL;
1364                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1365                 writel_relaxed(0, data->dec_dev.regs + task->reg_irq);
1366         } break;
1367         case VPU_DEC_PP: {
1368                 u32 pipe_mode;
1369                 u32 *regs = data->dec_dev.regs;
1370
1371                 pservice->reg_codec = NULL;
1372                 pservice->reg_pproc = NULL;
1373
1374                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1375
1376                 /* NOTE: remove pp pipeline mode flag first */
1377                 pipe_mode = readl_relaxed(regs + task->reg_pipe);
1378                 pipe_mode &= ~task->pipe_mask;
1379                 writel_relaxed(pipe_mode, regs + task->reg_pipe);
1380
1381                 /* revert hack for decoded length */
1382                 if (task->reg_len > 0) {
1383                         int reg_len = task->reg_len;
1384                         u32 dec_get = reg->reg[reg_len];
1385                         s32 dec_length = dec_get - reg->dec_base;
1386
1387                         vpu_debug(DEBUG_REGISTER,
1388                                   "dec_get %08x dec_length %d\n",
1389                                   dec_get, dec_length);
1390                         reg->reg[reg_len] = dec_length << 10;
1391                 }
1392
1393                 reg->reg[task->reg_irq] = pservice->irq_status;
1394         } break;
1395         default: {
1396                 vpu_err("error: copy reg from hw with unknown type %d\n",
1397                         reg->type);
1398         } break;
1399         }
1400         vcodec_exit_mode(data);
1401
1402         atomic_sub(1, &reg->session->task_running);
1403         atomic_sub(1, &pservice->total_running);
1404         wake_up(&reg->session->wait);
1405
1406         vpu_debug_leave();
1407 }
1408
1409 static void vpu_service_set_freq(struct vpu_service_info *pservice,
1410                                  struct vpu_reg *reg)
1411 {
1412         enum VPU_FREQ curr = atomic_read(&pservice->freq_status);
1413
1414         if (curr == reg->freq)
1415                 return;
1416
1417         atomic_set(&pservice->freq_status, reg->freq);
1418         switch (reg->freq) {
1419         case VPU_FREQ_200M: {
1420                 clk_set_rate(pservice->aclk_vcodec, 200*MHZ);
1421         } break;
1422         case VPU_FREQ_266M: {
1423                 clk_set_rate(pservice->aclk_vcodec, 266*MHZ);
1424         } break;
1425         case VPU_FREQ_300M: {
1426                 clk_set_rate(pservice->aclk_vcodec, 300*MHZ);
1427         } break;
1428         case VPU_FREQ_400M: {
1429                 clk_set_rate(pservice->aclk_vcodec, 400*MHZ);
1430         } break;
1431         case VPU_FREQ_500M: {
1432                 clk_set_rate(pservice->aclk_vcodec, 500*MHZ);
1433         } break;
1434         case VPU_FREQ_600M: {
1435                 clk_set_rate(pservice->aclk_vcodec, 600*MHZ);
1436         } break;
1437         default: {
1438                 unsigned long rate = 300*MHZ;
1439
1440                 if (of_machine_is_compatible("rockchip,rk2928g"))
1441                         rate = 400*MHZ;
1442
1443                 clk_set_rate(pservice->aclk_vcodec, rate);
1444         } break;
1445         }
1446 }
1447
1448 static void reg_copy_to_hw(struct vpu_subdev_data *data, struct vpu_reg *reg)
1449 {
1450         struct vpu_service_info *pservice = data->pservice;
1451         struct vpu_task_info *task = reg->task;
1452         struct vpu_hw_info *hw_info = data->hw_info;
1453         int i;
1454         u32 *src = (u32 *)&reg->reg[0];
1455         u32 enable_mask = task->enable_mask;
1456         u32 gating_mask = task->gating_mask;
1457         u32 reg_en = task->reg_en;
1458
1459         vpu_debug_enter();
1460
1461         atomic_add(1, &pservice->total_running);
1462         atomic_add(1, &reg->session->task_running);
1463
1464         if (pservice->auto_freq)
1465                 vpu_service_set_freq(pservice, reg);
1466
1467         vcodec_enter_mode(data);
1468
1469         switch (reg->type) {
1470         case VPU_ENC: {
1471                 u32 *dst = data->enc_dev.regs;
1472                 u32 base = 0;
1473                 u32 end  = hw_info->enc_reg_num;
1474                 /* u32 reg_gating = task->reg_gating; */
1475
1476                 pservice->reg_codec = reg;
1477
1478                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1479                           base, end, reg_en, enable_mask, gating_mask);
1480
1481                 VEPU_CLEAN_CACHE(dst);
1482
1483                 if (debug & DEBUG_SET_REG)
1484                         for (i = base; i < end; i++)
1485                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1486                                           i, src[i]);
1487
1488                 /*
1489                  * NOTE: the encoder needs to set up its mode first
1490                  */
1491                 writel_relaxed(src[reg_en] & enable_mask, dst + reg_en);
1492
1493                 /* NOTE: encoder gating is not on enable register */
1494                 /* src[reg_gating] |= gating_mask; */
1495
1496                 for (i = base; i < end; i++) {
1497                         if (i != reg_en)
1498                                 writel_relaxed(src[i], dst + i);
1499                 }
1500
1501                 writel(src[reg_en], dst + reg_en);
1502                 dsb(sy);
1503
1504                 time_record(reg->task, 0);
1505         } break;
1506         case VPU_DEC: {
1507                 u32 *dst = data->dec_dev.regs;
1508                 u32 len = hw_info->dec_reg_num;
1509                 u32 base = hw_info->base_dec;
1510                 u32 end  = hw_info->end_dec;
1511
1512                 pservice->reg_codec = reg;
1513
1514                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1515                           base, end, reg_en, enable_mask, gating_mask);
1516
1517                 VDPU_CLEAN_CACHE(dst);
1518
1519                 /* on rkvdec set cache size to 64 bytes */
1520                 if (pservice->dev_id == VCODEC_DEVICE_ID_RKVDEC) {
1521                         u32 *cache_base = dst + 0x100;
1522                         u32 val = (debug & DEBUG_CACHE_32B) ? (0x3) : (0x13);
1523                         writel_relaxed(val, cache_base + 0x07);
1524                         writel_relaxed(val, cache_base + 0x17);
1525                 }
1526
1527                 if (debug & DEBUG_SET_REG)
1528                         for (i = 0; i < len; i++)
1529                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1530                                           i, src[i]);
1531
1532                 /*
1533                  * NOTE: The end register is invalid. Do NOT write to it.
1534                  *       Also, the base register must be written.
1535                  */
1536                 for (i = base; i < end; i++) {
1537                         if (i != reg_en)
1538                                 writel_relaxed(src[i], dst + i);
1539                 }
1540
1541                 writel(src[reg_en] | gating_mask, dst + reg_en);
1542                 dsb(sy);
1543
1544                 time_record(reg->task, 0);
1545         } break;
1546         case VPU_PP: {
1547                 u32 *dst = data->dec_dev.regs;
1548                 u32 base = hw_info->base_pp;
1549                 u32 end  = hw_info->end_pp;
1550
1551                 pservice->reg_pproc = reg;
1552
1553                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1554                           base, end, reg_en, enable_mask, gating_mask);
1555
1556                 if (debug & DEBUG_SET_REG)
1557                         for (i = base; i < end; i++)
1558                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1559                                           i, src[i]);
1560
1561                 for (i = base; i < end; i++) {
1562                         if (i != reg_en)
1563                                 writel_relaxed(src[i], dst + i);
1564                 }
1565
1566                 writel(src[reg_en] | gating_mask, dst + reg_en);
1567                 dsb(sy);
1568
1569                 time_record(reg->task, 0);
1570         } break;
1571         case VPU_DEC_PP: {
1572                 u32 *dst = data->dec_dev.regs;
1573                 u32 base = hw_info->base_dec_pp;
1574                 u32 end  = hw_info->end_dec_pp;
1575
1576                 pservice->reg_codec = reg;
1577                 pservice->reg_pproc = reg;
1578
1579                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1580                           base, end, reg_en, enable_mask, gating_mask);
1581
1582                 /* VDPU_SOFT_RESET(dst); */
1583                 VDPU_CLEAN_CACHE(dst);
1584
1585                 if (debug & DEBUG_SET_REG)
1586                         for (i = base; i < end; i++)
1587                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1588                                           i, src[i]);
1589
1590                 for (i = base; i < end; i++) {
1591                         if (i != reg_en)
1592                                 writel_relaxed(src[i], dst + i);
1593                 }
1594
1595                 /* NOTE: dec output must be disabled */
1596
1597                 writel(src[reg_en] | gating_mask, dst + reg_en);
1598                 dsb(sy);
1599
1600                 time_record(reg->task, 0);
1601         } break;
1602         default: {
1603                 vpu_err("error: unsupported session type %d\n", reg->type);
1604                 atomic_sub(1, &pservice->total_running);
1605                 atomic_sub(1, &reg->session->task_running);
1606         } break;
1607         }
1608
1609         vpu_debug_leave();
1610 }
1611
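/*
 * try_set_reg - dispatch the next waiting task if the hardware is free.
 *
 * Called with pservice->lock held; takes shutdown_lock to check that the
 * service is still on. A pending reset request is honoured only when
 * neither the codec nor the post-processor is busy.
 */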
1612 static void try_set_reg(struct vpu_subdev_data *data)
1613 {
1614         struct vpu_service_info *pservice = data->pservice;
1615
1616         vpu_debug_enter();
1617
1618         mutex_lock(&pservice->shutdown_lock);
1619         if (atomic_read(&pservice->service_on) == 0) {
1620                 mutex_unlock(&pservice->shutdown_lock);
1621                 return;
1622         }
1623         if (!list_empty(&pservice->waiting)) {
1624                 struct vpu_reg *reg_codec = pservice->reg_codec;
1625                 struct vpu_reg *reg_pproc = pservice->reg_pproc;
1626                 int can_set = 0;
1627                 bool change_able = (reg_codec == NULL) && (reg_pproc == NULL);
1628                 int reset_request = atomic_read(&pservice->reset_request);
1629                 struct vpu_reg *reg = list_entry(pservice->waiting.next,
1630                                 struct vpu_reg, status_link);
1631
1632                 vpu_service_power_on(pservice);
1633
1634                 if (change_able || !reset_request) {
1635                         switch (reg->type) {
1636                         case VPU_ENC: {
1637                                 if (change_able)
1638                                         can_set = 1;
1639                         } break;
1640                         case VPU_DEC: {
1641                                 if (reg_codec == NULL)
1642                                         can_set = 1;
1643                                 if (pservice->auto_freq && (reg_pproc != NULL))
1644                                         can_set = 0;
1645                         } break;
1646                         case VPU_PP: {
1647                                 if (reg_codec == NULL) {
1648                                         if (reg_pproc == NULL)
1649                                                 can_set = 1;
1650                                 } else {
1651                                         if ((reg_codec->type == VPU_DEC) &&
1652                                             (reg_pproc == NULL))
1653                                                 can_set = 1;
1654
1655                                         /*
1656                                          * NOTE:
1657                          * cannot change frequency
1658                          * while the vpu is working
1659                                          */
1660                                         if (pservice->auto_freq)
1661                                                 can_set = 0;
1662                                 }
1663                         } break;
1664                         case VPU_DEC_PP: {
1665                                 if (change_able)
1666                                         can_set = 1;
1667                         } break;
1668                         default: {
1669                                 pr_err("undefined reg type %d\n", reg->type);
1670                         } break;
1671                         }
1672                 }
1673
1674                 /* then check reset request */
1675                 if (reset_request && !change_able)
1676                         reset_request = 0;
1677
1678                 /* do reset before setting registers */
1679                 if (reset_request)
1680                         vpu_reset(data);
1681
1682                 if (can_set) {
1683                         reg_from_wait_to_run(pservice, reg);
1684                         reg_copy_to_hw(reg->data, reg);
1685                 }
1686         }
1687
1688         mutex_unlock(&pservice->shutdown_lock);
1689         vpu_debug_leave();
1690 }
1691
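/*
 * return_reg - copy a finished register set back to user space and release
 * the task. The copy starts at the register base matching the session type
 * (encoder, decoder or post-processor).
 */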
1692 static int return_reg(struct vpu_subdev_data *data,
1693                       struct vpu_reg *reg, u32 __user *dst)
1694 {
1695         struct vpu_hw_info *hw_info = data->hw_info;
1696         size_t size = reg->size;
1697         u32 base;
1698
1699         vpu_debug_enter();
1700         switch (reg->type) {
1701         case VPU_ENC: {
1702                 base = 0;
1703         } break;
1704         case VPU_DEC: {
1705                 base = hw_info->base_dec_pp;
1706         } break;
1707         case VPU_PP: {
1708                 base = hw_info->base_pp;
1709         } break;
1710         case VPU_DEC_PP: {
1711                 base = hw_info->base_dec_pp;
1712         } break;
1713         default: {
1714                 vpu_err("error: copy reg to user with unknown type %d\n",
1715                         reg->type);
1716                 return -EFAULT;
1717         } break;
1718         }
1719
1720         if (copy_to_user(dst, &reg->reg[base], size)) {
1721                 vpu_err("error: copy_to_user failed\n");
1722                 return -EFAULT;
1723         }
1724
1725         reg_deinit(data, reg);
1726         vpu_debug_leave();
1727         return 0;
1728 }
1729
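/*
 * vpu_service_ioctl - main user-space entry point.
 *
 * VPU_IOC_SET_REG queues a register set and tries to start it at once;
 * VPU_IOC_GET_REG blocks (up to VPU_TIMEOUT_DELAY) until a task of this
 * session is done and copies its registers back. On timeout the hardware
 * state is dumped, the hardware is reset and the session queues cleared.
 */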
1730 static long vpu_service_ioctl(struct file *filp, unsigned int cmd,
1731                               unsigned long arg)
1732 {
1733         struct vpu_subdev_data *data =
1734                 container_of(filp->f_path.dentry->d_inode->i_cdev,
1735                              struct vpu_subdev_data, cdev);
1736         struct vpu_service_info *pservice = data->pservice;
1737         struct vpu_session *session = (struct vpu_session *)filp->private_data;
1738
1739         vpu_debug_enter();
1740         if (NULL == session)
1741                 return -EINVAL;
1742
1743         switch (cmd) {
1744         case VPU_IOC_SET_CLIENT_TYPE: {
1745                 session->type = (enum VPU_CLIENT_TYPE)arg;
1746                 vpu_debug(DEBUG_IOCTL, "pid %d set client type %d\n",
1747                           session->pid, session->type);
1748         } break;
1749         case VPU_IOC_GET_HW_FUSE_STATUS: {
1750                 struct vpu_request req;
1751
1752                 vpu_debug(DEBUG_IOCTL, "pid %d get hw status %d\n",
1753                           session->pid, session->type);
1754                 if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
1755                         vpu_err("error: get hw status copy_from_user failed\n");
1756                         return -EFAULT;
1757                 } else {
1758                         void *config = (session->type != VPU_ENC) ?
1759                                        ((void *)&pservice->dec_config) :
1760                                        ((void *)&pservice->enc_config);
1761                         size_t size = (session->type != VPU_ENC) ?
1762                                       (sizeof(struct vpu_dec_config)) :
1763                                       (sizeof(struct vpu_enc_config));
1764                         if (copy_to_user((void __user *)req.req,
1765                                          config, size)) {
1766                                 vpu_err("error: get hw status copy_to_user failed type %d\n",
1767                                         session->type);
1768                                 return -EFAULT;
1769                         }
1770                 }
1771         } break;
1772         case VPU_IOC_SET_REG: {
1773                 struct vpu_request req;
1774                 struct vpu_reg *reg;
1775
1776                 vpu_debug(DEBUG_IOCTL, "pid %d set reg type %d\n",
1777                           session->pid, session->type);
1778                 if (copy_from_user(&req, (void __user *)arg,
1779                                    sizeof(struct vpu_request))) {
1780                         vpu_err("error: set reg copy_from_user failed\n");
1781                         return -EFAULT;
1782                 }
1783
1784                 reg = reg_init(data, session, (void __user *)req.req, req.size);
1785                 if (NULL == reg) {
1786                         return -EFAULT;
1787                 } else {
1788                         mutex_lock(&pservice->lock);
1789                         try_set_reg(data);
1790                         mutex_unlock(&pservice->lock);
1791                 }
1792         } break;
1793         case VPU_IOC_GET_REG: {
1794                 struct vpu_request req;
1795                 struct vpu_reg *reg;
1796                 int ret;
1797
1798                 vpu_debug(DEBUG_IOCTL, "pid %d get reg type %d\n",
1799                           session->pid, session->type);
1800                 if (copy_from_user(&req, (void __user *)arg,
1801                                    sizeof(struct vpu_request))) {
1802                         vpu_err("error: get reg copy_from_user failed\n");
1803                         return -EFAULT;
1804                 }
1805
1806                 ret = wait_event_timeout(session->wait,
1807                                          !list_empty(&session->done),
1808                                          VPU_TIMEOUT_DELAY);
1809
1810                 if (!list_empty(&session->done)) {
1811                         if (ret < 0)
1812                                 vpu_err("warning: pid %d wait task error ret %d\n",
1813                                         session->pid, ret);
1814                         ret = 0;
1815                 } else {
1816                         if (unlikely(ret < 0)) {
1817                                 vpu_err("error: pid %d wait task ret %d\n",
1818                                         session->pid, ret);
1819                         } else if (ret == 0) {
1820                                 vpu_err("error: pid %d wait %d task done timeout\n",
1821                                         session->pid,
1822                                         atomic_read(&session->task_running));
1823                                 ret = -ETIMEDOUT;
1824                         }
1825                 }
1826
1827                 if (ret < 0) {
1828                         int task_running = atomic_read(&session->task_running);
1829
1830                         mutex_lock(&pservice->lock);
1831                         vpu_service_dump(pservice);
1832                         if (task_running) {
1833                                 atomic_set(&session->task_running, 0);
1834                                 atomic_sub(task_running,
1835                                            &pservice->total_running);
1836                                 pr_err("%d tasks are still running and have not returned, resetting hardware...",
1837                                        task_running);
1838                                 vpu_reset(data);
1839                                 pr_err("done\n");
1840                         }
1841                         vpu_service_session_clear(data, session);
1842                         mutex_unlock(&pservice->lock);
1843                         return ret;
1844                 }
1845
1846                 mutex_lock(&pservice->lock);
1847                 reg = list_entry(session->done.next,
1848                                  struct vpu_reg, session_link);
1849                 return_reg(data, reg, (u32 __user *)req.req);
1850                 mutex_unlock(&pservice->lock);
1851         } break;
1852         case VPU_IOC_PROBE_IOMMU_STATUS: {
1853                 int iommu_enable = 1;
1854
1855                 vpu_debug(DEBUG_IOCTL, "VPU_IOC_PROBE_IOMMU_STATUS\n");
1856
1857                 if (copy_to_user((void __user *)arg,
1858                                  &iommu_enable, sizeof(int))) {
1859                         vpu_err("error: iommu status copy_to_user failed\n");
1860                         return -EFAULT;
1861                 }
1862         } break;
1863         default: {
1864                 vpu_err("error: unknown vpu service ioctl cmd %x\n", cmd);
1865         } break;
1866         }
1867         vpu_debug_leave();
1868         return 0;
1869 }
1870
1871 #ifdef CONFIG_COMPAT
1872 static long compat_vpu_service_ioctl(struct file *filp, unsigned int cmd,
1873                                      unsigned long arg)
1874 {
1875         struct vpu_subdev_data *data =
1876                 container_of(filp->f_path.dentry->d_inode->i_cdev,
1877                              struct vpu_subdev_data, cdev);
1878         struct vpu_service_info *pservice = data->pservice;
1879         struct vpu_session *session = (struct vpu_session *)filp->private_data;
1880
1881         vpu_debug_enter();
1882         vpu_debug(3, "cmd %x, COMPAT_VPU_IOC_SET_CLIENT_TYPE %x\n", cmd,
1883                   (u32)COMPAT_VPU_IOC_SET_CLIENT_TYPE);
1884         if (NULL == session)
1885                 return -EINVAL;
1886
1887         switch (cmd) {
1888         case COMPAT_VPU_IOC_SET_CLIENT_TYPE: {
1889                 session->type = (enum VPU_CLIENT_TYPE)arg;
1890                 vpu_debug(DEBUG_IOCTL, "compat set client type %d\n",
1891                           session->type);
1892         } break;
1893         case COMPAT_VPU_IOC_GET_HW_FUSE_STATUS: {
1894                 struct compat_vpu_request req;
1895
1896                 vpu_debug(DEBUG_IOCTL, "compat get hw status %d\n",
1897                           session->type);
1898                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1899                                    sizeof(struct compat_vpu_request))) {
1900                         vpu_err("error: compat get hw status copy_from_user failed\n");
1901                         return -EFAULT;
1902                 } else {
1903                         void *config = (session->type != VPU_ENC) ?
1904                                        ((void *)&pservice->dec_config) :
1905                                        ((void *)&pservice->enc_config);
1906                         size_t size = (session->type != VPU_ENC) ?
1907                                       (sizeof(struct vpu_dec_config)) :
1908                                       (sizeof(struct vpu_enc_config));
1909
1910                         if (copy_to_user(compat_ptr((compat_uptr_t)req.req),
1911                                          config, size)) {
1912                                 vpu_err("error: compat get hw status copy_to_user failed type %d\n",
1913                                         session->type);
1914                                 return -EFAULT;
1915                         }
1916                 }
1917         } break;
1918         case COMPAT_VPU_IOC_SET_REG: {
1919                 struct compat_vpu_request req;
1920                 struct vpu_reg *reg;
1921
1922                 vpu_debug(DEBUG_IOCTL, "compat set reg type %d\n",
1923                           session->type);
1924                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1925                                    sizeof(struct compat_vpu_request))) {
1926                         vpu_err("compat set_reg copy_from_user failed\n");
1927                         return -EFAULT;
1928                 }
1929                 reg = reg_init(data, session,
1930                                compat_ptr((compat_uptr_t)req.req), req.size);
1931                 if (NULL == reg) {
1932                         return -EFAULT;
1933                 } else {
1934                         mutex_lock(&pservice->lock);
1935                         try_set_reg(data);
1936                         mutex_unlock(&pservice->lock);
1937                 }
1938         } break;
1939         case COMPAT_VPU_IOC_GET_REG: {
1940                 struct compat_vpu_request req;
1941                 struct vpu_reg *reg;
1942                 int ret;
1943
1944                 vpu_debug(DEBUG_IOCTL, "compat get reg type %d\n",
1945                           session->type);
1946                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1947                                    sizeof(struct compat_vpu_request))) {
1948                         vpu_err("compat get reg copy_from_user failed\n");
1949                         return -EFAULT;
1950                 }
1951
1952                 ret = wait_event_timeout(session->wait,
1953                                          !list_empty(&session->done),
1954                                          VPU_TIMEOUT_DELAY);
1955
1956                 if (!list_empty(&session->done)) {
1957                         if (ret < 0)
1958                                 vpu_err("warning: pid %d wait task error ret %d\n",
1959                                         session->pid, ret);
1960                         ret = 0;
1961                 } else {
1962                         if (unlikely(ret < 0)) {
1963                                 vpu_err("error: pid %d wait task ret %d\n",
1964                                         session->pid, ret);
1965                         } else if (ret == 0) {
1966                                 vpu_err("error: pid %d wait %d task done timeout\n",
1967                                         session->pid,
1968                                         atomic_read(&session->task_running));
1969                                 ret = -ETIMEDOUT;
1970                         }
1971                 }
1972
1973                 if (ret < 0) {
1974                         int task_running = atomic_read(&session->task_running);
1975
1976                         mutex_lock(&pservice->lock);
1977                         vpu_service_dump(pservice);
1978                         if (task_running) {
1979                                 atomic_set(&session->task_running, 0);
1980                                 atomic_sub(task_running,
1981                                            &pservice->total_running);
1982                                 pr_err("%d tasks are still running and have not returned, resetting hardware...",
1983                                        task_running);
1984                                 vpu_reset(data);
1985                                 pr_err("done\n");
1986                         }
1987                         vpu_service_session_clear(data, session);
1988                         mutex_unlock(&pservice->lock);
1989                         return ret;
1990                 }
1991
1992                 mutex_lock(&pservice->lock);
1993                 reg = list_entry(session->done.next,
1994                                  struct vpu_reg, session_link);
1995                 return_reg(data, reg, compat_ptr((compat_uptr_t)req.req));
1996                 mutex_unlock(&pservice->lock);
1997         } break;
1998         case COMPAT_VPU_IOC_PROBE_IOMMU_STATUS: {
1999                 int iommu_enable = 1;
2000
2001                 vpu_debug(DEBUG_IOCTL, "COMPAT_VPU_IOC_PROBE_IOMMU_STATUS\n");
2002
2003                 if (copy_to_user(compat_ptr((compat_uptr_t)arg),
2004                                  &iommu_enable, sizeof(int))) {
2005                         vpu_err("error: VPU_IOC_PROBE_IOMMU_STATUS copy_to_user failed\n");
2006                         return -EFAULT;
2007                 }
2008         } break;
2009         default: {
2010                 vpu_err("error: unknown vpu service ioctl cmd %x\n", cmd);
2011         } break;
2012         }
2013         vpu_debug_leave();
2014         return 0;
2015 }
2016 #endif
2017
2018 static int vpu_service_check_hw(struct vpu_subdev_data *data)
2019 {
2020         int ret = -EINVAL, i = 0;
2021         u32 hw_id = readl_relaxed(data->regs);
2022
2023         hw_id = (hw_id >> 16) & 0xFFFF;
2024         pr_info("checking hw id %x\n", hw_id);
2025         data->hw_info = NULL;
2026         for (i = 0; i < ARRAY_SIZE(vcodec_info_set); i++) {
2027                 struct vcodec_info *info = &vcodec_info_set[i];
2028
2029                 if (hw_id == info->hw_id) {
2030                         data->hw_id = info->hw_id;
2031                         data->hw_info = info->hw_info;
2032                         data->task_info = info->task_info;
2033                         data->trans_info = info->trans_info;
2034                         ret = 0;
2035                         break;
2036                 }
2037         }
2038         return ret;
2039 }
2040
2041 static int vpu_service_open(struct inode *inode, struct file *filp)
2042 {
2043         struct vpu_subdev_data *data = container_of(
2044                         inode->i_cdev, struct vpu_subdev_data, cdev);
2045         struct vpu_service_info *pservice = data->pservice;
2046         struct vpu_session *session = kmalloc(sizeof(*session), GFP_KERNEL);
2047
2048         vpu_debug_enter();
2049
2050         if (NULL == session) {
2051                 vpu_err("error: unable to allocate memory for vpu_session\n");
2052                 return -ENOMEM;
2053         }
2054
2055         session->type   = VPU_TYPE_BUTT;
2056         session->pid    = current->pid;
2057         INIT_LIST_HEAD(&session->waiting);
2058         INIT_LIST_HEAD(&session->running);
2059         INIT_LIST_HEAD(&session->done);
2060         INIT_LIST_HEAD(&session->list_session);
2061         init_waitqueue_head(&session->wait);
2062         atomic_set(&session->task_running, 0);
2063         mutex_lock(&pservice->lock);
2064         list_add_tail(&session->list_session, &pservice->session);
2065         filp->private_data = (void *)session;
2066         mutex_unlock(&pservice->lock);
2067
2068         pr_debug("dev opened\n");
2069         vpu_debug_leave();
2070         return nonseekable_open(inode, filp);
2071 }
2072
2073 static int vpu_service_release(struct inode *inode, struct file *filp)
2074 {
2075         struct vpu_subdev_data *data = container_of(
2076                         inode->i_cdev, struct vpu_subdev_data, cdev);
2077         struct vpu_service_info *pservice = data->pservice;
2078         int task_running;
2079         struct vpu_session *session = (struct vpu_session *)filp->private_data;
2080
2081         vpu_debug_enter();
2082         if (NULL == session)
2083                 return -EINVAL;
2084
2085         task_running = atomic_read(&session->task_running);
2086         if (task_running) {
2087                 pr_err("error: session %d still has %d tasks running when closing\n",
2088                        session->pid, task_running);
2089                 msleep(50);
2090         }
2091         wake_up(&session->wait);
2092
2093         mutex_lock(&pservice->lock);
2094         /* remove this filp from the asynchronously notified filps */
2095         list_del_init(&session->list_session);
2096         vpu_service_session_clear(data, session);
2097         kfree(session);
2098         filp->private_data = NULL;
2099         mutex_unlock(&pservice->lock);
2100
2101         pr_debug("dev closed\n");
2102         vpu_debug_leave();
2103         return 0;
2104 }
2105
2106 static const struct file_operations vpu_service_fops = {
2107         .unlocked_ioctl = vpu_service_ioctl,
2108         .open           = vpu_service_open,
2109         .release        = vpu_service_release,
2110 #ifdef CONFIG_COMPAT
2111         .compat_ioctl   = compat_vpu_service_ioctl,
2112 #endif
2113 };
2114
2115 static irqreturn_t vdpu_irq(int irq, void *dev_id);
2116 static irqreturn_t vdpu_isr(int irq, void *dev_id);
2117 static irqreturn_t vepu_irq(int irq, void *dev_id);
2118 static irqreturn_t vepu_isr(int irq, void *dev_id);
2119 static void get_hw_info(struct vpu_subdev_data *data);
2120
2121 static struct device *rockchip_get_sysmmu_dev(const char *compt)
2122 {
2123         struct device_node *dn = NULL;
2124         struct platform_device *pd = NULL;
2125         struct device *ret = NULL;
2126
2127         dn = of_find_compatible_node(NULL, NULL, compt);
2128         if (!dn) {
2129                 pr_err("can't find device node %s\n", compt);
2130                 return NULL;
2131         }
2132
2133         pd = of_find_device_by_node(dn);
2134         if (!pd) {
2135                 pr_err("can't find platform device in device node %s\n", compt);
2136                 return NULL;
2137         }
2138         ret = &pd->dev;
2139
2140         return ret;
2141 }
2142
2143 #ifdef CONFIG_IOMMU_API
2144 static inline void platform_set_sysmmu(struct device *iommu,
2145                                        struct device *dev)
2146 {
2147         dev->archdata.iommu = iommu;
2148 }
2149 #else
2150 static inline void platform_set_sysmmu(struct device *iommu,
2151                                        struct device *dev)
2152 {
2153 }
2154 #endif
2155
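/*
 * vcodec_sysmmu_fault_hdl - IOMMU page fault handler.
 *
 * Dumps the memory regions and registers of the task that was running
 * when the fault hit, then resets the hardware so the service can
 * continue with the next task.
 */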
2156 int vcodec_sysmmu_fault_hdl(struct device *dev,
2157                             enum rk_iommu_inttype itype,
2158                             unsigned long pgtable_base,
2159                             unsigned long fault_addr, unsigned int status)
2160 {
2161         struct platform_device *pdev;
2162         struct vpu_service_info *pservice;
2163         struct vpu_subdev_data *data;
2164
2165         vpu_debug_enter();
2166
2167         if (dev == NULL) {
2168                 pr_err("invalid NULL dev\n");
2169                 return 0;
2170         }
2171
2172         pdev = container_of(dev, struct platform_device, dev);
2173         if (pdev == NULL) {
2174                 pr_err("invalid NULL platform_device\n");
2175                 return 0;
2176         }
2177
2178         data = platform_get_drvdata(pdev);
2179         if (data == NULL) {
2180                 pr_err("invalid NULL vpu_subdev_data\n");
2181                 return 0;
2182         }
2183
2184         pservice = data->pservice;
2185         if (pservice == NULL) {
2186                 pr_err("invalid NULL vpu_service_info\n");
2187                 return 0;
2188         }
2189
2190         if (pservice->reg_codec) {
2191                 struct vpu_reg *reg = pservice->reg_codec;
2192                 struct vcodec_mem_region *mem, *n;
2193                 int i = 0;
2194
2195                 pr_err("vcodec, fault addr 0x%08lx\n", fault_addr);
2196                 if (!list_empty(&reg->mem_region_list)) {
2197                         list_for_each_entry_safe(mem, n, &reg->mem_region_list,
2198                                                  reg_lnk) {
2199                                 pr_err("vcodec, reg[%02u] mem region [%02d] 0x%lx %lx\n",
2200                                        mem->reg_idx, i, mem->iova, mem->len);
2201                                 i++;
2202                         }
2203                 } else {
2204                         pr_err("no memory region mapped\n");
2205                 }
2206
2207                 if (reg->data) {
2208                         struct vpu_subdev_data *data = reg->data;
2209                         u32 *base = (u32 *)data->dec_dev.regs;
2210                         u32 len = data->hw_info->dec_reg_num;
2211
2212                         pr_err("current error register set:\n");
2213
2214                         for (i = 0; i < len; i++)
2215                                 pr_err("reg[%02d] %08x\n",
2216                                        i, readl_relaxed(base + i));
2217                 }
2218
2219                 pr_alert("vcodec, page fault occurred, resetting hw\n");
2220
2221                 /* reg->reg[101] = 1; */
2222                 vpu_reset(data);
2223         }
2224
2225         return 0;
2226 }
2227
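/*
 * vcodec_subdev_probe - bring up one codec core (vpu, hevc or rkvdec):
 * map its registers, identify the hardware, hook up the interrupts and
 * the IOMMU, and expose the core as a character device.
 */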
2228 static int vcodec_subdev_probe(struct platform_device *pdev,
2229                                struct vpu_service_info *pservice)
2230 {
2231         int ret = 0;
2232         struct resource *res = NULL;
2233         u32 ioaddr = 0;
2234         u8 *regs = NULL;
2235         struct vpu_hw_info *hw_info = NULL;
2236         struct device *dev = &pdev->dev;
2237         char *name = (char *)dev_name(dev);
2238         struct device_node *np = pdev->dev.of_node;
2239         struct vpu_subdev_data *data =
2240                 devm_kzalloc(dev, sizeof(struct vpu_subdev_data), GFP_KERNEL);
2241         u32 iommu_en = 0;
2242         char mmu_dev_dts_name[40];
2243
2244         of_property_read_u32(np, "iommu_enabled", &iommu_en);
2245
2246         pr_info("probe device %s\n", dev_name(dev));
2247
2248         data->pservice = pservice;
2249         data->dev = dev;
2250
2251         of_property_read_string(np, "name", (const char **)&name);
2252         of_property_read_u32(np, "dev_mode", (u32 *)&data->mode);
2253
2254         if (pservice->reg_base == 0) {
2255                 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2256                 data->regs = devm_ioremap_resource(dev, res);
2257                 if (IS_ERR(data->regs)) {
2258                         ret = PTR_ERR(data->regs);
2259                         goto err;
2260                 }
2261                 ioaddr = res->start;
2262         } else {
2263                 data->regs = pservice->reg_base;
2264                 ioaddr = pservice->ioaddr;
2265         }
2266
2267         clear_bit(MMU_ACTIVATED, &data->state);
2268         vcodec_enter_mode(data);
2269
2270         vpu_service_power_on(pservice);
2271         ret = vpu_service_check_hw(data);
2272         if (ret < 0) {
2273                 vpu_err("error: hw info check failed\n");
2274                 goto err;
2275         }
2276
2277         hw_info = data->hw_info;
2278         regs = (u8 *)data->regs;
2279
2280         if (hw_info->dec_reg_num) {
2281                 data->dec_dev.iosize = hw_info->dec_io_size;
2282                 data->dec_dev.regs = (u32 *)(regs + hw_info->dec_offset);
2283         }
2284
2285         if (hw_info->enc_reg_num) {
2286                 data->enc_dev.iosize = hw_info->enc_io_size;
2287                 data->enc_dev.regs = (u32 *)(regs + hw_info->enc_offset);
2288         }
2289
2290         data->reg_size = max(hw_info->dec_io_size, hw_info->enc_io_size);
2291
2292         data->irq_enc = platform_get_irq_byname(pdev, "irq_enc");
2293         if (data->irq_enc > 0) {
2294                 ret = devm_request_threaded_irq(dev, data->irq_enc,
2295                                                 vepu_irq, vepu_isr,
2296                                                 IRQF_SHARED, dev_name(dev),
2297                                                 (void *)data);
2298                 if (ret) {
2299                         dev_err(dev, "error: can't request vepu irq %d\n",
2300                                 data->irq_enc);
2301                         goto err;
2302                 }
2303         }
2304         data->irq_dec = platform_get_irq_byname(pdev, "irq_dec");
2305         if (data->irq_dec > 0) {
2306                 ret = devm_request_threaded_irq(dev, data->irq_dec,
2307                                                 vdpu_irq, vdpu_isr,
2308                                                 IRQF_SHARED, dev_name(dev),
2309                                                 (void *)data);
2310                 if (ret) {
2311                         dev_err(dev, "error: can't request vdpu irq %d\n",
2312                                 data->irq_dec);
2313                         goto err;
2314                 }
2315         }
2316         atomic_set(&data->dec_dev.irq_count_codec, 0);
2317         atomic_set(&data->dec_dev.irq_count_pp, 0);
2318         atomic_set(&data->enc_dev.irq_count_codec, 0);
2319         atomic_set(&data->enc_dev.irq_count_pp, 0);
2320
2321         if (iommu_en) {
2322                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
2323                         sprintf(mmu_dev_dts_name,
2324                                 HEVC_IOMMU_COMPATIBLE_NAME);
2325                 else if (data->mode == VCODEC_RUNNING_MODE_VPU)
2326                         sprintf(mmu_dev_dts_name,
2327                                 VPU_IOMMU_COMPATIBLE_NAME);
2328                 else if (data->mode == VCODEC_RUNNING_MODE_RKVDEC)
2329                         sprintf(mmu_dev_dts_name, VDEC_IOMMU_COMPATIBLE_NAME);
2330                 else
2331                         sprintf(mmu_dev_dts_name,
2332                                 HEVC_IOMMU_COMPATIBLE_NAME);
2333
2334                 data->mmu_dev =
2335                         rockchip_get_sysmmu_dev(mmu_dev_dts_name);
2336
2337                 if (data->mmu_dev)
2338                         platform_set_sysmmu(data->mmu_dev, dev);
2339
2340                 rockchip_iovmm_set_fault_handler(dev, vcodec_sysmmu_fault_hdl);
2341         }
2342
2343         get_hw_info(data);
2344         pservice->auto_freq = true;
2345
2346         vcodec_exit_mode(data);
2347         /* create device node */
2348         ret = alloc_chrdev_region(&data->dev_t, 0, 1, name);
2349         if (ret) {
2350                 dev_err(dev, "alloc dev_t failed\n");
2351                 goto err;
2352         }
2353
2354         cdev_init(&data->cdev, &vpu_service_fops);
2355
2356         data->cdev.owner = THIS_MODULE;
2357         data->cdev.ops = &vpu_service_fops;
2358
2359         ret = cdev_add(&data->cdev, data->dev_t, 1);
2360
2361         if (ret) {
2362                 dev_err(dev, "add dev_t failed\n");
2363                 goto err;
2364         }
2365
2366         data->cls = class_create(THIS_MODULE, name);
2367
2368         if (IS_ERR(data->cls)) {
2369                 ret = PTR_ERR(data->cls);
2370                 dev_err(dev, "class_create err:%d\n", ret);
2371                 goto err;
2372         }
2373
2374         data->child_dev = device_create(data->cls, dev,
2375                 data->dev_t, "%s", name);
2376
2377         platform_set_drvdata(pdev, data);
2378
2379         INIT_LIST_HEAD(&data->lnk_service);
2380         list_add_tail(&data->lnk_service, &pservice->subdev_list);
2381
2382 #ifdef CONFIG_DEBUG_FS
2383         data->debugfs_dir = vcodec_debugfs_create_device_dir(name, parent);
2384         if (!IS_ERR_OR_NULL(data->debugfs_dir))
2385                 data->debugfs_file_regs =
2386                         debugfs_create_file("regs", 0664, data->debugfs_dir,
2387                                         data, &debug_vcodec_fops);
2388         else
2389                 vpu_err("create debugfs dir %s failed\n", name);
2390 #endif
2391         return 0;
2392 err:
2393         if (data->child_dev) {
2394                 device_destroy(data->cls, data->dev_t);
2395                 cdev_del(&data->cdev);
2396                 unregister_chrdev_region(data->dev_t, 1);
2397         }
2398
2399         if (data->cls)
2400                 class_destroy(data->cls);
2401         return -1;
2402 }
2403
2404 static void vcodec_subdev_remove(struct vpu_subdev_data *data)
2405 {
2406         struct vpu_service_info *pservice = data->pservice;
2407
2408         mutex_lock(&pservice->lock);
2409         cancel_delayed_work_sync(&pservice->power_off_work);
2410         vpu_service_power_off(pservice);
2411         mutex_unlock(&pservice->lock);
2412
2413         device_destroy(data->cls, data->dev_t);
2414         class_destroy(data->cls);
2415         cdev_del(&data->cdev);
2416         unregister_chrdev_region(data->dev_t, 1);
2417
2418 #ifdef CONFIG_DEBUG_FS
2419         if (!IS_ERR_OR_NULL(data->debugfs_dir))
2420                 debugfs_remove_recursive(data->debugfs_dir);
2421 #endif
2422 }
2423
2424 static void vcodec_read_property(struct device_node *np,
2425                                  struct vpu_service_info *pservice)
2426 {
2427         pservice->mode_bit = 0;
2428         pservice->mode_ctrl = 0;
2429         pservice->subcnt = 0;
2430         pservice->grf_base = NULL;
2431
2432         of_property_read_u32(np, "subcnt", &pservice->subcnt);
2433
2434         if (pservice->subcnt > 1) {
2435                 of_property_read_u32(np, "mode_bit", &pservice->mode_bit);
2436                 of_property_read_u32(np, "mode_ctrl", &pservice->mode_ctrl);
2437         }
2438 #ifdef CONFIG_MFD_SYSCON
2439         pservice->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
2440         if (IS_ERR_OR_NULL(pservice->grf)) {
2441                 pservice->grf = NULL;
2442 #ifdef CONFIG_ARM
2443                 pservice->grf_base = RK_GRF_VIRT;
2444 #else
2445                 vpu_err("can't find vpu grf property\n");
2446                 return;
2447 #endif
2448         }
2449 #else
2450 #ifdef CONFIG_ARM
2451         pservice->grf_base = RK_GRF_VIRT;
2452 #else
2453         vpu_err("can't find vpu grf property\n");
2454         return;
2455 #endif
2456 #endif
2457
2458 #ifdef CONFIG_RESET_CONTROLLER
2459         pservice->rst_a = devm_reset_control_get(pservice->dev, "video_a");
2460         pservice->rst_h = devm_reset_control_get(pservice->dev, "video_h");
2461         pservice->rst_v = devm_reset_control_get(pservice->dev, "video");
2462
2463         if (IS_ERR_OR_NULL(pservice->rst_a)) {
2464                 pr_warn("No aclk reset resource defined\n");
2465                 pservice->rst_a = NULL;
2466         }
2467
2468         if (IS_ERR_OR_NULL(pservice->rst_h)) {
2469                 pr_warn("No hclk reset resource defined\n");
2470                 pservice->rst_h = NULL;
2471         }
2472
2473         if (IS_ERR_OR_NULL(pservice->rst_v)) {
2474                 pr_warn("No core reset resource defined\n");
2475                 pservice->rst_v = NULL;
2476         }
2477 #endif
2478
2479         of_property_read_string(np, "name", (const char **)&pservice->name);
2480 }
2481
2482 static void vcodec_init_drvdata(struct vpu_service_info *pservice)
2483 {
2484         pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2485         pservice->curr_mode = -1;
2486
2487         wake_lock_init(&pservice->wake_lock, WAKE_LOCK_SUSPEND, "vpu");
2488         INIT_LIST_HEAD(&pservice->waiting);
2489         INIT_LIST_HEAD(&pservice->running);
2490         mutex_init(&pservice->lock);
2491         mutex_init(&pservice->shutdown_lock);
2492         atomic_set(&pservice->service_on, 1);
2493
2494         INIT_LIST_HEAD(&pservice->done);
2495         INIT_LIST_HEAD(&pservice->session);
2496         INIT_LIST_HEAD(&pservice->subdev_list);
2497
2498         pservice->reg_pproc     = NULL;
2499         atomic_set(&pservice->total_running, 0);
2500         atomic_set(&pservice->enabled,       0);
2501         atomic_set(&pservice->power_on_cnt,  0);
2502         atomic_set(&pservice->power_off_cnt, 0);
2503         atomic_set(&pservice->reset_request, 0);
2504
2505         INIT_DELAYED_WORK(&pservice->power_off_work, vpu_power_off_work);
2506         pservice->last.tv64 = 0;
2507
2508         pservice->ion_client = rockchip_ion_client_create("vpu");
2509         if (IS_ERR(pservice->ion_client)) {
2510                 vpu_err("failed to create ion client for vcodec ret %ld\n",
2511                         PTR_ERR(pservice->ion_client));
2512         } else {
2513                 vpu_debug(DEBUG_IOMMU, "vcodec ion client create success!\n");
2514         }
2515 }
2516
2517 static int vcodec_probe(struct platform_device *pdev)
2518 {
2519         int i;
2520         int ret = 0;
2521         struct resource *res = NULL;
2522         struct device *dev = &pdev->dev;
2523         struct device_node *np = pdev->dev.of_node;
2524         struct vpu_service_info *pservice =
2525                 devm_kzalloc(dev, sizeof(struct vpu_service_info), GFP_KERNEL);
2526
2527         pservice->dev = dev;
2528
2529         vcodec_read_property(np, pservice);
2530         vcodec_init_drvdata(pservice);
2531
2532         if (strncmp(pservice->name, "hevc_service", 12) == 0)
2533                 pservice->dev_id = VCODEC_DEVICE_ID_HEVC;
2534         else if (strncmp(pservice->name, "vpu_service", 11) == 0)
2535                 pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2536         else if (strncmp(pservice->name, "rkvdec", 6) == 0)
2537                 pservice->dev_id = VCODEC_DEVICE_ID_RKVDEC;
2538         else
2539                 pservice->dev_id = VCODEC_DEVICE_ID_COMBO;
2540
2541         if (0 > vpu_get_clk(pservice))
2542                 goto err;
2543
2544         if (of_property_read_bool(np, "reg")) {
2545                 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2546
2547                 pservice->reg_base = devm_ioremap_resource(pservice->dev, res);
2548                 if (IS_ERR(pservice->reg_base)) {
2549                         vpu_err("ioremap registers base failed\n");
2550                         ret = PTR_ERR(pservice->reg_base);
2551                         goto err;
2552                 }
2553                 pservice->ioaddr = res->start;
2554         } else {
2555                 pservice->reg_base = 0;
2556         }
2557
2558         pm_runtime_enable(dev);
2559
2560         if (of_property_read_bool(np, "subcnt")) {
2561                 for (i = 0; i < pservice->subcnt; i++) {
2562                         struct device_node *sub_np;
2563                         struct platform_device *sub_pdev;
2564
2565                         sub_np = of_parse_phandle(np, "rockchip,sub", i);
2566                         sub_pdev = of_find_device_by_node(sub_np);
2567
2568                         vcodec_subdev_probe(sub_pdev, pservice);
2569                 }
2570         } else {
2571                 vcodec_subdev_probe(pdev, pservice);
2572         }
2573
2574         vpu_service_power_off(pservice);
2575
2576         pr_info("init success\n");
2577
2578         return 0;
2579
2580 err:
2581         pr_info("init failed\n");
2582         vpu_service_power_off(pservice);
2583         vpu_put_clk(pservice);
2584         wake_lock_destroy(&pservice->wake_lock);
2585
2586         return ret;
2587 }
2588
2589 static int vcodec_remove(struct platform_device *pdev)
2590 {
2591         struct vpu_subdev_data *data = platform_get_drvdata(pdev);
2592
2593         vcodec_subdev_remove(data);
2594
2595         pm_runtime_disable(data->pservice->dev);
2596
2597         return 0;
2598 }
2599
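/*
 * vcodec_shutdown - mark the service as off under shutdown_lock so that
 * try_set_reg() stops accepting new work, then tear the subdevice down.
 */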
2600 static void vcodec_shutdown(struct platform_device *pdev)
2601 {
2602         struct vpu_subdev_data *data = platform_get_drvdata(pdev);
2603         struct vpu_service_info *pservice = data->pservice;
2604
2605         dev_info(&pdev->dev, "%s IN\n", __func__);
2606
2607         mutex_lock(&pservice->shutdown_lock);
2608         atomic_set(&pservice->service_on, 0);
2609         mutex_unlock(&pservice->shutdown_lock);
2610
2611         vcodec_exit_mode(data);
2612
2613         vpu_service_clear(data);
2614         vcodec_subdev_remove(data);
2615
2616         pm_runtime_disable(&pdev->dev);
2617 }
2618
2619 #if defined(CONFIG_OF)
2620 static const struct of_device_id vcodec_service_dt_ids[] = {
2621         {.compatible = "rockchip,vpu_service",},
2622         {.compatible = "rockchip,hevc_service",},
2623         {.compatible = "rockchip,vpu_combo",},
2624         {.compatible = "rockchip,rkvdec",},
2625         {},
2626 };
2627 #endif
2628
2629 static struct platform_driver vcodec_driver = {
2630         .probe = vcodec_probe,
2631         .remove = vcodec_remove,
2632         .shutdown = vcodec_shutdown,
2633         .driver = {
2634                 .name = "vcodec",
2635                 .owner = THIS_MODULE,
2636 #if defined(CONFIG_OF)
2637                 .of_match_table = of_match_ptr(vcodec_service_dt_ids),
2638 #endif
2639         },
2640 };
2641
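/*
 * get_hw_info - fill the decoder/encoder capability structures reported
 * through VPU_IOC_GET_HW_FUSE_STATUS and decide whether automatic
 * frequency scaling is allowed for this core.
 */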
2642 static void get_hw_info(struct vpu_subdev_data *data)
2643 {
2644         struct vpu_service_info *pservice = data->pservice;
2645         struct vpu_dec_config *dec = &pservice->dec_config;
2646         struct vpu_enc_config *enc = &pservice->enc_config;
2647
2648         if (of_machine_is_compatible("rockchip,rk2928") ||
2649                         of_machine_is_compatible("rockchip,rk3036") ||
2650                         of_machine_is_compatible("rockchip,rk3066") ||
2651                         of_machine_is_compatible("rockchip,rk3126") ||
2652                         of_machine_is_compatible("rockchip,rk3188"))
2653                 dec->max_dec_pic_width = 1920;
2654         else
2655                 dec->max_dec_pic_width = 4096;
2656
2657         if (data->mode == VCODEC_RUNNING_MODE_VPU) {
2658                 dec->h264_support = 3;
2659                 dec->jpeg_support = 1;
2660                 dec->mpeg4_support = 2;
2661                 dec->vc1_support = 3;
2662                 dec->mpeg2_support = 1;
2663                 dec->pp_support = 1;
2664                 dec->sorenson_support = 1;
2665                 dec->ref_buf_support = 3;
2666                 dec->vp6_support = 1;
2667                 dec->vp7_support = 1;
2668                 dec->vp8_support = 1;
2669                 dec->avs_support = 1;
2670                 dec->jpeg_ext_support = 0;
2671                 dec->custom_mpeg4_support = 1;
2672                 dec->reserve = 0;
2673                 dec->mvc_support = 1;
2674
2675                 if (!of_machine_is_compatible("rockchip,rk3036")) {
2676                         u32 config_reg = readl_relaxed(data->enc_dev.regs + 63);
2677
2678                         enc->max_encoded_width = config_reg & ((1 << 11) - 1);
2679                         enc->h264_enabled = 1;
2680                         enc->mpeg4_enabled = (config_reg >> 26) & 1;
2681                         enc->jpeg_enabled = 1;
2682                         enc->vs_enabled = (config_reg >> 24) & 1;
2683                         enc->rgb_enabled = (config_reg >> 28) & 1;
2684                         enc->reg_size = data->reg_size;
2685                         enc->reserv[0] = 0;
2686                         enc->reserv[1] = 0;
2687                 }
2688
2689                 pservice->auto_freq = true;
2690                 vpu_debug(DEBUG_EXTRA_INFO, "vpu_service set to auto frequency mode\n");
2691                 atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
2692
2693                 pservice->bug_dec_addr = of_machine_is_compatible
2694                         ("rockchip,rk30xx");
2695         } else if (data->mode == VCODEC_RUNNING_MODE_RKVDEC) {
2696                 pservice->auto_freq = true;
2697                 atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
2698         } else {
2699                 /* disable frequency switching in hevc. */
2700                 pservice->auto_freq = false;
2701         }
2702 }
2703
2704 static bool check_irq_err(struct vpu_task_info *task, u32 irq_status)
2705 {
2706         vpu_debug(DEBUG_IRQ_CHECK, "task %s status %08x mask %08x\n",
2707                   task->name, irq_status, task->error_mask);
2708
2709         return (task->error_mask & irq_status) ? true : false;
2710 }
2711
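/*
 * vdpu_irq - top half for the decoder/post-processor interrupt.
 *
 * Acknowledges the hardware, records timing and error state, and wakes
 * the threaded handler (vdpu_isr) which moves the task to the done list.
 */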
2712 static irqreturn_t vdpu_irq(int irq, void *dev_id)
2713 {
2714         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2715         struct vpu_service_info *pservice = data->pservice;
2716         struct vpu_task_info *task = NULL;
2717         struct vpu_device *dev = &data->dec_dev;
2718         u32 hw_id = data->hw_info->hw_id;
2719         u32 raw_status;
2720         u32 dec_status;
2721
2722         task = &data->task_info[TASK_DEC];
2723
2724         raw_status = readl_relaxed(dev->regs + task->reg_irq);
2725         dec_status = raw_status;
2726
2727         vpu_debug(DEBUG_TASK_INFO, "vdpu_irq reg %d status %x mask: irq %x ready %x error %0x\n",
2728                   task->reg_irq, dec_status,
2729                   task->irq_mask, task->ready_mask, task->error_mask);
2730
2731         if (dec_status & task->irq_mask) {
2732                 time_record(task, 1);
2733                 vpu_debug(DEBUG_IRQ_STATUS, "vdpu_irq dec status %08x\n",
2734                           dec_status);
2735                 if ((dec_status & 0x40001) == 0x40001) {
2736                         do {
2737                                 dec_status =
2738                                         readl_relaxed(dev->regs +
2739                                                 task->reg_irq);
2740                         } while ((dec_status & 0x40001) == 0x40001);
2741                 }
2742
2743                 if (check_irq_err(task, dec_status))
2744                         atomic_add(1, &pservice->reset_request);
2745
2746                 writel_relaxed(0, dev->regs + task->reg_irq);
2747
2748                 /*
2749                  * NOTE: rkvdec need to reset after each task to avoid timeout
2750                  *       error on H.264 switch to H.265
2751                  */
2752                 if (data->mode == VCODEC_RUNNING_MODE_RKVDEC)
2753                         writel(0x100000, dev->regs + task->reg_irq);
2754
2755                 /* set clock gating to save power */
2756                 writel(task->gating_mask, dev->regs + task->reg_en);
2757
2758                 atomic_add(1, &dev->irq_count_codec);
2759                 time_diff(task);
2760         }
2761
2762         task = &data->task_info[TASK_PP];
2763         if (hw_id != HEVC_ID && hw_id != RKV_DEC_ID) {
2764                 u32 pp_status = readl_relaxed(dev->regs + task->reg_irq);
2765
2766                 if (pp_status & task->irq_mask) {
2767                         time_record(task, 1);
2768                         vpu_debug(DEBUG_IRQ_STATUS, "vdpu_irq pp status %08x\n",
2769                                   pp_status);
2770
2771                         if (check_irq_err(task, pp_status))
2772                                 atomic_add(1, &pservice->reset_request);
2773
2774                         /* clear pp IRQ */
2775                         writel_relaxed(pp_status & (~task->irq_mask),
2776                                        dev->regs + task->reg_irq);
2777                         atomic_add(1, &dev->irq_count_pp);
2778                         time_diff(task);
2779                 }
2780         }
2781
2782         pservice->irq_status = raw_status;
2783
2784         if (atomic_read(&dev->irq_count_pp) ||
2785             atomic_read(&dev->irq_count_codec))
2786                 return IRQ_WAKE_THREAD;
2787         else
2788                 return IRQ_NONE;
2789 }
2790
2791 static irqreturn_t vdpu_isr(int irq, void *dev_id)
2792 {
2793         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2794         struct vpu_service_info *pservice = data->pservice;
2795         struct vpu_device *dev = &data->dec_dev;
2796
2797         mutex_lock(&pservice->lock);
2798         if (atomic_read(&dev->irq_count_codec)) {
2799                 atomic_sub(1, &dev->irq_count_codec);
2800                 if (pservice->reg_codec == NULL) {
2801                         vpu_err("error: dec isr with no task waiting\n");
2802                 } else {
2803                         reg_from_run_to_done(data, pservice->reg_codec);
2804                         /* avoid the vpu timing out and being unable to recover */
2805                         VDPU_SOFT_RESET(data->regs);
2806                 }
2807         }
2808
2809         if (atomic_read(&dev->irq_count_pp)) {
2810                 atomic_sub(1, &dev->irq_count_pp);
2811                 if (pservice->reg_pproc == NULL)
2812                         vpu_err("error: pp isr with no task waiting\n");
2813                 else
2814                         reg_from_run_to_done(data, pservice->reg_pproc);
2815         }
2816         try_set_reg(data);
2817         mutex_unlock(&pservice->lock);
2818         return IRQ_HANDLED;
2819 }
2820
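/*
 * Encoder hard IRQ: record timing, request a reset on error status, clear
 * the interrupt and wake the threaded handler when a task has finished.
 */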
2821 static irqreturn_t vepu_irq(int irq, void *dev_id)
2822 {
2823         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2824         struct vpu_service_info *pservice = data->pservice;
2825         struct vpu_task_info *task = &data->task_info[TASK_ENC];
2826         struct vpu_device *dev = &data->enc_dev;
2827         u32 irq_status;
2828
2829         irq_status = readl_relaxed(dev->regs + task->reg_irq);
2830
2831         vpu_debug(DEBUG_TASK_INFO, "vepu_irq reg %d status %x mask: irq %x ready %x error %0x\n",
2832                   task->reg_irq, irq_status,
2833                   task->irq_mask, task->ready_mask, task->error_mask);
2834
2835         vpu_debug(DEBUG_IRQ_STATUS, "vepu_irq enc status %08x\n", irq_status);
2836
2837         if (likely(irq_status & task->irq_mask)) {
2838                 time_record(task, 1);
2839
2840                 if (check_irq_err(task, irq_status))
2841                         atomic_add(1, &pservice->reset_request);
2842
2843                 /* clear enc IRQ */
2844                 writel_relaxed(irq_status & (~task->irq_mask),
2845                                dev->regs + task->reg_irq);
2846
2847                 atomic_add(1, &dev->irq_count_codec);
2848                 time_diff(task);
2849         }
2850
2851         pservice->irq_status = irq_status;
2852
2853         if (atomic_read(&dev->irq_count_codec))
2854                 return IRQ_WAKE_THREAD;
2855         else
2856                 return IRQ_NONE;
2857 }
2858
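/*
 * Threaded half of the encoder interrupt: completes the current encode task
 * and schedules the next one, mirroring vdpu_isr.
 */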
2859 static irqreturn_t vepu_isr(int irq, void *dev_id)
2860 {
2861         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2862         struct vpu_service_info *pservice = data->pservice;
2863         struct vpu_device *dev = &data->enc_dev;
2864
2865         mutex_lock(&pservice->lock);
2866         if (atomic_read(&dev->irq_count_codec)) {
2867                 atomic_sub(1, &dev->irq_count_codec);
2868                 if (pservice->reg_codec == NULL)
2869                         vpu_err("error: enc isr with no task waiting\n");
2870                 else
2871                         reg_from_run_to_done(data, pservice->reg_codec);
2872         }
2873         try_set_reg(data);
2874         mutex_unlock(&pservice->lock);
2875         return IRQ_HANDLED;
2876 }
2877
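/* module entry point: register the platform driver and, when enabled, the debugfs root */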
2878 static int __init vcodec_service_init(void)
2879 {
2880         int ret = platform_driver_register(&vcodec_driver);
2881
2882         if (ret) {
2883                 vpu_err("Platform driver register failed (%d).\n", ret);
2884                 return ret;
2885         }
2886
2887 #ifdef CONFIG_DEBUG_FS
2888         vcodec_debugfs_init();
2889 #endif
2890
2891         return ret;
2892 }
2893
2894 static void __exit vcodec_service_exit(void)
2895 {
2896 #ifdef CONFIG_DEBUG_FS
2897         vcodec_debugfs_exit();
2898 #endif
2899
2900         platform_driver_unregister(&vcodec_driver);
2901 }
2902
2903 module_init(vcodec_service_init);
2904 module_exit(vcodec_service_exit);
2905 MODULE_LICENSE("GPL v2");
2906
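/*
 * debugfs support: dump hardware registers and per-session task state under
 * the "vcodec" debugfs directory (only built with CONFIG_DEBUG_FS).
 */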
2907 #ifdef CONFIG_DEBUG_FS
2908 #include <linux/seq_file.h>
2909
2910 static int vcodec_debugfs_init(void)
2911 {
2912         parent = debugfs_create_dir("vcodec", NULL);
2913         if (!parent)
2914                 return -1;
2915
2916         return 0;
2917 }
2918
2919 static void vcodec_debugfs_exit(void)
2920 {
2921         debugfs_remove(parent);
2922 }
2923
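/* helper to create a per-device directory under the given debugfs parent */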
2924 static struct dentry *vcodec_debugfs_create_device_dir(
2925                 char *dirname, struct dentry *parent)
2926 {
2927         return debugfs_create_dir(dirname, parent);
2928 }
2929
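/*
 * debugfs show handler: powers the hardware on, dumps the encoder and decoder
 * register files plus each session's waiting/running/done lists, then powers
 * it back off.
 */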
2930 static int debug_vcodec_show(struct seq_file *s, void *unused)
2931 {
2932         struct vpu_subdev_data *data = s->private;
2933         struct vpu_service_info *pservice = data->pservice;
2934         unsigned int i, n;
2935         struct vpu_reg *reg, *reg_tmp;
2936         struct vpu_session *session, *session_tmp;
2937
2938         mutex_lock(&pservice->lock);
2939         vpu_service_power_on(pservice);
2940         if (data->hw_info->hw_id != HEVC_ID) {
2941                 seq_puts(s, "\nENC Registers:\n");
2942                 n = data->enc_dev.iosize >> 2;
2943
2944                 for (i = 0; i < n; i++)
2945                         seq_printf(s, "\tswreg%d = %08X\n", i,
2946                                    readl_relaxed(data->enc_dev.regs + i));
2947         }
2948
2949         seq_puts(s, "\nDEC Registers:\n");
2950
2951         n = data->dec_dev.iosize >> 2;
2952         for (i = 0; i < n; i++)
2953                 seq_printf(s, "\tswreg%d = %08X\n", i,
2954                            readl_relaxed(data->dec_dev.regs + i));
2955
2956         seq_puts(s, "\nvpu service status:\n");
2957
2958         list_for_each_entry_safe(session, session_tmp,
2959                                  &pservice->session, list_session) {
2960                 seq_printf(s, "session pid %d type %d:\n",
2961                            session->pid, session->type);
2962
2963                 list_for_each_entry_safe(reg, reg_tmp,
2964                                          &session->waiting, session_link) {
2965                         seq_printf(s, "waiting register set %p\n", reg);
2966                 }
2967                 list_for_each_entry_safe(reg, reg_tmp,
2968                                          &session->running, session_link) {
2969                         seq_printf(s, "running register set %p\n", reg);
2970                 }
2971                 list_for_each_entry_safe(reg, reg_tmp,
2972                                          &session->done, session_link) {
2973                         seq_printf(s, "done    register set %p\n", reg);
2974                 }
2975         }
2976
2977         seq_printf(s, "\npower counter: on %d off %d\n",
2978                    atomic_read(&pservice->power_on_cnt),
2979                    atomic_read(&pservice->power_off_cnt));
2980
2981         mutex_unlock(&pservice->lock);
2982         vpu_service_power_off(pservice);
2983
2984         return 0;
2985 }
2986
2987 static int debug_vcodec_open(struct inode *inode, struct file *file)
2988 {
2989         return single_open(file, debug_vcodec_show, inode->i_private);
2990 }
2991
2992 #endif
2993