1 /**
2  * Copyright (C) 2014 ROCKCHIP, Inc.
3  * author: chenhengming chm@rock-chips.com
4  *         Alpha Lin, alpha.lin@rock-chips.com
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include <linux/clk.h>
20 #include <linux/compat.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/io.h>
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/fs.h>
28 #include <linux/ioport.h>
29 #include <linux/miscdevice.h>
30 #include <linux/mm.h>
31 #include <linux/poll.h>
32 #include <linux/platform_device.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/wakelock.h>
36 #include <linux/cdev.h>
37 #include <linux/of.h>
38 #include <linux/of_platform.h>
39 #include <linux/of_irq.h>
40 #include <linux/rockchip/cpu.h>
41 #include <linux/rockchip/cru.h>
42 #ifdef CONFIG_MFD_SYSCON
43 #include <linux/regmap.h>
44 #endif
45 #include <linux/mfd/syscon.h>
46
47 #include <asm/cacheflush.h>
48 #include <linux/uaccess.h>
49 #include <linux/rockchip/grf.h>
50
51 #if defined(CONFIG_ION_ROCKCHIP)
52 #include <linux/rockchip_ion.h>
53 #endif
54
55 #if defined(CONFIG_ROCKCHIP_IOMMU) & defined(CONFIG_ION_ROCKCHIP)
56 #define CONFIG_VCODEC_MMU
57 #endif
58
59 #ifdef CONFIG_VCODEC_MMU
60 #include <linux/rockchip-iovmm.h>
61 #include <linux/dma-buf.h>
62 #endif
63
64 #ifdef CONFIG_DEBUG_FS
65 #include <linux/debugfs.h>
66 #endif
67
68 #if defined(CONFIG_ARCH_RK319X)
69 #include <mach/grf.h>
70 #endif
71
72 #include "vcodec_service.h"
73
74 /*
75  * debug flag usage:
76  * +------+-------------------+
77  * | 8bit |      24bit        |
78  * +------+-------------------+
79  * bits  0~23 select the type of information to print
80  * bits 24~31 select the print format
81  */
82
83 #define DEBUG_POWER                             0x00000001
84 #define DEBUG_CLOCK                             0x00000002
85 #define DEBUG_IRQ_STATUS                        0x00000004
86 #define DEBUG_IOMMU                             0x00000008
87 #define DEBUG_IOCTL                             0x00000010
88 #define DEBUG_FUNCTION                          0x00000020
89 #define DEBUG_REGISTER                          0x00000040
90 #define DEBUG_EXTRA_INFO                        0x00000080
91 #define DEBUG_TIMING                            0x00000100
92
93 #define PRINT_FUNCTION                          0x80000000
94 #define PRINT_LINE                              0x40000000
95
96 static int debug;
97 module_param(debug, int, S_IRUGO | S_IWUSR);
98 MODULE_PARM_DESC(debug,
99                  "Debug level - higher value produces more verbose messages");
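/*
 * Example (sketch): a debug value is a bitwise OR of the flags above, e.g.
 * DEBUG_POWER | DEBUG_CLOCK (0x3) traces power and clock handling, while the
 * PRINT_* bits select the output format.  Because the parameter is declared
 * writable it can normally also be changed at runtime via sysfs (the path
 * assumes this object is built as vcodec_service):
 *
 *      echo 3 > /sys/module/vcodec_service/parameters/debug
 */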
100
101 #define HEVC_TEST_ENABLE        0
102 #define VCODEC_CLOCK_ENABLE     1
103
104 typedef enum {
105         VPU_DEC_ID_9190         = 0x6731,
106         VPU_ID_8270             = 0x8270,
107         VPU_ID_4831             = 0x4831,
108         HEVC_ID                 = 0x6867,
109 } VPU_HW_ID;
110
111 enum VPU_HW_SPEC {
112         VPU_TYPE_VPU,
113         VPU_TYPE_HEVC,
114         VPU_TYPE_COMBO_NOENC,
115         VPU_TYPE_COMBO
116 };
117
118 typedef enum {
119         VPU_DEC_TYPE_9190       = 0,
120         VPU_ENC_TYPE_8270       = 0x100,
121         VPU_ENC_TYPE_4831       ,
122 } VPU_HW_TYPE_E;
123
124 typedef enum VPU_FREQ {
125         VPU_FREQ_200M,
126         VPU_FREQ_266M,
127         VPU_FREQ_300M,
128         VPU_FREQ_400M,
129         VPU_FREQ_500M,
130         VPU_FREQ_600M,
131         VPU_FREQ_DEFAULT,
132         VPU_FREQ_BUT,
133 } VPU_FREQ;
134
135 typedef struct {
136         VPU_HW_ID               hw_id;
137         unsigned long           hw_addr;
138         unsigned long           enc_offset;
139         unsigned long           enc_reg_num;
140         unsigned long           enc_io_size;
141         unsigned long           dec_offset;
142         unsigned long           dec_reg_num;
143         unsigned long           dec_io_size;
144 } VPU_HW_INFO_E;
145
146 struct extra_info_elem {
147         u32 index;
148         u32 offset;
149 };
150
151 #define EXTRA_INFO_MAGIC        0x4C4A46
152
153 struct extra_info_for_iommu {
154         u32 magic;
155         u32 cnt;
156         struct extra_info_elem elem[20];
157 };
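/*
 * Layout note: userspace may append one extra_info_for_iommu directly after
 * the register array it submits.  reg_init() below copies those trailing
 * bytes into a local struct and, when magic matches EXTRA_INFO_MAGIC,
 * vcodec_bufid_to_iova() adds elem[i].offset to register elem[i].index once
 * the dma-buf fds have been translated to iommu addresses.
 */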
158
159 #define MHZ                                     (1000*1000)
160
161 #define REG_NUM_9190_DEC                        (60)
162 #define REG_NUM_9190_PP                         (41)
163 #define REG_NUM_9190_DEC_PP                     (REG_NUM_9190_DEC+REG_NUM_9190_PP)
164
165 #define REG_NUM_DEC_PP                          (REG_NUM_9190_DEC+REG_NUM_9190_PP)
166
167 #define REG_NUM_ENC_8270                        (96)
168 #define REG_SIZE_ENC_8270                       (0x200)
169 #define REG_NUM_ENC_4831                        (164)
170 #define REG_SIZE_ENC_4831                       (0x400)
171
172 #define REG_NUM_HEVC_DEC                        (68)
173
174 #define SIZE_REG(reg)                           ((reg)*4)
175
176 static VPU_HW_INFO_E vpu_hw_set[] = {
177         [0] = {
178                 .hw_id          = VPU_ID_8270,
179                 .hw_addr        = 0,
180                 .enc_offset     = 0x0,
181                 .enc_reg_num    = REG_NUM_ENC_8270,
182                 .enc_io_size    = REG_NUM_ENC_8270 * 4,
183                 .dec_offset     = REG_SIZE_ENC_8270,
184                 .dec_reg_num    = REG_NUM_9190_DEC_PP,
185                 .dec_io_size    = REG_NUM_9190_DEC_PP * 4,
186         },
187         [1] = {
188                 .hw_id          = VPU_ID_4831,
189                 .hw_addr        = 0,
190                 .enc_offset     = 0x0,
191                 .enc_reg_num    = REG_NUM_ENC_4831,
192                 .enc_io_size    = REG_NUM_ENC_4831 * 4,
193                 .dec_offset     = REG_SIZE_ENC_4831,
194                 .dec_reg_num    = REG_NUM_9190_DEC_PP,
195                 .dec_io_size    = REG_NUM_9190_DEC_PP * 4,
196         },
197         [2] = {
198                 .hw_id          = HEVC_ID,
199                 .hw_addr        = 0,
200                 .dec_offset     = 0x0,
201                 .dec_reg_num    = REG_NUM_HEVC_DEC,
202                 .dec_io_size    = REG_NUM_HEVC_DEC * 4,
203         },
204         [3] = {
205                 .hw_id          = VPU_DEC_ID_9190,
206                 .hw_addr        = 0,
207                 .enc_offset     = 0x0,
208                 .enc_reg_num    = 0,
209                 .enc_io_size    = 0,
210                 .dec_offset     = 0,
211                 .dec_reg_num    = REG_NUM_9190_DEC_PP,
212                 .dec_io_size    = REG_NUM_9190_DEC_PP * 4,
213         },
214 };
215
216 #ifndef BIT
217 #define BIT(x)                                  (1<<(x))
218 #endif
219
220 /* interrupt and error status registers */
221 #define DEC_INTERRUPT_REGISTER                  1
222 #define DEC_INTERRUPT_BIT                       BIT(8)
223 #define DEC_READY_BIT                           BIT(12)
224 #define DEC_BUS_ERROR_BIT                       BIT(13)
225 #define DEC_BUFFER_EMPTY_BIT                    BIT(14)
226 #define DEC_ASO_ERROR_BIT                       BIT(15)
227 #define DEC_STREAM_ERROR_BIT                    BIT(16)
228 #define DEC_SLICE_DONE_BIT                      BIT(17)
229 #define DEC_TIMEOUT_BIT                         BIT(18)
230 #define DEC_ERR_MASK                            (DEC_BUS_ERROR_BIT \
231                                                 |DEC_BUFFER_EMPTY_BIT \
232                                                 |DEC_STREAM_ERROR_BIT \
233                                                 |DEC_TIMEOUT_BIT)
234
235 #define PP_INTERRUPT_REGISTER                   60
236 #define PP_INTERRUPT_BIT                        BIT(8)
237 #define PP_READY_BIT                            BIT(12)
238 #define PP_BUS_ERROR_BIT                        BIT(13)
239 #define PP_ERR_MASK                             PP_BUS_ERROR_BIT
240
241 #define ENC_INTERRUPT_REGISTER                  1
242 #define ENC_INTERRUPT_BIT                       BIT(0)
243 #define ENC_READY_BIT                           BIT(2)
244 #define ENC_BUS_ERROR_BIT                       BIT(3)
245 #define ENC_BUFFER_FULL_BIT                     BIT(5)
246 #define ENC_TIMEOUT_BIT                         BIT(6)
247 #define ENC_ERR_MASK                            (ENC_BUS_ERROR_BIT \
248                                                 |ENC_BUFFER_FULL_BIT \
249                                                 |ENC_TIMEOUT_BIT)
250
251 #define HEVC_INTERRUPT_REGISTER                 1
252 #define HEVC_DEC_INT_RAW_BIT                    BIT(9)
253 #define HEVC_DEC_BUS_ERROR_BIT                  BIT(13)
254 #define HEVC_DEC_STR_ERROR_BIT                  BIT(14)
255 #define HEVC_DEC_TIMEOUT_BIT                    BIT(15)
256 #define HEVC_DEC_BUFFER_EMPTY_BIT               BIT(16)
257 #define HEVC_DEC_COLMV_ERROR_BIT                BIT(17)
258 #define HEVC_DEC_ERR_MASK                       (HEVC_DEC_BUS_ERROR_BIT \
259                                                 |HEVC_DEC_STR_ERROR_BIT \
260                                                 |HEVC_DEC_TIMEOUT_BIT \
261                                                 |HEVC_DEC_BUFFER_EMPTY_BIT \
262                                                 |HEVC_DEC_COLMV_ERROR_BIT)
263
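/*
 * Usage sketch (the actual irq handlers are outside this excerpt): the
 * interrupt word is read from *_INTERRUPT_REGISTER and tested against the
 * bits above, roughly:
 *
 *      u32 raw = data->dec_dev.hwregs[DEC_INTERRUPT_REGISTER];
 *      if (raw & DEC_INTERRUPT_BIT) {
 *              if (raw & DEC_ERR_MASK)
 *                      handle_broken_frame();   (illustrative name only)
 *      }
 */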
264
265 /* gating configuration set */
266 #define VPU_REG_EN_ENC                          14
267 #define VPU_REG_ENC_GATE                        2
268 #define VPU_REG_ENC_GATE_BIT                    (1<<4)
269
270 #define VPU_REG_EN_DEC                          1
271 #define VPU_REG_DEC_GATE                        2
272 #define VPU_REG_DEC_GATE_BIT                    (1<<10)
273 #define VPU_REG_EN_PP                           0
274 #define VPU_REG_PP_GATE                         1
275 #define VPU_REG_PP_GATE_BIT                     (1<<8)
276 #define VPU_REG_EN_DEC_PP                       1
277 #define VPU_REG_DEC_PP_GATE                     61
278 #define VPU_REG_DEC_PP_GATE_BIT                 (1<<8)
279
280 #define DEBUG
281 #ifdef DEBUG
282 #define vpu_debug_func(type, fmt, args...)                      \
283         do {                                                    \
284                 if (unlikely(debug & type)) {                   \
285                         pr_info("%s:%d: " fmt,                  \
286                                  __func__, __LINE__, ##args);   \
287                 }                                               \
288         } while (0)
289 #define vpu_debug(type, fmt, args...)                           \
290         do {                                                    \
291                 if (unlikely(debug & type)) {                   \
292                         pr_info(fmt, ##args);                   \
293                 }                                               \
294         } while (0)
295 #else
296 #define vpu_debug_func(level, fmt, args...)
297 #define vpu_debug(level, fmt, args...)
298 #endif
299
300 #define vpu_debug_enter() vpu_debug_func(DEBUG_FUNCTION, "enter\n")
301 #define vpu_debug_leave() vpu_debug_func(DEBUG_FUNCTION, "leave\n")
302
303 #define vpu_err(fmt, args...)                           \
304                 pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
305
306 #if defined(CONFIG_VCODEC_MMU)
307 static u8 addr_tbl_vpu_h264dec[] = {
308         12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
309         25, 26, 27, 28, 29, 40, 41
310 };
311
312 static u8 addr_tbl_vpu_vp8dec[] = {
313         10, 12, 13, 14, 18, 19, 22, 23, 24, 25, 26, 27, 28, 29, 40
314 };
315
316 static u8 addr_tbl_vpu_vp6dec[] = {
317         12, 13, 14, 18, 27, 40
318 };
319
320 static u8 addr_tbl_vpu_vc1dec[] = {
321         12, 13, 14, 15, 16, 17, 27, 41
322 };
323
324 static u8 addr_tbl_vpu_jpegdec[] = {
325         12, 40, 66, 67
326 };
327
328 static u8 addr_tbl_vpu_defaultdec[] = {
329         12, 13, 14, 15, 16, 17, 40, 41
330 };
331
332 static u8 addr_tbl_vpu_enc[] = {
333         5, 6, 7, 8, 9, 10, 11, 12, 13, 51
334 };
335
336 static u8 addr_tbl_hevc_dec[] = {
337         4, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
338         21, 22, 23, 24, 42, 43
339 };
340 #endif
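/*
 * Each table above lists the indices of registers whose value carries a
 * dma-buf fd in its low 10 bits (with a buffer offset in the remaining
 * bits); vcodec_bufid_to_iova() below walks the matching table and replaces
 * those values with iommu addresses before the registers reach the hardware.
 */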
341
342 enum VPU_DEC_FMT {
343         VPU_DEC_FMT_H264,
344         VPU_DEC_FMT_MPEG4,
345         VPU_DEC_FMT_H263,
346         VPU_DEC_FMT_JPEG,
347         VPU_DEC_FMT_VC1,
348         VPU_DEC_FMT_MPEG2,
349         VPU_DEC_FMT_MPEG1,
350         VPU_DEC_FMT_VP6,
351         VPU_DEC_FMT_RV,
352         VPU_DEC_FMT_VP7,
353         VPU_DEC_FMT_VP8,
354         VPU_DEC_FMT_AVS,
355         VPU_DEC_FMT_SVC,
356         VPU_DEC_FMT_VC2,
357         VPU_DEC_FMT_MVC,
358         VPU_DEC_FMT_THEORA,
359         VPU_DEC_FMT_RES
360 };
361
362 /**
363  * struct for a process session which connects to the vpu
364  *
365  * @author ChenHengming (2011-5-3)
366  */
367 typedef struct vpu_session {
368         enum VPU_CLIENT_TYPE type;
369         /* a linked list of data so we can access them for debugging */
370         struct list_head list_session;
371         /* a linked list of register data waiting for process */
372         struct list_head waiting;
373         /* a linked list of register data in processing */
374         struct list_head running;
375         /* a linked list of register data processed */
376         struct list_head done;
377         wait_queue_head_t wait;
378         pid_t pid;
379         atomic_t task_running;
380 } vpu_session;
381
382 /**
383  * struct for a register set submitted by a process
384  *
385  * @author ChenHengming (2011-5-4)
386  */
387 typedef struct vpu_reg {
388         enum VPU_CLIENT_TYPE type;
389         VPU_FREQ freq;
390         vpu_session *session;
391         struct vpu_subdev_data *data;
392         struct list_head session_link;          /* link to vpu service session */
393         struct list_head status_link;           /* link to register set list */
394         unsigned long size;
395 #if defined(CONFIG_VCODEC_MMU)
396         struct list_head mem_region_list;
397 #endif
398         u32 *reg;
399 } vpu_reg;
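/*
 * A vpu_reg sits on two lists at once: status_link strings it onto the
 * service-wide waiting/running/done lists and session_link onto the same
 * three lists of its owning session.  reg_from_wait_to_run() and
 * reg_from_run_to_done() below move it through that lifecycle.
 */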
400
401 typedef struct vpu_device {
402         atomic_t                irq_count_codec;
403         atomic_t                irq_count_pp;
404         unsigned long           iobaseaddr;
405         unsigned int            iosize;
406         volatile u32            *hwregs;
407 } vpu_device;
408
409 enum vcodec_device_id {
410         VCODEC_DEVICE_ID_VPU,
411         VCODEC_DEVICE_ID_HEVC,
412         VCODEC_DEVICE_ID_COMBO
413 };
414
415 enum VCODEC_RUNNING_MODE {
416         VCODEC_RUNNING_MODE_NONE = -1,
417         VCODEC_RUNNING_MODE_VPU,
418         VCODEC_RUNNING_MODE_HEVC,
419 };
420
421 struct vcodec_mem_region {
422         struct list_head srv_lnk;
423         struct list_head reg_lnk;
424         struct list_head session_lnk;
425         unsigned long iova;     /* virtual address for iommu */
426         unsigned long len;
427         u32 reg_idx;
428         struct ion_handle *hdl;
429 };
430
431 enum vpu_ctx_state {
432         MMU_ACTIVATED   = BIT(0)
433 };
434
435 struct vpu_subdev_data {
436         struct cdev cdev;
437         dev_t dev_t;
438         struct class *cls;
439         struct device *child_dev;
440
441         int irq_enc;
442         int irq_dec;
443         struct vpu_service_info *pservice;
444
445         u32 *regs;
446         enum VCODEC_RUNNING_MODE mode;
447         struct list_head lnk_service;
448
449         struct device *dev;
450
451         vpu_device enc_dev;
452         vpu_device dec_dev;
453         VPU_HW_INFO_E *hw_info;
454
455         u32 reg_size;
456         unsigned long state;
457
458 #ifdef CONFIG_DEBUG_FS
459         struct dentry *debugfs_dir;
460         struct dentry *debugfs_file_regs;
461 #endif
462
463 #if defined(CONFIG_VCODEC_MMU)
464         struct device *mmu_dev;
465 #endif
466 };
467
468 typedef struct vpu_service_info {
469         struct wake_lock wake_lock;
470         struct delayed_work power_off_work;
471         struct mutex lock;
472         struct list_head waiting;               /* list of vpu_reg via status_link */
473         struct list_head running;               /* list of vpu_reg via status_link */
474         struct list_head done;                  /* list of vpu_reg via status_link */
475         struct list_head session;               /* list of vpu_session via list_session */
476         atomic_t total_running;
477         atomic_t enabled;
478         atomic_t power_on_cnt;
479         atomic_t power_off_cnt;
480         vpu_reg *reg_codec;
481         vpu_reg *reg_pproc;
482         vpu_reg *reg_resev;
483         struct vpu_dec_config dec_config;
484         struct vpu_enc_config enc_config;
485
486         bool auto_freq;
487         bool bug_dec_addr;
488         atomic_t freq_status;
489
490         struct clk *aclk_vcodec;
491         struct clk *hclk_vcodec;
492         struct clk *clk_core;
493         struct clk *clk_cabac;
494         struct clk *pd_video;
495
496         struct device *dev;
497
498         u32 irq_status;
499 #if defined(CONFIG_VCODEC_MMU)
500         struct ion_client *ion_client;
501         struct list_head mem_region_list;
502 #endif
503
504         enum vcodec_device_id dev_id;
505
506         enum VCODEC_RUNNING_MODE curr_mode;
507         u32 prev_mode;
508
509         struct delayed_work simulate_work;
510
511         u32 mode_bit;
512         u32 mode_ctrl;
513         u32 *reg_base;
514         u32 ioaddr;
515 #ifdef CONFIG_MFD_SYSCON
516         struct regmap *grf_base;
517 #else
518         u32 *grf_base;
519 #endif
520         char *name;
521
522         u32 subcnt;
523         struct list_head subdev_list;
524 } vpu_service_info;
525
526 struct vcodec_combo {
527         struct vpu_service_info *vpu_srv;
528         struct vpu_service_info *hevc_srv;
529         struct list_head waiting;
530         struct list_head running;
531         struct mutex run_lock;
532         vpu_reg *reg_codec;
533         enum vcodec_device_id current_hw_mode;
534 };
535
536 struct vpu_request {
537         u32 *req;
538         u32 size;
539 };
540
541 struct compat_vpu_request {
542         compat_uptr_t req;
543         u32 size;
544 };
545
546 /* debugfs root directory for all devices (vpu, hevc). */
547 static struct dentry *parent;
548
549 #ifdef CONFIG_DEBUG_FS
550 static int vcodec_debugfs_init(void);
551 static void vcodec_debugfs_exit(void);
552 static struct dentry* vcodec_debugfs_create_device_dir(char *dirname, struct dentry *parent);
553 static int debug_vcodec_open(struct inode *inode, struct file *file);
554
555 static const struct file_operations debug_vcodec_fops = {
556         .open = debug_vcodec_open,
557         .read = seq_read,
558         .llseek = seq_lseek,
559         .release = single_release,
560 };
561 #endif
562
563 #define VDPU_SOFT_RESET_REG     101
564 #define VDPU_CLEAN_CACHE_REG    516
565 #define VEPU_CLEAN_CACHE_REG    772
566 #define HEVC_CLEAN_CACHE_REG    260
567
568 #define VPU_REG_ENABLE(base, reg)       do { \
569                                                 base[reg] = 1; \
570                                         } while (0)
571
572 #define VDPU_SOFT_RESET(base)   VPU_REG_ENABLE(base, VDPU_SOFT_RESET_REG)
573 #define VDPU_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, VDPU_CLEAN_CACHE_REG)
574 #define VEPU_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, VEPU_CLEAN_CACHE_REG)
575 #define HEVC_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, HEVC_CLEAN_CACHE_REG)
576
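/*
 * The *_REG values above are word indices into the mapped register file;
 * VPU_REG_ENABLE() simply writes 1 there, which (per the register names)
 * kicks the soft-reset or cache-clean operation of the respective block.
 */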
577 #define VPU_POWER_OFF_DELAY             (4 * HZ) /* 4s */
578 #define VPU_TIMEOUT_DELAY               (2 * HZ) /* 2s */
579
580 typedef struct {
581         char *name;
582         struct timeval start;
583         struct timeval end;
584         u32 error_mask;
585 } task_info;
586
587 typedef enum {
588         TASK_VPU_ENC,
589         TASK_VPU_DEC,
590         TASK_VPU_PP,
591         TASK_RKDEC_HEVC,
592         TASK_TYPE_BUTT,
593 } TASK_TYPE;
594
595 task_info tasks[TASK_TYPE_BUTT] = {
596         {
597                 .name = "enc",
598                 .error_mask = ENC_ERR_MASK
599         },
600         {
601                 .name = "dec",
602                 .error_mask = DEC_ERR_MASK
603         },
604         {
605                 .name = "pp",
606                 .error_mask = PP_ERR_MASK
607         },
608         {
609                 .name = "hevc",
610                 .error_mask = HEVC_DEC_ERR_MASK
611         },
612 };
613
614 static void time_record(task_info *task, int is_end)
615 {
616         if (unlikely(debug & DEBUG_TIMING)) {
617                 do_gettimeofday((is_end)?(&task->end):(&task->start));
618         }
619 }
620
621 static void time_diff(task_info *task)
622 {
623         vpu_debug(DEBUG_TIMING, "%s task: %ld ms\n", task->name,
624                         (task->end.tv_sec  - task->start.tv_sec)  * 1000 +
625                         (task->end.tv_usec - task->start.tv_usec) / 1000);
626 }
627
628 static void vcodec_enter_mode(struct vpu_subdev_data *data)
629 {
630         int bits;
631         u32 raw = 0;
632         struct vpu_service_info *pservice = data->pservice;
633         struct vpu_subdev_data *subdata, *n;
634         if (pservice->subcnt < 2) {
635 #if defined(CONFIG_VCODEC_MMU)
636                 if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
637                         set_bit(MMU_ACTIVATED, &data->state);
638                         if (atomic_read(&pservice->enabled))
639                                 rockchip_iovmm_activate(data->dev);
640                         else
641                                 BUG_ON(!atomic_read(&pservice->enabled));
642                 }
643 #endif
644                 return;
645         }
646
647         if (pservice->curr_mode == data->mode)
648                 return;
649
650         vpu_debug(DEBUG_IOMMU, "vcodec enter mode %d\n", data->mode);
651 #if defined(CONFIG_VCODEC_MMU)
652         list_for_each_entry_safe(subdata, n, &pservice->subdev_list, lnk_service) {
653                 if (data != subdata && subdata->mmu_dev &&
654                     test_bit(MMU_ACTIVATED, &subdata->state)) {
655                         clear_bit(MMU_ACTIVATED, &subdata->state);
656                         rockchip_iovmm_deactivate(subdata->dev);
657                 }
658         }
659 #endif
660         bits = 1 << pservice->mode_bit;
661 #ifdef CONFIG_MFD_SYSCON
662         regmap_read(pservice->grf_base, pservice->mode_ctrl, &raw);
663
664         if (data->mode == VCODEC_RUNNING_MODE_HEVC)
665                 regmap_write(pservice->grf_base, pservice->mode_ctrl,
666                         raw | bits | (bits << 16));
667         else
668                 regmap_write(pservice->grf_base, pservice->mode_ctrl,
669                         (raw & (~bits)) | (bits << 16));
670 #else
671         raw = readl_relaxed(pservice->grf_base + pservice->mode_ctrl / 4);
672         if (data->mode == VCODEC_RUNNING_MODE_HEVC)
673                 writel_relaxed(raw | bits | (bits << 16),
674                         pservice->grf_base + pservice->mode_ctrl / 4);
675         else
676                 writel_relaxed((raw & (~bits)) | (bits << 16),
677                         pservice->grf_base + pservice->mode_ctrl / 4);
678 #endif
679 #if defined(CONFIG_VCODEC_MMU)
680         if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
681                 set_bit(MMU_ACTIVATED, &data->state);
682                 if (atomic_read(&pservice->enabled))
683                         rockchip_iovmm_activate(data->dev);
684                 else
685                         BUG_ON(!atomic_read(&pservice->enabled));
686         }
687 #endif
688         pservice->prev_mode = pservice->curr_mode;
689         pservice->curr_mode = data->mode;
690 }
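/*
 * Note on the GRF accesses above: the mode-control register follows the
 * usual Rockchip convention where the upper 16 bits act as a per-bit write
 * enable, hence the "| (bits << 16)" - only the selected mode bit is
 * changed, whatever the other bits currently hold.
 */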
691
692 static void vcodec_exit_mode(struct vpu_subdev_data *data)
693 {
694         if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
695                 clear_bit(MMU_ACTIVATED, &data->state);
696                 rockchip_iovmm_deactivate(data->dev);
697                 data->pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
698         }
699 }
700
701 static int vpu_get_clk(struct vpu_service_info *pservice)
702 {
703 #if VCODEC_CLOCK_ENABLE
704         switch (pservice->dev_id) {
705         case VCODEC_DEVICE_ID_HEVC:
706                 pservice->pd_video = devm_clk_get(pservice->dev, "pd_hevc");
707                 if (IS_ERR(pservice->pd_video)) {
708                         dev_err(pservice->dev, "failed on clk_get pd_hevc\n");
709                         return -1;
710                 }
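                /* fall through: hevc also needs the combo and vpu clocks below */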
711         case VCODEC_DEVICE_ID_COMBO:
712                 pservice->clk_cabac = devm_clk_get(pservice->dev, "clk_cabac");
713                 if (IS_ERR(pservice->clk_cabac)) {
714                         dev_err(pservice->dev, "failed on clk_get clk_cabac\n");
715                         pservice->clk_cabac = NULL;
716                 }
717                 pservice->clk_core = devm_clk_get(pservice->dev, "clk_core");
718                 if (IS_ERR(pservice->clk_core)) {
719                         dev_err(pservice->dev, "failed on clk_get clk_core\n");
720                         return -1;
721                 }
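                /* fall through: pick up the common aclk/hclk (and pd_video) as well */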
722         case VCODEC_DEVICE_ID_VPU:
723                 pservice->aclk_vcodec = devm_clk_get(pservice->dev, "aclk_vcodec");
724                 if (IS_ERR(pservice->aclk_vcodec)) {
725                         dev_err(pservice->dev, "failed on clk_get aclk_vcodec\n");
726                         return -1;
727                 }
728
729                 pservice->hclk_vcodec = devm_clk_get(pservice->dev, "hclk_vcodec");
730                 if (IS_ERR(pservice->hclk_vcodec)) {
731                         dev_err(pservice->dev, "failed on clk_get hclk_vcodec\n");
732                         return -1;
733                 }
734                 if (pservice->pd_video == NULL) {
735                         pservice->pd_video = devm_clk_get(pservice->dev, "pd_video");
736                         if (IS_ERR(pservice->pd_video)) {
737                                 pservice->pd_video = NULL;
738                                 dev_info(pservice->dev, "do not have pd_video\n");
739                         }
740                 }
741                 break;
742         default:
743                 break;
744         }
745
746         return 0;
747 #else
748         return 0;
749 #endif
750 }
751
752 static void vpu_put_clk(struct vpu_service_info *pservice)
753 {
754 #if VCODEC_CLOCK_ENABLE
755         if (pservice->pd_video)
756                 devm_clk_put(pservice->dev, pservice->pd_video);
757         if (pservice->aclk_vcodec)
758                 devm_clk_put(pservice->dev, pservice->aclk_vcodec);
759         if (pservice->hclk_vcodec)
760                 devm_clk_put(pservice->dev, pservice->hclk_vcodec);
761         if (pservice->clk_core)
762                 devm_clk_put(pservice->dev, pservice->clk_core);
763         if (pservice->clk_cabac)
764                 devm_clk_put(pservice->dev, pservice->clk_cabac);
765 #endif
766 }
767
768 static void vpu_reset(struct vpu_subdev_data *data)
769 {
770         struct vpu_service_info *pservice = data->pservice;
771         pr_info("%s: resetting...", dev_name(pservice->dev));
772
773 #if defined(CONFIG_ARCH_RK29)
774         clk_disable(aclk_ddr_vepu);
775         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, true);
776         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, true);
777         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, true);
778         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, true);
779         mdelay(10);
780         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, false);
781         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, false);
782         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, false);
783         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, false);
784         clk_enable(aclk_ddr_vepu);
785 #elif defined(CONFIG_ARCH_RK30)
786         pmu_set_idle_request(IDLE_REQ_VIDEO, true);
787         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
788         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, true);
789         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
790         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, true);
791         mdelay(1);
792         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, false);
793         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
794         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, false);
795         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
796         pmu_set_idle_request(IDLE_REQ_VIDEO, false);
797 #endif
798         pservice->reg_codec = NULL;
799         pservice->reg_pproc = NULL;
800         pservice->reg_resev = NULL;
801
802 #if defined(CONFIG_VCODEC_MMU)
803         if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
804                 clear_bit(MMU_ACTIVATED, &data->state);
805                 if (atomic_read(&pservice->enabled))
806                         rockchip_iovmm_deactivate(data->dev);
807                 else
808                         BUG_ON(!atomic_read(&pservice->enabled));
809         }
810 #endif
811 }
812
813 static void reg_deinit(struct vpu_subdev_data *data, vpu_reg *reg);
814 static void vpu_service_session_clear(struct vpu_subdev_data *data, vpu_session *session)
815 {
816         vpu_reg *reg, *n;
817         list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
818                 reg_deinit(data, reg);
819         }
820         list_for_each_entry_safe(reg, n, &session->running, session_link) {
821                 reg_deinit(data, reg);
822         }
823         list_for_each_entry_safe(reg, n, &session->done, session_link) {
824                 reg_deinit(data, reg);
825         }
826 }
827
828 static void vpu_service_dump(struct vpu_service_info *pservice)
829 {
830 }
831
832 static void vpu_service_power_off(struct vpu_service_info *pservice)
833 {
834         int total_running;
835         struct vpu_subdev_data *data = NULL, *n;
836         int ret = atomic_add_unless(&pservice->enabled, -1, 0);
837         if (!ret)
838                 return;
839
840         total_running = atomic_read(&pservice->total_running);
841         if (total_running) {
842                 pr_alert("alert: power off while %d tasks still running!\n", total_running);
843                 mdelay(50);
844                 pr_alert("alert: delayed 50 ms for the running tasks\n");
845                 vpu_service_dump(pservice);
846         }
847
848         pr_info("%s: power off...", dev_name(pservice->dev));
849         udelay(10);
850 #if defined(CONFIG_VCODEC_MMU)
851         list_for_each_entry_safe(data, n, &pservice->subdev_list, lnk_service) {
852                 if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
853                         clear_bit(MMU_ACTIVATED, &data->state);
854                         rockchip_iovmm_deactivate(data->dev);
855                 }
856         }
857         pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
858 #endif
859
860 #if VCODEC_CLOCK_ENABLE
861         if (pservice->pd_video)
862                 clk_disable_unprepare(pservice->pd_video);
863         if (pservice->hclk_vcodec)
864                 clk_disable_unprepare(pservice->hclk_vcodec);
865         if (pservice->aclk_vcodec)
866                 clk_disable_unprepare(pservice->aclk_vcodec);
867         if (pservice->clk_core)
868                 clk_disable_unprepare(pservice->clk_core);
869         if (pservice->clk_cabac)
870                 clk_disable_unprepare(pservice->clk_cabac);
871 #endif
872
873         atomic_add(1, &pservice->power_off_cnt);
874         wake_unlock(&pservice->wake_lock);
875         pr_info("done\n");
876 }
877
878 static inline void vpu_queue_power_off_work(struct vpu_service_info *pservice)
879 {
880         queue_delayed_work(system_nrt_wq, &pservice->power_off_work, VPU_POWER_OFF_DELAY);
881 }
882
883 static void vpu_power_off_work(struct work_struct *work_s)
884 {
885         struct delayed_work *dlwork = container_of(work_s, struct delayed_work, work);
886         struct vpu_service_info *pservice = container_of(dlwork, struct vpu_service_info, power_off_work);
887
888         if (mutex_trylock(&pservice->lock)) {
889                 vpu_service_power_off(pservice);
890                 mutex_unlock(&pservice->lock);
891         } else {
892                 /* Come back later if the device is busy... */
893                 vpu_queue_power_off_work(pservice);
894         }
895 }
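/*
 * Power management sketch: vpu_service_power_on() re-arms power_off_work
 * (at most once per second) with a VPU_POWER_OFF_DELAY of 4s, so the blocks
 * are only powered down after that much idle time.  The worker uses
 * mutex_trylock() so a busy device is simply rescheduled instead of blocking
 * the workqueue.
 */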
896
897 static void vpu_service_power_on(struct vpu_service_info *pservice)
898 {
899         int ret;
900         static ktime_t last;
901         ktime_t now = ktime_get();
902         if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) {
903                 cancel_delayed_work_sync(&pservice->power_off_work);
904                 vpu_queue_power_off_work(pservice);
905                 last = now;
906         }
907         ret = atomic_add_unless(&pservice->enabled, 1, 1);
908         if (!ret)
909                 return ;
910
911         pr_info("%s: power on\n", dev_name(pservice->dev));
912
913 #define BIT_VCODEC_CLK_SEL      (1<<10)
914         if (cpu_is_rk312x())
915                 writel_relaxed(readl_relaxed(RK_GRF_VIRT + RK312X_GRF_SOC_CON1) |
916                         BIT_VCODEC_CLK_SEL | (BIT_VCODEC_CLK_SEL << 16),
917                         RK_GRF_VIRT + RK312X_GRF_SOC_CON1);
918
919 #if VCODEC_CLOCK_ENABLE
920         if (pservice->aclk_vcodec)
921                 clk_prepare_enable(pservice->aclk_vcodec);
922         if (pservice->hclk_vcodec)
923                 clk_prepare_enable(pservice->hclk_vcodec);
924         if (pservice->clk_core)
925                 clk_prepare_enable(pservice->clk_core);
926         if (pservice->clk_cabac)
927                 clk_prepare_enable(pservice->clk_cabac);
928         if (pservice->pd_video)
929                 clk_prepare_enable(pservice->pd_video);
930 #endif
931
932         udelay(10);
933         atomic_add(1, &pservice->power_on_cnt);
934         wake_lock(&pservice->wake_lock);
935 }
936
937 static inline bool reg_check_rmvb_wmv(vpu_reg *reg)
938 {
939         u32 type = (reg->reg[3] & 0xF0000000) >> 28;
940         return ((type == 8) || (type == 4));
941 }
942
943 static inline bool reg_check_interlace(vpu_reg *reg)
944 {
945         u32 type = (reg->reg[3] & (1 << 23));
946         return (type > 0);
947 }
948
949 static inline enum VPU_DEC_FMT reg_check_fmt(vpu_reg *reg)
950 {
951         enum VPU_DEC_FMT type = (enum VPU_DEC_FMT)((reg->reg[3] & 0xF0000000) >> 28);
952         return type;
953 }
954
955 static inline int reg_probe_width(vpu_reg *reg)
956 {
957         int width_in_mb = reg->reg[4] >> 23;
958         return width_in_mb * 16;
959 }
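/*
 * The helpers above decode fields straight from the submitted register
 * image: bits 31:28 of reg[3] hold the decoder format (see reg_check_fmt),
 * bit 23 of reg[3] flags interlaced content, and reg[4] >> 23 is the picture
 * width in macroblocks, hence the "* 16" to get pixels.
 */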
960
961 #if defined(CONFIG_VCODEC_MMU)
962 static int vcodec_fd_to_iova(struct vpu_subdev_data *data, vpu_reg *reg, int fd)
963 {
964         struct vpu_service_info *pservice = data->pservice;
965         struct ion_handle *hdl;
966         int ret = 0;
967         struct vcodec_mem_region *mem_region;
968
969         hdl = ion_import_dma_buf(pservice->ion_client, fd);
970         if (IS_ERR(hdl)) {
971                 vpu_err("import dma-buf from fd %d failed\n", fd);
972                 return PTR_ERR(hdl);
973         }
974         mem_region = kzalloc(sizeof(struct vcodec_mem_region), GFP_KERNEL);
975
976         if (mem_region == NULL) {
977                 vpu_err("allocate memory for iommu memory region failed\n");
978                 ion_free(pservice->ion_client, hdl);
979                 return -1;
980         }
981
982         mem_region->hdl = hdl;
983         ret = ion_map_iommu(data->dev, pservice->ion_client,
984                 mem_region->hdl, &mem_region->iova, &mem_region->len);
985
986         if (ret < 0) {
987                 vpu_err("ion map iommu failed\n");
988                 kfree(mem_region);
989                 ion_free(pservice->ion_client, hdl);
990                 return ret;
991         }
992         INIT_LIST_HEAD(&mem_region->reg_lnk);
993         list_add_tail(&mem_region->reg_lnk, &reg->mem_region_list);
994         return mem_region->iova;
995 }
996
997 static int vcodec_bufid_to_iova(struct vpu_subdev_data *data, u8 *tbl,
998                                 int size, vpu_reg *reg,
999                                 struct extra_info_for_iommu *ext_inf)
1000 {
1001         struct vpu_service_info *pservice = data->pservice;
1002         int i;
1003         int usr_fd = 0;
1004         int offset = 0;
1005
1006         if (tbl == NULL || size <= 0) {
1007                 dev_err(pservice->dev, "invalid input arguments\n");
1008                 return -1;
1009         }
1010
1011         for (i = 0; i < size; i++) {
1012                 usr_fd = reg->reg[tbl[i]] & 0x3FF;
1013
1014                 if (tbl[i] == 41 && data->hw_info->hw_id != HEVC_ID &&
1015                     (reg->type == VPU_DEC || reg->type == VPU_DEC_PP))
1016                         /* special case for vpu dec register 41 */
1017                         offset = reg->reg[tbl[i]] >> 10 << 4;
1018                 else
1019                         offset = reg->reg[tbl[i]] >> 10;
1020
1021                 if (usr_fd != 0) {
1022                         struct ion_handle *hdl;
1023                         int ret = 0;
1024                         struct vcodec_mem_region *mem_region;
1025
1026                         hdl = ion_import_dma_buf(pservice->ion_client, usr_fd);
1027                         if (IS_ERR(hdl)) {
1028                                 dev_err(pservice->dev, "import dma-buf from fd %d failed, reg[%d]\n", usr_fd, tbl[i]);
1029                                 return PTR_ERR(hdl);
1030                         }
1031
1032                         if (tbl[i] == 42 && data->hw_info->hw_id == HEVC_ID) {
1033                                 int j;
1034                                 u8 *pps;    /* read the pps bytes unsigned */
1035                                 pps = (u8 *)ion_map_kernel(pservice->ion_client, hdl);
1036                                 for (j = 0; j < 64; j++) {
1037                                         u32 scaling_offset;
1038                                         u32 tmp;
1039                                         int scaling_fd = 0;
1040                                         scaling_offset = (u32)pps[j*80+74];
1041                                         scaling_offset += (u32)pps[j*80+75] << 8;
1042                                         scaling_offset += (u32)pps[j*80+76] << 16;
1043                                         scaling_offset += (u32)pps[j*80+77] << 24;
1044                                         scaling_fd = scaling_offset & 0x3ff;
1045                                         scaling_offset = scaling_offset >> 10;
1046                                         if (scaling_fd > 0) {
1047                                                 tmp = vcodec_fd_to_iova(data, reg, scaling_fd);
1048                                                 tmp += scaling_offset;
1049                                                 pps[j*80+74] = tmp & 0xff;
1050                                                 pps[j*80+75] = (tmp >> 8) & 0xff;
1051                                                 pps[j*80+76] = (tmp >> 16) & 0xff;
1052                                                 pps[j*80+77] = (tmp >> 24) & 0xff;
1053                                         }
1054                                 }
1055                         }
1056
1057                         mem_region = kzalloc(sizeof(struct vcodec_mem_region), GFP_KERNEL);
1058
1059                         if (mem_region == NULL) {
1060                                 dev_err(pservice->dev, "allocate memory for iommu memory region failed\n");
1061                                 ion_free(pservice->ion_client, hdl);
1062                                 return -1;
1063                         }
1064
1065                         mem_region->hdl = hdl;
1066                         mem_region->reg_idx = tbl[i];
1067                         ret = ion_map_iommu(data->dev,
1068                                             pservice->ion_client,
1069                                             mem_region->hdl,
1070                                             &mem_region->iova,
1071                                             &mem_region->len);
1072
1073                         if (ret < 0) {
1074                                 dev_err(pservice->dev, "ion map iommu failed\n");
1075                                 kfree(mem_region);
1076                                 ion_free(pservice->ion_client, hdl);
1077                                 return ret;
1078                         }
1079                         reg->reg[tbl[i]] = mem_region->iova + offset;
1080                         INIT_LIST_HEAD(&mem_region->reg_lnk);
1081                         list_add_tail(&mem_region->reg_lnk, &reg->mem_region_list);
1082                 }
1083         }
1084
1085         if (ext_inf != NULL && ext_inf->magic == EXTRA_INFO_MAGIC) {
1086                 for (i = 0; i < ext_inf->cnt; i++) {
1087                         vpu_debug(DEBUG_IOMMU, "reg[%d] + offset %d\n",
1088                                   ext_inf->elem[i].index,
1089                                   ext_inf->elem[i].offset);
1090                         reg->reg[ext_inf->elem[i].index] +=
1091                                 ext_inf->elem[i].offset;
1092                 }
1093         }
1094
1095         return 0;
1096 }
1097
1098 static int vcodec_reg_address_translate(struct vpu_subdev_data *data,
1099                                         vpu_reg *reg,
1100                                         struct extra_info_for_iommu *ext_inf)
1101 {
1102         VPU_HW_ID hw_id;
1103         u8 *tbl;
1104         int size = 0;
1105
1106         hw_id = data->hw_info->hw_id;
1107
1108         if (hw_id == HEVC_ID) {
1109                 tbl = addr_tbl_hevc_dec;
1110                 size = sizeof(addr_tbl_hevc_dec);
1111         } else {
1112                 if (reg->type == VPU_DEC || reg->type == VPU_DEC_PP) {
1113                         switch (reg_check_fmt(reg)) {
1114                         case VPU_DEC_FMT_H264:
1115                                 {
1116                                         tbl = addr_tbl_vpu_h264dec;
1117                                         size = sizeof(addr_tbl_vpu_h264dec);
1118                                         break;
1119                                 }
1120                         case VPU_DEC_FMT_VP8:
1121                         case VPU_DEC_FMT_VP7:
1122                                 {
1123                                         tbl = addr_tbl_vpu_vp8dec;
1124                                         size = sizeof(addr_tbl_vpu_vp8dec);
1125                                         break;
1126                                 }
1127
1128                         case VPU_DEC_FMT_VP6:
1129                                 {
1130                                         tbl = addr_tbl_vpu_vp6dec;
1131                                         size = sizeof(addr_tbl_vpu_vp6dec);
1132                                         break;
1133                                 }
1134                         case VPU_DEC_FMT_VC1:
1135                                 {
1136                                         tbl = addr_tbl_vpu_vc1dec;
1137                                         size = sizeof(addr_tbl_vpu_vc1dec);
1138                                         break;
1139                                 }
1140
1141                         case VPU_DEC_FMT_JPEG:
1142                                 {
1143                                         tbl = addr_tbl_vpu_jpegdec;
1144                                         size = sizeof(addr_tbl_vpu_jpegdec);
1145                                         break;
1146                                 }
1147                         default:
1148                                 tbl = addr_tbl_vpu_defaultdec;
1149                                 size = sizeof(addr_tbl_vpu_defaultdec);
1150                                 break;
1151                         }
1152                 } else if (reg->type == VPU_ENC) {
1153                         tbl = addr_tbl_vpu_enc;
1154                         size = sizeof(addr_tbl_vpu_enc);
1155                 }
1156         }
1157
1158         if (size != 0) {
1159                 return vcodec_bufid_to_iova(data, tbl, size, reg, ext_inf);
1160         } else {
1161                 return -1;
1162         }
1163 }
1164 #endif
1165
1166 static vpu_reg *reg_init(struct vpu_subdev_data *data,
1167         vpu_session *session, void __user *src, u32 size)
1168 {
1169         struct vpu_service_info *pservice = data->pservice;
1170         int extra_size = 0;
1171         struct extra_info_for_iommu extra_info;
1172         vpu_reg *reg = kmalloc(sizeof(vpu_reg) + data->reg_size, GFP_KERNEL);
1173
1174         vpu_debug_enter();
1175
1176         if (NULL == reg) {
1177                 vpu_err("error: kmalloc failed in reg_init\n");
1178                 return NULL;
1179         }
1180
1181         if (size > data->reg_size) {
1182                 /* anything beyond the hw register file is treated as extra
1183                    iommu info; clamp it to the struct we copy it into */
1184                 extra_size = size - data->reg_size;
                if (extra_size > sizeof(extra_info))
                        extra_size = sizeof(extra_info);
1185                 size = data->reg_size;
1186         }
1187         reg->session = session;
1188         reg->data = data;
1189         reg->type = session->type;
1190         reg->size = size;
1191         reg->freq = VPU_FREQ_DEFAULT;
1192         reg->reg = (u32 *)&reg[1];
1193         INIT_LIST_HEAD(&reg->session_link);
1194         INIT_LIST_HEAD(&reg->status_link);
1195
1196 #if defined(CONFIG_VCODEC_MMU)
1197         if (data->mmu_dev)
1198                 INIT_LIST_HEAD(&reg->mem_region_list);
1199 #endif
1200
1201         if (copy_from_user(&reg->reg[0], (void __user *)src, size)) {
1202                 vpu_err("error: copy_from_user failed in reg_init\n");
1203                 kfree(reg);
1204                 return NULL;
1205         }
1206
1207         if (copy_from_user(&extra_info, (u8 *)src + size, extra_size)) {
1208                 vpu_err("error: copy_from_user failed in reg_init\n");
1209                 kfree(reg);
1210                 return NULL;
1211         }
1212
1213 #if defined(CONFIG_VCODEC_MMU)
1214         if (data->mmu_dev &&
1215             0 > vcodec_reg_address_translate(data, reg, &extra_info)) {
1216                 vpu_err("error: translate reg address failed\n");
1217                 kfree(reg);
1218                 return NULL;
1219         }
1220 #endif
1221
1222         mutex_lock(&pservice->lock);
1223         list_add_tail(&reg->status_link, &pservice->waiting);
1224         list_add_tail(&reg->session_link, &session->waiting);
1225         mutex_unlock(&pservice->lock);
1226
1227         if (pservice->auto_freq) {
1228                 if (!soc_is_rk2928g()) {
1229                         if (reg->type == VPU_DEC || reg->type == VPU_DEC_PP) {
1230                                 if (reg_check_rmvb_wmv(reg)) {
1231                                         reg->freq = VPU_FREQ_200M;
1232                                 } else if (reg_check_fmt(reg) == VPU_DEC_FMT_H264) {
1233                                         if (reg_probe_width(reg) > 3200) {
1234                                                 /*raise frequency for 4k avc.*/
1235                                                 reg->freq = VPU_FREQ_500M;
1236                                         }
1237                                 } else {
1238                                         if (reg_check_interlace(reg)) {
1239                                                 reg->freq = VPU_FREQ_400M;
1240                                         }
1241                                 }
1242                         }
1243                         if (reg->type == VPU_PP) {
1244                                 reg->freq = VPU_FREQ_400M;
1245                         }
1246                 }
1247         }
1248         vpu_debug_leave();
1249         return reg;
1250 }
1251
1252 static void reg_deinit(struct vpu_subdev_data *data, vpu_reg *reg)
1253 {
1254         struct vpu_service_info *pservice = data->pservice;
1255 #if defined(CONFIG_VCODEC_MMU)
1256         struct vcodec_mem_region *mem_region = NULL, *n;
1257 #endif
1258
1259         list_del_init(&reg->session_link);
1260         list_del_init(&reg->status_link);
1261         if (reg == pservice->reg_codec)
1262                 pservice->reg_codec = NULL;
1263         if (reg == pservice->reg_pproc)
1264                 pservice->reg_pproc = NULL;
1265
1266 #if defined(CONFIG_VCODEC_MMU)
1267         /* release memory region attach to this registers table. */
1268         if (data->mmu_dev) {
1269                 list_for_each_entry_safe(mem_region, n,
1270                         &reg->mem_region_list, reg_lnk) {
1271                         /* do not unmap the iommu mapping manually here;
1272                            it is torn down when the memory itself is released */
1273                         /*vcodec_enter_mode(data);
1274                         ion_unmap_iommu(data->dev,
1275                                         pservice->ion_client,
1276                                         mem_region->hdl);
1277                         vcodec_exit_mode();*/
1278                         ion_free(pservice->ion_client, mem_region->hdl);
1279                         list_del_init(&mem_region->reg_lnk);
1280                         kfree(mem_region);
1281                 }
1282         }
1283 #endif
1284
1285         kfree(reg);
1286 }
1287
1288 static void reg_from_wait_to_run(struct vpu_service_info *pservice, vpu_reg *reg)
1289 {
1290         vpu_debug_enter();
1291         list_del_init(&reg->status_link);
1292         list_add_tail(&reg->status_link, &pservice->running);
1293
1294         list_del_init(&reg->session_link);
1295         list_add_tail(&reg->session_link, &reg->session->running);
1296         vpu_debug_leave();
1297 }
1298
1299 static void reg_copy_from_hw(vpu_reg *reg, volatile u32 *src, u32 count)
1300 {
1301         int i;
1302         u32 *dst = (u32 *)&reg->reg[0];
1303         vpu_debug_enter();
1304         for (i = 0; i < count; i++)
1305                 *dst++ = *src++;
1306         vpu_debug_leave();
1307 }
1308
1309 static void reg_from_run_to_done(struct vpu_subdev_data *data,
1310         vpu_reg *reg)
1311 {
1312         struct vpu_service_info *pservice = data->pservice;
1313         int irq_reg = -1;
1314
1315         vpu_debug_enter();
1316
1317         list_del_init(&reg->status_link);
1318         list_add_tail(&reg->status_link, &pservice->done);
1319
1320         list_del_init(&reg->session_link);
1321         list_add_tail(&reg->session_link, &reg->session->done);
1322
1323         /*vcodec_enter_mode(data);*/
1324         switch (reg->type) {
1325         case VPU_ENC : {
1326                 pservice->reg_codec = NULL;
1327                 reg_copy_from_hw(reg, data->enc_dev.hwregs, data->hw_info->enc_reg_num);
1328                 irq_reg = ENC_INTERRUPT_REGISTER;
1329                 break;
1330         }
1331         case VPU_DEC : {
1332                 int reg_len = REG_NUM_9190_DEC;
1333                 pservice->reg_codec = NULL;
1334                 reg_copy_from_hw(reg, data->dec_dev.hwregs, reg_len);
1335                 irq_reg = DEC_INTERRUPT_REGISTER;
1336                 break;
1337         }
1338         case VPU_PP : {
1339                 pservice->reg_pproc = NULL;
1340                 reg_copy_from_hw(reg, data->dec_dev.hwregs + PP_INTERRUPT_REGISTER, REG_NUM_9190_PP);
1341                 data->dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
1342                 break;
1343         }
1344         case VPU_DEC_PP : {
1345                 pservice->reg_codec = NULL;
1346                 pservice->reg_pproc = NULL;
1347                 reg_copy_from_hw(reg, data->dec_dev.hwregs, REG_NUM_9190_DEC_PP);
1348                 data->dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
1349                 break;
1350         }
1351         default : {
1352                 vpu_err("error: copy reg from hw with unknown type %d\n", reg->type);
1353                 break;
1354         }
1355         }
1356         vcodec_exit_mode(data);
1357
1358         if (irq_reg != -1)
1359                 reg->reg[irq_reg] = pservice->irq_status;
1360
1361         atomic_sub(1, &reg->session->task_running);
1362         atomic_sub(1, &pservice->total_running);
1363         wake_up(&reg->session->wait);
1364
1365         vpu_debug_leave();
1366 }
1367
1368 static void vpu_service_set_freq(struct vpu_service_info *pservice, vpu_reg *reg)
1369 {
1370         VPU_FREQ curr = atomic_read(&pservice->freq_status);
1371         if (curr == reg->freq)
1372                 return;
1373         atomic_set(&pservice->freq_status, reg->freq);
1374         switch (reg->freq) {
1375         case VPU_FREQ_200M : {
1376                 clk_set_rate(pservice->aclk_vcodec, 200*MHZ);
1377         } break;
1378         case VPU_FREQ_266M : {
1379                 clk_set_rate(pservice->aclk_vcodec, 266*MHZ);
1380         } break;
1381         case VPU_FREQ_300M : {
1382                 clk_set_rate(pservice->aclk_vcodec, 300*MHZ);
1383         } break;
1384         case VPU_FREQ_400M : {
1385                 clk_set_rate(pservice->aclk_vcodec, 400*MHZ);
1386         } break;
1387         case VPU_FREQ_500M : {
1388                 clk_set_rate(pservice->aclk_vcodec, 500*MHZ);
1389         } break;
1390         case VPU_FREQ_600M : {
1391                 clk_set_rate(pservice->aclk_vcodec, 600*MHZ);
1392         } break;
1393         default : {
1394                 if (soc_is_rk2928g())
1395                         clk_set_rate(pservice->aclk_vcodec, 400*MHZ);
1396                 else
1397                         clk_set_rate(pservice->aclk_vcodec, 300*MHZ);
1398         } break;
1399         }
1400 }
1401
1402 static void reg_copy_to_hw(struct vpu_subdev_data *data, vpu_reg *reg)
1403 {
1404         struct vpu_service_info *pservice = data->pservice;
1405         int i;
1406         u32 *src = (u32 *)&reg->reg[0];
1407         vpu_debug_enter();
1408
1409         atomic_add(1, &pservice->total_running);
1410         atomic_add(1, &reg->session->task_running);
1411         if (pservice->auto_freq)
1412                 vpu_service_set_freq(pservice, reg);
1413
1414         vcodec_enter_mode(data);
1415
1416         switch (reg->type) {
1417         case VPU_ENC : {
1418                 int enc_count = data->hw_info->enc_reg_num;
1419                 u32 *dst = (u32 *)data->enc_dev.hwregs;
1420
1421                 pservice->reg_codec = reg;
1422
1423                 dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC] & 0x6;
1424
1425                 for (i = 0; i < VPU_REG_EN_ENC; i++)
1426                         dst[i] = src[i];
1427
1428                 for (i = VPU_REG_EN_ENC + 1; i < enc_count; i++)
1429                         dst[i] = src[i];
1430
1431                 VEPU_CLEAN_CACHE(dst);
1432
1433                 dsb(sy);
1434
1435                 dst[VPU_REG_ENC_GATE] = src[VPU_REG_ENC_GATE] | VPU_REG_ENC_GATE_BIT;
1436                 dst[VPU_REG_EN_ENC]   = src[VPU_REG_EN_ENC];
1437
1438                 time_record(&tasks[TASK_VPU_ENC], 0);
1439         } break;
1440         case VPU_DEC : {
1441                 u32 *dst = (u32 *)data->dec_dev.hwregs;
1442
1443                 pservice->reg_codec = reg;
1444
1445                 if (data->hw_info->hw_id != HEVC_ID) {
1446                         for (i = REG_NUM_9190_DEC - 1; i > VPU_REG_DEC_GATE; i--)
1447                                 dst[i] = src[i];
1448                         VDPU_CLEAN_CACHE(dst);
1449                 } else {
1450                         for (i = REG_NUM_HEVC_DEC - 1; i > VPU_REG_EN_DEC; i--)
1451                                 dst[i] = src[i];
1452                         HEVC_CLEAN_CACHE(dst);
1453                 }
1454
1455                 dsb(sy);
1456
1457                 if (data->hw_info->hw_id != HEVC_ID) {
1458                         dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
1459                         dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
1460                 } else {
1461                         dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
1462                 }
1463                 dsb(sy);
1464                 dmb(sy);
1465
1466                 time_record(&tasks[TASK_VPU_DEC], 0);
1467         } break;
1468         case VPU_PP : {
1469                 u32 *dst = (u32 *)data->dec_dev.hwregs + PP_INTERRUPT_REGISTER;
1470                 pservice->reg_pproc = reg;
1471
1472                 dst[VPU_REG_PP_GATE] = src[VPU_REG_PP_GATE] | VPU_REG_PP_GATE_BIT;
1473
1474                 for (i = VPU_REG_PP_GATE + 1; i < REG_NUM_9190_PP; i++)
1475                         dst[i] = src[i];
1476
1477                 dsb(sy);
1478
1479                 dst[VPU_REG_EN_PP] = src[VPU_REG_EN_PP];
1480
1481                 time_record(&tasks[TASK_VPU_PP], 0);
1482         } break;
1483         case VPU_DEC_PP : {
1484                 u32 *dst = (u32 *)data->dec_dev.hwregs;
1485                 pservice->reg_codec = reg;
1486                 pservice->reg_pproc = reg;
1487
1488                 VDPU_SOFT_RESET(dst);
1489                 VDPU_CLEAN_CACHE(dst);
1490
1491                 for (i = VPU_REG_EN_DEC_PP + 1; i < REG_NUM_9190_DEC_PP; i++)
1492                         dst[i] = src[i];
1493
1494                 dst[VPU_REG_EN_DEC_PP]   = src[VPU_REG_EN_DEC_PP] | 0x2;
1495                 dsb(sy);
1496
1497                 dst[VPU_REG_DEC_PP_GATE] = src[VPU_REG_DEC_PP_GATE] | VPU_REG_PP_GATE_BIT;
1498                 dst[VPU_REG_DEC_GATE]    = src[VPU_REG_DEC_GATE]    | VPU_REG_DEC_GATE_BIT;
1499                 dst[VPU_REG_EN_DEC]      = src[VPU_REG_EN_DEC];
1500
1501                 time_record(&tasks[TASK_VPU_DEC], 0);
1502         } break;
1503         default : {
1504                 vpu_err("error: unsupported session type %d\n", reg->type);
1505                 atomic_sub(1, &pservice->total_running);
1506                 atomic_sub(1, &reg->session->task_running);
1507         } break;
1508         }
1509
1510         /*vcodec_exit_mode(data);*/
1511         vpu_debug_leave();
1512 }
1513
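/*
 * try_set_reg - start the next queued job if the hardware can take it.
 *
 * Looks at the head of pservice->waiting and hands it to reg_copy_to_hw()
 * only when the required unit (codec and/or post-processor) is idle; with
 * auto_freq enabled a job is held back rather than switching the clock
 * underneath a running unit. Called with pservice->lock held.
 */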
1514 static void try_set_reg(struct vpu_subdev_data *data)
1515 {
1516         struct vpu_service_info *pservice = data->pservice;
1517         vpu_debug_enter();
1518         if (!list_empty(&pservice->waiting)) {
1519                 int can_set = 0;
1520                 vpu_reg *reg = list_entry(pservice->waiting.next, vpu_reg, status_link);
1521
1522                 vpu_service_power_on(pservice);
1523
1524                 switch (reg->type) {
1525                 case VPU_ENC : {
1526                         if ((NULL == pservice->reg_codec) &&  (NULL == pservice->reg_pproc))
1527                                 can_set = 1;
1528                 } break;
1529                 case VPU_DEC : {
1530                         if (NULL == pservice->reg_codec)
1531                                 can_set = 1;
1532                         if (pservice->auto_freq && (NULL != pservice->reg_pproc))
1533                                 can_set = 0;
1534                 } break;
1535                 case VPU_PP : {
1536                         if (NULL == pservice->reg_codec) {
1537                                 if (NULL == pservice->reg_pproc)
1538                                         can_set = 1;
1539                         } else {
1540                                 if ((VPU_DEC == pservice->reg_codec->type) && (NULL == pservice->reg_pproc))
1541                                         can_set = 1;
1542                                 /* cannot change frequency while the vpu is working */
1543                                 if (pservice->auto_freq)
1544                                         can_set = 0;
1545                         }
1546                 } break;
1547                 case VPU_DEC_PP : {
1548                         if ((NULL == pservice->reg_codec) && (NULL == pservice->reg_pproc))
1549                                 can_set = 1;
1550                 } break;
1551                 default : {
1552                         vpu_err("error: undefined reg type %d\n", reg->type);
1553                 } break;
1554                 }
1555                 if (can_set) {
1556                         reg_from_wait_to_run(pservice, reg);
1557                         reg_copy_to_hw(reg->data, reg);
1558                 }
1559         }
1560         vpu_debug_leave();
1561 }
1562
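/*
 * return_reg - copy a completed register set back to user space and free it.
 * The copy size depends on the task type and, for decode, on whether this
 * is the HEVC or the 9190 decoder.
 */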
1563 static int return_reg(struct vpu_subdev_data *data,
1564         vpu_reg *reg, u32 __user *dst)
1565 {
1566         int ret = 0;
1567         vpu_debug_enter();
1568         switch (reg->type) {
1569         case VPU_ENC : {
1570                 if (copy_to_user(dst, &reg->reg[0], data->hw_info->enc_io_size))
1571                         ret = -EFAULT;
1572                 break;
1573         }
1574         case VPU_DEC : {
1575                 int reg_len = data->hw_info->hw_id == HEVC_ID ? REG_NUM_HEVC_DEC : REG_NUM_9190_DEC;
1576                 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(reg_len)))
1577                         ret = -EFAULT;
1578                 break;
1579         }
1580         case VPU_PP : {
1581                 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_9190_PP)))
1582                         ret = -EFAULT;
1583                 break;
1584         }
1585         case VPU_DEC_PP : {
1586                 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_9190_DEC_PP)))
1587                         ret = -EFAULT;
1588                 break;
1589         }
1590         default : {
1591                 ret = -EFAULT;
1592                 vpu_err("error: copy reg to user with unknown type %d\n", reg->type);
1593                 break;
1594         }
1595         }
1596         reg_deinit(data, reg);
1597         vpu_debug_leave();
1598         return ret;
1599 }
1600
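/*
 * vpu_service_ioctl - native ioctl entry point.
 *
 * A rough, illustrative user-space sequence (the device node name depends
 * on the "name" DT property, e.g. /dev/vpu_service on typical boards; the
 * req/size fields mirror struct vpu_request as used in this handler):
 *
 *	fd = open("/dev/vpu_service", O_RDWR);
 *	ioctl(fd, VPU_IOC_SET_CLIENT_TYPE, VPU_DEC);
 *	req.req = regs; req.size = reg_size;
 *	ioctl(fd, VPU_IOC_SET_REG, &req);	queue the job
 *	ioctl(fd, VPU_IOC_GET_REG, &req);	wait for and fetch the result
 *	close(fd);
 */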
1601 static long vpu_service_ioctl(struct file *filp, unsigned int cmd,
1602         unsigned long arg)
1603 {
1604         struct vpu_subdev_data *data =
1605                 container_of(filp->f_dentry->d_inode->i_cdev,
1606                         struct vpu_subdev_data, cdev);
1607         struct vpu_service_info *pservice = data->pservice;
1608         vpu_session *session = (vpu_session *)filp->private_data;
1609         vpu_debug_enter();
1610         if (NULL == session)
1611                 return -EINVAL;
1612
1613         switch (cmd) {
1614         case VPU_IOC_SET_CLIENT_TYPE : {
1615                 session->type = (enum VPU_CLIENT_TYPE)arg;
1616                 vpu_debug(DEBUG_IOCTL, "VPU_IOC_SET_CLIENT_TYPE %d\n", session->type);
1617                 break;
1618         }
1619         case VPU_IOC_GET_HW_FUSE_STATUS : {
1620                 struct vpu_request req;
1621                 vpu_debug(DEBUG_IOCTL, "VPU_IOC_GET_HW_FUSE_STATUS type %d\n", session->type);
1622                 if (copy_from_user(&req, (void __user *)arg, sizeof(struct vpu_request))) {
1623                         vpu_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_from_user failed\n");
1624                         return -EFAULT;
1625                 } else {
1626                         if (VPU_ENC != session->type) {
1627                                 if (copy_to_user((void __user *)req.req,
1628                                         &pservice->dec_config,
1629                                         sizeof(struct vpu_dec_config))) {
1630                                         vpu_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n",
1631                                                 session->type);
1632                                         return -EFAULT;
1633                                 }
1634                         } else {
1635                                 if (copy_to_user((void __user *)req.req,
1636                                         &pservice->enc_config,
1637                                         sizeof(struct vpu_enc_config))) {
1638                                         vpu_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n",
1639                                                 session->type);
1640                                         return -EFAULT;
1641                                 }
1642                         }
1643                 }
1644
1645                 break;
1646         }
1647         case VPU_IOC_SET_REG : {
1648                 struct vpu_request req;
1649                 vpu_reg *reg;
1650                 vpu_debug(DEBUG_IOCTL, "VPU_IOC_SET_REG type %d\n", session->type);
1651                 if (copy_from_user(&req, (void __user *)arg,
1652                         sizeof(struct vpu_request))) {
1653                         vpu_err("error: VPU_IOC_SET_REG copy_from_user failed\n");
1654                         return -EFAULT;
1655                 }
1656                 reg = reg_init(data, session,
1657                         (void __user *)req.req, req.size);
1658                 if (NULL == reg) {
1659                         return -EFAULT;
1660                 } else {
1661                         mutex_lock(&pservice->lock);
1662                         try_set_reg(data);
1663                         mutex_unlock(&pservice->lock);
1664                 }
1665
1666                 break;
1667         }
1668         case VPU_IOC_GET_REG : {
1669                 struct vpu_request req;
1670                 vpu_reg *reg;
1671                 vpu_debug(DEBUG_IOCTL, "VPU_IOC_GET_REG type %d\n", session->type);
1672                 if (copy_from_user(&req, (void __user *)arg,
1673                         sizeof(struct vpu_request))) {
1674                         vpu_err("error: VPU_IOC_GET_REG copy_from_user failed\n");
1675                         return -EFAULT;
1676                 } else {
1677                         int ret = wait_event_timeout(session->wait, !list_empty(&session->done), VPU_TIMEOUT_DELAY);
1678                         if (!list_empty(&session->done)) {
1679                                 if (ret < 0) {
1680                                         vpu_err("warning: pid %d wait task success but wait_event ret %d\n", session->pid, ret);
1681                                 }
1682                                 ret = 0;
1683                         } else {
1684                                 if (unlikely(ret < 0)) {
1685                                         vpu_err("error: pid %d wait task ret %d\n", session->pid, ret);
1686                                 } else if (0 == ret) {
1687                                         vpu_err("error: pid %d waiting for %d task(s) done timed out\n", session->pid, atomic_read(&session->task_running));
1688                                         ret = -ETIMEDOUT;
1689                                 }
1690                         }
1691                         if (ret < 0) {
1692                                 int task_running = atomic_read(&session->task_running);
1693                                 mutex_lock(&pservice->lock);
1694                                 vpu_service_dump(pservice);
1695                                 if (task_running) {
1696                                         atomic_set(&session->task_running, 0);
1697                                         atomic_sub(task_running, &pservice->total_running);
1698                                         printk("%d task(s) still running but not returned, resetting hardware...", task_running);
1699                                         vpu_reset(data);
1700                                         printk("done\n");
1701                                 }
1702                                 vpu_service_session_clear(data, session);
1703                                 mutex_unlock(&pservice->lock);
1704                                 return ret;
1705                         }
1706                 }
1707                 mutex_lock(&pservice->lock);
1708                 reg = list_entry(session->done.next, vpu_reg, session_link);
1709                 return_reg(data, reg, (u32 __user *)req.req);
1710                 mutex_unlock(&pservice->lock);
1711                 break;
1712         }
1713         case VPU_IOC_PROBE_IOMMU_STATUS: {
1714                 int iommu_enable = 0;
1715
1716                 vpu_debug(DEBUG_IOCTL, "VPU_IOC_PROBE_IOMMU_STATUS\n");
1717
1718 #if defined(CONFIG_VCODEC_MMU)
1719                 iommu_enable = data->mmu_dev ? 1 : 0;
1720 #endif
1721
1722                 if (copy_to_user((void __user *)arg, &iommu_enable, sizeof(int))) {
1723                         vpu_err("error: VPU_IOC_PROBE_IOMMU_STATUS copy_to_user failed\n");
1724                         return -EFAULT;
1725                 }
1726                 break;
1727         }
1728         default : {
1729                 vpu_err("error: unknown vpu service ioctl cmd %x\n", cmd);
1730                 break;
1731         }
1732         }
1733         vpu_debug_leave();
1734         return 0;
1735 }
1736
1737 #ifdef CONFIG_COMPAT
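/*
 * compat_vpu_service_ioctl - 32-bit compat entry point. Mirrors the native
 * handler but unpacks struct compat_vpu_request and translates user
 * pointers with compat_ptr().
 */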
1738 static long compat_vpu_service_ioctl(struct file *filp, unsigned int cmd,
1739         unsigned long arg)
1740 {
1741         struct vpu_subdev_data *data =
1742                 container_of(filp->f_dentry->d_inode->i_cdev,
1743                         struct vpu_subdev_data, cdev);
1744         struct vpu_service_info *pservice = data->pservice;
1745         vpu_session *session = (vpu_session *)filp->private_data;
1746         vpu_debug_enter();
1747         vpu_debug(3, "cmd %x, COMPAT_VPU_IOC_SET_CLIENT_TYPE %x\n", cmd,
1748                   (u32)COMPAT_VPU_IOC_SET_CLIENT_TYPE);
1749         if (NULL == session)
1750                 return -EINVAL;
1751
1752         switch (cmd) {
1753         case COMPAT_VPU_IOC_SET_CLIENT_TYPE : {
1754                 session->type = (enum VPU_CLIENT_TYPE)arg;
1755                 vpu_debug(DEBUG_IOCTL, "COMPAT_VPU_IOC_SET_CLIENT_TYPE type %d\n", session->type);
1756                 break;
1757         }
1758         case COMPAT_VPU_IOC_GET_HW_FUSE_STATUS : {
1759                 struct compat_vpu_request req;
1760                 vpu_debug(DEBUG_IOCTL, "COMPAT_VPU_IOC_GET_HW_FUSE_STATUS type %d\n", session->type);
1761                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1762                                    sizeof(struct compat_vpu_request))) {
1763                         vpu_err("error: VPU_IOC_GET_HW_FUSE_STATUS"
1764                                 " copy_from_user failed\n");
1765                         return -EFAULT;
1766                 } else {
1767                         if (VPU_ENC != session->type) {
1768                                 if (copy_to_user(compat_ptr((compat_uptr_t)req.req),
1769                                                  &pservice->dec_config,
1770                                                  sizeof(struct vpu_dec_config))) {
1771                                         vpu_err("error: VPU_IOC_GET_HW_FUSE_STATUS "
1772                                                 "copy_to_user failed type %d\n",
1773                                                 session->type);
1774                                         return -EFAULT;
1775                                 }
1776                         } else {
1777                                 if (copy_to_user(compat_ptr((compat_uptr_t)req.req),
1778                                                  &pservice->enc_config,
1779                                                  sizeof(struct vpu_enc_config))) {
1780                                         vpu_err("error: VPU_IOC_GET_HW_FUSE_STATUS"
1781                                                 " copy_to_user failed type %d\n",
1782                                                 session->type);
1783                                         return -EFAULT;
1784                                 }
1785                         }
1786                 }
1787
1788                 break;
1789         }
1790         case COMPAT_VPU_IOC_SET_REG : {
1791                 struct compat_vpu_request req;
1792                 vpu_reg *reg;
1793                 vpu_debug(DEBUG_IOCTL, "COMPAT_VPU_IOC_SET_REG type %d\n", session->type);
1794                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1795                                    sizeof(struct compat_vpu_request))) {
1796                         vpu_err("VPU_IOC_SET_REG copy_from_user failed\n");
1797                         return -EFAULT;
1798                 }
1799                 reg = reg_init(data, session,
1800                                compat_ptr((compat_uptr_t)req.req), req.size);
1801                 if (NULL == reg) {
1802                         return -EFAULT;
1803                 } else {
1804                         mutex_lock(&pservice->lock);
1805                         try_set_reg(data);
1806                         mutex_unlock(&pservice->lock);
1807                 }
1808
1809                 break;
1810         }
1811         case COMPAT_VPU_IOC_GET_REG : {
1812                 struct compat_vpu_request req;
1813                 vpu_reg *reg;
1814                 vpu_debug(DEBUG_IOCTL, "COMPAT_VPU_IOC_GET_REG type %d\n", session->type);
1815                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1816                                    sizeof(struct compat_vpu_request))) {
1817                         vpu_err("VPU_IOC_GET_REG copy_from_user failed\n");
1818                         return -EFAULT;
1819                 } else {
1820                         int ret = wait_event_timeout(session->wait, !list_empty(&session->done), VPU_TIMEOUT_DELAY);
1821                         if (!list_empty(&session->done)) {
1822                                 if (ret < 0) {
1823                                         vpu_err("warning: pid %d wait task success but wait_event ret %d\n", session->pid, ret);
1824                                 }
1825                                 ret = 0;
1826                         } else {
1827                                 if (unlikely(ret < 0)) {
1828                                         vpu_err("error: pid %d wait task ret %d\n", session->pid, ret);
1829                                 } else if (0 == ret) {
1830                                         vpu_err("error: pid %d waiting for %d task(s) done timed out\n", session->pid, atomic_read(&session->task_running));
1831                                         ret = -ETIMEDOUT;
1832                                 }
1833                         }
1834                         if (ret < 0) {
1835                                 int task_running = atomic_read(&session->task_running);
1836                                 mutex_lock(&pservice->lock);
1837                                 vpu_service_dump(pservice);
1838                                 if (task_running) {
1839                                         atomic_set(&session->task_running, 0);
1840                                         atomic_sub(task_running, &pservice->total_running);
1841                                         printk("%d task(s) still running but not returned, resetting hardware...", task_running);
1842                                         vpu_reset(data);
1843                                         printk("done\n");
1844                                 }
1845                                 vpu_service_session_clear(data, session);
1846                                 mutex_unlock(&pservice->lock);
1847                                 return ret;
1848                         }
1849                 }
1850                 mutex_lock(&pservice->lock);
1851                 reg = list_entry(session->done.next, vpu_reg, session_link);
1852                 return_reg(data, reg, compat_ptr((compat_uptr_t)req.req));
1853                 mutex_unlock(&pservice->lock);
1854                 break;
1855         }
1856         case COMPAT_VPU_IOC_PROBE_IOMMU_STATUS : {
1857                 int iommu_enable = 0;
1858
1859                 vpu_debug(DEBUG_IOCTL, "COMPAT_VPU_IOC_PROBE_IOMMU_STATUS\n");
1860 #if defined(CONFIG_VCODEC_MMU)
1861                 iommu_enable = data->mmu_dev ? 1 : 0;
1862 #endif
1863
1864                 if (copy_to_user(compat_ptr((compat_uptr_t)arg), &iommu_enable, sizeof(int))) {
1865                         vpu_err("error: VPU_IOC_PROBE_IOMMU_STATUS copy_to_user failed\n");
1866                         return -EFAULT;
1867                 }
1868                 break;
1869         }
1870         default : {
1871                 vpu_err("error: unknown vpu service ioctl cmd %x\n", cmd);
1872                 break;
1873         }
1874         }
1875         vpu_debug_leave();
1876         return 0;
1877 }
1878 #endif
1879
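/*
 * vpu_service_check_hw - read the ID register at hw_addr and match the top
 * 16 bits against the entries in vpu_hw_set. On success data->hw_info
 * points at the matching description; otherwise -EINVAL is returned.
 */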
1880 static int vpu_service_check_hw(struct vpu_subdev_data *data, u32 hw_addr)
1881 {
1882         int ret = -EINVAL, i = 0;
1883         volatile u32 *tmp = (volatile u32 *)ioremap_nocache(hw_addr, 0x4);
1884         u32 enc_id = *tmp;
1885
1886         enc_id = (enc_id >> 16) & 0xFFFF;
1887         pr_info("checking hw id %x\n", enc_id);
1888         data->hw_info = NULL;
1889         for (i = 0; i < ARRAY_SIZE(vpu_hw_set); i++) {
1890                 if (enc_id == vpu_hw_set[i].hw_id) {
1891                         data->hw_info = &vpu_hw_set[i];
1892                         ret = 0;
1893                         break;
1894                 }
1895         }
1896         iounmap((void *)tmp);
1897         return ret;
1898 }
1899
1900 static int vpu_service_open(struct inode *inode, struct file *filp)
1901 {
1902         struct vpu_subdev_data *data = container_of(inode->i_cdev, struct vpu_subdev_data, cdev);
1903         struct vpu_service_info *pservice = data->pservice;
1904         vpu_session *session = (vpu_session *)kmalloc(sizeof(vpu_session), GFP_KERNEL);
1905
1906         vpu_debug_enter();
1907
1908         if (NULL == session) {
1909                 vpu_err("error: unable to allocate memory for vpu_session\n");
1910                 return -ENOMEM;
1911         }
1912
1913         session->type   = VPU_TYPE_BUTT;
1914         session->pid    = current->pid;
1915         INIT_LIST_HEAD(&session->waiting);
1916         INIT_LIST_HEAD(&session->running);
1917         INIT_LIST_HEAD(&session->done);
1918         INIT_LIST_HEAD(&session->list_session);
1919         init_waitqueue_head(&session->wait);
1920         atomic_set(&session->task_running, 0);
1921         mutex_lock(&pservice->lock);
1922         list_add_tail(&session->list_session, &pservice->session);
1923         filp->private_data = (void *)session;
1924         mutex_unlock(&pservice->lock);
1925
1926         pr_debug("dev opened\n");
1927         vpu_debug_leave();
1928         return nonseekable_open(inode, filp);
1929 }
1930
1931 static int vpu_service_release(struct inode *inode, struct file *filp)
1932 {
1933         struct vpu_subdev_data *data = container_of(inode->i_cdev, struct vpu_subdev_data, cdev);
1934         struct vpu_service_info *pservice = data->pservice;
1935         int task_running;
1936         vpu_session *session = (vpu_session *)filp->private_data;
1937         vpu_debug_enter();
1938         if (NULL == session)
1939                 return -EINVAL;
1940
1941         task_running = atomic_read(&session->task_running);
1942         if (task_running) {
1943                 vpu_err("error: vpu_service session %d still has %d task(s) running when closing\n", session->pid, task_running);
1944                 msleep(50);
1945         }
1946         wake_up(&session->wait);
1947
1948         mutex_lock(&pservice->lock);
1949         /* remove this filp from the asynchronously notified filps */
1950         list_del_init(&session->list_session);
1951         vpu_service_session_clear(data, session);
1952         kfree(session);
1953         filp->private_data = NULL;
1954         mutex_unlock(&pservice->lock);
1955
1956         pr_debug("dev closed\n");
1957         vpu_debug_leave();
1958         return 0;
1959 }
1960
1961 static const struct file_operations vpu_service_fops = {
1962         .unlocked_ioctl = vpu_service_ioctl,
1963         .open           = vpu_service_open,
1964         .release        = vpu_service_release,
1965 #ifdef CONFIG_COMPAT
1966         .compat_ioctl   = compat_vpu_service_ioctl,
1967 #endif
1968 };
1969
1970 static irqreturn_t vdpu_irq(int irq, void *dev_id);
1971 static irqreturn_t vdpu_isr(int irq, void *dev_id);
1972 static irqreturn_t vepu_irq(int irq, void *dev_id);
1973 static irqreturn_t vepu_isr(int irq, void *dev_id);
1974 static void get_hw_info(struct vpu_subdev_data *data);
1975
1976 #ifdef CONFIG_VCODEC_MMU
1977 static struct device *rockchip_get_sysmmu_dev(const char *compt)
1978 {
1979         struct device_node *dn = NULL;
1980         struct platform_device *pd = NULL;
1981         struct device *ret = NULL;
1982
1983         dn = of_find_compatible_node(NULL, NULL, compt);
1984         if (!dn) {
1985                 pr_err("can't find device node %s\n", compt);
1986                 return NULL;
1987         }
1988
1989         pd = of_find_device_by_node(dn);
1990         if (!pd) {
1991                 pr_err("can't find platform device in device node %s\n", compt);
1992                 return NULL;
1993         }
1994         ret = &pd->dev;
1995
1996         return ret;
1997
1998 }
1999 #ifdef CONFIG_IOMMU_API
2000 static inline void platform_set_sysmmu(struct device *iommu,
2001         struct device *dev)
2002 {
2003         dev->archdata.iommu = iommu;
2004 }
2005 #else
2006 static inline void platform_set_sysmmu(struct device *iommu,
2007         struct device *dev)
2008 {
2009 }
2010 #endif
2011
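/*
 * vcodec_sysmmu_fault_hdl - IOMMU page-fault handler. Dumps the memory
 * regions attached to the currently running register set for debugging,
 * then resets the hardware so the service can recover.
 */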
2012 int vcodec_sysmmu_fault_hdl(struct device *dev,
2013                                 enum rk_iommu_inttype itype,
2014                                 unsigned long pgtable_base,
2015                                 unsigned long fault_addr, unsigned int status)
2016 {
2017         struct platform_device *pdev;
2018         struct vpu_subdev_data *data;
2019         struct vpu_service_info *pservice;
2020
2021         vpu_debug_enter();
2022
2023         pdev = container_of(dev, struct platform_device, dev);
2024
2025         data = platform_get_drvdata(pdev);
2026         pservice = data->pservice;
2027
2028         if (pservice->reg_codec) {
2029                 struct vcodec_mem_region *mem, *n;
2030                 int i = 0;
2031                 vpu_debug(DEBUG_IOMMU, "vcodec, fault addr 0x%08x\n", (u32)fault_addr);
2032                 list_for_each_entry_safe(mem, n,
2033                                          &pservice->reg_codec->mem_region_list,
2034                                          reg_lnk) {
2035                         vpu_debug(DEBUG_IOMMU, "vcodec, reg[%02u] mem region [%02d] 0x%08x %ld\n",
2036                                 mem->reg_idx, i, (u32)mem->iova, mem->len);
2037                         i++;
2038                 }
2039
2040                 pr_alert("vcodec, page fault occur, reset hw\n");
2041                 pservice->reg_codec->reg[101] = 1;
2042                 vpu_reset(data);
2043         }
2044
2045         return 0;
2046 }
2047 #endif
2048
2049 #if HEVC_TEST_ENABLE
2050 static int hevc_test_case0(vpu_service_info *pservice);
2051 #endif
2052 #if defined(CONFIG_ION_ROCKCHIP)
2053 extern struct ion_client *rockchip_ion_client_create(const char * name);
2054 #endif
2055
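/*
 * vcodec_subdev_probe - bring up one codec sub-device: map its registers,
 * identify the hardware, request the encoder/decoder interrupts, attach
 * the IOMMU when enabled in the device tree, and create the character
 * device node plus debugfs entries.
 */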
2056 static int vcodec_subdev_probe(struct platform_device *pdev,
2057         struct vpu_service_info *pservice)
2058 {
2059         int ret = 0;
2060         struct resource *res = NULL;
2061         u32 ioaddr = 0;
2062         struct device *dev = &pdev->dev;
2063         char *name = (char*)dev_name(dev);
2064         struct device_node *np = pdev->dev.of_node;
2065         struct vpu_subdev_data *data =
2066                 devm_kzalloc(dev, sizeof(struct vpu_subdev_data), GFP_KERNEL);
2067 #if defined(CONFIG_VCODEC_MMU)
2068         u32 iommu_en = 0;
2069         char mmu_dev_dts_name[40];
2070         of_property_read_u32(np, "iommu_enabled", &iommu_en);
2071 #endif
2072         pr_info("probe device %s\n", dev_name(dev));
2073
2074         data->pservice = pservice;
2075         data->dev = dev;
2076
2077         of_property_read_string(np, "name", (const char**)&name);
2078         of_property_read_u32(np, "dev_mode", (u32*)&data->mode);
2079         /*dev_set_name(dev, name);*/
2080
2081         if (pservice->reg_base == 0) {
2082                 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2083                 data->regs = devm_ioremap_resource(dev, res);
2084                 if (IS_ERR(data->regs)) {
2085                         ret = PTR_ERR(data->regs);
2086                         goto err;
2087                 }
2088                 ioaddr = res->start;
2089         } else {
2090                 data->regs = pservice->reg_base;
2091                 ioaddr = pservice->ioaddr;
2092         }
2093
2094         clear_bit(MMU_ACTIVATED, &data->state);
2095         vcodec_enter_mode(data);
2096         ret = vpu_service_check_hw(data, ioaddr);
2097         if (ret < 0) {
2098                 vpu_err("error: hw info check failed\n");
2099                 goto err;
2100         }
2101
2102         data->dec_dev.iosize = data->hw_info->dec_io_size;
2103         data->dec_dev.hwregs = (volatile u32 *)((u8 *)data->regs + data->hw_info->dec_offset);
2104         data->reg_size = data->dec_dev.iosize;
2105
2106         if (data->mode == VCODEC_RUNNING_MODE_VPU) {
2107                 data->enc_dev.iosize = data->hw_info->enc_io_size;
2108                 data->reg_size = data->reg_size > data->enc_dev.iosize ? data->reg_size : data->enc_dev.iosize;
2109                 data->enc_dev.hwregs = (volatile u32 *)((u8 *)data->regs + data->hw_info->enc_offset);
2110         }
2111
2112         data->irq_enc = platform_get_irq_byname(pdev, "irq_enc");
2113         if (data->irq_enc > 0) {
2114                 ret = devm_request_threaded_irq(dev,
2115                         data->irq_enc, vepu_irq, vepu_isr,
2116                         IRQF_SHARED, dev_name(dev),
2117                         (void *)data);
2118                 if (ret) {
2119                         dev_err(dev,
2120                                 "error: can't request vepu irq %d\n",
2121                                 data->irq_enc);
2122                         goto err;
2123                 }
2124         }
2125         data->irq_dec = platform_get_irq_byname(pdev, "irq_dec");
2126         if (data->irq_dec > 0) {
2127                 ret = devm_request_threaded_irq(dev,
2128                         data->irq_dec, vdpu_irq, vdpu_isr,
2129                         IRQF_SHARED, dev_name(dev),
2130                         (void *)data);
2131                 if (ret) {
2132                         dev_err(dev,
2133                                 "error: can't request vdpu irq %d\n",
2134                                 data->irq_dec);
2135                         goto err;
2136                 }
2137         }
2138         atomic_set(&data->dec_dev.irq_count_codec, 0);
2139         atomic_set(&data->dec_dev.irq_count_pp, 0);
2140         atomic_set(&data->enc_dev.irq_count_codec, 0);
2141         atomic_set(&data->enc_dev.irq_count_pp, 0);
2142 #if defined(CONFIG_VCODEC_MMU)
2143         if (iommu_en) {
2144                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
2145                         sprintf(mmu_dev_dts_name,
2146                                 HEVC_IOMMU_COMPATIBLE_NAME);
2147                 else
2148                         sprintf(mmu_dev_dts_name,
2149                                 VPU_IOMMU_COMPATIBLE_NAME);
2150
2151                 data->mmu_dev =
2152                         rockchip_get_sysmmu_dev(mmu_dev_dts_name);
2153
2154                 if (data->mmu_dev)
2155                         platform_set_sysmmu(data->mmu_dev, dev);
2156
2157                 rockchip_iovmm_set_fault_handler(dev, vcodec_sysmmu_fault_hdl);
2158         }
2159 #endif
2160         vcodec_exit_mode(data);
2161         /* create device node */
2162         ret = alloc_chrdev_region(&data->dev_t, 0, 1, name);
2163         if (ret) {
2164                 dev_err(dev, "alloc dev_t failed\n");
2165                 goto err;
2166         }
2167
2168         cdev_init(&data->cdev, &vpu_service_fops);
2169
2170         data->cdev.owner = THIS_MODULE;
2171         data->cdev.ops = &vpu_service_fops;
2172
2173         ret = cdev_add(&data->cdev, data->dev_t, 1);
2174
2175         if (ret) {
2176                 dev_err(dev, "add dev_t failed\n");
2177                 goto err;
2178         }
2179
2180         data->cls = class_create(THIS_MODULE, name);
2181
2182         if (IS_ERR(data->cls)) {
2183                 ret = PTR_ERR(data->cls);
2184                 dev_err(dev, "class_create err:%d\n", ret);
2185                 goto err;
2186         }
2187
2188         data->child_dev = device_create(data->cls, dev,
2189                 data->dev_t, NULL, name);
2190
2191         get_hw_info(data);
2192
2193         platform_set_drvdata(pdev, data);
2194
2195         INIT_LIST_HEAD(&data->lnk_service);
2196         list_add_tail(&data->lnk_service, &pservice->subdev_list);
2197
2198 #ifdef CONFIG_DEBUG_FS
2199         data->debugfs_dir =
2200                 vcodec_debugfs_create_device_dir((char*)name, parent);
2201         if (data->debugfs_dir == NULL)
2202                 vpu_err("create debugfs dir %s failed\n", name);
2203
2204         data->debugfs_file_regs =
2205                 debugfs_create_file("regs", 0664,
2206                                     data->debugfs_dir, data,
2207                                     &debug_vcodec_fops);
2208 #endif
2209         return 0;
2210 err:
2211         if (data->irq_enc > 0)
2212                 free_irq(data->irq_enc, (void *)data);
2213         if (data->irq_dec > 0)
2214                 free_irq(data->irq_dec, (void *)data);
2215
2216         if (data->child_dev) {
2217                 device_destroy(data->cls, data->dev_t);
2218                 cdev_del(&data->cdev);
2219                 unregister_chrdev_region(data->dev_t, 1);
2220         }
2221
2222         if (data->cls)
2223                 class_destroy(data->cls);
2224         return -1;
2225 }
2226
2227 static void vcodec_subdev_remove(struct vpu_subdev_data *data)
2228 {
2229         device_destroy(data->cls, data->dev_t);
2230         class_destroy(data->cls);
2231         cdev_del(&data->cdev);
2232         unregister_chrdev_region(data->dev_t, 1);
2233
2234         free_irq(data->irq_enc, (void *)data);
2235         free_irq(data->irq_dec, (void *)data);
2236
2237 #ifdef CONFIG_DEBUG_FS
2238         debugfs_remove_recursive(data->debugfs_dir);
2239 #endif
2240 }
2241
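/*
 * vcodec_read_property - parse the service device-tree node: sub-device
 * count, combo-mode control bits and the GRF regmap used by the driver.
 */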
2242 static void vcodec_read_property(struct device_node *np,
2243         struct vpu_service_info *pservice)
2244 {
2245         pservice->mode_bit = 0;
2246         pservice->mode_ctrl = 0;
2247         pservice->subcnt = 0;
2248
2249         of_property_read_u32(np, "subcnt", &pservice->subcnt);
2250
2251         if (pservice->subcnt > 1) {
2252                 of_property_read_u32(np, "mode_bit", &pservice->mode_bit);
2253                 of_property_read_u32(np, "mode_ctrl", &pservice->mode_ctrl);
2254         }
2255 #ifdef CONFIG_MFD_SYSCON
2256         pservice->grf_base = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
2257 #else
2258         pservice->grf_base = (u32*)RK_GRF_VIRT;
2259 #endif
2260         if (IS_ERR(pservice->grf_base)) {
2261 #ifdef CONFIG_ARM
2262                 pservice->grf_base = RK_GRF_VIRT;
2263 #else
2264                 vpu_err("can't find vpu grf property\n");
2265                 return;
2266 #endif
2267         }
2268         of_property_read_string(np, "name", (const char**)&pservice->name);
2269 }
2270
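/*
 * vcodec_init_drvdata - initialise the shared service state: work lists,
 * lock, counters, the delayed power-off work and the ion client.
 */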
2271 static void vcodec_init_drvdata(struct vpu_service_info *pservice)
2272 {
2273         pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2274         pservice->curr_mode = -1;
2275
2276         wake_lock_init(&pservice->wake_lock, WAKE_LOCK_SUSPEND, "vpu");
2277         INIT_LIST_HEAD(&pservice->waiting);
2278         INIT_LIST_HEAD(&pservice->running);
2279         mutex_init(&pservice->lock);
2280
2281         INIT_LIST_HEAD(&pservice->done);
2282         INIT_LIST_HEAD(&pservice->session);
2283         INIT_LIST_HEAD(&pservice->subdev_list);
2284
2285         pservice->reg_pproc     = NULL;
2286         atomic_set(&pservice->total_running, 0);
2287         atomic_set(&pservice->enabled,       0);
2288         atomic_set(&pservice->power_on_cnt,  0);
2289         atomic_set(&pservice->power_off_cnt, 0);
2290
2291         INIT_DELAYED_WORK(&pservice->power_off_work, vpu_power_off_work);
2292
2293         pservice->ion_client = rockchip_ion_client_create("vpu");
2294         if (IS_ERR(pservice->ion_client)) {
2295                 vpu_err("failed to create ion client for vcodec ret %ld\n",
2296                         PTR_ERR(pservice->ion_client));
2297         } else {
2298                 vpu_debug(DEBUG_IOMMU, "vcodec ion client create success!\n");
2299         }
2300 }
2301
2302 static int vcodec_probe(struct platform_device *pdev)
2303 {
2304         int i;
2305         int ret = 0;
2306         struct resource *res = NULL;
2307         struct device *dev = &pdev->dev;
2308         struct device_node *np = pdev->dev.of_node;
2309         struct vpu_service_info *pservice =
2310                 devm_kzalloc(dev, sizeof(struct vpu_service_info), GFP_KERNEL);
2311
2312         pr_info("probe device %s\n", dev_name(dev));
2313
2314         vcodec_read_property(np, pservice);
2315         vcodec_init_drvdata(pservice);
2316
2317         if (strncmp(pservice->name, "hevc_service", 12) == 0)
2318                 pservice->dev_id = VCODEC_DEVICE_ID_HEVC;
2319         else if (strncmp(pservice->name, "vpu_service", 11) == 0)
2320                 pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2321         else
2322                 pservice->dev_id = VCODEC_DEVICE_ID_COMBO;
2323
2324         pservice->dev = dev;
2325
2326         if (0 > vpu_get_clk(pservice))
2327                 goto err;
2328
2329         vpu_service_power_on(pservice);
2330
2331         if (of_property_read_bool(np, "reg")) {
2332                 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2333
2334                 pservice->reg_base = devm_ioremap_resource(pservice->dev, res);
2335                 if (IS_ERR(pservice->reg_base)) {
2336                         vpu_err("ioremap registers base failed\n");
2337                         ret = PTR_ERR(pservice->reg_base);
2338                         goto err;
2339                 }
2340                 pservice->ioaddr = res->start;
2341         } else {
2342                 pservice->reg_base = 0;
2343         }
2344
2345         if (of_property_read_bool(np, "subcnt")) {
2346                 for (i = 0; i < pservice->subcnt; i++) {
2347                         struct device_node *sub_np;
2348                         struct platform_device *sub_pdev;
2349                         sub_np = of_parse_phandle(np, "rockchip,sub", i);
2350                         sub_pdev = of_find_device_by_node(sub_np);
2351
2352                         vcodec_subdev_probe(sub_pdev, pservice);
2353                 }
2354         } else {
2355                 vcodec_subdev_probe(pdev, pservice);
2356         }
2357         platform_set_drvdata(pdev, pservice);
2358
2359         vpu_service_power_off(pservice);
2360
2361         pr_info("init success\n");
2362
2363         return 0;
2364
2365 err:
2366         pr_info("init failed\n");
2367         vpu_service_power_off(pservice);
2368         vpu_put_clk(pservice);
2369         wake_lock_destroy(&pservice->wake_lock);
2370
2371         if (res)
2372                 devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
2373
2374         return ret;
2375 }
2376
2377 static int vcodec_remove(struct platform_device *pdev)
2378 {
2379         struct vpu_service_info *pservice = platform_get_drvdata(pdev);
2380         struct resource *res;
2381         struct vpu_subdev_data *data, *n;
2382
2383         list_for_each_entry_safe(data, n, &pservice->subdev_list, lnk_service) {
2384                 vcodec_subdev_remove(data);
2385         }
2386
2387         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2388         devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
2389         vpu_put_clk(pservice);
2390         wake_lock_destroy(&pservice->wake_lock);
2391
2392         return 0;
2393 }
2394
2395 #if defined(CONFIG_OF)
2396 static const struct of_device_id vcodec_service_dt_ids[] = {
2397         {.compatible = "vpu_service",},
2398         {.compatible = "rockchip,hevc_service",},
2399         {.compatible = "rockchip,vpu_combo",},
2400         {},
2401 };
2402 #endif
2403
2404 static struct platform_driver vcodec_driver = {
2405         .probe = vcodec_probe,
2406         .remove = vcodec_remove,
2407         .driver = {
2408                 .name = "vcodec",
2409                 .owner = THIS_MODULE,
2410 #if defined(CONFIG_OF)
2411                 .of_match_table = of_match_ptr(vcodec_service_dt_ids),
2412 #endif
2413         },
2414 };
2415
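/*
 * get_hw_info - decode the hardware configuration registers into the
 * dec/enc capability structures reported to user space and decide whether
 * automatic frequency scaling may be used on this SoC.
 */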
2416 static void get_hw_info(struct vpu_subdev_data *data)
2417 {
2418         struct vpu_service_info *pservice = data->pservice;
2419         struct vpu_dec_config *dec = &pservice->dec_config;
2420         struct vpu_enc_config *enc = &pservice->enc_config;
2421         if (data->mode == VCODEC_RUNNING_MODE_VPU) {
2422                 u32 configReg   = data->dec_dev.hwregs[VPU_DEC_HWCFG0];
2423                 u32 asicID      = data->dec_dev.hwregs[0];
2424
2425                 dec->h264_support    = (configReg >> DWL_H264_E) & 0x3U;
2426                 dec->jpegSupport    = (configReg >> DWL_JPEG_E) & 0x01U;
2427                 if (dec->jpegSupport && ((configReg >> DWL_PJPEG_E) & 0x01U))
2428                         dec->jpegSupport = JPEG_PROGRESSIVE;
2429                 dec->mpeg4Support   = (configReg >> DWL_MPEG4_E) & 0x3U;
2430                 dec->vc1Support     = (configReg >> DWL_VC1_E) & 0x3U;
2431                 dec->mpeg2Support   = (configReg >> DWL_MPEG2_E) & 0x01U;
2432                 dec->sorensonSparkSupport = (configReg >> DWL_SORENSONSPARK_E) & 0x01U;
2433                 dec->refBufSupport  = (configReg >> DWL_REF_BUFF_E) & 0x01U;
2434                 dec->vp6Support     = (configReg >> DWL_VP6_E) & 0x01U;
2435
2436                 dec->maxDecPicWidth = 4096;
2437
2438                 /* 2nd Config register */
2439                 configReg   = data->dec_dev.hwregs[VPU_DEC_HWCFG1];
2440                 if (dec->refBufSupport) {
2441                         if ((configReg >> DWL_REF_BUFF_ILACE_E) & 0x01U)
2442                                 dec->refBufSupport |= 2;
2443                         if ((configReg >> DWL_REF_BUFF_DOUBLE_E) & 0x01U)
2444                                 dec->refBufSupport |= 4;
2445                 }
2446                 dec->customMpeg4Support = (configReg >> DWL_MPEG4_CUSTOM_E) & 0x01U;
2447                 dec->vp7Support     = (configReg >> DWL_VP7_E) & 0x01U;
2448                 dec->vp8Support     = (configReg >> DWL_VP8_E) & 0x01U;
2449                 dec->avsSupport     = (configReg >> DWL_AVS_E) & 0x01U;
2450
2451                 /* JPEG extensions */
2452                 if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U))
2453                         dec->jpegESupport = (configReg >> DWL_JPEG_EXT_E) & 0x01U;
2454                 else
2455                         dec->jpegESupport = JPEG_EXT_NOT_SUPPORTED;
2456
2457                 if (((asicID >> 16) >= 0x9170U) || ((asicID >> 16) == 0x6731U) )
2458                         dec->rvSupport = (configReg >> DWL_RV_E) & 0x03U;
2459                 else
2460                         dec->rvSupport = RV_NOT_SUPPORTED;
2461                 dec->mvcSupport = (configReg >> DWL_MVC_E) & 0x03U;
2462
2463                 if (dec->refBufSupport && (asicID >> 16) == 0x6731U )
2464                         dec->refBufSupport |= 8; /* enable HW support for offset */
2465
2466                 if (!cpu_is_rk3036()) {
2467                         configReg = data->enc_dev.hwregs[63];
2468                         enc->maxEncodedWidth = configReg & ((1 << 11) - 1);
2469                         enc->h264Enabled = (configReg >> 27) & 1;
2470                         enc->mpeg4Enabled = (configReg >> 26) & 1;
2471                         enc->jpegEnabled = (configReg >> 25) & 1;
2472                         enc->vsEnabled = (configReg >> 24) & 1;
2473                         enc->rgbEnabled = (configReg >> 28) & 1;
2474                         enc->reg_size = data->reg_size;
2475                         enc->reserv[0] = enc->reserv[1] = 0;
2476                 }
2477                 pservice->auto_freq = true;
2478                 vpu_debug(DEBUG_EXTRA_INFO, "vpu_service set to auto frequency mode\n");
2479                 atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
2480
2481                 pservice->bug_dec_addr = cpu_is_rk30xx();
2482         } else {
2483                 if (cpu_is_rk3036()  || cpu_is_rk312x())
2484                         dec->maxDecPicWidth = 1920;
2485                 else
2486                         dec->maxDecPicWidth = 4096;
2487                 /* disable frequency switch in hevc.*/
2488                 pservice->auto_freq = false;
2489         }
2490 }
2491
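/*
 * vdpu_irq - hard-IRQ half for the decoder/post-processor: acknowledge the
 * interrupt, count the pending work and wake the threaded handler.
 */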
2492 static irqreturn_t vdpu_irq(int irq, void *dev_id)
2493 {
2494         struct vpu_subdev_data *data = (struct vpu_subdev_data*)dev_id;
2495         struct vpu_service_info *pservice = data->pservice;
2496         vpu_device *dev = &data->dec_dev;
2497         u32 raw_status;
2498         u32 irq_status;
2499
2500         /*vcodec_enter_mode(data);*/
2501
2502         irq_status = raw_status = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
2503
2504         if (irq_status & DEC_INTERRUPT_BIT) {
2505                 time_record(&tasks[TASK_VPU_DEC], 1);
2506                 vpu_debug(DEBUG_IRQ_STATUS, "vdpu_irq dec status %08x\n", irq_status);
2507                 if ((irq_status & 0x40001) == 0x40001) {
2508                         do {
2509                                 irq_status =
2510                                         readl(dev->hwregs +
2511                                                 DEC_INTERRUPT_REGISTER);
2512                         } while ((irq_status & 0x40001) == 0x40001);
2513                 }
2514
2515                 writel(0, dev->hwregs + DEC_INTERRUPT_REGISTER);
2516                 atomic_add(1, &dev->irq_count_codec);
2517                 time_diff(&tasks[TASK_VPU_DEC]);
2518         }
2519
2520         if (data->hw_info->hw_id != HEVC_ID) {
2521                 irq_status = readl(dev->hwregs + PP_INTERRUPT_REGISTER);
2522                 if (irq_status & PP_INTERRUPT_BIT) {
2523                         time_record(&tasks[TASK_VPU_PP], 1);
2524                         vpu_debug(DEBUG_IRQ_STATUS, "vdpu_irq pp status %08x\n", irq_status);
2525                         /* clear pp IRQ */
2526                         writel(irq_status & (~DEC_INTERRUPT_BIT), dev->hwregs + PP_INTERRUPT_REGISTER);
2527                         atomic_add(1, &dev->irq_count_pp);
2528                         time_diff(&tasks[TASK_VPU_PP]);
2529                 }
2530         }
2531
2532         pservice->irq_status = raw_status;
2533
2534         /*vcodec_exit_mode(pservice);*/
2535
2536         if (atomic_read(&dev->irq_count_pp) ||
2537             atomic_read(&dev->irq_count_codec))
2538                 return IRQ_WAKE_THREAD;
2539         else
2540                 return IRQ_NONE;
2541 }
2542
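/*
 * vdpu_isr - threaded half: move the finished register set from running to
 * done, soft-reset the decoder to avoid timeout lock-ups, and try to start
 * the next waiting job.
 */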
2543 static irqreturn_t vdpu_isr(int irq, void *dev_id)
2544 {
2545         struct vpu_subdev_data *data = (struct vpu_subdev_data*)dev_id;
2546         struct vpu_service_info *pservice = data->pservice;
2547         vpu_device *dev = &data->dec_dev;
2548
2549         mutex_lock(&pservice->lock);
2550         if (atomic_read(&dev->irq_count_codec)) {
2551                 atomic_sub(1, &dev->irq_count_codec);
2552                 if (NULL == pservice->reg_codec) {
2553                         vpu_err("error: dec isr with no task waiting\n");
2554                 } else {
2555                         reg_from_run_to_done(data, pservice->reg_codec);
2556                         /* avoid vpu timeout and can't recover problem */
2557                         VDPU_SOFT_RESET(data->regs);
2558                 }
2559         }
2560
2561         if (atomic_read(&dev->irq_count_pp)) {
2562                 atomic_sub(1, &dev->irq_count_pp);
2563                 if (NULL == pservice->reg_pproc) {
2564                         vpu_err("error: pp isr with no task waiting\n");
2565                 } else {
2566                         reg_from_run_to_done(data, pservice->reg_pproc);
2567                 }
2568         }
2569         try_set_reg(data);
2570         mutex_unlock(&pservice->lock);
2571         return IRQ_HANDLED;
2572 }
2573
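/*
 * vepu_irq/vepu_isr - encoder counterparts of the decoder handlers above.
 */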
2574 static irqreturn_t vepu_irq(int irq, void *dev_id)
2575 {
2576         struct vpu_subdev_data *data = (struct vpu_subdev_data*)dev_id;
2577         struct vpu_service_info *pservice = data->pservice;
2578         vpu_device *dev = &data->enc_dev;
2579         u32 irq_status;
2580
2581         /*vcodec_enter_mode(data);*/
2582         irq_status = readl(dev->hwregs + ENC_INTERRUPT_REGISTER);
2583
2584         vpu_debug(DEBUG_IRQ_STATUS, "vepu_irq irq status %x\n", irq_status);
2585
2586         if (likely(irq_status & ENC_INTERRUPT_BIT)) {
2587                 time_record(&tasks[TASK_VPU_ENC], 1);
2588                 /* clear enc IRQ */
2589                 writel(irq_status & (~ENC_INTERRUPT_BIT), dev->hwregs + ENC_INTERRUPT_REGISTER);
2590                 atomic_add(1, &dev->irq_count_codec);
2591                 time_diff(&tasks[TASK_VPU_ENC]);
2592         }
2593
2594         pservice->irq_status = irq_status;
2595
2596         /*vcodec_exit_mode(pservice);*/
2597
2598         if (atomic_read(&dev->irq_count_codec))
2599                 return IRQ_WAKE_THREAD;
2600         else
2601                 return IRQ_NONE;
2602 }
2603
2604 static irqreturn_t vepu_isr(int irq, void *dev_id)
2605 {
2606         struct vpu_subdev_data *data = (struct vpu_subdev_data*)dev_id;
2607         struct vpu_service_info *pservice = data->pservice;
2608         vpu_device *dev = &data->enc_dev;
2609
2610         mutex_lock(&pservice->lock);
2611         if (atomic_read(&dev->irq_count_codec)) {
2612                 atomic_sub(1, &dev->irq_count_codec);
2613                 if (NULL == pservice->reg_codec) {
2614                         vpu_err("error: enc isr with no task waiting\n");
2615                 } else {
2616                         reg_from_run_to_done(data, pservice->reg_codec);
2617                 }
2618         }
2619         try_set_reg(data);
2620         mutex_unlock(&pservice->lock);
2621         return IRQ_HANDLED;
2622 }
2623
2624 static int __init vcodec_service_init(void)
2625 {
2626         int ret;
2627
2628         if ((ret = platform_driver_register(&vcodec_driver)) != 0) {
2629                 vpu_err("Platform device register failed (%d).\n", ret);
2630                 return ret;
2631         }
2632
2633 #ifdef CONFIG_DEBUG_FS
2634         vcodec_debugfs_init();
2635 #endif
2636
2637         return ret;
2638 }
2639
2640 static void __exit vcodec_service_exit(void)
2641 {
2642 #ifdef CONFIG_DEBUG_FS
2643         vcodec_debugfs_exit();
2644 #endif
2645
2646         platform_driver_unregister(&vcodec_driver);
2647 }
2648
2649 module_init(vcodec_service_init);
2650 module_exit(vcodec_service_exit);
2651
2652 #ifdef CONFIG_DEBUG_FS
2653 #include <linux/seq_file.h>
2654
2655 static int vcodec_debugfs_init(void)
2656 {
2657         parent = debugfs_create_dir("vcodec", NULL);
2658         if (!parent)
2659                 return -1;
2660
2661         return 0;
2662 }
2663
2664 static void vcodec_debugfs_exit(void)
2665 {
2666         debugfs_remove(parent);
2667 }
2668
2669 static struct dentry* vcodec_debugfs_create_device_dir(char *dirname, struct dentry *parent)
2670 {
2671         return debugfs_create_dir(dirname, parent);
2672 }
2673
2674 static int debug_vcodec_show(struct seq_file *s, void *unused)
2675 {
2676         struct vpu_subdev_data *data = s->private;
2677         struct vpu_service_info *pservice = data->pservice;
2678         unsigned int i, n;
2679         vpu_reg *reg, *reg_tmp;
2680         vpu_session *session, *session_tmp;
2681
2682         mutex_lock(&pservice->lock);
2683         vpu_service_power_on(pservice);
2684         if (data->hw_info->hw_id != HEVC_ID) {
2685                 seq_printf(s, "\nENC Registers:\n");
2686                 n = data->enc_dev.iosize >> 2;
2687                 for (i = 0; i < n; i++)
2688                         seq_printf(s, "\tswreg%d = %08X\n", i, readl(data->enc_dev.hwregs + i));
2689         }
2690         seq_printf(s, "\nDEC Registers:\n");
2691         n = data->dec_dev.iosize >> 2;
2692         for (i = 0; i < n; i++)
2693                 seq_printf(s, "\tswreg%d = %08X\n", i, readl(data->dec_dev.hwregs + i));
2694
2695         seq_printf(s, "\nvpu service status:\n");
2696         list_for_each_entry_safe(session, session_tmp, &pservice->session, list_session) {
2697                 seq_printf(s, "session pid %d type %d:\n", session->pid, session->type);
2698                 /*seq_printf(s, "waiting reg set %d\n");*/
2699                 list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
2700                         seq_printf(s, "waiting register set\n");
2701                 }
2702                 list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
2703                         seq_printf(s, "running register set\n");
2704                 }
2705                 list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
2706                         seq_printf(s, "done    register set\n");
2707                 }
2708         }
2709
2710         seq_printf(s, "\npower counter: on %d off %d\n",
2711                         atomic_read(&pservice->power_on_cnt),
2712                         atomic_read(&pservice->power_off_cnt));
2713         mutex_unlock(&pservice->lock);
2714         vpu_service_power_off(pservice);
2715
2716         return 0;
2717 }
2718
2719 static int debug_vcodec_open(struct inode *inode, struct file *file)
2720 {
2721         return single_open(file, debug_vcodec_show, inode->i_private);
2722 }
2723
2724 #endif
2725
2726 #if HEVC_TEST_ENABLE & defined(CONFIG_ION_ROCKCHIP)
2727 #include "hevc_test_inc/pps_00.h"
2728 #include "hevc_test_inc/register_00.h"
2729 #include "hevc_test_inc/rps_00.h"
2730 #include "hevc_test_inc/scaling_list_00.h"
2731 #include "hevc_test_inc/stream_00.h"
2732
2733 #include "hevc_test_inc/pps_01.h"
2734 #include "hevc_test_inc/register_01.h"
2735 #include "hevc_test_inc/rps_01.h"
2736 #include "hevc_test_inc/scaling_list_01.h"
2737 #include "hevc_test_inc/stream_01.h"
2738
2739 #include "hevc_test_inc/cabac.h"
2740
2741 extern struct ion_client *rockchip_ion_client_create(const char * name);
2742
2743 static struct ion_client *ion_client = NULL;
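/*
 * get_align_ptr - test-only helper: allocate a 16-byte aligned CMA buffer
 * through ion, map it into the kernel, copy the table into it and return
 * the kernel pointer together with the physical address.
 */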
2744 u8* get_align_ptr(u8* tbl, int len, u32 *phy)
2745 {
2746         int size = (len+15) & (~15);
2747         struct ion_handle *handle;
2748         u8 *ptr;
2749
2750         if (ion_client == NULL)
2751                 ion_client = rockchip_ion_client_create("vcodec");
2752
2753         handle = ion_alloc(ion_client, (size_t)len, 16, ION_HEAP(ION_CMA_HEAP_ID), 0);
2754
2755         ptr = ion_map_kernel(ion_client, handle);
2756
2757         ion_phys(ion_client, handle, phy, &size);
2758
2759         memcpy(ptr, tbl, len);
2760
2761         return ptr;
2762 }
2763
2764 u8* get_align_ptr_no_copy(int len, u32 *phy)
2765 {
2766         int size = (len+15) & (~15);
2767         struct ion_handle *handle;
2768         u8 *ptr;
2769
2770         if (ion_client == NULL)
2771                 ion_client = rockchip_ion_client_create("vcodec");
2772
2773         handle = ion_alloc(ion_client, (size_t)len, 16, ION_HEAP(ION_CMA_HEAP_ID), 0);
2774
2775         ptr = ion_map_kernel(ion_client, handle);
2776
2777         ion_phys(ion_client, handle, phy, &size);
2778
2779         return ptr;
2780 }
2781
2782 #define TEST_CNT    2
2783 static int hevc_test_case0(vpu_service_info *pservice)
2784 {
2785         vpu_session session;
2786         vpu_reg *reg;
2787         unsigned long size = 272;
2788         int testidx = 0;
2789         int ret = 0;
2790         u8 *pps_tbl[TEST_CNT];
2791         u8 *register_tbl[TEST_CNT];
2792         u8 *rps_tbl[TEST_CNT];
2793         u8 *scaling_list_tbl[TEST_CNT];
2794         u8 *stream_tbl[TEST_CNT];
2795
2796         int stream_size[2];
2797         int pps_size[2];
2798         int rps_size[2];
2799         int scl_size[2];
2800         int cabac_size[2];
2801
2802         u32 phy_pps;
2803         u32 phy_rps;
2804         u32 phy_scl;
2805         u32 phy_str;
2806         u32 phy_yuv;
2807         u32 phy_ref;
2808         u32 phy_cabac;
2809
2810         volatile u8 *stream_buf;
2811         volatile u8 *pps_buf;
2812         volatile u8 *rps_buf;
2813         volatile u8 *scl_buf;
2814         volatile u8 *yuv_buf;
2815         volatile u8 *cabac_buf;
2816         volatile u8 *ref_buf;
2817
2818         u8 *pps;
2819         u8 *yuv[2];
2820         int i;
2821
2822         pps_tbl[0] = pps_00;
2823         pps_tbl[1] = pps_01;
2824
2825         register_tbl[0] = register_00;
2826         register_tbl[1] = register_01;
2827
2828         rps_tbl[0] = rps_00;
2829         rps_tbl[1] = rps_01;
2830
2831         scaling_list_tbl[0] = scaling_list_00;
2832         scaling_list_tbl[1] = scaling_list_01;
2833
2834         stream_tbl[0] = stream_00;
2835         stream_tbl[1] = stream_01;
2836
2837         stream_size[0] = sizeof(stream_00);
2838         stream_size[1] = sizeof(stream_01);
2839
2840         pps_size[0] = sizeof(pps_00);
2841         pps_size[1] = sizeof(pps_01);
2842
2843         rps_size[0] = sizeof(rps_00);
2844         rps_size[1] = sizeof(rps_01);
2845
2846         scl_size[0] = sizeof(scaling_list_00);
2847         scl_size[1] = sizeof(scaling_list_01);
2848
2849         cabac_size[0] = sizeof(Cabac_table);
2850         cabac_size[1] = sizeof(Cabac_table);
2851
2852         /* create session */
2853         session.pid = current->pid;
2854         session.type = VPU_DEC;
2855         INIT_LIST_HEAD(&session.waiting);
2856         INIT_LIST_HEAD(&session.running);
2857         INIT_LIST_HEAD(&session.done);
2858         INIT_LIST_HEAD(&session.list_session);
2859         init_waitqueue_head(&session.wait);
2860         atomic_set(&session.task_running, 0);
2861         list_add_tail(&session.list_session, &pservice->session);
2862
2863         yuv[0] = get_align_ptr_no_copy(256*256*2, &phy_yuv);
2864         yuv[1] = get_align_ptr_no_copy(256*256*2, &phy_ref);
2865
2866         while (testidx < TEST_CNT) {
2867                 /* create registers */
2868                 reg = kmalloc(sizeof(vpu_reg)+pservice->reg_size, GFP_KERNEL);
2869                 if (!reg) {
2870                         vpu_err("error: kmalloc failed for hevc test register set\n");
2871                         return -1;
2872                 }
2873
2874                 if (size > pservice->reg_size) {
2875                         printk("warning: vpu reg size %lu is larger than hw reg size %lu\n", size, pservice->reg_size);
2876                         size = pservice->reg_size;
2877                 }
2878                 reg->session = &session;
2879                 reg->type = session.type;
2880                 reg->size = size;
2881                 reg->freq = VPU_FREQ_DEFAULT;
2882                 reg->reg = (unsigned long *)&reg[1];
2883                 INIT_LIST_HEAD(&reg->session_link);
2884                 INIT_LIST_HEAD(&reg->status_link);
2885
2886                 /* TODO: stuff registers */
2887                 memcpy(&reg->reg[0], register_tbl[testidx], /*sizeof(register_00)*/ 176);
2888
2889                 stream_buf = get_align_ptr(stream_tbl[testidx], stream_size[testidx], &phy_str);
2890                 pps_buf = get_align_ptr(pps_tbl[0], pps_size[0], &phy_pps);
2891                 rps_buf = get_align_ptr(rps_tbl[testidx], rps_size[testidx], &phy_rps);
2892                 scl_buf = get_align_ptr(scaling_list_tbl[testidx], scl_size[testidx], &phy_scl);
2893                 cabac_buf = get_align_ptr(Cabac_table, cabac_size[testidx], &phy_cabac);
2894
2895                 pps = pps_buf;
2896
2897                 /* patch the scaling list physical address into each PPS entry */
2898                 for (i = 0; i < 64; i++) {
2899                         u32 scaling_offset;
2900                         u32 tmp;
2901
2902                         scaling_offset = (u32)pps[i*80+74];
2903                         scaling_offset += (u32)pps[i*80+75] << 8;
2904                         scaling_offset += (u32)pps[i*80+76] << 16;
2905                         scaling_offset += (u32)pps[i*80+77] << 24;
2906
2907                         tmp = phy_scl + scaling_offset;
2908
2909                         pps[i*80+74] = tmp & 0xff;
2910                         pps[i*80+75] = (tmp >> 8) & 0xff;
2911                         pps[i*80+76] = (tmp >> 16) & 0xff;
2912                         pps[i*80+77] = (tmp >> 24) & 0xff;
2913                 }
2914
2915                 printk("%s %d, phy stream %08x, phy pps %08x, phy rps %08x\n",
2916                         __func__, __LINE__, phy_str, phy_pps, phy_rps);
2917
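                     /*
                      * Fill the decoder registers for this test case: stream base and
                      * length, CABAC table, output/reference picture bases and the
                      * PPS/RPS buffer addresses.
                      */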
2918                 reg->reg[1] = 0x21;
2919                 reg->reg[4] = phy_str;
2920                 reg->reg[5] = ((stream_size[testidx] + 15) & ~15) + 64;
2921                 reg->reg[6] = phy_cabac;
2922                 reg->reg[7] = testidx ? phy_ref : phy_yuv;
2923                 reg->reg[42] = phy_pps;
2924                 reg->reg[43] = phy_rps;
2925                 for (i = 10; i <= 24; i++)
2926                         reg->reg[i] = phy_yuv;
2927
2928                 mutex_lock(&pservice->lock);
2929                 list_add_tail(&reg->status_link, &pservice->waiting);
2930                 list_add_tail(&reg->session_link, &session.waiting);
2931                 mutex_unlock(&pservice->lock);
2932
2933                 /* stuff hardware */
2934                 try_set_reg(data);
2935
2936                 /* wait for result */
2937                 ret = wait_event_timeout(session.wait, !list_empty(&session.done), VPU_TIMEOUT_DELAY);
2938                 if (!list_empty(&session.done)) {
2939                         if (ret < 0)
2940                                 vpu_err("warning: pid %d wait task success but wait_event ret %d\n", session.pid, ret);
2941                         ret = 0;
2942                 } else {
2943                         if (unlikely(ret < 0)) {
2944                                 vpu_err("error: pid %d wait task ret %d\n", session.pid, ret);
2945                         } else if (ret == 0) {
2946                                 vpu_err("error: pid %d waited for %d tasks but timed out\n", session.pid, atomic_read(&session.task_running));
2947                                 ret = -ETIMEDOUT;
2948                         }
2949                 }
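                     /*
                      * On failure: dump the service state, reset the hardware if a
                      * task is still marked running, clear the session and print the
                      * decoder registers before bailing out.
                      */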
2950                 if (ret < 0) {
2951                         int task_running = atomic_read(&session.task_running);
2952                         int n;
2953                         mutex_lock(&pservice->lock);
2954                         vpu_service_dump(pservice);
2955                         if (task_running) {
2956                                 atomic_set(&session.task_running, 0);
2957                                 atomic_sub(task_running, &pservice->total_running);
2958                                 printk("%d task(s) running but not returned, resetting hardware...", task_running);
2959                                 vpu_reset(data);
2960                                 printk("done\n");
2961                         }
2962                         vpu_service_session_clear(pservice, &session);
2963                         mutex_unlock(&pservice->lock);
2964
2965                         printk("\nDEC Registers:\n");
2966                         n = data->dec_dev.iosize >> 2;
2967                         for (i = 0; i < n; i++)
2968                                 printk("\tswreg%d = %08X\n", i, readl(data->dec_dev.hwregs + i));
2969
2970                         vpu_err("test index %d failed\n", testidx);
2971                         break;
2972                 } else {
2973                         vpu_reg *done_reg = list_entry(session.done.next, vpu_reg, session_link);
2974
2975                         vpu_debug(DEBUG_EXTRA_INFO, "test index %d success\n", testidx);
2976
2977                         for (i = 0; i < 68; i++) {
2978                                 if (i % 4 == 0)
2979                                         printk("%02d: ", i);
2980                                 printk("%08lx ", done_reg->reg[i]);
2981                                 if ((i + 1) % 4 == 0)
2982                                         printk("\n");
2983                         }
2984
2985                         testidx++;
2986                 }
2987
2988                 reg_deinit(data, reg);
2989         }
2990
2991         return 0;
2992 }
2993
2994 #endif
2995