rockchip/vcodec: drop needless devm_clk_put
[firefly-linux-kernel-4.4.55.git] drivers/video/rockchip/vcodec/vcodec_service.c
1 /**
2  * Copyright (C) 2015 Fuzhou Rockchip Electronics Co., Ltd
3  * author: chenhengming chm@rock-chips.com
4  *         Alpha Lin, alpha.lin@rock-chips.com
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include <linux/clk.h>
20 #include <linux/compat.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/module.h>
25 #include <linux/fs.h>
26 #include <linux/mm.h>
27 #include <linux/platform_device.h>
28 #include <linux/reset.h>
29 #include <linux/sched.h>
30 #include <linux/slab.h>
31 #include <linux/wakelock.h>
32 #include <linux/cdev.h>
33 #include <linux/of.h>
34 #include <linux/of_platform.h>
35 #include <linux/of_irq.h>
36 #include <linux/regmap.h>
37 #include <linux/mfd/syscon.h>
38 #include <linux/uaccess.h>
39 #include <linux/debugfs.h>
40 #include <linux/pm_runtime.h>
41
42 #include <linux/rockchip/cru.h>
43 #include <linux/rockchip/pmu.h>
44 #include <linux/rockchip/grf.h>
45
46 #if defined(CONFIG_ION_ROCKCHIP)
47 #include <linux/rockchip_ion.h>
48 #endif
49
50 #include <linux/rockchip-iovmm.h>
51 #include <linux/dma-buf.h>
52
53 #include "vcodec_hw_info.h"
54 #include "vcodec_hw_vpu.h"
55 #include "vcodec_hw_rkv.h"
56 #include "vcodec_hw_vpu2.h"
57
58 #include "vcodec_service.h"
59
60 /*
61  * debug flag usage:
62  * +------+-------------------+
63  * | 8bit |      24bit        |
64  * +------+-------------------+
65  * bits  0~23 select the information type
66  * bits 24~31 select the print format
67  */
68
69 #define DEBUG_POWER                             0x00000001
70 #define DEBUG_CLOCK                             0x00000002
71 #define DEBUG_IRQ_STATUS                        0x00000004
72 #define DEBUG_IOMMU                             0x00000008
73 #define DEBUG_IOCTL                             0x00000010
74 #define DEBUG_FUNCTION                          0x00000020
75 #define DEBUG_REGISTER                          0x00000040
76 #define DEBUG_EXTRA_INFO                        0x00000080
77 #define DEBUG_TIMING                            0x00000100
78 #define DEBUG_TASK_INFO                         0x00000200
79
80 #define DEBUG_SET_REG                           0x00001000
81 #define DEBUG_GET_REG                           0x00002000
82 #define DEBUG_PPS_FILL                          0x00004000
83 #define DEBUG_IRQ_CHECK                         0x00008000
84 #define DEBUG_CACHE_32B                         0x00010000
85
86 #define PRINT_FUNCTION                          0x80000000
87 #define PRINT_LINE                              0x40000000
88
89 static int debug;
90 module_param(debug, int, S_IRUGO | S_IWUSR);
91 MODULE_PARM_DESC(debug, "bit switch for vcodec_service debug information");
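
/*
 * Usage sketch (assuming the module builds as vcodec_service, which is what
 * KBUILD_MODNAME above suggests): enable power, iommu and timing logs at
 * runtime by writing the bitmask to the module parameter, e.g.
 *
 *   echo 0x109 > /sys/module/vcodec_service/parameters/debug
 *
 * 0x109 == DEBUG_TIMING | DEBUG_IOMMU | DEBUG_POWER
 */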
92
93 #define VCODEC_CLOCK_ENABLE     1
94
95 /*
96  * hardware information organization
97  *
98  * In order to support multiple hardware blocks with different versions, the
99  * hardware information is organized as follows:
100  *
101  * 1. First, index the hardware by register size / position.
102  *    This information is fixed for each hardware block and does not depend
103  *    on the runtime work flow; it only affects resource allocation.
104  *    Descriptor: struct vpu_hw_info
105  *
106  * 2. Then, index the hardware by runtime configuration.
107  *    This information covers the runtime setting behaviour, including the
108  *    enable register, the irq register and other key control flags.
109  *    Descriptor: struct vpu_task_info
110  *
111  * 3. Finally, in the iommu case the fd translation tables are required.
112  *    Descriptor: struct vpu_trans_info
113  */
114
115 enum VPU_FREQ {
116         VPU_FREQ_200M,
117         VPU_FREQ_266M,
118         VPU_FREQ_300M,
119         VPU_FREQ_400M,
120         VPU_FREQ_500M,
121         VPU_FREQ_600M,
122         VPU_FREQ_DEFAULT,
123         VPU_FREQ_BUT,
124 };
125
126 struct extra_info_elem {
127         u32 index;
128         u32 offset;
129 };
130
131 #define EXTRA_INFO_MAGIC        0x4C4A46
132
133 struct extra_info_for_iommu {
134         u32 magic;
135         u32 cnt;
136         struct extra_info_elem elem[20];
137 };
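
/*
 * Note on usage: this trailer may be appended by userspace after the raw
 * register set handed to the driver (see reg_init() and
 * vcodec_bufid_to_iova() below). When magic equals EXTRA_INFO_MAGIC, each
 * (index, offset) element adds a byte offset to the translated iova that is
 * written back into reg->reg[index].
 */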
138
139 #define MHZ                                     (1000*1000)
140 #define SIZE_REG(reg)                           ((reg)*4)
141
142 static struct vcodec_info vcodec_info_set[] = {
143         [0] = {
144                 .hw_id          = VPU_ID_8270,
145                 .hw_info        = &hw_vpu_8270,
146                 .task_info      = task_vpu,
147                 .trans_info     = trans_vpu,
148         },
149         [1] = {
150                 .hw_id          = VPU_ID_4831,
151                 .hw_info        = &hw_vpu_4831,
152                 .task_info      = task_vpu,
153                 .trans_info     = trans_vpu,
154         },
155         [2] = {
156                 .hw_id          = VPU_DEC_ID_9190,
157                 .hw_info        = &hw_vpu_9190,
158                 .task_info      = task_vpu,
159                 .trans_info     = trans_vpu,
160         },
161         [3] = {
162                 .hw_id          = HEVC_ID,
163                 .hw_info        = &hw_rkhevc,
164                 .task_info      = task_rkv,
165                 .trans_info     = trans_rkv,
166         },
167         [4] = {
168                 .hw_id          = RKV_DEC_ID,
169                 .hw_info        = &hw_rkvdec,
170                 .task_info      = task_rkv,
171                 .trans_info     = trans_rkv,
172         },
173         [5] = {
174                 .hw_id          = VPU2_ID,
175                 .hw_info        = &hw_vpu2,
176                 .task_info      = task_vpu2,
177                 .trans_info     = trans_vpu2,
178         },
179 };
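
/*
 * Minimal sketch (not called by the code in this file; the function name is
 * hypothetical): how a probe path could map a hardware ID read back from the
 * version register onto an entry of vcodec_info_set[] above.
 */
static __maybe_unused const struct vcodec_info *
vcodec_info_lookup_sketch(u32 hw_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(vcodec_info_set); i++)
                if (vcodec_info_set[i].hw_id == hw_id)
                        return &vcodec_info_set[i];

        return NULL;
}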
180
181 #define DEBUG
182 #ifdef DEBUG
183 #define vpu_debug_func(type, fmt, args...)                      \
184         do {                                                    \
185                 if (unlikely(debug & type)) {                   \
186                         pr_info("%s:%d: " fmt,                  \
187                                  __func__, __LINE__, ##args);   \
188                 }                                               \
189         } while (0)
190 #define vpu_debug(type, fmt, args...)                           \
191         do {                                                    \
192                 if (unlikely(debug & type)) {                   \
193                         pr_info(fmt, ##args);                   \
194                 }                                               \
195         } while (0)
196 #else
197 #define vpu_debug_func(level, fmt, args...)
198 #define vpu_debug(level, fmt, args...)
199 #endif
200
201 #define vpu_debug_enter() vpu_debug_func(DEBUG_FUNCTION, "enter\n")
202 #define vpu_debug_leave() vpu_debug_func(DEBUG_FUNCTION, "leave\n")
203
204 #define vpu_err(fmt, args...)                           \
205                 pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
206
207 enum VPU_DEC_FMT {
208         VPU_DEC_FMT_H264,
209         VPU_DEC_FMT_MPEG4,
210         VPU_DEC_FMT_H263,
211         VPU_DEC_FMT_JPEG,
212         VPU_DEC_FMT_VC1,
213         VPU_DEC_FMT_MPEG2,
214         VPU_DEC_FMT_MPEG1,
215         VPU_DEC_FMT_VP6,
216         VPU_DEC_FMT_RESERV0,
217         VPU_DEC_FMT_VP7,
218         VPU_DEC_FMT_VP8,
219         VPU_DEC_FMT_AVS,
220         VPU_DEC_FMT_RES
221 };
222
223 /**
224  * struct for a per-process session which connects to the vpu
225  *
226  * @author ChenHengming (2011-5-3)
227  */
228 struct vpu_session {
229         enum VPU_CLIENT_TYPE type;
230         /* a linked list of data so we can access them for debugging */
231         struct list_head list_session;
232         /* a linked list of register data waiting for process */
233         struct list_head waiting;
234         /* a linked list of register data in processing */
235         struct list_head running;
236         /* a linked list of register data processed */
237         struct list_head done;
238         wait_queue_head_t wait;
239         pid_t pid;
240         atomic_t task_running;
241 };
242
243 /**
244  * struct for a register set submitted through a session
245  *
246  * @author ChenHengming (2011-5-4)
247  */
248 struct vpu_reg {
249         enum VPU_CLIENT_TYPE type;
250         enum VPU_FREQ freq;
251         struct vpu_session *session;
252         struct vpu_subdev_data *data;
253         struct vpu_task_info *task;
254         const struct vpu_trans_info *trans;
255
256         /* link to vpu service session */
257         struct list_head session_link;
258         /* link to register set list */
259         struct list_head status_link;
260
261         unsigned long size;
262         struct list_head mem_region_list;
263         u32 dec_base;
264         u32 *reg;
265 };
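
/*
 * Lifecycle note: a vpu_reg is created by reg_init(), queued on the waiting
 * lists of both its session and the service, moved to the running lists by
 * reg_from_wait_to_run(), to the done lists by reg_from_run_to_done(), and
 * finally released by reg_deinit().
 */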
266
267 struct vpu_device {
268         atomic_t irq_count_codec;
269         atomic_t irq_count_pp;
270         unsigned int iosize;
271         u32 *regs;
272 };
273
274 enum vcodec_device_id {
275         VCODEC_DEVICE_ID_VPU,
276         VCODEC_DEVICE_ID_HEVC,
277         VCODEC_DEVICE_ID_COMBO,
278         VCODEC_DEVICE_ID_RKVDEC,
279         VCODEC_DEVICE_ID_BUTT
280 };
281
282 enum VCODEC_RUNNING_MODE {
283         VCODEC_RUNNING_MODE_NONE = -1,
284         VCODEC_RUNNING_MODE_VPU,
285         VCODEC_RUNNING_MODE_HEVC,
286         VCODEC_RUNNING_MODE_RKVDEC
287 };
288
289 struct vcodec_mem_region {
290         struct list_head srv_lnk;
291         struct list_head reg_lnk;
292         struct list_head session_lnk;
293         unsigned long iova;     /* virtual address for iommu */
294         unsigned long len;
295         u32 reg_idx;
296         struct ion_handle *hdl;
297 };
298
299 enum vpu_ctx_state {
300         MMU_ACTIVATED   = BIT(0)
301 };
302
303 struct vpu_subdev_data {
304         struct cdev cdev;
305         dev_t dev_t;
306         struct class *cls;
307         struct device *child_dev;
308
309         int irq_enc;
310         int irq_dec;
311         struct vpu_service_info *pservice;
312
313         u32 *regs;
314         enum VCODEC_RUNNING_MODE mode;
315         struct list_head lnk_service;
316
317         struct device *dev;
318
319         struct vpu_device enc_dev;
320         struct vpu_device dec_dev;
321
322         enum VPU_HW_ID hw_id;
323         struct vpu_hw_info *hw_info;
324         struct vpu_task_info *task_info;
325         const struct vpu_trans_info *trans_info;
326
327         u32 reg_size;
328         unsigned long state;
329
330 #ifdef CONFIG_DEBUG_FS
331         struct dentry *debugfs_dir;
332         struct dentry *debugfs_file_regs;
333 #endif
334
335         struct device *mmu_dev;
336 };
337
338 struct vpu_service_info {
339         struct wake_lock wake_lock;
340         struct delayed_work power_off_work;
341         ktime_t last; /* record previous power-on time */
342         /* vpu service structure global lock */
343         struct mutex lock;
344         /* link to link_reg in struct vpu_reg */
345         struct list_head waiting;
346         /* link to link_reg in struct vpu_reg */
347         struct list_head running;
348         /* link to link_reg in struct vpu_reg */
349         struct list_head done;
350         /* link to list_session in struct vpu_session */
351         struct list_head session;
352         atomic_t total_running;
353         atomic_t enabled;
354         atomic_t power_on_cnt;
355         atomic_t power_off_cnt;
356         atomic_t service_on;
357         struct mutex shutdown_lock;
358         struct vpu_reg *reg_codec;
359         struct vpu_reg *reg_pproc;
360         struct vpu_reg *reg_resev;
361         struct vpu_dec_config dec_config;
362         struct vpu_enc_config enc_config;
363
364         bool auto_freq;
365         bool bug_dec_addr;
366         atomic_t freq_status;
367
368         struct clk *aclk_vcodec;
369         struct clk *hclk_vcodec;
370         struct clk *clk_core;
371         struct clk *clk_cabac;
372         struct clk *pd_video;
373
374 #ifdef CONFIG_RESET_CONTROLLER
375         struct reset_control *rst_a;
376         struct reset_control *rst_h;
377         struct reset_control *rst_v;
378 #endif
379         struct device *dev;
380
381         u32 irq_status;
382         atomic_t reset_request;
383         struct ion_client *ion_client;
384         struct list_head mem_region_list;
385
386         enum vcodec_device_id dev_id;
387
388         enum VCODEC_RUNNING_MODE curr_mode;
389         u32 prev_mode;
390
391         struct delayed_work simulate_work;
392
393         u32 mode_bit;
394         u32 mode_ctrl;
395         u32 *reg_base;
396         u32 ioaddr;
397         struct regmap *grf;
398         u32 *grf_base;
399
400         char *name;
401
402         u32 subcnt;
403         struct list_head subdev_list;
404 };
405
406 struct vpu_request {
407         u32 *req;
408         u32 size;
409 };
410
411 #ifdef CONFIG_COMPAT
412 struct compat_vpu_request {
413         compat_uptr_t req;
414         u32 size;
415 };
416 #endif
417
418 /* debugfs root directory for all devices (vpu, hevc). */
419 static struct dentry *parent;
420
421 #ifdef CONFIG_DEBUG_FS
422 static int vcodec_debugfs_init(void);
423 static void vcodec_debugfs_exit(void);
424 static struct dentry *vcodec_debugfs_create_device_dir(
425                 char *dirname, struct dentry *parent);
426 static int debug_vcodec_open(struct inode *inode, struct file *file);
427
428 static const struct file_operations debug_vcodec_fops = {
429         .open = debug_vcodec_open,
430         .read = seq_read,
431         .llseek = seq_lseek,
432         .release = single_release,
433 };
434 #endif
435
436 #define VDPU_SOFT_RESET_REG     101
437 #define VDPU_CLEAN_CACHE_REG    516
438 #define VEPU_CLEAN_CACHE_REG    772
439 #define HEVC_CLEAN_CACHE_REG    260
440
441 #define VPU_REG_ENABLE(base, reg)       writel_relaxed(1, base + reg)
442
443 #define VDPU_SOFT_RESET(base)   VPU_REG_ENABLE(base, VDPU_SOFT_RESET_REG)
444 #define VDPU_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, VDPU_CLEAN_CACHE_REG)
445 #define VEPU_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, VEPU_CLEAN_CACHE_REG)
446 #define HEVC_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, HEVC_CLEAN_CACHE_REG)
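
/*
 * Note: the *_REG values above are u32 word indices, not byte offsets; the
 * register base pointers in this driver are u32 *, so the pointer arithmetic
 * in VPU_REG_ENABLE() already scales by four.
 */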
447
448 #define VPU_POWER_OFF_DELAY             (4 * HZ) /* 4s */
449 #define VPU_TIMEOUT_DELAY               (2 * HZ) /* 2s */
450
451 static void time_record(struct vpu_task_info *task, int is_end)
452 {
453         if (unlikely(debug & DEBUG_TIMING) && task)
454                 do_gettimeofday((is_end) ? (&task->end) : (&task->start));
455 }
456
457 static void time_diff(struct vpu_task_info *task)
458 {
459         vpu_debug(DEBUG_TIMING, "%s task: %ld ms\n", task->name,
460                   (task->end.tv_sec  - task->start.tv_sec)  * 1000 +
461                   (task->end.tv_usec - task->start.tv_usec) / 1000);
462 }
463
464 static void vcodec_enter_mode(struct vpu_subdev_data *data)
465 {
466         int bits;
467         u32 raw = 0;
468         struct vpu_service_info *pservice = data->pservice;
469         struct vpu_subdev_data *subdata, *n;
470
471         if (pservice->subcnt < 2) {
472                 if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
473                         set_bit(MMU_ACTIVATED, &data->state);
474                         if (atomic_read(&pservice->enabled))
475                                 rockchip_iovmm_activate(data->dev);
476                         else
477                                 BUG_ON(!atomic_read(&pservice->enabled));
478                 }
479                 return;
480         }
481
482         if (pservice->curr_mode == data->mode)
483                 return;
484
485         vpu_debug(DEBUG_IOMMU, "vcodec enter mode %d\n", data->mode);
486         list_for_each_entry_safe(subdata, n,
487                                  &pservice->subdev_list, lnk_service) {
488                 if (data != subdata && subdata->mmu_dev &&
489                     test_bit(MMU_ACTIVATED, &subdata->state)) {
490                         clear_bit(MMU_ACTIVATED, &subdata->state);
491                         rockchip_iovmm_deactivate(subdata->dev);
492                 }
493         }
494         bits = 1 << pservice->mode_bit;
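        /*
         * Rockchip GRF registers take a write-enable mask in their upper 16
         * bits, so (bits << 16) unlocks only the mode bit changed by the
         * read-modify-write sequences below.
         */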
495 #ifdef CONFIG_MFD_SYSCON
496         if (pservice->grf) {
497                 regmap_read(pservice->grf, pservice->mode_ctrl, &raw);
498
499                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
500                         regmap_write(pservice->grf, pservice->mode_ctrl,
501                                      raw | bits | (bits << 16));
502                 else
503                         regmap_write(pservice->grf, pservice->mode_ctrl,
504                                      (raw & (~bits)) | (bits << 16));
505         } else if (pservice->grf_base) {
506                 u32 *grf_base = pservice->grf_base;
507
508                 raw = readl_relaxed(grf_base + pservice->mode_ctrl / 4);
509                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
510                         writel_relaxed(raw | bits | (bits << 16),
511                                        grf_base + pservice->mode_ctrl / 4);
512                 else
513                         writel_relaxed((raw & (~bits)) | (bits << 16),
514                                        grf_base + pservice->mode_ctrl / 4);
515         } else {
516                 vpu_err("no grf resource define, switch decoder failed\n");
517                 return;
518         }
519 #else
520         if (pservice->grf_base) {
521                 u32 *grf_base = pservice->grf_base;
522
523                 raw = readl_relaxed(grf_base + pservice->mode_ctrl / 4);
524                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
525                         writel_relaxed(raw | bits | (bits << 16),
526                                        grf_base + pservice->mode_ctrl / 4);
527                 else
528                         writel_relaxed((raw & (~bits)) | (bits << 16),
529                                        grf_base + pservice->mode_ctrl / 4);
530         } else {
531                 vpu_err("no grf resource define, switch decoder failed\n");
532                 return;
533         }
534 #endif
535         if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
536                 set_bit(MMU_ACTIVATED, &data->state);
537                 if (atomic_read(&pservice->enabled))
538                         rockchip_iovmm_activate(data->dev);
539                 else
540                         BUG_ON(!atomic_read(&pservice->enabled));
541         }
542
543         pservice->prev_mode = pservice->curr_mode;
544         pservice->curr_mode = data->mode;
545 }
546
547 static void vcodec_exit_mode(struct vpu_subdev_data *data)
548 {
549         if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
550                 clear_bit(MMU_ACTIVATED, &data->state);
551                 rockchip_iovmm_deactivate(data->dev);
552         }
553         /*
554          * In the VPU Combo case the hardware must switch its running mode
555          * before the other hardware component starts work. Setting the current
556          * mode to none ensures the switch to the required mode happens properly.
557          */
558         data->pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
559 }
560
561 static int vpu_get_clk(struct vpu_service_info *pservice)
562 {
563 #if VCODEC_CLOCK_ENABLE
564         struct device *dev = pservice->dev;
565
566         switch (pservice->dev_id) {
567         case VCODEC_DEVICE_ID_HEVC:
568                 pservice->pd_video = devm_clk_get(dev, "pd_hevc");
569                 if (IS_ERR(pservice->pd_video)) {
570                         dev_err(dev, "failed on clk_get pd_hevc\n");
571                         pservice->pd_video = NULL;
572                         return -1;
573                 }
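                /* fall through: the hevc device also needs the clocks below */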
574         case VCODEC_DEVICE_ID_COMBO:
575         case VCODEC_DEVICE_ID_RKVDEC:
576                 pservice->clk_cabac = devm_clk_get(dev, "clk_cabac");
577                 if (IS_ERR(pservice->clk_cabac)) {
578                         dev_err(dev, "failed on clk_get clk_cabac\n");
579                         pservice->clk_cabac = NULL;
580                 }
581                 pservice->clk_core = devm_clk_get(dev, "clk_core");
582                 if (IS_ERR(pservice->clk_core)) {
583                         dev_err(dev, "failed on clk_get clk_core\n");
584                         pservice->clk_core = NULL;
585                         return -1;
586                 }
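                /* fall through: every device also needs aclk/hclk below */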
587         case VCODEC_DEVICE_ID_VPU:
588                 pservice->aclk_vcodec = devm_clk_get(dev, "aclk_vcodec");
589                 if (IS_ERR(pservice->aclk_vcodec)) {
590                         dev_err(dev, "failed on clk_get aclk_vcodec\n");
591                         pservice->aclk_vcodec = NULL;
592                         return -1;
593                 }
594
595                 pservice->hclk_vcodec = devm_clk_get(dev, "hclk_vcodec");
596                 if (IS_ERR(pservice->hclk_vcodec)) {
597                         dev_err(dev, "failed on clk_get hclk_vcodec\n");
598                         pservice->hclk_vcodec = NULL;
599                         return -1;
600                 }
601                 if (pservice->pd_video == NULL) {
602                         pservice->pd_video = devm_clk_get(dev, "pd_video");
603                         if (IS_ERR(pservice->pd_video)) {
604                                 pservice->pd_video = NULL;
605                                 dev_info(dev, "do not have pd_video\n");
606                         }
607                 }
608                 break;
609         default:
610                 break;
611         }
612
613         return 0;
614 #else
615         return 0;
616 #endif
617 }
618
619 static void vpu_reset(struct vpu_subdev_data *data)
620 {
621         struct vpu_service_info *pservice = data->pservice;
622         enum pmu_idle_req type = IDLE_REQ_VIDEO;
623
624         if (pservice->dev_id == VCODEC_DEVICE_ID_HEVC)
625                 type = IDLE_REQ_HEVC;
626
627         pr_info("%s: resetting...", dev_name(pservice->dev));
628
629 #if defined(CONFIG_ARCH_RK29)
630         clk_disable(aclk_ddr_vepu);
631         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, true);
632         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, true);
633         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, true);
634         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, true);
635         mdelay(10);
636         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, false);
637         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, false);
638         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, false);
639         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, false);
640         clk_enable(aclk_ddr_vepu);
641 #elif defined(CONFIG_ARCH_RK30)
642         pmu_set_idle_request(IDLE_REQ_VIDEO, true);
643         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
644         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, true);
645         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
646         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, true);
647         mdelay(1);
648         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, false);
649         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
650         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, false);
651         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
652         pmu_set_idle_request(IDLE_REQ_VIDEO, false);
653 #else
654 #endif
655         WARN_ON(pservice->reg_codec != NULL);
656         WARN_ON(pservice->reg_pproc != NULL);
657         WARN_ON(pservice->reg_resev != NULL);
658         pservice->reg_codec = NULL;
659         pservice->reg_pproc = NULL;
660         pservice->reg_resev = NULL;
661
662         pr_info("for 3288/3368...");
663 #ifdef CONFIG_RESET_CONTROLLER
664         if (pservice->rst_a && pservice->rst_h) {
665                 pr_info("reset in\n");
666                 if (pservice->rst_v)
667                         reset_control_assert(pservice->rst_v);
668                 reset_control_assert(pservice->rst_a);
669                 reset_control_assert(pservice->rst_h);
670                 udelay(5);
671                 reset_control_deassert(pservice->rst_h);
672                 reset_control_deassert(pservice->rst_a);
673                 if (pservice->rst_v)
674                         reset_control_deassert(pservice->rst_v);
675         }
676 #endif
677
678         if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
679                 clear_bit(MMU_ACTIVATED, &data->state);
680                 if (atomic_read(&pservice->enabled))
681                         rockchip_iovmm_deactivate(data->dev);
682                 else
683                         BUG_ON(!atomic_read(&pservice->enabled));
684         }
685
686         atomic_set(&pservice->reset_request, 0);
687         pr_info("done\n");
688 }
689
690 static void reg_deinit(struct vpu_subdev_data *data, struct vpu_reg *reg);
691 static void vpu_service_session_clear(struct vpu_subdev_data *data,
692                                       struct vpu_session *session)
693 {
694         struct vpu_reg *reg, *n;
695
696         list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
697                 reg_deinit(data, reg);
698         }
699         list_for_each_entry_safe(reg, n, &session->running, session_link) {
700                 reg_deinit(data, reg);
701         }
702         list_for_each_entry_safe(reg, n, &session->done, session_link) {
703                 reg_deinit(data, reg);
704         }
705 }
706
707 static void vpu_service_clear(struct vpu_subdev_data *data)
708 {
709         struct vpu_reg *reg, *n;
710         struct vpu_session *session, *s;
711         struct vpu_service_info *pservice = data->pservice;
712
713         list_for_each_entry_safe(reg, n, &pservice->waiting, status_link) {
714                 reg_deinit(data, reg);
715         }
716
717         /* wake up the session wait queues to prevent a timeout-triggered
718          * hw reset during the reboot procedure.
719          */
720         list_for_each_entry_safe(session, s,
721                                  &pservice->session, list_session)
722                 wake_up(&session->wait);
723 }
724
725 static void vpu_service_dump(struct vpu_service_info *pservice)
726 {
727 }
728
729
730 static void vpu_service_power_off(struct vpu_service_info *pservice)
731 {
732         int total_running;
733         struct vpu_subdev_data *data = NULL, *n;
734         int ret = atomic_add_unless(&pservice->enabled, -1, 0);
735
736         if (!ret)
737                 return;
738
739         total_running = atomic_read(&pservice->total_running);
740         if (total_running) {
741                 pr_alert("alert: power off while %d tasks running!\n",
742                          total_running);
743                 mdelay(50);
744                 pr_alert("alert: delay 50 ms for running task\n");
745                 vpu_service_dump(pservice);
746         }
747
748         pr_info("%s: power off...", dev_name(pservice->dev));
749
750         udelay(5);
751
752         list_for_each_entry_safe(data, n, &pservice->subdev_list, lnk_service) {
753                 if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
754                         clear_bit(MMU_ACTIVATED, &data->state);
755                         rockchip_iovmm_deactivate(data->dev);
756                 }
757         }
758         pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
759
760 #if VCODEC_CLOCK_ENABLE
761         if (pservice->pd_video)
762                 clk_disable_unprepare(pservice->pd_video);
763         if (pservice->hclk_vcodec)
764                 clk_disable_unprepare(pservice->hclk_vcodec);
765         if (pservice->aclk_vcodec)
766                 clk_disable_unprepare(pservice->aclk_vcodec);
767         if (pservice->clk_core)
768                 clk_disable_unprepare(pservice->clk_core);
769         if (pservice->clk_cabac)
770                 clk_disable_unprepare(pservice->clk_cabac);
771 #endif
772         pm_runtime_put(pservice->dev);
773
774         atomic_add(1, &pservice->power_off_cnt);
775         wake_unlock(&pservice->wake_lock);
776         pr_info("done\n");
777 }
778
779 static inline void vpu_queue_power_off_work(struct vpu_service_info *pservice)
780 {
781         queue_delayed_work(system_wq, &pservice->power_off_work,
782                            VPU_POWER_OFF_DELAY);
783 }
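
/*
 * Auto power-off scheme: vpu_service_power_on() re-arms this delayed work at
 * most once a second, so the hardware is powered down VPU_POWER_OFF_DELAY
 * after the last power-on request. If the service lock is contended, the
 * worker below simply re-queues itself and retries later.
 */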
784
785 static void vpu_power_off_work(struct work_struct *work_s)
786 {
787         struct delayed_work *dlwork = container_of(work_s,
788                         struct delayed_work, work);
789         struct vpu_service_info *pservice = container_of(dlwork,
790                         struct vpu_service_info, power_off_work);
791
792         if (mutex_trylock(&pservice->lock)) {
793                 vpu_service_power_off(pservice);
794                 mutex_unlock(&pservice->lock);
795         } else {
796                 /* Come back later if the device is busy... */
797                 vpu_queue_power_off_work(pservice);
798         }
799 }
800
801 static void vpu_service_power_on(struct vpu_service_info *pservice)
802 {
803         int ret;
804         ktime_t now = ktime_get();
805
806         if (ktime_to_ns(ktime_sub(now, pservice->last)) > NSEC_PER_SEC) {
807                 cancel_delayed_work_sync(&pservice->power_off_work);
808                 vpu_queue_power_off_work(pservice);
809                 pservice->last = now;
810         }
811         ret = atomic_add_unless(&pservice->enabled, 1, 1);
812         if (!ret)
813                 return;
814
815         pr_info("%s: power on\n", dev_name(pservice->dev));
816
817 #define BIT_VCODEC_CLK_SEL      (1<<10)
818         if (of_machine_is_compatible("rockchip,rk3126"))
819                 writel_relaxed(readl_relaxed(RK_GRF_VIRT + RK312X_GRF_SOC_CON1)
820                         | BIT_VCODEC_CLK_SEL | (BIT_VCODEC_CLK_SEL << 16),
821                         RK_GRF_VIRT + RK312X_GRF_SOC_CON1);
822
823 #if VCODEC_CLOCK_ENABLE
824         if (pservice->aclk_vcodec)
825                 clk_prepare_enable(pservice->aclk_vcodec);
826         if (pservice->hclk_vcodec)
827                 clk_prepare_enable(pservice->hclk_vcodec);
828         if (pservice->clk_core)
829                 clk_prepare_enable(pservice->clk_core);
830         if (pservice->clk_cabac)
831                 clk_prepare_enable(pservice->clk_cabac);
832         if (pservice->pd_video)
833                 clk_prepare_enable(pservice->pd_video);
834 #endif
835         pm_runtime_get_sync(pservice->dev);
836
837         udelay(5);
838         atomic_add(1, &pservice->power_on_cnt);
839         wake_lock(&pservice->wake_lock);
840 }
841
842 static inline bool reg_check_interlace(struct vpu_reg *reg)
843 {
844         u32 type = (reg->reg[3] & (1 << 23));
845
846         return (type > 0);
847 }
848
849 static inline enum VPU_DEC_FMT reg_check_fmt(struct vpu_reg *reg)
850 {
851         enum VPU_DEC_FMT type = (enum VPU_DEC_FMT)((reg->reg[3] >> 28) & 0xf);
852
853         return type;
854 }
855
856 static inline int reg_probe_width(struct vpu_reg *reg)
857 {
858         int width_in_mb = reg->reg[4] >> 23;
859
860         return width_in_mb * 16;
861 }
862
863 static inline int reg_probe_hevc_y_stride(struct vpu_reg *reg)
864 {
865         int y_virstride = reg->reg[8];
866
867         return y_virstride;
868 }
869
870 static int vcodec_fd_to_iova(struct vpu_subdev_data *data,
871                              struct vpu_reg *reg, int fd)
872 {
873         struct vpu_service_info *pservice = data->pservice;
874         struct ion_handle *hdl;
875         int ret = 0;
876         struct vcodec_mem_region *mem_region;
877
878         hdl = ion_import_dma_buf(pservice->ion_client, fd);
879         if (IS_ERR(hdl)) {
880                 vpu_err("import dma-buf from fd %d failed\n", fd);
881                 return PTR_ERR(hdl);
882         }
883         mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
884
885         if (mem_region == NULL) {
886                 vpu_err("allocate memory for iommu memory region failed\n");
887                 ion_free(pservice->ion_client, hdl);
888                 return -ENOMEM;
889         }
890
891         mem_region->hdl = hdl;
892         if (data->mmu_dev)
893                 ret = ion_map_iommu(data->dev, pservice->ion_client,
894                                     mem_region->hdl, &mem_region->iova,
895                                     &mem_region->len);
896         else
897                 ret = ion_phys(pservice->ion_client,
898                                mem_region->hdl,
899                                (ion_phys_addr_t *)&mem_region->iova,
900                                (size_t *)&mem_region->len);
901
902         if (ret < 0) {
903                 vpu_err("fd %d ion map iommu failed\n", fd);
904                 kfree(mem_region);
905                 ion_free(pservice->ion_client, hdl);
906                 return -EFAULT;
907         }
908         INIT_LIST_HEAD(&mem_region->reg_lnk);
909         list_add_tail(&mem_region->reg_lnk, &reg->mem_region_list);
910         return mem_region->iova;
911 }
912
913 /*
914  * NOTE: rkvdec/rkhevc store the scaling list address in the pps buffer; the
915  * hardware reads it back by the pps id found in the video stream data.
916  *
917  * So in the iommu case we need to translate that address. The address data
918  * also uses the 10bit fd + 22bit offset packing.
919  * Because the userspace decoder does not pass the pps id in the register
920  * file set, the kernel driver would have to translate every scaling list
921  * address in the pps buffer: 256 pps entries for H.264, 64 for H.265.
922  *
923  * To optimize performance the kernel driver asks the userspace decoder to
924  * set every scaling list address in the pps buffer to the single one used
925  * by the current decoding task. The driver then translates only the first
926  * address and copies it to all pps entries.
927  */
928 static int fill_scaling_list_addr_in_pps(
929                 struct vpu_subdev_data *data,
930                 struct vpu_reg *reg,
931                 char *pps,
932                 int pps_info_count,
933                 int pps_info_size,
934                 int scaling_list_addr_offset)
935 {
936         int base = scaling_list_addr_offset;
937         int scaling_fd = 0;
938         u32 scaling_offset;
939
940         scaling_offset  = (u32)pps[base + 0];
941         scaling_offset += (u32)pps[base + 1] << 8;
942         scaling_offset += (u32)pps[base + 2] << 16;
943         scaling_offset += (u32)pps[base + 3] << 24;
944
945         scaling_fd = scaling_offset & 0x3ff;
946         scaling_offset = scaling_offset >> 10;
947
948         if (scaling_fd > 0) {
949                 int i = 0;
950                 u32 tmp = vcodec_fd_to_iova(data, reg, scaling_fd);
951
952                 if (IS_ERR_VALUE(tmp))
953                         return -1;
954                 tmp += scaling_offset;
955
956                 for (i = 0; i < pps_info_count; i++, base += pps_info_size) {
957                         pps[base + 0] = (tmp >>  0) & 0xff;
958                         pps[base + 1] = (tmp >>  8) & 0xff;
959                         pps[base + 2] = (tmp >> 16) & 0xff;
960                         pps[base + 3] = (tmp >> 24) & 0xff;
961                 }
962         }
963
964         return 0;
965 }
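
/*
 * Minimal sketch (not wired into the driver; the helper name is hypothetical)
 * of the 10bit fd + 22bit offset packing described above and used again in
 * vcodec_bufid_to_iova() below. The real paths open-code the same shifts and
 * masks, and additionally scale the offset by 16 in the H.264 4K dir_mv case.
 */
static __maybe_unused void vcodec_unpack_fd_offset_sketch(u32 packed,
                                                          int *fd, u32 *offset)
{
        *fd = packed & 0x3ff;   /* bits  0..9 : dma-buf fd            */
        *offset = packed >> 10; /* bits 10..31: byte offset in buffer */
}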
966
967 static int vcodec_bufid_to_iova(struct vpu_subdev_data *data, const u8 *tbl,
968                                 int size, struct vpu_reg *reg,
969                                 struct extra_info_for_iommu *ext_inf)
970 {
971         struct vpu_service_info *pservice = data->pservice;
972         struct vpu_task_info *task = reg->task;
973         enum FORMAT_TYPE type;
974         struct ion_handle *hdl;
975         int ret = 0;
976         struct vcodec_mem_region *mem_region;
977         int i;
978         int offset = 0;
979
980         if (tbl == NULL || size <= 0) {
981                 dev_err(pservice->dev, "invalid input arguments\n");
982                 return -1;
983         }
984
985         if (task->get_fmt)
986                 type = task->get_fmt(reg->reg);
987         else {
988                 pr_err("invalid task with NULL get_fmt\n");
989                 return -1;
990         }
991
992         for (i = 0; i < size; i++) {
993                 int usr_fd = reg->reg[tbl[i]] & 0x3FF;
994
995                 /* if userspace does not set the fd in this register, skip it */
996                 if (usr_fd == 0)
997                         continue;
998
999                 /*
1000                  * special offset scale case
1001                  *
1002                  * This translation handles the fd + offset packing.
1003                  * A register is 32 bits wide and must carry both the buffer
1004                  * file handle and the start address offset, so the two are
1005                  * packed together in the format below:
1006                  *
1007                  *  bits  0~9  buffer file handle, range 0 ~ 1023
1008                  *  bits 10~31 offset, range 0 ~ 4M
1009                  *
1010                  * In the 4K case the offset can be larger than 4M, so for the
1011                  * H.264 4K vpu/vpu2 decoder the offset is scaled by 16.
1012                  * MPEG4 uses the same register for colmv and must not be
1013                  * scaled.
1014                  *
1015                  * RKVdec does not have this issue.
1016                  */
1017                 if ((type == FMT_H264D || type == FMT_VP9D) &&
1018                     task->reg_dir_mv > 0 && task->reg_dir_mv == tbl[i])
1019                         offset = reg->reg[tbl[i]] >> 10 << 4;
1020                 else
1021                         offset = reg->reg[tbl[i]] >> 10;
1022
1023                 vpu_debug(DEBUG_IOMMU, "pos %3d fd %3d offset %10d\n",
1024                           tbl[i], usr_fd, offset);
1025
1026                 hdl = ion_import_dma_buf(pservice->ion_client, usr_fd);
1027                 if (IS_ERR(hdl)) {
1028                         dev_err(pservice->dev,
1029                                 "import dma-buf from fd %d failed, reg[%d]\n",
1030                                 usr_fd, tbl[i]);
1031                         return PTR_ERR(hdl);
1032                 }
1033
1034                 if (task->reg_pps > 0 && task->reg_pps == tbl[i]) {
1035                         int pps_info_offset;
1036                         int pps_info_count;
1037                         int pps_info_size;
1038                         int scaling_list_addr_offset;
1039
1040                         switch (type) {
1041                         case FMT_H264D: {
1042                                 pps_info_offset = offset;
1043                                 pps_info_count = 256;
1044                                 pps_info_size = 32;
1045                                 scaling_list_addr_offset = 23;
1046                         } break;
1047                         case FMT_H265D: {
1048                                 pps_info_offset = 0;
1049                                 pps_info_count = 64;
1050                                 pps_info_size = 80;
1051                                 scaling_list_addr_offset = 74;
1052                         } break;
1053                         default: {
1054                                 pps_info_offset = 0;
1055                                 pps_info_count = 0;
1056                                 pps_info_size = 0;
1057                                 scaling_list_addr_offset = 0;
1058                         } break;
1059                         }
1060
1061                         vpu_debug(DEBUG_PPS_FILL,
1062                                   "scaling list filling parameter:\n");
1063                         vpu_debug(DEBUG_PPS_FILL,
1064                                   "pps_info_offset %d\n", pps_info_offset);
1065                         vpu_debug(DEBUG_PPS_FILL,
1066                                   "pps_info_count  %d\n", pps_info_count);
1067                         vpu_debug(DEBUG_PPS_FILL,
1068                                   "pps_info_size   %d\n", pps_info_size);
1069                         vpu_debug(DEBUG_PPS_FILL,
1070                                   "scaling_list_addr_offset %d\n",
1071                                   scaling_list_addr_offset);
1072
1073                         if (pps_info_count) {
1074                                 char *pps = (char *)ion_map_kernel(
1075                                                 pservice->ion_client, hdl);
1076                                 vpu_debug(DEBUG_PPS_FILL,
1077                                           "scaling list setting pps %p\n", pps);
1078                                 pps += pps_info_offset;
1079
1080                                 if (fill_scaling_list_addr_in_pps(
1081                                                 data, reg, pps,
1082                                                 pps_info_count,
1083                                                 pps_info_size,
1084                                                 scaling_list_addr_offset) < 0) {
1085                                         ion_free(pservice->ion_client, hdl);
1086                                         return -1;
1087                                 }
1088                         }
1089                 }
1090
1091                 mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
1092
1093                 if (!mem_region) {
1094                         ion_free(pservice->ion_client, hdl);
1095                         return -ENOMEM;
1096                 }
1097
1098                 mem_region->hdl = hdl;
1099                 mem_region->reg_idx = tbl[i];
1100
1101                 if (data->mmu_dev)
1102                         ret = ion_map_iommu(data->dev,
1103                                             pservice->ion_client,
1104                                             mem_region->hdl,
1105                                             &mem_region->iova,
1106                                             &mem_region->len);
1107                 else
1108                         ret = ion_phys(pservice->ion_client,
1109                                        mem_region->hdl,
1110                                        (ion_phys_addr_t *)&mem_region->iova,
1111                                        (size_t *)&mem_region->len);
1112
1113                 if (ret < 0) {
1114                         dev_err(pservice->dev, "reg %d fd %d ion map iommu failed\n",
1115                                 tbl[i], usr_fd);
1116                         kfree(mem_region);
1117                         ion_free(pservice->ion_client, hdl);
1118                         return ret;
1119                 }
1120
1121                 /*
1122                  * special for vpu dec num 12: record decoded length
1123                  * hacking for decoded length
1124                  * NOTE: not a perfect fix, the fd is not recorded
1125                  */
1126                 if (task->reg_len > 0 && task->reg_len == tbl[i]) {
1127                         reg->dec_base = mem_region->iova + offset;
1128                         vpu_debug(DEBUG_REGISTER, "dec_set %08x\n",
1129                                   reg->dec_base);
1130                 }
1131
1132                 reg->reg[tbl[i]] = mem_region->iova + offset;
1133                 INIT_LIST_HEAD(&mem_region->reg_lnk);
1134                 list_add_tail(&mem_region->reg_lnk, &reg->mem_region_list);
1135         }
1136
1137         if (ext_inf != NULL && ext_inf->magic == EXTRA_INFO_MAGIC) {
1138                 for (i = 0; i < ext_inf->cnt; i++) {
1139                         vpu_debug(DEBUG_IOMMU, "reg[%d] + offset %d\n",
1140                                   ext_inf->elem[i].index,
1141                                   ext_inf->elem[i].offset);
1142                         reg->reg[ext_inf->elem[i].index] +=
1143                                 ext_inf->elem[i].offset;
1144                 }
1145         }
1146
1147         return 0;
1148 }
1149
1150 static int vcodec_reg_address_translate(struct vpu_subdev_data *data,
1151                                         struct vpu_reg *reg,
1152                                         struct extra_info_for_iommu *ext_inf)
1153 {
1154         enum FORMAT_TYPE type = reg->task->get_fmt(reg->reg);
1155
1156         if (type < FMT_TYPE_BUTT) {
1157                 const struct vpu_trans_info *info = &reg->trans[type];
1158                 const u8 *tbl = info->table;
1159                 int size = info->count;
1160
1161                 return vcodec_bufid_to_iova(data, tbl, size, reg, ext_inf);
1162         }
1163         pr_err("found invalid format type!\n");
1164         return -1;
1165 }
1166
1167 static void get_reg_freq(struct vpu_subdev_data *data, struct vpu_reg *reg)
1168 {
1169
1170         if (!of_machine_is_compatible("rockchip,rk2928g")) {
1171                 if (reg->type == VPU_DEC || reg->type == VPU_DEC_PP) {
1172                         if (reg_check_fmt(reg) == VPU_DEC_FMT_H264) {
1173                                 if (reg_probe_width(reg) > 3200) {
1174                                         /*raise frequency for 4k avc.*/
1175                                         reg->freq = VPU_FREQ_600M;
1176                                 }
1177                         } else {
1178                                 if (reg_check_interlace(reg))
1179                                         reg->freq = VPU_FREQ_400M;
1180                         }
1181                 }
1182                 if (data->hw_id == HEVC_ID) {
1183                         if (reg_probe_hevc_y_stride(reg) > 60000)
1184                                 reg->freq = VPU_FREQ_400M;
1185                 }
1186                 if (reg->type == VPU_PP)
1187                         reg->freq = VPU_FREQ_400M;
1188         }
1189 }
1190
1191 static struct vpu_reg *reg_init(struct vpu_subdev_data *data,
1192                                 struct vpu_session *session,
1193                                 void __user *src, u32 size)
1194 {
1195         struct vpu_service_info *pservice = data->pservice;
1196         int extra_size = 0;
1197         struct extra_info_for_iommu extra_info;
1198         struct vpu_reg *reg = kzalloc(sizeof(*reg) + data->reg_size,
1199                                       GFP_KERNEL);
1200
1201         vpu_debug_enter();
1202
1203         if (NULL == reg) {
1204                 vpu_err("error: kzalloc failed\n");
1205                 return NULL;
1206         }
1207
1208         if (size > data->reg_size) {
1209                 extra_size = size - data->reg_size;
                     /* clamp so the extra_info copy below cannot overflow */
                     if (extra_size > sizeof(extra_info))
                             extra_size = sizeof(extra_info);
1210                 size = data->reg_size;
1211         }
1212         reg->session = session;
1213         reg->data = data;
1214         reg->type = session->type;
1215         reg->size = size;
1216         reg->freq = VPU_FREQ_DEFAULT;
1217         reg->task = &data->task_info[session->type];
1218         reg->trans = data->trans_info;
1219         reg->reg = (u32 *)&reg[1];
1220         INIT_LIST_HEAD(&reg->session_link);
1221         INIT_LIST_HEAD(&reg->status_link);
1222
1223         INIT_LIST_HEAD(&reg->mem_region_list);
1224
1225         if (copy_from_user(&reg->reg[0], (void __user *)src, size)) {
1226                 vpu_err("error: copy_from_user failed\n");
1227                 kfree(reg);
1228                 return NULL;
1229         }
1230
1231         if (copy_from_user(&extra_info, (u8 *)src + size, extra_size)) {
1232                 vpu_err("error: copy_from_user failed\n");
1233                 kfree(reg);
1234                 return NULL;
1235         }
1236
1237         if (0 > vcodec_reg_address_translate(data, reg, &extra_info)) {
1238                 int i = 0;
1239
1240                 vpu_err("error: translate reg address failed, dumping regs\n");
1241                 for (i = 0; i < size >> 2; i++)
1242                         pr_err("reg[%02d]: %08x\n", i, *((u32 *)src + i));
1243
1244                 kfree(reg);
1245                 return NULL;
1246         }
1247
1248         mutex_lock(&pservice->lock);
1249         list_add_tail(&reg->status_link, &pservice->waiting);
1250         list_add_tail(&reg->session_link, &session->waiting);
1251         mutex_unlock(&pservice->lock);
1252
1253         if (pservice->auto_freq)
1254                 get_reg_freq(data, reg);
1255
1256         vpu_debug_leave();
1257         return reg;
1258 }
1259
1260 static void reg_deinit(struct vpu_subdev_data *data, struct vpu_reg *reg)
1261 {
1262         struct vpu_service_info *pservice = data->pservice;
1263         struct vcodec_mem_region *mem_region = NULL, *n;
1264
1265         list_del_init(&reg->session_link);
1266         list_del_init(&reg->status_link);
1267         if (reg == pservice->reg_codec)
1268                 pservice->reg_codec = NULL;
1269         if (reg == pservice->reg_pproc)
1270                 pservice->reg_pproc = NULL;
1271
1272         /* release the memory regions attached to this register table. */
1273         list_for_each_entry_safe(mem_region, n,
1274                         &reg->mem_region_list, reg_lnk) {
1275                 ion_free(pservice->ion_client, mem_region->hdl);
1276                 list_del_init(&mem_region->reg_lnk);
1277                 kfree(mem_region);
1278         }
1279
1280         kfree(reg);
1281 }
1282
1283 static void reg_from_wait_to_run(struct vpu_service_info *pservice,
1284                                  struct vpu_reg *reg)
1285 {
1286         vpu_debug_enter();
1287         list_del_init(&reg->status_link);
1288         list_add_tail(&reg->status_link, &pservice->running);
1289
1290         list_del_init(&reg->session_link);
1291         list_add_tail(&reg->session_link, &reg->session->running);
1292         vpu_debug_leave();
1293 }
1294
1295 static void reg_copy_from_hw(struct vpu_reg *reg, u32 *src, u32 count)
1296 {
1297         int i;
1298         u32 *dst = reg->reg;
1299
1300         vpu_debug_enter();
1301         for (i = 0; i < count; i++, src++)
1302                 *dst++ = readl_relaxed(src);
1303
1304         dst = (u32 *)&reg->reg[0];
1305         for (i = 0; i < count; i++)
1306                 vpu_debug(DEBUG_GET_REG, "get reg[%02d] %08x\n", i, dst[i]);
1307
1308         vpu_debug_leave();
1309 }
1310
1311 static void reg_from_run_to_done(struct vpu_subdev_data *data,
1312                                  struct vpu_reg *reg)
1313 {
1314         struct vpu_service_info *pservice = data->pservice;
1315         struct vpu_hw_info *hw_info = data->hw_info;
1316         struct vpu_task_info *task = reg->task;
1317
1318         vpu_debug_enter();
1319
1320         list_del_init(&reg->status_link);
1321         list_add_tail(&reg->status_link, &pservice->done);
1322
1323         list_del_init(&reg->session_link);
1324         list_add_tail(&reg->session_link, &reg->session->done);
1325
1326         switch (reg->type) {
1327         case VPU_ENC: {
1328                 pservice->reg_codec = NULL;
1329                 reg_copy_from_hw(reg, data->enc_dev.regs, hw_info->enc_reg_num);
1330                 reg->reg[task->reg_irq] = pservice->irq_status;
1331         } break;
1332         case VPU_DEC: {
1333                 pservice->reg_codec = NULL;
1334                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1335
1336                 /* revert hack for decoded length */
1337                 if (task->reg_len > 0) {
1338                         int reg_len = task->reg_len;
1339                         u32 dec_get = reg->reg[reg_len];
1340                         s32 dec_length = dec_get - reg->dec_base;
1341
1342                         vpu_debug(DEBUG_REGISTER,
1343                                   "dec_get %08x dec_length %d\n",
1344                                   dec_get, dec_length);
1345                         reg->reg[reg_len] = dec_length << 10;
1346                 }
1347
1348                 reg->reg[task->reg_irq] = pservice->irq_status;
1349         } break;
1350         case VPU_PP: {
1351                 pservice->reg_pproc = NULL;
1352                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1353                 writel_relaxed(0, data->dec_dev.regs + task->reg_irq);
1354         } break;
1355         case VPU_DEC_PP: {
1356                 u32 pipe_mode;
1357                 u32 *regs = data->dec_dev.regs;
1358
1359                 pservice->reg_codec = NULL;
1360                 pservice->reg_pproc = NULL;
1361
1362                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1363
1364                 /* NOTE: remove pp pipeline mode flag first */
1365                 pipe_mode = readl_relaxed(regs + task->reg_pipe);
1366                 pipe_mode &= ~task->pipe_mask;
1367                 writel_relaxed(pipe_mode, regs + task->reg_pipe);
1368
1369                 /* revert hack for decoded length */
1370                 if (task->reg_len > 0) {
1371                         int reg_len = task->reg_len;
1372                         u32 dec_get = reg->reg[reg_len];
1373                         s32 dec_length = dec_get - reg->dec_base;
1374
1375                         vpu_debug(DEBUG_REGISTER,
1376                                   "dec_get %08x dec_length %d\n",
1377                                   dec_get, dec_length);
1378                         reg->reg[reg_len] = dec_length << 10;
1379                 }
1380
1381                 reg->reg[task->reg_irq] = pservice->irq_status;
1382         } break;
1383         default: {
1384                 vpu_err("error: copy reg from hw with unknown type %d\n",
1385                         reg->type);
1386         } break;
1387         }
1388         vcodec_exit_mode(data);
1389
1390         atomic_sub(1, &reg->session->task_running);
1391         atomic_sub(1, &pservice->total_running);
1392         wake_up(&reg->session->wait);
1393
1394         vpu_debug_leave();
1395 }
1396
1397 static void vpu_service_set_freq(struct vpu_service_info *pservice,
1398                                  struct vpu_reg *reg)
1399 {
1400         enum VPU_FREQ curr = atomic_read(&pservice->freq_status);
1401
1402         if (curr == reg->freq)
1403                 return;
1404
1405         atomic_set(&pservice->freq_status, reg->freq);
1406         switch (reg->freq) {
1407         case VPU_FREQ_200M: {
1408                 clk_set_rate(pservice->aclk_vcodec, 200*MHZ);
1409         } break;
1410         case VPU_FREQ_266M: {
1411                 clk_set_rate(pservice->aclk_vcodec, 266*MHZ);
1412         } break;
1413         case VPU_FREQ_300M: {
1414                 clk_set_rate(pservice->aclk_vcodec, 300*MHZ);
1415         } break;
1416         case VPU_FREQ_400M: {
1417                 clk_set_rate(pservice->aclk_vcodec, 400*MHZ);
1418         } break;
1419         case VPU_FREQ_500M: {
1420                 clk_set_rate(pservice->aclk_vcodec, 500*MHZ);
1421         } break;
1422         case VPU_FREQ_600M: {
1423                 clk_set_rate(pservice->aclk_vcodec, 600*MHZ);
1424         } break;
1425         default: {
1426                 unsigned long rate = 300*MHZ;
1427
1428                 if (of_machine_is_compatible("rockchip,rk2928g"))
1429                         rate = 400*MHZ;
1430
1431                 clk_set_rate(pservice->aclk_vcodec, rate);
1432         } break;
1433         }
1434 }
1435
1436 static void reg_copy_to_hw(struct vpu_subdev_data *data, struct vpu_reg *reg)
1437 {
1438         struct vpu_service_info *pservice = data->pservice;
1439         struct vpu_task_info *task = reg->task;
1440         struct vpu_hw_info *hw_info = data->hw_info;
1441         int i;
1442         u32 *src = (u32 *)&reg->reg[0];
1443         u32 enable_mask = task->enable_mask;
1444         u32 gating_mask = task->gating_mask;
1445         u32 reg_en = task->reg_en;
1446
1447         vpu_debug_enter();
1448
1449         atomic_add(1, &pservice->total_running);
1450         atomic_add(1, &reg->session->task_running);
1451
1452         if (pservice->auto_freq)
1453                 vpu_service_set_freq(pservice, reg);
1454
1455         vcodec_enter_mode(data);
1456
1457         switch (reg->type) {
1458         case VPU_ENC: {
1459                 u32 *dst = data->enc_dev.regs;
1460                 u32 base = 0;
1461                 u32 end  = hw_info->enc_reg_num;
1462                 /* u32 reg_gating = task->reg_gating; */
1463
1464                 pservice->reg_codec = reg;
1465
1466                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1467                           base, end, reg_en, enable_mask, gating_mask);
1468
1469                 VEPU_CLEAN_CACHE(dst);
1470
1471                 if (debug & DEBUG_SET_REG)
1472                         for (i = base; i < end; i++)
1473                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1474                                           i, src[i]);
1475
1476                 /*
1477                  * NOTE: the encoder needs to set up its mode first
1478                  */
1479                 writel_relaxed(src[reg_en] & enable_mask, dst + reg_en);
1480
1481                 /* NOTE: encoder gating is not on the enable register */
1482                 /* src[reg_gating] |= gating_mask; */
1483
1484                 for (i = base; i < end; i++) {
1485                         if (i != reg_en)
1486                                 writel_relaxed(src[i], dst + i);
1487                 }
1488
1489                 writel(src[reg_en], dst + reg_en);
1490                 dsb(sy);
1491
1492                 time_record(reg->task, 0);
1493         } break;
1494         case VPU_DEC: {
1495                 u32 *dst = data->dec_dev.regs;
1496                 u32 len = hw_info->dec_reg_num;
1497                 u32 base = hw_info->base_dec;
1498                 u32 end  = hw_info->end_dec;
1499
1500                 pservice->reg_codec = reg;
1501
1502                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1503                           base, end, reg_en, enable_mask, gating_mask);
1504
1505                 VDPU_CLEAN_CACHE(dst);
1506
1507                 /* on rkvdec, set the cache to 64-byte mode */
1508                 if (pservice->dev_id == VCODEC_DEVICE_ID_RKVDEC) {
1509                         u32 *cache_base = dst + 0x100;
1510                         u32 val = (debug & DEBUG_CACHE_32B) ? (0x3) : (0x13);
1511                         writel_relaxed(val, cache_base + 0x07);
1512                         writel_relaxed(val, cache_base + 0x17);
1513                 }
1514
1515                 if (debug & DEBUG_SET_REG)
1516                         for (i = 0; i < len; i++)
1517                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1518                                           i, src[i]);
1519
1520                 /*
1521                  * NOTE: the end register is invalid, do NOT write to it.
1522                  *       The base register, however, must be written.
1523                  */
1524                 for (i = base; i < end; i++) {
1525                         if (i != reg_en)
1526                                 writel_relaxed(src[i], dst + i);
1527                 }
1528
1529                 writel(src[reg_en] | gating_mask, dst + reg_en);
1530                 dsb(sy);
1531
1532                 time_record(reg->task, 0);
1533         } break;
1534         case VPU_PP: {
1535                 u32 *dst = data->dec_dev.regs;
1536                 u32 base = hw_info->base_pp;
1537                 u32 end  = hw_info->end_pp;
1538
1539                 pservice->reg_pproc = reg;
1540
1541                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1542                           base, end, reg_en, enable_mask, gating_mask);
1543
1544                 if (debug & DEBUG_SET_REG)
1545                         for (i = base; i < end; i++)
1546                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1547                                           i, src[i]);
1548
1549                 for (i = base; i < end; i++) {
1550                         if (i != reg_en)
1551                                 writel_relaxed(src[i], dst + i);
1552                 }
1553
1554                 writel(src[reg_en] | gating_mask, dst + reg_en);
1555                 dsb(sy);
1556
1557                 time_record(reg->task, 0);
1558         } break;
1559         case VPU_DEC_PP: {
1560                 u32 *dst = data->dec_dev.regs;
1561                 u32 base = hw_info->base_dec_pp;
1562                 u32 end  = hw_info->end_dec_pp;
1563
1564                 pservice->reg_codec = reg;
1565                 pservice->reg_pproc = reg;
1566
1567                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1568                           base, end, reg_en, enable_mask, gating_mask);
1569
1570                 /* VDPU_SOFT_RESET(dst); */
1571                 VDPU_CLEAN_CACHE(dst);
1572
1573                 if (debug & DEBUG_SET_REG)
1574                         for (i = base; i < end; i++)
1575                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1576                                           i, src[i]);
1577
1578                 for (i = base; i < end; i++) {
1579                         if (i != reg_en)
1580                                 writel_relaxed(src[i], dst + i);
1581                 }
1582
1583                 /* NOTE: dec output must be disabled */
1584
1585                 writel(src[reg_en] | gating_mask, dst + reg_en);
1586                 dsb(sy);
1587
1588                 time_record(reg->task, 0);
1589         } break;
1590         default: {
1591                 vpu_err("error: unsupported session type %d\n", reg->type);
1592                 atomic_sub(1, &pservice->total_running);
1593                 atomic_sub(1, &reg->session->task_running);
1594         } break;
1595         }
1596
1597         vpu_debug_leave();
1598 }
1599
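/*
 * try_set_reg - schedule the next waiting register set, if any.
 *
 * Under shutdown_lock it checks that the service is still up, powers the
 * hardware on, and decides whether the head of the waiting list may run
 * given what already occupies the codec and post-processor (and whether
 * auto frequency scaling forbids mixing).  A pending reset request is
 * honoured only while the hardware is idle; once a task is cleared to run,
 * its register set is moved to the running list and programmed.
 */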
1600 static void try_set_reg(struct vpu_subdev_data *data)
1601 {
1602         struct vpu_service_info *pservice = data->pservice;
1603
1604         vpu_debug_enter();
1605
1606         mutex_lock(&pservice->shutdown_lock);
1607         if (atomic_read(&pservice->service_on) == 0) {
1608                 mutex_unlock(&pservice->shutdown_lock);
1609                 return;
1610         }
1611         if (!list_empty(&pservice->waiting)) {
1612                 struct vpu_reg *reg_codec = pservice->reg_codec;
1613                 struct vpu_reg *reg_pproc = pservice->reg_pproc;
1614                 int can_set = 0;
1615                 bool change_able = (reg_codec == NULL) && (reg_pproc == NULL);
1616                 int reset_request = atomic_read(&pservice->reset_request);
1617                 struct vpu_reg *reg = list_entry(pservice->waiting.next,
1618                                 struct vpu_reg, status_link);
1619
1620                 vpu_service_power_on(pservice);
1621
1622                 if (change_able || !reset_request) {
1623                         switch (reg->type) {
1624                         case VPU_ENC: {
1625                                 if (change_able)
1626                                         can_set = 1;
1627                         } break;
1628                         case VPU_DEC: {
1629                                 if (reg_codec == NULL)
1630                                         can_set = 1;
1631                                 if (pservice->auto_freq && (reg_pproc != NULL))
1632                                         can_set = 0;
1633                         } break;
1634                         case VPU_PP: {
1635                                 if (reg_codec == NULL) {
1636                                         if (reg_pproc == NULL)
1637                                                 can_set = 1;
1638                                 } else {
1639                                         if ((reg_codec->type == VPU_DEC) &&
1640                                             (reg_pproc == NULL))
1641                                                 can_set = 1;
1642
1643                                         /*
1644                                          * NOTE:
1645                                          * cannot change the frequency
1646                                          * while the vpu is working
1647                                          */
1648                                         if (pservice->auto_freq)
1649                                                 can_set = 0;
1650                                 }
1651                         } break;
1652                         case VPU_DEC_PP: {
1653                                 if (change_able)
1654                                         can_set = 1;
1655                         } break;
1656                         default: {
1657                                 pr_err("undefined reg type %d\n", reg->type);
1658                         } break;
1659                         }
1660                 }
1661
1662                 /* then check reset request */
1663                 if (reset_request && !change_able)
1664                         reset_request = 0;
1665
1666                 /* do reset before setting registers */
1667                 if (reset_request)
1668                         vpu_reset(data);
1669
1670                 if (can_set) {
1671                         reg_from_wait_to_run(pservice, reg);
1672                         reg_copy_to_hw(reg->data, reg);
1673                 }
1674         }
1675
1676         mutex_unlock(&pservice->shutdown_lock);
1677         vpu_debug_leave();
1678 }
1679
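/*
 * return_reg - copy the finished register image back to user space.
 *
 * Only the slice that matters for the task type is copied (starting at
 * offset 0 for the encoder, at the decoder or post-processor base
 * otherwise); the register set is then released with reg_deinit().
 */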
1680 static int return_reg(struct vpu_subdev_data *data,
1681                       struct vpu_reg *reg, u32 __user *dst)
1682 {
1683         struct vpu_hw_info *hw_info = data->hw_info;
1684         size_t size = reg->size;
1685         u32 base;
1686
1687         vpu_debug_enter();
1688         switch (reg->type) {
1689         case VPU_ENC: {
1690                 base = 0;
1691         } break;
1692         case VPU_DEC: {
1693                 base = hw_info->base_dec_pp;
1694         } break;
1695         case VPU_PP: {
1696                 base = hw_info->base_pp;
1697         } break;
1698         case VPU_DEC_PP: {
1699                 base = hw_info->base_dec_pp;
1700         } break;
1701         default: {
1702                 vpu_err("error: copy reg to user with unknown type %d\n",
1703                         reg->type);
1704                 return -EFAULT;
1705         } break;
1706         }
1707
1708         if (copy_to_user(dst, &reg->reg[base], size)) {
1709                 vpu_err("error: copy_to_user failed\n");
1710                 return -EFAULT;
1711         }
1712
1713         reg_deinit(data, reg);
1714         vpu_debug_leave();
1715         return 0;
1716 }
1717
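/*
 * vpu_service_ioctl - user space entry point of the codec service.
 *
 * A typical single-task sequence looks roughly like the sketch below
 * (illustrative only, the user-space variable names are made up):
 *
 *     ioctl(fd, VPU_IOC_SET_CLIENT_TYPE, VPU_DEC);
 *     struct vpu_request req = { .req = regs, .size = reg_bytes };
 *     ioctl(fd, VPU_IOC_SET_REG, &req);    queue the register set
 *     ioctl(fd, VPU_IOC_GET_REG, &req);    wait for and fetch the result
 *
 * VPU_IOC_GET_REG blocks for at most VPU_TIMEOUT_DELAY; on timeout the
 * service state is dumped, the session is cleared and the hardware is
 * reset.
 */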
1718 static long vpu_service_ioctl(struct file *filp, unsigned int cmd,
1719                               unsigned long arg)
1720 {
1721         struct vpu_subdev_data *data =
1722                 container_of(filp->f_path.dentry->d_inode->i_cdev,
1723                              struct vpu_subdev_data, cdev);
1724         struct vpu_service_info *pservice = data->pservice;
1725         struct vpu_session *session = (struct vpu_session *)filp->private_data;
1726
1727         vpu_debug_enter();
1728         if (NULL == session)
1729                 return -EINVAL;
1730
1731         switch (cmd) {
1732         case VPU_IOC_SET_CLIENT_TYPE: {
1733                 session->type = (enum VPU_CLIENT_TYPE)arg;
1734                 vpu_debug(DEBUG_IOCTL, "pid %d set client type %d\n",
1735                           session->pid, session->type);
1736         } break;
1737         case VPU_IOC_GET_HW_FUSE_STATUS: {
1738                 struct vpu_request req;
1739
1740                 vpu_debug(DEBUG_IOCTL, "pid %d get hw status %d\n",
1741                           session->pid, session->type);
1742                 if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
1743                         vpu_err("error: get hw status copy_from_user failed\n");
1744                         return -EFAULT;
1745                 } else {
1746                         void *config = (session->type != VPU_ENC) ?
1747                                        ((void *)&pservice->dec_config) :
1748                                        ((void *)&pservice->enc_config);
1749                         size_t size = (session->type != VPU_ENC) ?
1750                                       (sizeof(struct vpu_dec_config)) :
1751                                       (sizeof(struct vpu_enc_config));
1752                         if (copy_to_user((void __user *)req.req,
1753                                          config, size)) {
1754                                 vpu_err("error: get hw status copy_to_user failed type %d\n",
1755                                         session->type);
1756                                 return -EFAULT;
1757                         }
1758                 }
1759         } break;
1760         case VPU_IOC_SET_REG: {
1761                 struct vpu_request req;
1762                 struct vpu_reg *reg;
1763
1764                 vpu_debug(DEBUG_IOCTL, "pid %d set reg type %d\n",
1765                           session->pid, session->type);
1766                 if (copy_from_user(&req, (void __user *)arg,
1767                                    sizeof(struct vpu_request))) {
1768                         vpu_err("error: set reg copy_from_user failed\n");
1769                         return -EFAULT;
1770                 }
1771
1772                 reg = reg_init(data, session, (void __user *)req.req, req.size);
1773                 if (NULL == reg) {
1774                         return -EFAULT;
1775                 } else {
1776                         mutex_lock(&pservice->lock);
1777                         try_set_reg(data);
1778                         mutex_unlock(&pservice->lock);
1779                 }
1780         } break;
1781         case VPU_IOC_GET_REG: {
1782                 struct vpu_request req;
1783                 struct vpu_reg *reg;
1784                 int ret;
1785
1786                 vpu_debug(DEBUG_IOCTL, "pid %d get reg type %d\n",
1787                           session->pid, session->type);
1788                 if (copy_from_user(&req, (void __user *)arg,
1789                                    sizeof(struct vpu_request))) {
1790                         vpu_err("error: get reg copy_from_user failed\n");
1791                         return -EFAULT;
1792                 }
1793
1794                 ret = wait_event_timeout(session->wait,
1795                                          !list_empty(&session->done),
1796                                          VPU_TIMEOUT_DELAY);
1797
1798                 if (!list_empty(&session->done)) {
1799                         if (ret < 0)
1800                                 vpu_err("warning: pid %d wait task error ret %d\n",
1801                                         session->pid, ret);
1802                         ret = 0;
1803                 } else {
1804                         if (unlikely(ret < 0)) {
1805                                 vpu_err("error: pid %d wait task ret %d\n",
1806                                         session->pid, ret);
1807                         } else if (ret == 0) {
1808                                 vpu_err("error: pid %d wait %d task done timeout\n",
1809                                         session->pid,
1810                                         atomic_read(&session->task_running));
1811                                 ret = -ETIMEDOUT;
1812                         }
1813                 }
1814
1815                 if (ret < 0) {
1816                         int task_running = atomic_read(&session->task_running);
1817
1818                         mutex_lock(&pservice->lock);
1819                         vpu_service_dump(pservice);
1820                         if (task_running) {
1821                                 atomic_set(&session->task_running, 0);
1822                                 atomic_sub(task_running,
1823                                            &pservice->total_running);
1824                                 pr_err("%d task(s) still running but not returned, resetting hardware...",
1825                                        task_running);
1826                                 vpu_reset(data);
1827                                 pr_err("done\n");
1828                         }
1829                         vpu_service_session_clear(data, session);
1830                         mutex_unlock(&pservice->lock);
1831                         return ret;
1832                 }
1833
1834                 mutex_lock(&pservice->lock);
1835                 reg = list_entry(session->done.next,
1836                                  struct vpu_reg, session_link);
1837                 return_reg(data, reg, (u32 __user *)req.req);
1838                 mutex_unlock(&pservice->lock);
1839         } break;
1840         case VPU_IOC_PROBE_IOMMU_STATUS: {
1841                 int iommu_enable = 1;
1842
1843                 vpu_debug(DEBUG_IOCTL, "VPU_IOC_PROBE_IOMMU_STATUS\n");
1844
1845                 if (copy_to_user((void __user *)arg,
1846                                  &iommu_enable, sizeof(int))) {
1847                         vpu_err("error: iommu status copy_to_user failed\n");
1848                         return -EFAULT;
1849                 }
1850         } break;
1851         default: {
1852                 vpu_err("error: unknown vpu service ioctl cmd %x\n", cmd);
1853         } break;
1854         }
1855         vpu_debug_leave();
1856         return 0;
1857 }
1858
1859 #ifdef CONFIG_COMPAT
1860 static long compat_vpu_service_ioctl(struct file *filp, unsigned int cmd,
1861                                      unsigned long arg)
1862 {
1863         struct vpu_subdev_data *data =
1864                 container_of(filp->f_path.dentry->d_inode->i_cdev,
1865                              struct vpu_subdev_data, cdev);
1866         struct vpu_service_info *pservice = data->pservice;
1867         struct vpu_session *session = (struct vpu_session *)filp->private_data;
1868
1869         vpu_debug_enter();
1870         vpu_debug(3, "cmd %x, COMPAT_VPU_IOC_SET_CLIENT_TYPE %x\n", cmd,
1871                   (u32)COMPAT_VPU_IOC_SET_CLIENT_TYPE);
1872         if (NULL == session)
1873                 return -EINVAL;
1874
1875         switch (cmd) {
1876         case COMPAT_VPU_IOC_SET_CLIENT_TYPE: {
1877                 session->type = (enum VPU_CLIENT_TYPE)arg;
1878                 vpu_debug(DEBUG_IOCTL, "compat set client type %d\n",
1879                           session->type);
1880         } break;
1881         case COMPAT_VPU_IOC_GET_HW_FUSE_STATUS: {
1882                 struct compat_vpu_request req;
1883
1884                 vpu_debug(DEBUG_IOCTL, "compat get hw status %d\n",
1885                           session->type);
1886                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1887                                    sizeof(struct compat_vpu_request))) {
1888                         vpu_err("error: compat get hw status copy_from_user failed\n");
1889                         return -EFAULT;
1890                 } else {
1891                         void *config = (session->type != VPU_ENC) ?
1892                                        ((void *)&pservice->dec_config) :
1893                                        ((void *)&pservice->enc_config);
1894                         size_t size = (session->type != VPU_ENC) ?
1895                                       (sizeof(struct vpu_dec_config)) :
1896                                       (sizeof(struct vpu_enc_config));
1897
1898                         if (copy_to_user(compat_ptr((compat_uptr_t)req.req),
1899                                          config, size)) {
1900                                 vpu_err("error: compat get hw status copy_to_user failed type %d\n",
1901                                         session->type);
1902                                 return -EFAULT;
1903                         }
1904                 }
1905         } break;
1906         case COMPAT_VPU_IOC_SET_REG: {
1907                 struct compat_vpu_request req;
1908                 struct vpu_reg *reg;
1909
1910                 vpu_debug(DEBUG_IOCTL, "compat set reg type %d\n",
1911                           session->type);
1912                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1913                                    sizeof(struct compat_vpu_request))) {
1914                         vpu_err("compat set_reg copy_from_user failed\n");
1915                         return -EFAULT;
1916                 }
1917                 reg = reg_init(data, session,
1918                                compat_ptr((compat_uptr_t)req.req), req.size);
1919                 if (NULL == reg) {
1920                         return -EFAULT;
1921                 } else {
1922                         mutex_lock(&pservice->lock);
1923                         try_set_reg(data);
1924                         mutex_unlock(&pservice->lock);
1925                 }
1926         } break;
1927         case COMPAT_VPU_IOC_GET_REG: {
1928                 struct compat_vpu_request req;
1929                 struct vpu_reg *reg;
1930                 int ret;
1931
1932                 vpu_debug(DEBUG_IOCTL, "compat get reg type %d\n",
1933                           session->type);
1934                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1935                                    sizeof(struct compat_vpu_request))) {
1936                         vpu_err("compat get reg copy_from_user failed\n");
1937                         return -EFAULT;
1938                 }
1939
1940                 ret = wait_event_timeout(session->wait,
1941                                          !list_empty(&session->done),
1942                                          VPU_TIMEOUT_DELAY);
1943
1944                 if (!list_empty(&session->done)) {
1945                         if (ret < 0)
1946                                 vpu_err("warning: pid %d wait task error ret %d\n",
1947                                         session->pid, ret);
1948                         ret = 0;
1949                 } else {
1950                         if (unlikely(ret < 0)) {
1951                                 vpu_err("error: pid %d wait task ret %d\n",
1952                                         session->pid, ret);
1953                         } else if (ret == 0) {
1954                                 vpu_err("error: pid %d wait %d task done timeout\n",
1955                                         session->pid,
1956                                         atomic_read(&session->task_running));
1957                                 ret = -ETIMEDOUT;
1958                         }
1959                 }
1960
1961                 if (ret < 0) {
1962                         int task_running = atomic_read(&session->task_running);
1963
1964                         mutex_lock(&pservice->lock);
1965                         vpu_service_dump(pservice);
1966                         if (task_running) {
1967                                 atomic_set(&session->task_running, 0);
1968                                 atomic_sub(task_running,
1969                                            &pservice->total_running);
1970                                 pr_err("%d task(s) still running but not returned, resetting hardware...",
1971                                        task_running);
1972                                 vpu_reset(data);
1973                                 pr_err("done\n");
1974                         }
1975                         vpu_service_session_clear(data, session);
1976                         mutex_unlock(&pservice->lock);
1977                         return ret;
1978                 }
1979
1980                 mutex_lock(&pservice->lock);
1981                 reg = list_entry(session->done.next,
1982                                  struct vpu_reg, session_link);
1983                 return_reg(data, reg, compat_ptr((compat_uptr_t)req.req));
1984                 mutex_unlock(&pservice->lock);
1985         } break;
1986         case COMPAT_VPU_IOC_PROBE_IOMMU_STATUS: {
1987                 int iommu_enable = 1;
1988
1989                 vpu_debug(DEBUG_IOCTL, "COMPAT_VPU_IOC_PROBE_IOMMU_STATUS\n");
1990
1991                 if (copy_to_user(compat_ptr((compat_uptr_t)arg),
1992                                  &iommu_enable, sizeof(int))) {
1993                         vpu_err("error: VPU_IOC_PROBE_IOMMU_STATUS copy_to_user failed\n");
1994                         return -EFAULT;
1995                 }
1996         } break;
1997         default: {
1998                 vpu_err("error: unknown vpu service ioctl cmd %x\n", cmd);
1999         } break;
2000         }
2001         vpu_debug_leave();
2002         return 0;
2003 }
2004 #endif
2005
2006 static int vpu_service_check_hw(struct vpu_subdev_data *data)
2007 {
2008         int ret = -EINVAL, i = 0;
2009         u32 hw_id = readl_relaxed(data->regs);
2010
2011         hw_id = (hw_id >> 16) & 0xFFFF;
2012         pr_info("checking hw id %x\n", hw_id);
2013         data->hw_info = NULL;
2014         for (i = 0; i < ARRAY_SIZE(vcodec_info_set); i++) {
2015                 struct vcodec_info *info = &vcodec_info_set[i];
2016
2017                 if (hw_id == info->hw_id) {
2018                         data->hw_id = info->hw_id;
2019                         data->hw_info = info->hw_info;
2020                         data->task_info = info->task_info;
2021                         data->trans_info = info->trans_info;
2022                         ret = 0;
2023                         break;
2024                 }
2025         }
2026         return ret;
2027 }
2028
2029 static int vpu_service_open(struct inode *inode, struct file *filp)
2030 {
2031         struct vpu_subdev_data *data = container_of(
2032                         inode->i_cdev, struct vpu_subdev_data, cdev);
2033         struct vpu_service_info *pservice = data->pservice;
2034         struct vpu_session *session = kmalloc(sizeof(*session), GFP_KERNEL);
2035
2036         vpu_debug_enter();
2037
2038         if (NULL == session) {
2039                 vpu_err("error: unable to allocate memory for vpu_session\n");
2040                 return -ENOMEM;
2041         }
2042
2043         session->type   = VPU_TYPE_BUTT;
2044         session->pid    = current->pid;
2045         INIT_LIST_HEAD(&session->waiting);
2046         INIT_LIST_HEAD(&session->running);
2047         INIT_LIST_HEAD(&session->done);
2048         INIT_LIST_HEAD(&session->list_session);
2049         init_waitqueue_head(&session->wait);
2050         atomic_set(&session->task_running, 0);
2051         mutex_lock(&pservice->lock);
2052         list_add_tail(&session->list_session, &pservice->session);
2053         filp->private_data = (void *)session;
2054         mutex_unlock(&pservice->lock);
2055
2056         pr_debug("dev opened\n");
2057         vpu_debug_leave();
2058         return nonseekable_open(inode, filp);
2059 }
2060
2061 static int vpu_service_release(struct inode *inode, struct file *filp)
2062 {
2063         struct vpu_subdev_data *data = container_of(
2064                         inode->i_cdev, struct vpu_subdev_data, cdev);
2065         struct vpu_service_info *pservice = data->pservice;
2066         int task_running;
2067         struct vpu_session *session = (struct vpu_session *)filp->private_data;
2068
2069         vpu_debug_enter();
2070         if (NULL == session)
2071                 return -EINVAL;
2072
2073         task_running = atomic_read(&session->task_running);
2074         if (task_running) {
2075                 pr_err("error: session %d still has %d task(s) running when closing\n",
2076                        session->pid, task_running);
2077                 msleep(50);
2078         }
2079         wake_up(&session->wait);
2080
2081         mutex_lock(&pservice->lock);
2082         /* remove this filp from the asynchronously notified filps */
2083         list_del_init(&session->list_session);
2084         vpu_service_session_clear(data, session);
2085         kfree(session);
2086         filp->private_data = NULL;
2087         mutex_unlock(&pservice->lock);
2088
2089         pr_debug("dev closed\n");
2090         vpu_debug_leave();
2091         return 0;
2092 }
2093
2094 static const struct file_operations vpu_service_fops = {
2095         .unlocked_ioctl = vpu_service_ioctl,
2096         .open           = vpu_service_open,
2097         .release        = vpu_service_release,
2098 #ifdef CONFIG_COMPAT
2099         .compat_ioctl   = compat_vpu_service_ioctl,
2100 #endif
2101 };
2102
2103 static irqreturn_t vdpu_irq(int irq, void *dev_id);
2104 static irqreturn_t vdpu_isr(int irq, void *dev_id);
2105 static irqreturn_t vepu_irq(int irq, void *dev_id);
2106 static irqreturn_t vepu_isr(int irq, void *dev_id);
2107 static void get_hw_info(struct vpu_subdev_data *data);
2108
2109 static struct device *rockchip_get_sysmmu_dev(const char *compt)
2110 {
2111         struct device_node *dn = NULL;
2112         struct platform_device *pd = NULL;
2113         struct device *ret = NULL;
2114
2115         dn = of_find_compatible_node(NULL, NULL, compt);
2116         if (!dn) {
2117                 pr_err("can't find device node %s\n", compt);
2118                 return NULL;
2119         }
2120
2121         pd = of_find_device_by_node(dn);
2122         if (!pd) {
2123                 pr_err("can't find platform device in device node %s\n", compt);
2124                 return  NULL;
2125         }
2126         ret = &pd->dev;
2127
2128         return ret;
2129 }
2130
2131 #ifdef CONFIG_IOMMU_API
2132 static inline void platform_set_sysmmu(struct device *iommu,
2133                                        struct device *dev)
2134 {
2135         dev->archdata.iommu = iommu;
2136 }
2137 #else
2138 static inline void platform_set_sysmmu(struct device *iommu,
2139                                        struct device *dev)
2140 {
2141 }
2142 #endif
2143
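/*
 * vcodec_sysmmu_fault_hdl - IOMMU page fault handler for the codec.
 *
 * Registered through rockchip_iovmm_set_fault_handler().  When a fault
 * hits while a codec task is active it prints the fault address, every
 * memory region mapped for the active register set and the current
 * hardware registers, then resets the hardware.
 */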
2144 int vcodec_sysmmu_fault_hdl(struct device *dev,
2145                             enum rk_iommu_inttype itype,
2146                             unsigned long pgtable_base,
2147                             unsigned long fault_addr, unsigned int status)
2148 {
2149         struct platform_device *pdev;
2150         struct vpu_service_info *pservice;
2151         struct vpu_subdev_data *data;
2152
2153         vpu_debug_enter();
2154
2155         if (dev == NULL) {
2156                 pr_err("invalid NULL dev\n");
2157                 return 0;
2158         }
2159
2160         pdev = container_of(dev, struct platform_device, dev);
2161         if (pdev == NULL) {
2162                 pr_err("invalid NULL platform_device\n");
2163                 return 0;
2164         }
2165
2166         data = platform_get_drvdata(pdev);
2167         if (data == NULL) {
2168                 pr_err("invalid NULL vpu_subdev_data\n");
2169                 return 0;
2170         }
2171
2172         pservice = data->pservice;
2173         if (pservice == NULL) {
2174                 pr_err("invalid NULL vpu_service_info\n");
2175                 return 0;
2176         }
2177
2178         if (pservice->reg_codec) {
2179                 struct vpu_reg *reg = pservice->reg_codec;
2180                 struct vcodec_mem_region *mem, *n;
2181                 int i = 0;
2182
2183                 pr_err("vcodec, fault addr 0x%08lx\n", fault_addr);
2184                 if (!list_empty(&reg->mem_region_list)) {
2185                         list_for_each_entry_safe(mem, n, &reg->mem_region_list,
2186                                                  reg_lnk) {
2187                                 pr_err("vcodec, reg[%02u] mem region [%02d] 0x%lx %lx\n",
2188                                        mem->reg_idx, i, mem->iova, mem->len);
2189                                 i++;
2190                         }
2191                 } else {
2192                         pr_err("no memory region mapped\n");
2193                 }
2194
2195                 if (reg->data) {
2196                         struct vpu_subdev_data *data = reg->data;
2197                         u32 *base = (u32 *)data->dec_dev.regs;
2198                         u32 len = data->hw_info->dec_reg_num;
2199
2200                         pr_err("current error register set:\n");
2201
2202                         for (i = 0; i < len; i++)
2203                                 pr_err("reg[%02d] %08x\n",
2204                                        i, readl_relaxed(base + i));
2205                 }
2206
2207                 pr_alert("vcodec, page fault occur, reset hw\n");
2208
2209                 /* reg->reg[101] = 1; */
2210                 vpu_reset(data);
2211         }
2212
2213         return 0;
2214 }
2215
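/*
 * vcodec_subdev_probe - bring up one codec sub-device.
 *
 * Maps the register window (or reuses the combo base), identifies the IP
 * by its hardware ID register, installs the encoder/decoder threaded IRQ
 * handlers, optionally attaches the IOMMU and its fault handler, and
 * finally creates the character device, class and debugfs entries.
 */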
2216 static int vcodec_subdev_probe(struct platform_device *pdev,
2217                                struct vpu_service_info *pservice)
2218 {
2219         int ret = 0;
2220         struct resource *res = NULL;
2221         u32 ioaddr = 0;
2222         u8 *regs = NULL;
2223         struct vpu_hw_info *hw_info = NULL;
2224         struct device *dev = &pdev->dev;
2225         char *name = (char *)dev_name(dev);
2226         struct device_node *np = pdev->dev.of_node;
2227         struct vpu_subdev_data *data =
2228                 devm_kzalloc(dev, sizeof(struct vpu_subdev_data), GFP_KERNEL);
2229         u32 iommu_en = 0;
2230         char mmu_dev_dts_name[40];
2231
2232         of_property_read_u32(np, "iommu_enabled", &iommu_en);
2233
2234         pr_info("probe device %s\n", dev_name(dev));
2235
2236         data->pservice = pservice;
2237         data->dev = dev;
2238
2239         of_property_read_string(np, "name", (const char **)&name);
2240         of_property_read_u32(np, "dev_mode", (u32 *)&data->mode);
2241
2242         if (pservice->reg_base == 0) {
2243                 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2244                 data->regs = devm_ioremap_resource(dev, res);
2245                 if (IS_ERR(data->regs)) {
2246                         ret = PTR_ERR(data->regs);
2247                         goto err;
2248                 }
2249                 ioaddr = res->start;
2250         } else {
2251                 data->regs = pservice->reg_base;
2252                 ioaddr = pservice->ioaddr;
2253         }
2254
2255         clear_bit(MMU_ACTIVATED, &data->state);
2256         vcodec_enter_mode(data);
2257
2258         vpu_service_power_on(pservice);
2259         ret = vpu_service_check_hw(data);
2260         if (ret < 0) {
2261                 vpu_err("error: hw info check failed\n");
2262                 goto err;
2263         }
2264
2265         hw_info = data->hw_info;
2266         regs = (u8 *)data->regs;
2267
2268         if (hw_info->dec_reg_num) {
2269                 data->dec_dev.iosize = hw_info->dec_io_size;
2270                 data->dec_dev.regs = (u32 *)(regs + hw_info->dec_offset);
2271         }
2272
2273         if (hw_info->enc_reg_num) {
2274                 data->enc_dev.iosize = hw_info->enc_io_size;
2275                 data->enc_dev.regs = (u32 *)(regs + hw_info->enc_offset);
2276         }
2277
2278         data->reg_size = max(hw_info->dec_io_size, hw_info->enc_io_size);
2279
2280         data->irq_enc = platform_get_irq_byname(pdev, "irq_enc");
2281         if (data->irq_enc > 0) {
2282                 ret = devm_request_threaded_irq(dev, data->irq_enc,
2283                                                 vepu_irq, vepu_isr,
2284                                                 IRQF_SHARED, dev_name(dev),
2285                                                 (void *)data);
2286                 if (ret) {
2287                         dev_err(dev, "error: can't request vepu irq %d\n",
2288                                 data->irq_enc);
2289                         goto err;
2290                 }
2291         }
2292         data->irq_dec = platform_get_irq_byname(pdev, "irq_dec");
2293         if (data->irq_dec > 0) {
2294                 ret = devm_request_threaded_irq(dev, data->irq_dec,
2295                                                 vdpu_irq, vdpu_isr,
2296                                                 IRQF_SHARED, dev_name(dev),
2297                                                 (void *)data);
2298                 if (ret) {
2299                         dev_err(dev, "error: can't request vdpu irq %d\n",
2300                                 data->irq_dec);
2301                         goto err;
2302                 }
2303         }
2304         atomic_set(&data->dec_dev.irq_count_codec, 0);
2305         atomic_set(&data->dec_dev.irq_count_pp, 0);
2306         atomic_set(&data->enc_dev.irq_count_codec, 0);
2307         atomic_set(&data->enc_dev.irq_count_pp, 0);
2308
2309         if (iommu_en) {
2310                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
2311                         sprintf(mmu_dev_dts_name,
2312                                 HEVC_IOMMU_COMPATIBLE_NAME);
2313                 else if (data->mode == VCODEC_RUNNING_MODE_VPU)
2314                         sprintf(mmu_dev_dts_name,
2315                                 VPU_IOMMU_COMPATIBLE_NAME);
2316                 else if (data->mode == VCODEC_RUNNING_MODE_RKVDEC)
2317                         sprintf(mmu_dev_dts_name, VDEC_IOMMU_COMPATIBLE_NAME);
2318                 else
2319                         sprintf(mmu_dev_dts_name,
2320                                 HEVC_IOMMU_COMPATIBLE_NAME);
2321
2322                 data->mmu_dev =
2323                         rockchip_get_sysmmu_dev(mmu_dev_dts_name);
2324
2325                 if (data->mmu_dev)
2326                         platform_set_sysmmu(data->mmu_dev, dev);
2327
2328                 rockchip_iovmm_set_fault_handler(dev, vcodec_sysmmu_fault_hdl);
2329         }
2330
2331         get_hw_info(data);
2332         pservice->auto_freq = true;
2333
2334         vcodec_exit_mode(data);
2335         /* create device node */
2336         ret = alloc_chrdev_region(&data->dev_t, 0, 1, name);
2337         if (ret) {
2338                 dev_err(dev, "alloc dev_t failed\n");
2339                 goto err;
2340         }
2341
2342         cdev_init(&data->cdev, &vpu_service_fops);
2343
2344         data->cdev.owner = THIS_MODULE;
2345         data->cdev.ops = &vpu_service_fops;
2346
2347         ret = cdev_add(&data->cdev, data->dev_t, 1);
2348
2349         if (ret) {
2350                 dev_err(dev, "add dev_t failed\n");
2351                 goto err;
2352         }
2353
2354         data->cls = class_create(THIS_MODULE, name);
2355
2356         if (IS_ERR(data->cls)) {
2357                 ret = PTR_ERR(data->cls);
2358                 dev_err(dev, "class_create err:%d\n", ret);
2359                 goto err;
2360         }
2361
2362         data->child_dev = device_create(data->cls, dev,
2363                 data->dev_t, "%s", name);
2364
2365         platform_set_drvdata(pdev, data);
2366
2367         INIT_LIST_HEAD(&data->lnk_service);
2368         list_add_tail(&data->lnk_service, &pservice->subdev_list);
2369
2370 #ifdef CONFIG_DEBUG_FS
2371         data->debugfs_dir = vcodec_debugfs_create_device_dir(name, parent);
2372         if (!IS_ERR_OR_NULL(data->debugfs_dir))
2373                 data->debugfs_file_regs =
2374                         debugfs_create_file("regs", 0664, data->debugfs_dir,
2375                                         data, &debug_vcodec_fops);
2376         else
2377                 vpu_err("create debugfs dir %s failed\n", name);
2378 #endif
2379         return 0;
2380 err:
2381         if (data->child_dev) {
2382                 device_destroy(data->cls, data->dev_t);
2383                 cdev_del(&data->cdev);
2384                 unregister_chrdev_region(data->dev_t, 1);
2385         }
2386
2387         if (data->cls)
2388                 class_destroy(data->cls);
2389         return -1;
2390 }
2391
2392 static void vcodec_subdev_remove(struct vpu_subdev_data *data)
2393 {
2394         struct vpu_service_info *pservice = data->pservice;
2395
2396         mutex_lock(&pservice->lock);
2397         cancel_delayed_work_sync(&pservice->power_off_work);
2398         vpu_service_power_off(pservice);
2399         mutex_unlock(&pservice->lock);
2400
2401         device_destroy(data->cls, data->dev_t);
2402         class_destroy(data->cls);
2403         cdev_del(&data->cdev);
2404         unregister_chrdev_region(data->dev_t, 1);
2405
2406 #ifdef CONFIG_DEBUG_FS
2407         if (!IS_ERR_OR_NULL(data->debugfs_dir))
2408                 debugfs_remove_recursive(data->debugfs_dir);
2409 #endif
2410 }
2411
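/*
 * vcodec_read_property - parse the service properties from the DT node:
 * sub-device count and combo mode selection bits, the GRF regmap (falling
 * back to the static GRF mapping on ARM without syscon), the three reset
 * controls and the device name.
 */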
2412 static void vcodec_read_property(struct device_node *np,
2413                                  struct vpu_service_info *pservice)
2414 {
2415         pservice->mode_bit = 0;
2416         pservice->mode_ctrl = 0;
2417         pservice->subcnt = 0;
2418         pservice->grf_base = NULL;
2419
2420         of_property_read_u32(np, "subcnt", &pservice->subcnt);
2421
2422         if (pservice->subcnt > 1) {
2423                 of_property_read_u32(np, "mode_bit", &pservice->mode_bit);
2424                 of_property_read_u32(np, "mode_ctrl", &pservice->mode_ctrl);
2425         }
2426 #ifdef CONFIG_MFD_SYSCON
2427         pservice->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
2428         if (IS_ERR_OR_NULL(pservice->grf)) {
2429                 pservice->grf = NULL;
2430 #ifdef CONFIG_ARM
2431                 pservice->grf_base = RK_GRF_VIRT;
2432 #else
2433                 vpu_err("can't find vpu grf property\n");
2434                 return;
2435 #endif
2436         }
2437 #else
2438 #ifdef CONFIG_ARM
2439         pservice->grf_base = RK_GRF_VIRT;
2440 #else
2441         vpu_err("can't find vpu grf property\n");
2442         return;
2443 #endif
2444 #endif
2445
2446 #ifdef CONFIG_RESET_CONTROLLER
2447         pservice->rst_a = devm_reset_control_get(pservice->dev, "video_a");
2448         pservice->rst_h = devm_reset_control_get(pservice->dev, "video_h");
2449         pservice->rst_v = devm_reset_control_get(pservice->dev, "video");
2450
2451         if (IS_ERR_OR_NULL(pservice->rst_a)) {
2452                 pr_warn("No aclk reset resource defined\n");
2453                 pservice->rst_a = NULL;
2454         }
2455
2456         if (IS_ERR_OR_NULL(pservice->rst_h)) {
2457                 pr_warn("No hclk reset resource defined\n");
2458                 pservice->rst_h = NULL;
2459         }
2460
2461         if (IS_ERR_OR_NULL(pservice->rst_v)) {
2462                 pr_warn("No core reset resource defined\n");
2463                 pservice->rst_v = NULL;
2464         }
2465 #endif
2466
2467         of_property_read_string(np, "name", (const char **)&pservice->name);
2468 }
2469
2470 static void vcodec_init_drvdata(struct vpu_service_info *pservice)
2471 {
2472         pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2473         pservice->curr_mode = -1;
2474
2475         wake_lock_init(&pservice->wake_lock, WAKE_LOCK_SUSPEND, "vpu");
2476         INIT_LIST_HEAD(&pservice->waiting);
2477         INIT_LIST_HEAD(&pservice->running);
2478         mutex_init(&pservice->lock);
2479         mutex_init(&pservice->shutdown_lock);
2480         atomic_set(&pservice->service_on, 1);
2481
2482         INIT_LIST_HEAD(&pservice->done);
2483         INIT_LIST_HEAD(&pservice->session);
2484         INIT_LIST_HEAD(&pservice->subdev_list);
2485
2486         pservice->reg_pproc     = NULL;
2487         atomic_set(&pservice->total_running, 0);
2488         atomic_set(&pservice->enabled,       0);
2489         atomic_set(&pservice->power_on_cnt,  0);
2490         atomic_set(&pservice->power_off_cnt, 0);
2491         atomic_set(&pservice->reset_request, 0);
2492
2493         INIT_DELAYED_WORK(&pservice->power_off_work, vpu_power_off_work);
2494         pservice->last.tv64 = 0;
2495
2496         pservice->ion_client = rockchip_ion_client_create("vpu");
2497         if (IS_ERR(pservice->ion_client)) {
2498                 vpu_err("failed to create ion client for vcodec ret %ld\n",
2499                         PTR_ERR(pservice->ion_client));
2500         } else {
2501                 vpu_debug(DEBUG_IOMMU, "vcodec ion client create success!\n");
2502         }
2503 }
2504
2505 static int vcodec_probe(struct platform_device *pdev)
2506 {
2507         int i;
2508         int ret = 0;
2509         struct resource *res = NULL;
2510         struct device *dev = &pdev->dev;
2511         struct device_node *np = pdev->dev.of_node;
2512         struct vpu_service_info *pservice =
2513                 devm_kzalloc(dev, sizeof(struct vpu_service_info), GFP_KERNEL);
2514
2515         pservice->dev = dev;
2516
2517         vcodec_read_property(np, pservice);
2518         vcodec_init_drvdata(pservice);
2519
2520         if (strncmp(pservice->name, "hevc_service", 12) == 0)
2521                 pservice->dev_id = VCODEC_DEVICE_ID_HEVC;
2522         else if (strncmp(pservice->name, "vpu_service", 11) == 0)
2523                 pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2524         else if (strncmp(pservice->name, "rkvdec", 6) == 0)
2525                 pservice->dev_id = VCODEC_DEVICE_ID_RKVDEC;
2526         else
2527                 pservice->dev_id = VCODEC_DEVICE_ID_COMBO;
2528
2529         ret = vpu_get_clk(pservice);
2530         if (ret < 0)
                goto err;
2531
2532         if (of_property_read_bool(np, "reg")) {
2533                 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2534
2535                 pservice->reg_base = devm_ioremap_resource(pservice->dev, res);
2536                 if (IS_ERR(pservice->reg_base)) {
2537                         vpu_err("ioremap registers base failed\n");
2538                         ret = PTR_ERR(pservice->reg_base);
2539                         goto err;
2540                 }
2541                 pservice->ioaddr = res->start;
2542         } else {
2543                 pservice->reg_base = 0;
2544         }
2545
2546         pm_runtime_enable(dev);
2547
2548         if (of_property_read_bool(np, "subcnt")) {
2549                 for (i = 0; i < pservice->subcnt; i++) {
2550                         struct device_node *sub_np;
2551                         struct platform_device *sub_pdev;
2552
2553                         sub_np = of_parse_phandle(np, "rockchip,sub", i);
2554                         sub_pdev = of_find_device_by_node(sub_np);
2555
2556                         vcodec_subdev_probe(sub_pdev, pservice);
2557                 }
2558         } else {
2559                 vcodec_subdev_probe(pdev, pservice);
2560         }
2561
2562         vpu_service_power_off(pservice);
2563
2564         pr_info("init success\n");
2565
2566         return 0;
2567
2568 err:
2569         pr_info("init failed\n");
2570         vpu_service_power_off(pservice);
2571         wake_lock_destroy(&pservice->wake_lock);
2572
2573         return ret;
2574 }
2575
2576 static int vcodec_remove(struct platform_device *pdev)
2577 {
2578         struct vpu_subdev_data *data = platform_get_drvdata(pdev);
2579
2580         vcodec_subdev_remove(data);
2581
2582         pm_runtime_disable(data->pservice->dev);
2583
2584         return 0;
2585 }
2586
2587 static void vcodec_shutdown(struct platform_device *pdev)
2588 {
2589         struct vpu_subdev_data *data = platform_get_drvdata(pdev);
2590         struct vpu_service_info *pservice = data->pservice;
2591
2592         dev_info(&pdev->dev, "%s IN\n", __func__);
2593
2594         mutex_lock(&pservice->shutdown_lock);
2595         atomic_set(&pservice->service_on, 0);
2596         mutex_unlock(&pservice->shutdown_lock);
2597
2598         vcodec_exit_mode(data);
2599
2600         vpu_service_clear(data);
2601         vcodec_subdev_remove(data);
2602
2603         pm_runtime_disable(&pdev->dev);
2604 }
2605
2606 #if defined(CONFIG_OF)
2607 static const struct of_device_id vcodec_service_dt_ids[] = {
2608         {.compatible = "rockchip,vpu_service",},
2609         {.compatible = "rockchip,hevc_service",},
2610         {.compatible = "rockchip,vpu_combo",},
2611         {.compatible = "rockchip,rkvdec",},
2612         {},
2613 };
2614 #endif
2615
2616 static struct platform_driver vcodec_driver = {
2617         .probe = vcodec_probe,
2618         .remove = vcodec_remove,
2619         .shutdown = vcodec_shutdown,
2620         .driver = {
2621                 .name = "vcodec",
2622                 .owner = THIS_MODULE,
2623 #if defined(CONFIG_OF)
2624                 .of_match_table = of_match_ptr(vcodec_service_dt_ids),
2625 #endif
2626         },
2627 };
2628
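/*
 * get_hw_info - fill in the decoder/encoder capability structures that are
 * reported to user space through VPU_IOC_GET_HW_FUSE_STATUS.
 *
 * The maximum decode width depends on the SoC (1920 on the older chips,
 * 4096 otherwise).  In VPU mode the encoder configuration register is also
 * read back and automatic frequency scaling is enabled; RKVDEC gets the
 * auto frequency handling only, while HEVC keeps frequency switching off.
 */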
2629 static void get_hw_info(struct vpu_subdev_data *data)
2630 {
2631         struct vpu_service_info *pservice = data->pservice;
2632         struct vpu_dec_config *dec = &pservice->dec_config;
2633         struct vpu_enc_config *enc = &pservice->enc_config;
2634
2635         if (of_machine_is_compatible("rockchip,rk2928") ||
2636                         of_machine_is_compatible("rockchip,rk3036") ||
2637                         of_machine_is_compatible("rockchip,rk3066") ||
2638                         of_machine_is_compatible("rockchip,rk3126") ||
2639                         of_machine_is_compatible("rockchip,rk3188"))
2640                 dec->max_dec_pic_width = 1920;
2641         else
2642                 dec->max_dec_pic_width = 4096;
2643
2644         if (data->mode == VCODEC_RUNNING_MODE_VPU) {
2645                 dec->h264_support = 3;
2646                 dec->jpeg_support = 1;
2647                 dec->mpeg4_support = 2;
2648                 dec->vc1_support = 3;
2649                 dec->mpeg2_support = 1;
2650                 dec->pp_support = 1;
2651                 dec->sorenson_support = 1;
2652                 dec->ref_buf_support = 3;
2653                 dec->vp6_support = 1;
2654                 dec->vp7_support = 1;
2655                 dec->vp8_support = 1;
2656                 dec->avs_support = 1;
2657                 dec->jpeg_ext_support = 0;
2658                 dec->custom_mpeg4_support = 1;
2659                 dec->reserve = 0;
2660                 dec->mvc_support = 1;
2661
2662                 if (!of_machine_is_compatible("rockchip,rk3036")) {
2663                         u32 config_reg = readl_relaxed(data->enc_dev.regs + 63);
2664
2665                         enc->max_encoded_width = config_reg & ((1 << 11) - 1);
2666                         enc->h264_enabled = 1;
2667                         enc->mpeg4_enabled = (config_reg >> 26) & 1;
2668                         enc->jpeg_enabled = 1;
2669                         enc->vs_enabled = (config_reg >> 24) & 1;
2670                         enc->rgb_enabled = (config_reg >> 28) & 1;
2671                         enc->reg_size = data->reg_size;
2672                         enc->reserv[0] = 0;
2673                         enc->reserv[1] = 0;
2674                 }
2675
2676                 pservice->auto_freq = true;
2677                 vpu_debug(DEBUG_EXTRA_INFO, "vpu_service set to auto frequency mode\n");
2678                 atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
2679
2680                 pservice->bug_dec_addr = of_machine_is_compatible
2681                         ("rockchip,rk30xx");
2682         } else if (data->mode == VCODEC_RUNNING_MODE_RKVDEC) {
2683                 pservice->auto_freq = true;
2684                 atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
2685         } else {
2686                 /* disable frequency switching for hevc */
2687                 pservice->auto_freq = false;
2688         }
2689 }
2690
2691 static bool check_irq_err(struct vpu_task_info *task, u32 irq_status)
2692 {
2693         vpu_debug(DEBUG_IRQ_CHECK, "task %s status %08x mask %08x\n",
2694                   task->name, irq_status, task->error_mask);
2695
2696         return (task->error_mask & irq_status) ? true : false;
2697 }
2698
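/*
 * vdpu_irq - decoder hard IRQ handler.
 *
 * Reads and acknowledges the decoder interrupt, waits out the transient
 * 0x40001 state, requests a hardware reset when error bits are set, issues
 * the per-task soft reset on rkvdec and restores clock gating.  On hardware
 * with a post-processor the pp interrupt is handled in the same way.
 * Returns IRQ_WAKE_THREAD when any work was acknowledged so vdpu_isr() can
 * complete the task.
 */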
2699 static irqreturn_t vdpu_irq(int irq, void *dev_id)
2700 {
2701         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2702         struct vpu_service_info *pservice = data->pservice;
2703         struct vpu_task_info *task = NULL;
2704         struct vpu_device *dev = &data->dec_dev;
2705         u32 hw_id = data->hw_info->hw_id;
2706         u32 raw_status;
2707         u32 dec_status;
2708
2709         task = &data->task_info[TASK_DEC];
2710
2711         raw_status = readl_relaxed(dev->regs + task->reg_irq);
2712         dec_status = raw_status;
2713
2714         vpu_debug(DEBUG_TASK_INFO, "vdpu_irq reg %d status %x mask: irq %x ready %x error %0x\n",
2715                   task->reg_irq, dec_status,
2716                   task->irq_mask, task->ready_mask, task->error_mask);
2717
2718         if (dec_status & task->irq_mask) {
2719                 time_record(task, 1);
2720                 vpu_debug(DEBUG_IRQ_STATUS, "vdpu_irq dec status %08x\n",
2721                           dec_status);
2722                 if ((dec_status & 0x40001) == 0x40001) {
2723                         do {
2724                                 dec_status =
2725                                         readl_relaxed(dev->regs +
2726                                                 task->reg_irq);
2727                         } while ((dec_status & 0x40001) == 0x40001);
2728                 }
2729
2730                 if (check_irq_err(task, dec_status))
2731                         atomic_add(1, &pservice->reset_request);
2732
2733                 writel_relaxed(0, dev->regs + task->reg_irq);
2734
2735                 /*
2736                  * NOTE: rkvdec needs a reset after each task to avoid a
2737                  *       timeout error when switching from H.264 to H.265
2738                  */
2739                 if (data->mode == VCODEC_RUNNING_MODE_RKVDEC)
2740                         writel(0x100000, dev->regs + task->reg_irq);
2741
2742                 /* set clock gating to save power */
2743                 writel(task->gating_mask, dev->regs + task->reg_en);
2744
2745                 atomic_add(1, &dev->irq_count_codec);
2746                 time_diff(task);
2747         }
2748
2749         task = &data->task_info[TASK_PP];
2750         if (hw_id != HEVC_ID && hw_id != RKV_DEC_ID) {
2751                 u32 pp_status = readl_relaxed(dev->regs + task->irq_mask);
2752
2753                 if (pp_status & task->irq_mask) {
2754                         time_record(task, 1);
2755                         vpu_debug(DEBUG_IRQ_STATUS, "vdpu_irq pp status %08x\n",
2756                                   pp_status);
2757
2758                         if (check_irq_err(task, pp_status))
2759                                 atomic_add(1, &pservice->reset_request);
2760
2761                         /* clear pp IRQ */
2762                         writel_relaxed(pp_status & (~task->reg_irq),
2763                                        dev->regs + task->irq_mask);
2764                         atomic_add(1, &dev->irq_count_pp);
2765                         time_diff(task);
2766                 }
2767         }
2768
2769         pservice->irq_status = raw_status;
2770
2771         if (atomic_read(&dev->irq_count_pp) ||
2772             atomic_read(&dev->irq_count_codec))
2773                 return IRQ_WAKE_THREAD;
2774         else
2775                 return IRQ_NONE;
2776 }
2777
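/*
 * vdpu_isr - decoder threaded IRQ handler.
 *
 * Under pservice->lock it moves the finished decoder and/or post-processor
 * register sets from running to done, soft resets the VDPU after a decode
 * (to avoid the unrecoverable timeout case noted below), and immediately
 * tries to program the next waiting task.
 */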
2778 static irqreturn_t vdpu_isr(int irq, void *dev_id)
2779 {
2780         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2781         struct vpu_service_info *pservice = data->pservice;
2782         struct vpu_device *dev = &data->dec_dev;
2783
2784         mutex_lock(&pservice->lock);
2785         if (atomic_read(&dev->irq_count_codec)) {
2786                 atomic_sub(1, &dev->irq_count_codec);
2787                 if (pservice->reg_codec == NULL) {
2788                         vpu_err("error: dec isr with no task waiting\n");
2789                 } else {
2790                         reg_from_run_to_done(data, pservice->reg_codec);
2791                         /* avoid the vpu timing out and failing to recover */
2792                         VDPU_SOFT_RESET(data->regs);
2793                 }
2794         }
2795
2796         if (atomic_read(&dev->irq_count_pp)) {
2797                 atomic_sub(1, &dev->irq_count_pp);
2798                 if (pservice->reg_pproc == NULL)
2799                         vpu_err("error: pp isr with no task waiting\n");
2800                 else
2801                         reg_from_run_to_done(data, pservice->reg_pproc);
2802         }
2803         try_set_reg(data);
2804         mutex_unlock(&pservice->lock);
2805         return IRQ_HANDLED;
2806 }
2807
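/*
 * Hard-IRQ top half for the encoder: read the raw status, note any error
 * so a reset can be requested, clear the interrupt bits and defer
 * completion to the threaded handler vepu_isr() below.
 */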
2808 static irqreturn_t vepu_irq(int irq, void *dev_id)
2809 {
2810         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2811         struct vpu_service_info *pservice = data->pservice;
2812         struct vpu_task_info *task = &data->task_info[TASK_ENC];
2813         struct vpu_device *dev = &data->enc_dev;
2814         u32 irq_status;
2815
2816         irq_status = readl_relaxed(dev->regs + task->reg_irq);
2817
2818         vpu_debug(DEBUG_TASK_INFO, "vepu_irq reg %d status %x mask: irq %x ready %x error %x\n",
2819                   task->reg_irq, irq_status,
2820                   task->irq_mask, task->ready_mask, task->error_mask);
2821
2822         vpu_debug(DEBUG_IRQ_STATUS, "vepu_irq enc status %08x\n", irq_status);
2823
2824         if (likely(irq_status & task->irq_mask)) {
2825                 time_record(task, 1);
2826
2827                 if (check_irq_err(task, irq_status))
2828                         atomic_add(1, &pservice->reset_request);
2829
2830                 /* clear enc IRQ */
2831                 writel_relaxed(irq_status & (~task->irq_mask),
2832                                dev->regs + task->reg_irq);
2833
2834                 atomic_add(1, &dev->irq_count_codec);
2835                 time_diff(task);
2836         }
2837
2838         pservice->irq_status = irq_status;
2839
2840         if (atomic_read(&dev->irq_count_codec))
2841                 return IRQ_WAKE_THREAD;
2842         else
2843                 return IRQ_NONE;
2844 }
2845
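/*
 * Threaded bottom half for the encoder interrupt; mirrors vdpu_isr()
 * above, but only a codec task (no post-processor) can complete here.
 */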
2846 static irqreturn_t vepu_isr(int irq, void *dev_id)
2847 {
2848         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2849         struct vpu_service_info *pservice = data->pservice;
2850         struct vpu_device *dev = &data->enc_dev;
2851
2852         mutex_lock(&pservice->lock);
2853         if (atomic_read(&dev->irq_count_codec)) {
2854                 atomic_sub(1, &dev->irq_count_codec);
2855                 if (pservice->reg_codec == NULL)
2856                         vpu_err("error: enc isr with no task waiting\n");
2857                 else
2858                         reg_from_run_to_done(data, pservice->reg_codec);
2859         }
2860         try_set_reg(data);
2861         mutex_unlock(&pservice->lock);
2862         return IRQ_HANDLED;
2863 }
2864
2865 static int __init vcodec_service_init(void)
2866 {
2867         int ret = platform_driver_register(&vcodec_driver);
2868
2869         if (ret) {
2870                 vpu_err("Platform driver register failed (%d).\n", ret);
2871                 return ret;
2872         }
2873
2874 #ifdef CONFIG_DEBUG_FS
2875         vcodec_debugfs_init();
2876 #endif
2877
2878         return ret;
2879 }
2880
2881 static void __exit vcodec_service_exit(void)
2882 {
2883 #ifdef CONFIG_DEBUG_FS
2884         vcodec_debugfs_exit();
2885 #endif
2886
2887         platform_driver_unregister(&vcodec_driver);
2888 }
2889
2890 module_init(vcodec_service_init);
2891 module_exit(vcodec_service_exit);
2892 MODULE_LICENSE("GPL v2");
2893
2894 #ifdef CONFIG_DEBUG_FS
2895 #include <linux/seq_file.h>
2896
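/*
 * debugfs support. The entries below hang off a file-scope
 * "static struct dentry *parent", assumed to be declared near the top of
 * the file together with debug_vcodec_fops. The probe path is expected
 * to create a per-device node along the lines of this sketch (names are
 * illustrative, not the literal probe code):
 *
 *     struct dentry *dir = vcodec_debugfs_create_device_dir(name, parent);
 *
 *     debugfs_create_file("debug", 0664, dir, data, &debug_vcodec_fops);
 *
 * so that reading the node prints the state collected by
 * debug_vcodec_show() below.
 */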
2897 static int vcodec_debugfs_init(void)
2898 {
2899         parent = debugfs_create_dir("vcodec", NULL);
2900         if (!parent)
2901                 return -1;
2902
2903         return 0;
2904 }
2905
2906 static void vcodec_debugfs_exit(void)
2907 {
2908         debugfs_remove(parent);
2909 }
2910
2911 static struct dentry *vcodec_debugfs_create_device_dir(
2912                 char *dirname, struct dentry *parent)
2913 {
2914         return debugfs_create_dir(dirname, parent);
2915 }
2916
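/*
 * seq_file show callback: with the device powered on it dumps every
 * encoder/decoder register, the per-session waiting/running/done lists
 * and the power on/off counters, which is handy when a task appears
 * stuck.
 */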
2917 static int debug_vcodec_show(struct seq_file *s, void *unused)
2918 {
2919         struct vpu_subdev_data *data = s->private;
2920         struct vpu_service_info *pservice = data->pservice;
2921         unsigned int i, n;
2922         struct vpu_reg *reg, *reg_tmp;
2923         struct vpu_session *session, *session_tmp;
2924
2925         mutex_lock(&pservice->lock);
2926         vpu_service_power_on(pservice);
2927         if (data->hw_info->hw_id != HEVC_ID) {
2928                 seq_puts(s, "\nENC Registers:\n");
2929                 n = data->enc_dev.iosize >> 2;
2930
2931                 for (i = 0; i < n; i++)
2932                         seq_printf(s, "\tswreg%d = %08X\n", i,
2933                                    readl_relaxed(data->enc_dev.regs + i));
2934         }
2935
2936         seq_puts(s, "\nDEC Registers:\n");
2937
2938         n = data->dec_dev.iosize >> 2;
2939         for (i = 0; i < n; i++)
2940                 seq_printf(s, "\tswreg%d = %08X\n", i,
2941                            readl_relaxed(data->dec_dev.regs + i));
2942
2943         seq_puts(s, "\nvpu service status:\n");
2944
2945         list_for_each_entry_safe(session, session_tmp,
2946                                  &pservice->session, list_session) {
2947                 seq_printf(s, "session pid %d type %d:\n",
2948                            session->pid, session->type);
2949
2950                 list_for_each_entry_safe(reg, reg_tmp,
2951                                          &session->waiting, session_link) {
2952                         seq_printf(s, "waiting register set %p\n", reg);
2953                 }
2954                 list_for_each_entry_safe(reg, reg_tmp,
2955                                          &session->running, session_link) {
2956                         seq_printf(s, "running register set %p\n", reg);
2957                 }
2958                 list_for_each_entry_safe(reg, reg_tmp,
2959                                          &session->done, session_link) {
2960                         seq_printf(s, "done    register set %p\n", reg);
2961                 }
2962         }
2963
2964         seq_printf(s, "\npower counter: on %d off %d\n",
2965                    atomic_read(&pservice->power_on_cnt),
2966                    atomic_read(&pservice->power_off_cnt));
2967
2968         mutex_unlock(&pservice->lock);
2969         vpu_service_power_off(pservice);
2970
2971         return 0;
2972 }
2973
2974 static int debug_vcodec_open(struct inode *inode, struct file *file)
2975 {
2976         return single_open(file, debug_vcodec_show, inode->i_private);
2977 }
2978
2979 #endif
2980