Merge tag 'lsk-v4.4-16.05-android'
[firefly-linux-kernel-4.4.55.git] / drivers/video/rockchip/vcodec/vcodec_service.c
1 /**
2  * Copyright (C) 2015 Fuzhou Rockchip Electronics Co., Ltd
3  * author: chenhengming chm@rock-chips.com
4  *         Alpha Lin, alpha.lin@rock-chips.com
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include <linux/clk.h>
20 #include <linux/compat.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/module.h>
25 #include <linux/fs.h>
26 #include <linux/mm.h>
27 #include <linux/platform_device.h>
28 #include <linux/reset.h>
29 #include <linux/sched.h>
30 #include <linux/slab.h>
31 #include <linux/wakelock.h>
32 #include <linux/cdev.h>
33 #include <linux/of.h>
34 #include <linux/of_platform.h>
35 #include <linux/of_irq.h>
36 #include <linux/regmap.h>
37 #include <linux/mfd/syscon.h>
38 #include <linux/uaccess.h>
39 #include <linux/debugfs.h>
40
41 #include <linux/rockchip/cpu.h>
42 #include <linux/rockchip/cru.h>
43 #include <linux/rockchip/pmu.h>
44 #include <linux/rockchip/grf.h>
45
46 #if defined(CONFIG_ION_ROCKCHIP)
47 #include <linux/rockchip_ion.h>
48 #endif
49
50 #include <linux/rockchip-iovmm.h>
51 #include <linux/dma-buf.h>
52
53 #include "vcodec_hw_info.h"
54 #include "vcodec_hw_vpu.h"
55 #include "vcodec_hw_rkv.h"
56 #include "vcodec_hw_vpu2.h"
57
58 #include "vcodec_service.h"
59
60 /*
61  * debug flag usage:
62  * +------+-------------------+
63  * | 8bit |      24bit        |
64  * +------+-------------------+
65  *  bits  0~23 select the information type
66  * bits 24~31 select the print format
67  */
68
69 #define DEBUG_POWER                             0x00000001
70 #define DEBUG_CLOCK                             0x00000002
71 #define DEBUG_IRQ_STATUS                        0x00000004
72 #define DEBUG_IOMMU                             0x00000008
73 #define DEBUG_IOCTL                             0x00000010
74 #define DEBUG_FUNCTION                          0x00000020
75 #define DEBUG_REGISTER                          0x00000040
76 #define DEBUG_EXTRA_INFO                        0x00000080
77 #define DEBUG_TIMING                            0x00000100
78 #define DEBUG_TASK_INFO                         0x00000200
79
80 #define DEBUG_SET_REG                           0x00001000
81 #define DEBUG_GET_REG                           0x00002000
82 #define DEBUG_PPS_FILL                          0x00004000
83 #define DEBUG_IRQ_CHECK                         0x00008000
84 #define DEBUG_CACHE_32B                         0x00010000
85
86 #define PRINT_FUNCTION                          0x80000000
87 #define PRINT_LINE                              0x40000000
88
89 static int debug;
90 module_param(debug, int, S_IRUGO | S_IWUSR);
91 MODULE_PARM_DESC(debug, "bit switch for vcodec_service debug information");
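/*
 * Usage sketch (illustrative, not part of the driver): the switches above are
 * plain bits and can be combined, e.g. (DEBUG_IOMMU | DEBUG_TIMING) == 0x108
 * enables both the iommu and the timing prints. Because the parameter is
 * declared with S_IWUSR it can also be changed at runtime through sysfs,
 * typically /sys/module/vcodec_service/parameters/debug (the exact module
 * name is an assumption here).
 */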
92
93 #define VCODEC_CLOCK_ENABLE     1
94
95 /*
96  * hardware information organization
97  *
98  * In order to support multiple hardware blocks with different versions, the
99  * hardware information is organized as follows:
100  *
101  * 1. First, index the hardware by register size / position.
102  *    This information is fixed for each hardware block and does not relate
103  *    to the runtime work flow; it only concerns resource allocation.
104  *    Descriptor: struct vpu_hw_info
105  *
106  * 2. Then, index the hardware by runtime configuration.
107  *    This information covers the runtime setting behavior, including the
108  *    enable register, the irq register and other key control flags.
109  *    Descriptor: struct vpu_task_info
110  *
111  * 3. Finally, in the iommu case fd translation is required.
112  *    Descriptor: struct vpu_trans_info
113  */
114
115 enum VPU_FREQ {
116         VPU_FREQ_200M,
117         VPU_FREQ_266M,
118         VPU_FREQ_300M,
119         VPU_FREQ_400M,
120         VPU_FREQ_500M,
121         VPU_FREQ_600M,
122         VPU_FREQ_DEFAULT,
123         VPU_FREQ_BUT,
124 };
125
126 struct extra_info_elem {
127         u32 index;
128         u32 offset;
129 };
130
131 #define EXTRA_INFO_MAGIC        0x4C4A46
132
133 struct extra_info_for_iommu {
134         u32 magic;
135         u32 cnt;
136         struct extra_info_elem elem[20];
137 };
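/*
 * Illustrative example (values are made up): the trailer userspace may append
 * right after the register words it sends down (see reg_init() below), asking
 * the driver to add an extra offset to register indices 12 and 13 once their
 * fds have been translated (see vcodec_bufid_to_iova() below):
 *
 *      struct extra_info_for_iommu info = {
 *              .magic = EXTRA_INFO_MAGIC,
 *              .cnt   = 2,
 *              .elem  = {
 *                      { .index = 12, .offset = 0x100 },
 *                      { .index = 13, .offset = 0x200 },
 *              },
 *      };
 */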
138
139 #define MHZ                                     (1000*1000)
140 #define SIZE_REG(reg)                           ((reg)*4)
141
142 static struct vcodec_info vcodec_info_set[] = {
143         [0] = {
144                 .hw_id          = VPU_ID_8270,
145                 .hw_info        = &hw_vpu_8270,
146                 .task_info      = task_vpu,
147                 .trans_info     = trans_vpu,
148         },
149         [1] = {
150                 .hw_id          = VPU_ID_4831,
151                 .hw_info        = &hw_vpu_4831,
152                 .task_info      = task_vpu,
153                 .trans_info     = trans_vpu,
154         },
155         [2] = {
156                 .hw_id          = VPU_DEC_ID_9190,
157                 .hw_info        = &hw_vpu_9190,
158                 .task_info      = task_vpu,
159                 .trans_info     = trans_vpu,
160         },
161         [3] = {
162                 .hw_id          = HEVC_ID,
163                 .hw_info        = &hw_rkhevc,
164                 .task_info      = task_rkv,
165                 .trans_info     = trans_rkv,
166         },
167         [4] = {
168                 .hw_id          = RKV_DEC_ID,
169                 .hw_info        = &hw_rkvdec,
170                 .task_info      = task_rkv,
171                 .trans_info     = trans_rkv,
172         },
173         [5] = {
174                 .hw_id          = VPU2_ID,
175                 .hw_info        = &hw_vpu2,
176                 .task_info      = task_vpu2,
177                 .trans_info     = trans_vpu2,
178         },
179 };
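/*
 * Illustrative sketch (hypothetical helper, not the driver's actual probe
 * path): the table above is meant to be matched against the hardware ID read
 * back from the device, roughly as follows.
 */
static inline const struct vcodec_info *vcodec_info_lookup(u32 hw_id)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(vcodec_info_set); i++) {
                if (vcodec_info_set[i].hw_id == hw_id)
                        return &vcodec_info_set[i];
        }

        return NULL;
}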
180
181 #define DEBUG
182 #ifdef DEBUG
183 #define vpu_debug_func(type, fmt, args...)                      \
184         do {                                                    \
185                 if (unlikely(debug & type)) {                   \
186                         pr_info("%s:%d: " fmt,                  \
187                                  __func__, __LINE__, ##args);   \
188                 }                                               \
189         } while (0)
190 #define vpu_debug(type, fmt, args...)                           \
191         do {                                                    \
192                 if (unlikely(debug & type)) {                   \
193                         pr_info(fmt, ##args);                   \
194                 }                                               \
195         } while (0)
196 #else
197 #define vpu_debug_func(level, fmt, args...)
198 #define vpu_debug(level, fmt, args...)
199 #endif
200
201 #define vpu_debug_enter() vpu_debug_func(DEBUG_FUNCTION, "enter\n")
202 #define vpu_debug_leave() vpu_debug_func(DEBUG_FUNCTION, "leave\n")
203
204 #define vpu_err(fmt, args...)                           \
205                 pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
206
207 enum VPU_DEC_FMT {
208         VPU_DEC_FMT_H264,
209         VPU_DEC_FMT_MPEG4,
210         VPU_DEC_FMT_H263,
211         VPU_DEC_FMT_JPEG,
212         VPU_DEC_FMT_VC1,
213         VPU_DEC_FMT_MPEG2,
214         VPU_DEC_FMT_MPEG1,
215         VPU_DEC_FMT_VP6,
216         VPU_DEC_FMT_RESERV0,
217         VPU_DEC_FMT_VP7,
218         VPU_DEC_FMT_VP8,
219         VPU_DEC_FMT_AVS,
220         VPU_DEC_FMT_RES
221 };
222
223 /**
224  * struct for a process session which connects to the vpu
225  *
226  * @author ChenHengming (2011-5-3)
227  */
228 struct vpu_session {
229         enum VPU_CLIENT_TYPE type;
230         /* a linked list of data so we can access them for debugging */
231         struct list_head list_session;
232         /* a linked list of register data waiting for process */
233         struct list_head waiting;
234         /* a linked list of register data in processing */
235         struct list_head running;
236         /* a linked list of register data processed */
237         struct list_head done;
238         wait_queue_head_t wait;
239         pid_t pid;
240         atomic_t task_running;
241 };
242
243 /**
244  * struct for a register set submitted by a process session
245  *
246  * @author ChenHengming (2011-5-4)
247  */
248 struct vpu_reg {
249         enum VPU_CLIENT_TYPE type;
250         enum VPU_FREQ freq;
251         struct vpu_session *session;
252         struct vpu_subdev_data *data;
253         struct vpu_task_info *task;
254         const struct vpu_trans_info *trans;
255
256         /* link to vpu service session */
257         struct list_head session_link;
258         /* link to register set list */
259         struct list_head status_link;
260
261         unsigned long size;
262         struct list_head mem_region_list;
263         u32 dec_base;
264         u32 *reg;
265 };
266
267 struct vpu_device {
268         atomic_t irq_count_codec;
269         atomic_t irq_count_pp;
270         unsigned int iosize;
271         u32 *regs;
272 };
273
274 enum vcodec_device_id {
275         VCODEC_DEVICE_ID_VPU,
276         VCODEC_DEVICE_ID_HEVC,
277         VCODEC_DEVICE_ID_COMBO,
278         VCODEC_DEVICE_ID_RKVDEC,
279         VCODEC_DEVICE_ID_BUTT
280 };
281
282 enum VCODEC_RUNNING_MODE {
283         VCODEC_RUNNING_MODE_NONE = -1,
284         VCODEC_RUNNING_MODE_VPU,
285         VCODEC_RUNNING_MODE_HEVC,
286         VCODEC_RUNNING_MODE_RKVDEC
287 };
288
289 struct vcodec_mem_region {
290         struct list_head srv_lnk;
291         struct list_head reg_lnk;
292         struct list_head session_lnk;
293         unsigned long iova;     /* virtual address for iommu */
294         unsigned long len;
295         u32 reg_idx;
296         struct ion_handle *hdl;
297 };
298
299 enum vpu_ctx_state {
300         MMU_ACTIVATED   = BIT(0)
301 };
302
303 struct vpu_subdev_data {
304         struct cdev cdev;
305         dev_t dev_t;
306         struct class *cls;
307         struct device *child_dev;
308
309         int irq_enc;
310         int irq_dec;
311         struct vpu_service_info *pservice;
312
313         u32 *regs;
314         enum VCODEC_RUNNING_MODE mode;
315         struct list_head lnk_service;
316
317         struct device *dev;
318
319         struct vpu_device enc_dev;
320         struct vpu_device dec_dev;
321
322         enum VPU_HW_ID hw_id;
323         struct vpu_hw_info *hw_info;
324         struct vpu_task_info *task_info;
325         const struct vpu_trans_info *trans_info;
326
327         u32 reg_size;
328         unsigned long state;
329
330 #ifdef CONFIG_DEBUG_FS
331         struct dentry *debugfs_dir;
332         struct dentry *debugfs_file_regs;
333 #endif
334
335         struct device *mmu_dev;
336 };
337
338 struct vpu_service_info {
339         struct wake_lock wake_lock;
340         struct delayed_work power_off_work;
341         ktime_t last; /* record previous power-on time */
342         /* vpu service structure global lock */
343         struct mutex lock;
344         /* link to status_link in struct vpu_reg */
345         struct list_head waiting;
346         /* link to status_link in struct vpu_reg */
347         struct list_head running;
348         /* link to status_link in struct vpu_reg */
349         struct list_head done;
350         /* link to list_session in struct vpu_session */
351         struct list_head session;
352         atomic_t total_running;
353         atomic_t enabled;
354         atomic_t power_on_cnt;
355         atomic_t power_off_cnt;
356         struct vpu_reg *reg_codec;
357         struct vpu_reg *reg_pproc;
358         struct vpu_reg *reg_resev;
359         struct vpu_dec_config dec_config;
360         struct vpu_enc_config enc_config;
361
362         bool auto_freq;
363         bool bug_dec_addr;
364         atomic_t freq_status;
365
366         struct clk *aclk_vcodec;
367         struct clk *hclk_vcodec;
368         struct clk *clk_core;
369         struct clk *clk_cabac;
370         struct clk *pd_video;
371
372 #ifdef CONFIG_RESET_CONTROLLER
373         struct reset_control *rst_a;
374         struct reset_control *rst_h;
375         struct reset_control *rst_v;
376 #endif
377         struct device *dev;
378
379         u32 irq_status;
380         atomic_t reset_request;
381         struct ion_client *ion_client;
382         struct list_head mem_region_list;
383
384         enum vcodec_device_id dev_id;
385
386         enum VCODEC_RUNNING_MODE curr_mode;
387         u32 prev_mode;
388
389         struct delayed_work simulate_work;
390
391         u32 mode_bit;
392         u32 mode_ctrl;
393         u32 *reg_base;
394         u32 ioaddr;
395         struct regmap *grf;
396         u32 *grf_base;
397
398         char *name;
399
400         u32 subcnt;
401         struct list_head subdev_list;
402 };
403
404 struct vpu_request {
405         u32 *req;
406         u32 size;
407 };
408
409 #ifdef CONFIG_COMPAT
410 struct compat_vpu_request {
411         compat_uptr_t req;
412         u32 size;
413 };
414 #endif
415
416 /* debugfs root directory for all devices (vpu, hevc). */
417 static struct dentry *parent;
418
419 #ifdef CONFIG_DEBUG_FS
420 static int vcodec_debugfs_init(void);
421 static void vcodec_debugfs_exit(void);
422 static struct dentry *vcodec_debugfs_create_device_dir(
423                 char *dirname, struct dentry *parent);
424 static int debug_vcodec_open(struct inode *inode, struct file *file);
425
426 static const struct file_operations debug_vcodec_fops = {
427         .open = debug_vcodec_open,
428         .read = seq_read,
429         .llseek = seq_lseek,
430         .release = single_release,
431 };
432 #endif
433
434 #define VDPU_SOFT_RESET_REG     101
435 #define VDPU_CLEAN_CACHE_REG    516
436 #define VEPU_CLEAN_CACHE_REG    772
437 #define HEVC_CLEAN_CACHE_REG    260
438
439 #define VPU_REG_ENABLE(base, reg)       writel_relaxed(1, base + reg)
440
441 #define VDPU_SOFT_RESET(base)   VPU_REG_ENABLE(base, VDPU_SOFT_RESET_REG)
442 #define VDPU_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, VDPU_CLEAN_CACHE_REG)
443 #define VEPU_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, VEPU_CLEAN_CACHE_REG)
444 #define HEVC_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, HEVC_CLEAN_CACHE_REG)
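/*
 * Note: the *_REG values above are word indices rather than byte offsets; the
 * base pointers passed to VPU_REG_ENABLE() are u32 *, so base + reg already
 * advances in 4-byte steps (e.g. VDPU_SOFT_RESET_REG 101 is byte offset
 * 0x194), matching the SIZE_REG() convention used elsewhere in this file.
 */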
445
446 #define VPU_POWER_OFF_DELAY             (4 * HZ) /* 4s */
447 #define VPU_TIMEOUT_DELAY               (2 * HZ) /* 2s */
448
449 static void time_record(struct vpu_task_info *task, int is_end)
450 {
451         if (unlikely(debug & DEBUG_TIMING) && task)
452                 do_gettimeofday((is_end) ? (&task->end) : (&task->start));
453 }
454
455 static void time_diff(struct vpu_task_info *task)
456 {
457         vpu_debug(DEBUG_TIMING, "%s task: %ld ms\n", task->name,
458                   (task->end.tv_sec  - task->start.tv_sec)  * 1000 +
459                   (task->end.tv_usec - task->start.tv_usec) / 1000);
460 }
461
462 static void vcodec_enter_mode(struct vpu_subdev_data *data)
463 {
464         int bits;
465         u32 raw = 0;
466         struct vpu_service_info *pservice = data->pservice;
467         struct vpu_subdev_data *subdata, *n;
468
469         if (pservice->subcnt < 2) {
470                 if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
471                         set_bit(MMU_ACTIVATED, &data->state);
472                         if (atomic_read(&pservice->enabled))
473                                 rockchip_iovmm_activate(data->dev);
474                         else
475                                 BUG_ON(!atomic_read(&pservice->enabled));
476                 }
477                 return;
478         }
479
480         if (pservice->curr_mode == data->mode)
481                 return;
482
483         vpu_debug(DEBUG_IOMMU, "vcodec enter mode %d\n", data->mode);
484         list_for_each_entry_safe(subdata, n,
485                                  &pservice->subdev_list, lnk_service) {
486                 if (data != subdata && subdata->mmu_dev &&
487                     test_bit(MMU_ACTIVATED, &subdata->state)) {
488                         clear_bit(MMU_ACTIVATED, &subdata->state);
489                         rockchip_iovmm_deactivate(subdata->dev);
490                 }
491         }
492         bits = 1 << pservice->mode_bit;
493 #ifdef CONFIG_MFD_SYSCON
494         if (pservice->grf) {
495                 regmap_read(pservice->grf, pservice->mode_ctrl, &raw);
496
497                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
498                         regmap_write(pservice->grf, pservice->mode_ctrl,
499                                      raw | bits | (bits << 16));
500                 else
501                         regmap_write(pservice->grf, pservice->mode_ctrl,
502                                      (raw & (~bits)) | (bits << 16));
503         } else if (pservice->grf_base) {
504                 u32 *grf_base = pservice->grf_base;
505
506                 raw = readl_relaxed(grf_base + pservice->mode_ctrl / 4);
507                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
508                         writel_relaxed(raw | bits | (bits << 16),
509                                        grf_base + pservice->mode_ctrl / 4);
510                 else
511                         writel_relaxed((raw & (~bits)) | (bits << 16),
512                                        grf_base + pservice->mode_ctrl / 4);
513         } else {
514                 vpu_err("no grf resource defined, switching decoder failed\n");
515                 return;
516         }
517 #else
518         if (pservice->grf_base) {
519                 u32 *grf_base = pservice->grf_base;
520
521                 raw = readl_relaxed(grf_base + pservice->mode_ctrl / 4);
522                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
523                         writel_relaxed(raw | bits | (bits << 16),
524                                        grf_base + pservice->mode_ctrl / 4);
525                 else
526                         writel_relaxed((raw & (~bits)) | (bits << 16),
527                                        grf_base + pservice->mode_ctrl / 4);
528         } else {
529                 vpu_err("no grf resource defined, switching decoder failed\n");
530                 return;
531         }
532 #endif
533         if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
534                 set_bit(MMU_ACTIVATED, &data->state);
535                 if (atomic_read(&pservice->enabled))
536                         rockchip_iovmm_activate(data->dev);
537                 else
538                         BUG_ON(!atomic_read(&pservice->enabled));
539         }
540
541         pservice->prev_mode = pservice->curr_mode;
542         pservice->curr_mode = data->mode;
543 }
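/*
 * Worked example (illustrative): with mode_bit == 3 the mask `bits' above is
 * 0x8, so switching to HEVC writes raw | 0x8 | (0x8 << 16) and switching away
 * writes (raw & ~0x8) | (0x8 << 16). The upper 16 bits are the GRF
 * write-enable mask on Rockchip SoCs, so only the selected mode bit is
 * actually modified by the write.
 */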
544
545 static void vcodec_exit_mode(struct vpu_subdev_data *data)
546 {
547         if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
548                 clear_bit(MMU_ACTIVATED, &data->state);
549                 rockchip_iovmm_deactivate(data->dev);
550                 data->pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
551         }
552 }
553
554 static int vpu_get_clk(struct vpu_service_info *pservice)
555 {
556 #if VCODEC_CLOCK_ENABLE
557         struct device *dev = pservice->dev;
558
559         switch (pservice->dev_id) {
560         case VCODEC_DEVICE_ID_HEVC:
561                 pservice->pd_video = devm_clk_get(dev, "pd_hevc");
562                 if (IS_ERR(pservice->pd_video)) {
563                         dev_err(dev, "failed on clk_get pd_hevc\n");
564                         return -1;
565                 }
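                /* fall through: HEVC also needs the cabac/core clocks below */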
566         case VCODEC_DEVICE_ID_COMBO:
567         case VCODEC_DEVICE_ID_RKVDEC:
568                 pservice->clk_cabac = devm_clk_get(dev, "clk_cabac");
569                 if (IS_ERR(pservice->clk_cabac)) {
570                         dev_err(dev, "failed on clk_get clk_cabac\n");
571                         pservice->clk_cabac = NULL;
572                 }
573                 pservice->clk_core = devm_clk_get(dev, "clk_core");
574                 if (IS_ERR(pservice->clk_core)) {
575                         dev_err(dev, "failed on clk_get clk_core\n");
576                         return -1;
577                 }
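                /* fall through: every device type needs the AXI/AHB clocks below */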
578         case VCODEC_DEVICE_ID_VPU:
579                 pservice->aclk_vcodec = devm_clk_get(dev, "aclk_vcodec");
580                 if (IS_ERR(pservice->aclk_vcodec)) {
581                         dev_err(dev, "failed on clk_get aclk_vcodec\n");
582                         return -1;
583                 }
584
585                 pservice->hclk_vcodec = devm_clk_get(dev, "hclk_vcodec");
586                 if (IS_ERR(pservice->hclk_vcodec)) {
587                         dev_err(dev, "failed on clk_get hclk_vcodec\n");
588                         return -1;
589                 }
590                 if (pservice->pd_video == NULL) {
591                         pservice->pd_video = devm_clk_get(dev, "pd_video");
592                         if (IS_ERR(pservice->pd_video)) {
593                                 pservice->pd_video = NULL;
594                                 dev_info(dev, "do not have pd_video\n");
595                         }
596                 }
597                 break;
598         default:
599                 break;
600         }
601
602         return 0;
603 #else
604         return 0;
605 #endif
606 }
607
608 static void vpu_put_clk(struct vpu_service_info *pservice)
609 {
610 #if VCODEC_CLOCK_ENABLE
611         if (pservice->pd_video)
612                 devm_clk_put(pservice->dev, pservice->pd_video);
613         if (pservice->aclk_vcodec)
614                 devm_clk_put(pservice->dev, pservice->aclk_vcodec);
615         if (pservice->hclk_vcodec)
616                 devm_clk_put(pservice->dev, pservice->hclk_vcodec);
617         if (pservice->clk_core)
618                 devm_clk_put(pservice->dev, pservice->clk_core);
619         if (pservice->clk_cabac)
620                 devm_clk_put(pservice->dev, pservice->clk_cabac);
621 #endif
622 }
623
624 static void vpu_reset(struct vpu_subdev_data *data)
625 {
626         struct vpu_service_info *pservice = data->pservice;
627         enum pmu_idle_req type = IDLE_REQ_VIDEO;
628
629         if (pservice->dev_id == VCODEC_DEVICE_ID_HEVC)
630                 type = IDLE_REQ_HEVC;
631
632         pr_info("%s: resetting...", dev_name(pservice->dev));
633
634 #if defined(CONFIG_ARCH_RK29)
635         clk_disable(aclk_ddr_vepu);
636         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, true);
637         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, true);
638         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, true);
639         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, true);
640         mdelay(10);
641         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, false);
642         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, false);
643         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, false);
644         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, false);
645         clk_enable(aclk_ddr_vepu);
646 #elif defined(CONFIG_ARCH_RK30)
647         pmu_set_idle_request(IDLE_REQ_VIDEO, true);
648         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
649         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, true);
650         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
651         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, true);
652         mdelay(1);
653         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, false);
654         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
655         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, false);
656         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
657         pmu_set_idle_request(IDLE_REQ_VIDEO, false);
658 #else
659 #endif
660         WARN_ON(pservice->reg_codec != NULL);
661         WARN_ON(pservice->reg_pproc != NULL);
662         WARN_ON(pservice->reg_resev != NULL);
663         pservice->reg_codec = NULL;
664         pservice->reg_pproc = NULL;
665         pservice->reg_resev = NULL;
666
667         pr_info("for 3288/3368...");
668 #if 0 //def CONFIG_RESET_CONTROLLER
669         if (pservice->rst_a && pservice->rst_h) {
670                 if (rockchip_pmu_ops.set_idle_request)
671                         rockchip_pmu_ops.set_idle_request(type, true);
672                 pr_info("reset in\n");
673                 if (pservice->rst_v)
674                         reset_control_assert(pservice->rst_v);
675                 reset_control_assert(pservice->rst_a);
676                 reset_control_assert(pservice->rst_h);
677                 udelay(5);
678                 reset_control_deassert(pservice->rst_h);
679                 reset_control_deassert(pservice->rst_a);
680                 if (pservice->rst_v)
681                         reset_control_deassert(pservice->rst_v);
682                 if (rockchip_pmu_ops.set_idle_request)
683                         rockchip_pmu_ops.set_idle_request(type, false);
684         }
685 #endif
686
687         if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
688                 clear_bit(MMU_ACTIVATED, &data->state);
689                 if (atomic_read(&pservice->enabled))
690                         rockchip_iovmm_deactivate(data->dev);
691                 else
692                         BUG_ON(!atomic_read(&pservice->enabled));
693         }
694
695         atomic_set(&pservice->reset_request, 0);
696         pr_info("done\n");
697 }
698
699 static void reg_deinit(struct vpu_subdev_data *data, struct vpu_reg *reg);
700 static void vpu_service_session_clear(struct vpu_subdev_data *data,
701                                       struct vpu_session *session)
702 {
703         struct vpu_reg *reg, *n;
704
705         list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
706                 reg_deinit(data, reg);
707         }
708         list_for_each_entry_safe(reg, n, &session->running, session_link) {
709                 reg_deinit(data, reg);
710         }
711         list_for_each_entry_safe(reg, n, &session->done, session_link) {
712                 reg_deinit(data, reg);
713         }
714 }
715
716 static void vpu_service_dump(struct vpu_service_info *pservice)
717 {
718 }
719
720
721 static void vpu_service_power_off(struct vpu_service_info *pservice)
722 {
723         int total_running;
724         struct vpu_subdev_data *data = NULL, *n;
725         int ret = atomic_add_unless(&pservice->enabled, -1, 0);
726
727         if (!ret)
728                 return;
729
730         total_running = atomic_read(&pservice->total_running);
731         if (total_running) {
732                 pr_alert("alert: power off while %d tasks are still running!!\n",
733                          total_running);
734                 mdelay(50);
735                 pr_alert("alert: delayed 50 ms for the running tasks\n");
736                 vpu_service_dump(pservice);
737         }
738
739         pr_info("%s: power off...", dev_name(pservice->dev));
740
741         udelay(5);
742
743         list_for_each_entry_safe(data, n, &pservice->subdev_list, lnk_service) {
744                 if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
745                         clear_bit(MMU_ACTIVATED, &data->state);
746                         rockchip_iovmm_deactivate(data->dev);
747                 }
748         }
749         pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
750
751 #if VCODEC_CLOCK_ENABLE
752         if (pservice->pd_video)
753                 clk_disable_unprepare(pservice->pd_video);
754         if (pservice->hclk_vcodec)
755                 clk_disable_unprepare(pservice->hclk_vcodec);
756         if (pservice->aclk_vcodec)
757                 clk_disable_unprepare(pservice->aclk_vcodec);
758         if (pservice->clk_core)
759                 clk_disable_unprepare(pservice->clk_core);
760         if (pservice->clk_cabac)
761                 clk_disable_unprepare(pservice->clk_cabac);
762 #endif
763
764         atomic_add(1, &pservice->power_off_cnt);
765         wake_unlock(&pservice->wake_lock);
766         pr_info("done\n");
767 }
768
769 static inline void vpu_queue_power_off_work(struct vpu_service_info *pservice)
770 {
771         queue_delayed_work(system_wq, &pservice->power_off_work,
772                            VPU_POWER_OFF_DELAY);
773 }
774
775 static void vpu_power_off_work(struct work_struct *work_s)
776 {
777         struct delayed_work *dlwork = container_of(work_s,
778                         struct delayed_work, work);
779         struct vpu_service_info *pservice = container_of(dlwork,
780                         struct vpu_service_info, power_off_work);
781
782         if (mutex_trylock(&pservice->lock)) {
783                 vpu_service_power_off(pservice);
784                 mutex_unlock(&pservice->lock);
785         } else {
786                 /* Come back later if the device is busy... */
787                 vpu_queue_power_off_work(pservice);
788         }
789 }
790
791 static void vpu_service_power_on(struct vpu_service_info *pservice)
792 {
793         int ret;
794         ktime_t now = ktime_get();
795
796         if (ktime_to_ns(ktime_sub(now, pservice->last)) > NSEC_PER_SEC) {
797                 cancel_delayed_work_sync(&pservice->power_off_work);
798                 vpu_queue_power_off_work(pservice);
799                 pservice->last = now;
800         }
801         ret = atomic_add_unless(&pservice->enabled, 1, 1);
802         if (!ret)
803                 return;
804
805         pr_info("%s: power on\n", dev_name(pservice->dev));
806
807 #define BIT_VCODEC_CLK_SEL      (1<<10)
808         if (cpu_is_rk312x())
809                 writel_relaxed(readl_relaxed(RK_GRF_VIRT + RK312X_GRF_SOC_CON1)
810                         | BIT_VCODEC_CLK_SEL | (BIT_VCODEC_CLK_SEL << 16),
811                         RK_GRF_VIRT + RK312X_GRF_SOC_CON1);
812
813 #if VCODEC_CLOCK_ENABLE
814         if (pservice->aclk_vcodec)
815                 clk_prepare_enable(pservice->aclk_vcodec);
816         if (pservice->hclk_vcodec)
817                 clk_prepare_enable(pservice->hclk_vcodec);
818         if (pservice->clk_core)
819                 clk_prepare_enable(pservice->clk_core);
820         if (pservice->clk_cabac)
821                 clk_prepare_enable(pservice->clk_cabac);
822         if (pservice->pd_video)
823                 clk_prepare_enable(pservice->pd_video);
824 #endif
825
826         udelay(5);
827         atomic_add(1, &pservice->power_on_cnt);
828         wake_lock(&pservice->wake_lock);
829 }
830
831 static inline bool reg_check_interlace(struct vpu_reg *reg)
832 {
833         u32 type = (reg->reg[3] & (1 << 23));
834
835         return (type > 0);
836 }
837
838 static inline enum VPU_DEC_FMT reg_check_fmt(struct vpu_reg *reg)
839 {
840         enum VPU_DEC_FMT type = (enum VPU_DEC_FMT)((reg->reg[3] >> 28) & 0xf);
841
842         return type;
843 }
844
845 static inline int reg_probe_width(struct vpu_reg *reg)
846 {
847         int width_in_mb = reg->reg[4] >> 23;
848
849         return width_in_mb * 16;
850 }
851
852 static inline int reg_probe_hevc_y_stride(struct vpu_reg *reg)
853 {
854         int y_virstride = reg->reg[8];
855
856         return y_virstride;
857 }
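/*
 * Worked example (illustrative): for a 4096x2304 H.264 stream, bits 23..31 of
 * reg[4] hold the width in macroblocks (256 here), so reg_probe_width()
 * returns 4096; get_reg_freq() below then sees a width above 3200 and raises
 * the clock request to VPU_FREQ_600M.
 */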
858
859 static int vcodec_fd_to_iova(struct vpu_subdev_data *data,
860                              struct vpu_reg *reg, int fd)
861 {
862         struct vpu_service_info *pservice = data->pservice;
863         struct ion_handle *hdl;
864         int ret = 0;
865         struct vcodec_mem_region *mem_region;
866
867         hdl = ion_import_dma_buf(pservice->ion_client, fd);
868         if (IS_ERR(hdl)) {
869                 vpu_err("import dma-buf from fd %d failed\n", fd);
870                 return PTR_ERR(hdl);
871         }
872         mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
873
874         if (mem_region == NULL) {
875                 vpu_err("failed to allocate memory for iommu memory region\n");
876                 ion_free(pservice->ion_client, hdl);
877                 return -1;
878         }
879
880         mem_region->hdl = hdl;
881         if (data->mmu_dev)
882                 ret = ion_map_iommu(data->dev, pservice->ion_client,
883                                     mem_region->hdl, &mem_region->iova,
884                                     &mem_region->len);
885         else
886                 ret = ion_phys(pservice->ion_client,
887                                mem_region->hdl,
888                                (ion_phys_addr_t *)&mem_region->iova,
889                                (size_t *)&mem_region->len);
890
891         if (ret < 0) {
892                 vpu_err("fd %d ion map iommu failed\n", fd);
893                 kfree(mem_region);
894                 ion_free(pservice->ion_client, hdl);
895                 return ret;
896         }
897         INIT_LIST_HEAD(&mem_region->reg_lnk);
898         list_add_tail(&mem_region->reg_lnk, &reg->mem_region_list);
899         return mem_region->iova;
900 }
901
902 /*
903  * NOTE: rkvdec/rkhevc put the scaling list address in the pps buffer; the
904  * hardware reads it by the pps id found in the video stream data.
905  *
906  * So in the iommu case we need to translate that address. The address data
907  * also uses the 10bit fd + 22bit offset format.
908  * Because the userspace decoder does not give the pps id in the register
909  * file set, the kernel driver has to translate every scaling list address
910  * in the pps buffer, which means 256 pps entries for H.264 and 64 for H.265.
911  *
912  * To optimize this, the kernel driver asks the userspace decoder to set all
913  * scaling list addresses in the pps buffer to the single one that will be
914  * used by the current decoding task. The kernel driver then only needs to
915  * translate the first address and copy it to the whole pps buffer.
916  */
917 static void fill_scaling_list_addr_in_pps(
918                 struct vpu_subdev_data *data,
919                 struct vpu_reg *reg,
920                 char *pps,
921                 int pps_info_count,
922                 int pps_info_size,
923                 int scaling_list_addr_offset)
924 {
925         int base = scaling_list_addr_offset;
926         int scaling_fd = 0;
927         u32 scaling_offset;
928
929         scaling_offset  = (u32)pps[base + 0];
930         scaling_offset += (u32)pps[base + 1] << 8;
931         scaling_offset += (u32)pps[base + 2] << 16;
932         scaling_offset += (u32)pps[base + 3] << 24;
933
934         scaling_fd = scaling_offset & 0x3ff;
935         scaling_offset = scaling_offset >> 10;
936
937         if (scaling_fd > 0) {
938                 int i = 0;
939                 u32 tmp = vcodec_fd_to_iova(data, reg, scaling_fd);
940                 tmp += scaling_offset;
941
942                 for (i = 0; i < pps_info_count; i++, base += pps_info_size) {
943                         pps[base + 0] = (tmp >>  0) & 0xff;
944                         pps[base + 1] = (tmp >>  8) & 0xff;
945                         pps[base + 2] = (tmp >> 16) & 0xff;
946                         pps[base + 3] = (tmp >> 24) & 0xff;
947                 }
948         }
949 }
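/*
 * Worked example (illustrative): if the first scaling list address word in
 * the pps buffer reads 0x00001403, then scaling_fd = 0x1403 & 0x3ff = 3 and
 * scaling_offset = 0x1403 >> 10 = 5; fd 3 is mapped to an iova and
 * (iova + 5) is written back into every one of the pps_info_count entries.
 */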
950
951 static int vcodec_bufid_to_iova(struct vpu_subdev_data *data, const u8 *tbl,
952                                 int size, struct vpu_reg *reg,
953                                 struct extra_info_for_iommu *ext_inf)
954 {
955         struct vpu_service_info *pservice = data->pservice;
956         struct vpu_task_info *task = reg->task;
957         enum FORMAT_TYPE type;
958         struct ion_handle *hdl;
959         int ret = 0;
960         struct vcodec_mem_region *mem_region;
961         int i;
962         int offset = 0;
963
964         if (tbl == NULL || size <= 0) {
965                 dev_err(pservice->dev, "invalid input arguments\n");
966                 return -1;
967         }
968
969         if (task->get_fmt)
970                 type = task->get_fmt(reg->reg);
971         else {
972                 pr_err("invalid task with NULL get_fmt\n");
973                 return -1;
974         }
975
976         for (i = 0; i < size; i++) {
977                 int usr_fd = reg->reg[tbl[i]] & 0x3FF;
978
979                 /* if userspace does not set the fd at this register, skip it */
980                 if (usr_fd == 0)
981                         continue;
982
983                 /*
984                  * special offset scale case
985                  *
986                  * This translation handles the fd + offset case.
987                  * One register holds 32 bits. We need to pass both the
988                  * buffer file handle and the start address offset, so we
989                  * pack them together using the format below:
990                  *
991                  *  bits  0~9  buffer file handle, range 0 ~ 1023
992                  *  bits 10~31 offset, range 0 ~ 4M
993                  *
994                  * In the 4K case the offset can be larger than 4M, so for
995                  * the H.264 4K vpu/vpu2 decoder the offset is scaled by 16.
996                  * MPEG4 uses the same register for colmv and does not need
997                  * the scaling.
998                  *
999                  * RKVdec does not have this issue.
1000                  */
1001                 if ((type == FMT_H264D || type == FMT_VP9D) &&
1002                     task->reg_dir_mv > 0 && task->reg_dir_mv == tbl[i])
1003                         offset = reg->reg[tbl[i]] >> 10 << 4;
1004                 else
1005                         offset = reg->reg[tbl[i]] >> 10;
1006
1007                 vpu_debug(DEBUG_IOMMU, "pos %3d fd %3d offset %10d\n",
1008                           tbl[i], usr_fd, offset);
1009
1010                 hdl = ion_import_dma_buf(pservice->ion_client, usr_fd);
1011                 if (IS_ERR(hdl)) {
1012                         dev_err(pservice->dev,
1013                                 "import dma-buf from fd %d failed, reg[%d]\n",
1014                                 usr_fd, tbl[i]);
1015                         return PTR_ERR(hdl);
1016                 }
1017
1018                 if (task->reg_pps > 0 && task->reg_pps == tbl[i]) {
1019                         int pps_info_offset;
1020                         int pps_info_count;
1021                         int pps_info_size;
1022                         int scaling_list_addr_offset;
1023
1024                         switch (type) {
1025                         case FMT_H264D: {
1026                                 pps_info_offset = offset;
1027                                 pps_info_count = 256;
1028                                 pps_info_size = 32;
1029                                 scaling_list_addr_offset = 23;
1030                         } break;
1031                         case FMT_H265D: {
1032                                 pps_info_offset = 0;
1033                                 pps_info_count = 64;
1034                                 pps_info_size = 80;
1035                                 scaling_list_addr_offset = 74;
1036                         } break;
1037                         default: {
1038                                 pps_info_offset = 0;
1039                                 pps_info_count = 0;
1040                                 pps_info_size = 0;
1041                                 scaling_list_addr_offset = 0;
1042                         } break;
1043                         }
1044
1045                         vpu_debug(DEBUG_PPS_FILL,
1046                                   "scaling list filling parameter:\n");
1047                         vpu_debug(DEBUG_PPS_FILL,
1048                                   "pps_info_offset %d\n", pps_info_offset);
1049                         vpu_debug(DEBUG_PPS_FILL,
1050                                   "pps_info_count  %d\n", pps_info_count);
1051                         vpu_debug(DEBUG_PPS_FILL,
1052                                   "pps_info_size   %d\n", pps_info_size);
1053                         vpu_debug(DEBUG_PPS_FILL,
1054                                   "scaling_list_addr_offset %d\n",
1055                                   scaling_list_addr_offset);
1056
1057                         if (pps_info_count) {
1058                                 char *pps = (char *)ion_map_kernel(
1059                                                 pservice->ion_client, hdl);
1060                                 vpu_debug(DEBUG_PPS_FILL,
1061                                           "scaling list setting pps %p\n", pps);
1062                                 pps += pps_info_offset;
1063
1064                                 fill_scaling_list_addr_in_pps(
1065                                                 data, reg, pps,
1066                                                 pps_info_count,
1067                                                 pps_info_size,
1068                                                 scaling_list_addr_offset);
1069                         }
1070                 }
1071
1072                 mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
1073
1074                 if (!mem_region) {
1075                         ion_free(pservice->ion_client, hdl);
1076                         return -1;
1077                 }
1078
1079                 mem_region->hdl = hdl;
1080                 mem_region->reg_idx = tbl[i];
1081
1082                 if (data->mmu_dev)
1083                         ret = ion_map_iommu(data->dev,
1084                                             pservice->ion_client,
1085                                             mem_region->hdl,
1086                                             &mem_region->iova,
1087                                             &mem_region->len);
1088                 else
1089                         ret = ion_phys(pservice->ion_client,
1090                                        mem_region->hdl,
1091                                        (ion_phys_addr_t *)&mem_region->iova,
1092                                        (size_t *)&mem_region->len);
1093
1094                 if (ret < 0) {
1095                         dev_err(pservice->dev, "reg %d fd %d ion map iommu failed\n",
1096                                 tbl[i], usr_fd);
1097                         kfree(mem_region);
1098                         ion_free(pservice->ion_client, hdl);
1099                         return ret;
1100                 }
1101
1102                 /*
1103                  * special case for vpu dec register 12: record the decoded
1104                  * length (a hack for reporting the decoded length back)
1105                  * NOTE: not a perfect fix, the fd itself is not recorded
1106                  */
1107                 if (task->reg_len > 0 && task->reg_len == tbl[i]) {
1108                         reg->dec_base = mem_region->iova + offset;
1109                         vpu_debug(DEBUG_REGISTER, "dec_set %08x\n",
1110                                   reg->dec_base);
1111                 }
1112
1113                 reg->reg[tbl[i]] = mem_region->iova + offset;
1114                 INIT_LIST_HEAD(&mem_region->reg_lnk);
1115                 list_add_tail(&mem_region->reg_lnk, &reg->mem_region_list);
1116         }
1117
1118         if (ext_inf != NULL && ext_inf->magic == EXTRA_INFO_MAGIC) {
1119                 for (i = 0; i < ext_inf->cnt; i++) {
1120                         vpu_debug(DEBUG_IOMMU, "reg[%d] + offset %d\n",
1121                                   ext_inf->elem[i].index,
1122                                   ext_inf->elem[i].offset);
1123                         reg->reg[ext_inf->elem[i].index] +=
1124                                 ext_inf->elem[i].offset;
1125                 }
1126         }
1127
1128         return 0;
1129 }
1130
1131 static int vcodec_reg_address_translate(struct vpu_subdev_data *data,
1132                                         struct vpu_reg *reg,
1133                                         struct extra_info_for_iommu *ext_inf)
1134 {
1135         enum FORMAT_TYPE type = reg->task->get_fmt(reg->reg);
1136
1137         if (type < FMT_TYPE_BUTT) {
1138                 const struct vpu_trans_info *info = &reg->trans[type];
1139                 const u8 *tbl = info->table;
1140                 int size = info->count;
1141
1142                 return vcodec_bufid_to_iova(data, tbl, size, reg, ext_inf);
1143         }
1144         pr_err("found invalid format type!\n");
1145         return -1;
1146 }
1147
1148 static void get_reg_freq(struct vpu_subdev_data *data, struct vpu_reg *reg)
1149 {
1150
1151         if (!soc_is_rk2928g()) {
1152                 if (reg->type == VPU_DEC || reg->type == VPU_DEC_PP) {
1153                         if (reg_check_fmt(reg) == VPU_DEC_FMT_H264) {
1154                                 if (reg_probe_width(reg) > 3200) {
1155                                         /*raise frequency for 4k avc.*/
1156                                         reg->freq = VPU_FREQ_600M;
1157                                 }
1158                         } else {
1159                                 if (reg_check_interlace(reg))
1160                                         reg->freq = VPU_FREQ_400M;
1161                         }
1162                 }
1163                 if (data->hw_id == HEVC_ID) {
1164                         if (reg_probe_hevc_y_stride(reg) > 60000)
1165                                 reg->freq = VPU_FREQ_400M;
1166                 }
1167                 if (reg->type == VPU_PP)
1168                         reg->freq = VPU_FREQ_400M;
1169         }
1170 }
1171
1172 static struct vpu_reg *reg_init(struct vpu_subdev_data *data,
1173                                 struct vpu_session *session,
1174                                 void __user *src, u32 size)
1175 {
1176         struct vpu_service_info *pservice = data->pservice;
1177         int extra_size = 0;
1178         struct extra_info_for_iommu extra_info;
1179         struct vpu_reg *reg = kzalloc(sizeof(*reg) + data->reg_size,
1180                                       GFP_KERNEL);
1181
1182         vpu_debug_enter();
1183
1184         if (NULL == reg) {
1185                 vpu_err("error: kzalloc failed in reg_init\n");
1186                 return NULL;
1187         }
1188
1189         if (size > data->reg_size) {
1190                 pr_err("vpu reg size %u is larger than hw reg size %u\n",
1191                        size, data->reg_size);
1192                 extra_size = size - data->reg_size;
1193                 size = data->reg_size;
1194         }
1195         reg->session = session;
1196         reg->data = data;
1197         reg->type = session->type;
1198         reg->size = size;
1199         reg->freq = VPU_FREQ_DEFAULT;
1200         reg->task = &data->task_info[session->type];
1201         reg->trans = data->trans_info;
1202         reg->reg = (u32 *)&reg[1];
1203         INIT_LIST_HEAD(&reg->session_link);
1204         INIT_LIST_HEAD(&reg->status_link);
1205
1206         INIT_LIST_HEAD(&reg->mem_region_list);
1207
1208         if (copy_from_user(&reg->reg[0], (void __user *)src, size)) {
1209                 vpu_err("error: copy_from_user failed in reg_init\n");
1210                 kfree(reg);
1211                 return NULL;
1212         }
1213
1214         if (copy_from_user(&extra_info, (u8 *)src + size, extra_size)) {
1215                 vpu_err("error: copy_from_user failed in reg_init\n");
1216                 kfree(reg);
1217                 return NULL;
1218         }
1219
1220         if (0 > vcodec_reg_address_translate(data, reg, &extra_info)) {
1221                 int i = 0;
1222
1223                 vpu_err("error: translate reg address failed, dumping regs\n");
1224                 for (i = 0; i < size >> 2; i++)
1225                         pr_err("reg[%02d]: %08x\n", i, reg->reg[i]);
1226
1227                 kfree(reg);
1228                 return NULL;
1229         }
1230
1231         mutex_lock(&pservice->lock);
1232         list_add_tail(&reg->status_link, &pservice->waiting);
1233         list_add_tail(&reg->session_link, &session->waiting);
1234         mutex_unlock(&pservice->lock);
1235
1236         if (pservice->auto_freq)
1237                 get_reg_freq(data, reg);
1238
1239         vpu_debug_leave();
1240         return reg;
1241 }
1242
1243 static void reg_deinit(struct vpu_subdev_data *data, struct vpu_reg *reg)
1244 {
1245         struct vpu_service_info *pservice = data->pservice;
1246         struct vcodec_mem_region *mem_region = NULL, *n;
1247
1248         list_del_init(&reg->session_link);
1249         list_del_init(&reg->status_link);
1250         if (reg == pservice->reg_codec)
1251                 pservice->reg_codec = NULL;
1252         if (reg == pservice->reg_pproc)
1253                 pservice->reg_pproc = NULL;
1254
1255         /* release memory region attach to this registers table. */
1256         list_for_each_entry_safe(mem_region, n,
1257                         &reg->mem_region_list, reg_lnk) {
1258                 ion_free(pservice->ion_client, mem_region->hdl);
1259                 list_del_init(&mem_region->reg_lnk);
1260                 kfree(mem_region);
1261         }
1262
1263         kfree(reg);
1264 }
1265
1266 static void reg_from_wait_to_run(struct vpu_service_info *pservice,
1267                                  struct vpu_reg *reg)
1268 {
1269         vpu_debug_enter();
1270         list_del_init(&reg->status_link);
1271         list_add_tail(&reg->status_link, &pservice->running);
1272
1273         list_del_init(&reg->session_link);
1274         list_add_tail(&reg->session_link, &reg->session->running);
1275         vpu_debug_leave();
1276 }
1277
1278 static void reg_copy_from_hw(struct vpu_reg *reg, u32 *src, u32 count)
1279 {
1280         int i;
1281         u32 *dst = reg->reg;
1282
1283         vpu_debug_enter();
1284         for (i = 0; i < count; i++, src++)
1285                 *dst++ = readl_relaxed(src);
1286
1287         dst = (u32 *)&reg->reg[0];
1288         for (i = 0; i < count; i++)
1289                 vpu_debug(DEBUG_GET_REG, "get reg[%02d] %08x\n", i, dst[i]);
1290
1291         vpu_debug_leave();
1292 }
1293
1294 static void reg_from_run_to_done(struct vpu_subdev_data *data,
1295                                  struct vpu_reg *reg)
1296 {
1297         struct vpu_service_info *pservice = data->pservice;
1298         struct vpu_hw_info *hw_info = data->hw_info;
1299         struct vpu_task_info *task = reg->task;
1300
1301         vpu_debug_enter();
1302
1303         list_del_init(&reg->status_link);
1304         list_add_tail(&reg->status_link, &pservice->done);
1305
1306         list_del_init(&reg->session_link);
1307         list_add_tail(&reg->session_link, &reg->session->done);
1308
1309         switch (reg->type) {
1310         case VPU_ENC: {
1311                 pservice->reg_codec = NULL;
1312                 reg_copy_from_hw(reg, data->enc_dev.regs, hw_info->enc_reg_num);
1313                 reg->reg[task->reg_irq] = pservice->irq_status;
1314         } break;
1315         case VPU_DEC: {
1316                 pservice->reg_codec = NULL;
1317                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1318
1319                 /* revert hack for decoded length */
1320                 if (task->reg_len > 0) {
1321                         int reg_len = task->reg_len;
1322                         u32 dec_get = reg->reg[reg_len];
1323                         s32 dec_length = dec_get - reg->dec_base;
1324
1325                         vpu_debug(DEBUG_REGISTER,
1326                                   "dec_get %08x dec_length %d\n",
1327                                   dec_get, dec_length);
1328                         reg->reg[reg_len] = dec_length << 10;
1329                 }
1330
1331                 reg->reg[task->reg_irq] = pservice->irq_status;
1332         } break;
1333         case VPU_PP: {
1334                 pservice->reg_pproc = NULL;
1335                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1336                 writel_relaxed(0, data->dec_dev.regs + task->reg_irq);
1337         } break;
1338         case VPU_DEC_PP: {
1339                 u32 pipe_mode;
1340                 u32 *regs = data->dec_dev.regs;
1341
1342                 pservice->reg_codec = NULL;
1343                 pservice->reg_pproc = NULL;
1344
1345                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1346
1347                 /* NOTE: remove pp pipeline mode flag first */
1348                 pipe_mode = readl_relaxed(regs + task->reg_pipe);
1349                 pipe_mode &= ~task->pipe_mask;
1350                 writel_relaxed(pipe_mode, regs + task->reg_pipe);
1351
1352                 /* revert hack for decoded length */
1353                 if (task->reg_len > 0) {
1354                         int reg_len = task->reg_len;
1355                         u32 dec_get = reg->reg[reg_len];
1356                         s32 dec_length = dec_get - reg->dec_base;
1357
1358                         vpu_debug(DEBUG_REGISTER,
1359                                   "dec_get %08x dec_length %d\n",
1360                                   dec_get, dec_length);
1361                         reg->reg[reg_len] = dec_length << 10;
1362                 }
1363
1364                 reg->reg[task->reg_irq] = pservice->irq_status;
1365         } break;
1366         default: {
1367                 vpu_err("error: copy reg from hw with unknown type %d\n",
1368                         reg->type);
1369         } break;
1370         }
1371         vcodec_exit_mode(data);
1372
1373         atomic_sub(1, &reg->session->task_running);
1374         atomic_sub(1, &pservice->total_running);
1375         wake_up(&reg->session->wait);
1376
1377         vpu_debug_leave();
1378 }
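/*
 * Worked example (illustrative): if the stream buffer was mapped at iova
 * 0x10000000 (recorded in reg->dec_base) and the hardware reports 0x10000400
 * in the length register, dec_length is 0x400 bytes; it is stored back as
 * 0x400 << 10 so that userspace, which parses the register in the
 * 10bit fd + 22bit offset format, recovers the length from the offset field.
 */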
1379
1380 static void vpu_service_set_freq(struct vpu_service_info *pservice,
1381                                  struct vpu_reg *reg)
1382 {
1383         enum VPU_FREQ curr = atomic_read(&pservice->freq_status);
1384
1385         if (curr == reg->freq)
1386                 return;
1387
1388         atomic_set(&pservice->freq_status, reg->freq);
1389         switch (reg->freq) {
1390         case VPU_FREQ_200M: {
1391                 clk_set_rate(pservice->aclk_vcodec, 200*MHZ);
1392         } break;
1393         case VPU_FREQ_266M: {
1394                 clk_set_rate(pservice->aclk_vcodec, 266*MHZ);
1395         } break;
1396         case VPU_FREQ_300M: {
1397                 clk_set_rate(pservice->aclk_vcodec, 300*MHZ);
1398         } break;
1399         case VPU_FREQ_400M: {
1400                 clk_set_rate(pservice->aclk_vcodec, 400*MHZ);
1401         } break;
1402         case VPU_FREQ_500M: {
1403                 clk_set_rate(pservice->aclk_vcodec, 500*MHZ);
1404         } break;
1405         case VPU_FREQ_600M: {
1406                 clk_set_rate(pservice->aclk_vcodec, 600*MHZ);
1407         } break;
1408         default: {
1409                 unsigned long rate = 300*MHZ;
1410
1411                 if (soc_is_rk2928g())
1412                         rate = 400*MHZ;
1413
1414                 clk_set_rate(pservice->aclk_vcodec, rate);
1415         } break;
1416         }
1417 }
1418
1419 static void reg_copy_to_hw(struct vpu_subdev_data *data, struct vpu_reg *reg)
1420 {
1421         struct vpu_service_info *pservice = data->pservice;
1422         struct vpu_task_info *task = reg->task;
1423         struct vpu_hw_info *hw_info = data->hw_info;
1424         int i;
1425         u32 *src = (u32 *)&reg->reg[0];
1426         u32 enable_mask = task->enable_mask;
1427         u32 gating_mask = task->gating_mask;
1428         u32 reg_en = task->reg_en;
1429
1430         vpu_debug_enter();
1431
1432         atomic_add(1, &pservice->total_running);
1433         atomic_add(1, &reg->session->task_running);
1434
1435         if (pservice->auto_freq)
1436                 vpu_service_set_freq(pservice, reg);
1437
1438         vcodec_enter_mode(data);
1439
1440         switch (reg->type) {
1441         case VPU_ENC: {
1442                 u32 *dst = data->enc_dev.regs;
1443                 u32 base = 0;
1444                 u32 end  = hw_info->enc_reg_num;
1445                 /* u32 reg_gating = task->reg_gating; */
1446
1447                 pservice->reg_codec = reg;
1448
1449                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1450                           base, end, reg_en, enable_mask, gating_mask);
1451
1452                 VEPU_CLEAN_CACHE(dst);
1453
1454                 if (debug & DEBUG_SET_REG)
1455                         for (i = base; i < end; i++)
1456                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1457                                           i, src[i]);
1458
1459                 /*
1460                  * NOTE: the encoder needs to set up its mode first
1461                  */
1462                 writel_relaxed(src[reg_en] & enable_mask, dst + reg_en);
1463
1464                 /* NOTE: encoder gating is not on the enable register */
1465                 /* src[reg_gating] |= gating_mask; */
1466
1467                 for (i = base; i < end; i++) {
1468                         if (i != reg_en)
1469                                 writel_relaxed(src[i], dst + i);
1470                 }
1471
1472                 writel(src[reg_en], dst + reg_en);
1473                 dsb(sy);
1474
1475                 time_record(reg->task, 0);
1476         } break;
1477         case VPU_DEC: {
1478                 u32 *dst = data->dec_dev.regs;
1479                 u32 len = hw_info->dec_reg_num;
1480                 u32 base = hw_info->base_dec;
1481                 u32 end  = hw_info->end_dec;
1482
1483                 pservice->reg_codec = reg;
1484
1485                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1486                           base, end, reg_en, enable_mask, gating_mask);
1487
1488                 VDPU_CLEAN_CACHE(dst);
1489
1490                 /* on rkvdec set the cache line size to 64 bytes */
1491                 if (pservice->dev_id == VCODEC_DEVICE_ID_RKVDEC) {
1492                         u32 *cache_base = dst + 0x100;
1493                         u32 val = (debug & DEBUG_CACHE_32B) ? (0x3) : (0x13);
1494                         writel_relaxed(val, cache_base + 0x07);
1495                         writel_relaxed(val, cache_base + 0x17);
1496                 }
1497
1498                 if (debug & DEBUG_SET_REG)
1499                         for (i = 0; i < len; i++)
1500                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1501                                           i, src[i]);
1502
1503                 /*
1504                  * NOTE: the end register is invalid, do NOT write to it;
1505                  *       the base register, however, must be written
1506                  */
1507                 for (i = base; i < end; i++) {
1508                         if (i != reg_en)
1509                                 writel_relaxed(src[i], dst + i);
1510                 }
1511
1512                 writel(src[reg_en] | gating_mask, dst + reg_en);
1513                 dsb(sy);
1514
1515                 time_record(reg->task, 0);
1516         } break;
1517         case VPU_PP: {
1518                 u32 *dst = data->dec_dev.regs;
1519                 u32 base = hw_info->base_pp;
1520                 u32 end  = hw_info->end_pp;
1521
1522                 pservice->reg_pproc = reg;
1523
1524                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1525                           base, end, reg_en, enable_mask, gating_mask);
1526
1527                 if (debug & DEBUG_SET_REG)
1528                         for (i = base; i < end; i++)
1529                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1530                                           i, src[i]);
1531
1532                 for (i = base; i < end; i++) {
1533                         if (i != reg_en)
1534                                 writel_relaxed(src[i], dst + i);
1535                 }
1536
1537                 writel(src[reg_en] | gating_mask, dst + reg_en);
1538                 dsb(sy);
1539
1540                 time_record(reg->task, 0);
1541         } break;
1542         case VPU_DEC_PP: {
1543                 u32 *dst = data->dec_dev.regs;
1544                 u32 base = hw_info->base_dec_pp;
1545                 u32 end  = hw_info->end_dec_pp;
1546
1547                 pservice->reg_codec = reg;
1548                 pservice->reg_pproc = reg;
1549
1550                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1551                           base, end, reg_en, enable_mask, gating_mask);
1552
1553                 /* VDPU_SOFT_RESET(dst); */
1554                 VDPU_CLEAN_CACHE(dst);
1555
1556                 if (debug & DEBUG_SET_REG)
1557                         for (i = base; i < end; i++)
1558                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1559                                           i, src[i]);
1560
1561                 for (i = base; i < end; i++) {
1562                         if (i != reg_en)
1563                                 writel_relaxed(src[i], dst + i);
1564                 }
1565
1566                 /* NOTE: dec output must be disabled */
1567
1568                 writel(src[reg_en] | gating_mask, dst + reg_en);
1569                 dsb(sy);
1570
1571                 time_record(reg->task, 0);
1572         } break;
1573         default: {
1574                 vpu_err("error: unsupported session type %d\n", reg->type);
1575                 atomic_sub(1, &pservice->total_running);
1576                 atomic_sub(1, &reg->session->task_running);
1577         } break;
1578         }
1579
1580         vpu_debug_leave();
1581 }
1582
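     /*
      * Scheduler for the shared hardware: look at the head of the waiting
      * list and decide whether it may be started now. A register set runs
      * only when the unit it needs is idle (reg_codec / reg_pproc NULL);
      * with auto_freq enabled nothing is started while another unit is
      * busy, because the clock rate cannot be changed under load. A
      * pending reset request is honoured first, but only when the
      * hardware is completely idle.
      */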
1583 static void try_set_reg(struct vpu_subdev_data *data)
1584 {
1585         struct vpu_service_info *pservice = data->pservice;
1586
1587         vpu_debug_enter();
1588         if (!list_empty(&pservice->waiting)) {
1589                 struct vpu_reg *reg_codec = pservice->reg_codec;
1590                 struct vpu_reg *reg_pproc = pservice->reg_pproc;
1591                 int can_set = 0;
1592                 bool change_able = (reg_codec == NULL) && (reg_pproc == NULL);
1593                 int reset_request = atomic_read(&pservice->reset_request);
1594                 struct vpu_reg *reg = list_entry(pservice->waiting.next,
1595                                 struct vpu_reg, status_link);
1596
1597                 vpu_service_power_on(pservice);
1598
1599                 if (change_able || !reset_request) {
1600                         switch (reg->type) {
1601                         case VPU_ENC: {
1602                                 if (change_able)
1603                                         can_set = 1;
1604                         } break;
1605                         case VPU_DEC: {
1606                                 if (reg_codec == NULL)
1607                                         can_set = 1;
1608                                 if (pservice->auto_freq && (reg_pproc != NULL))
1609                                         can_set = 0;
1610                         } break;
1611                         case VPU_PP: {
1612                                 if (reg_codec == NULL) {
1613                                         if (reg_pproc == NULL)
1614                                                 can_set = 1;
1615                                 } else {
1616                                         if ((reg_codec->type == VPU_DEC) &&
1617                                             (reg_pproc == NULL))
1618                                                 can_set = 1;
1619
1620                                         /*
1621                                          * NOTE:
1622                                          * cannot change the frequency
1623                                          * while the VPU is working
1624                                          */
1625                                         if (pservice->auto_freq)
1626                                                 can_set = 0;
1627                                 }
1628                         } break;
1629                         case VPU_DEC_PP: {
1630                                 if (change_able)
1631                                         can_set = 1;
1632                         } break;
1633                         default: {
1634                                 pr_err("undefined reg type %d\n", reg->type);
1635                         } break;
1636                         }
1637                 }
1638
1639                 /* then check reset request */
1640                 if (reset_request && !change_able)
1641                         reset_request = 0;
1642
1643                 /* do reset before setting registers */
1644                 if (reset_request)
1645                         vpu_reset(data);
1646
1647                 if (can_set) {
1648                         reg_from_wait_to_run(pservice, reg);
1649                         reg_copy_to_hw(reg->data, reg);
1650                 }
1651         }
1652         vpu_debug_leave();
1653 }
1654
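     /*
      * Copy a finished register set back to user space. Only the window
      * that belongs to the task type is returned, starting at the
      * per-type base offset from hw_info (note that plain VPU_DEC uses
      * base_dec_pp here), then the kernel copy is released via
      * reg_deinit().
      */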
1655 static int return_reg(struct vpu_subdev_data *data,
1656                       struct vpu_reg *reg, u32 __user *dst)
1657 {
1658         struct vpu_hw_info *hw_info = data->hw_info;
1659         size_t size = reg->size;
1660         u32 base;
1661
1662         vpu_debug_enter();
1663         switch (reg->type) {
1664         case VPU_ENC: {
1665                 base = 0;
1666         } break;
1667         case VPU_DEC: {
1668                 base = hw_info->base_dec_pp;
1669         } break;
1670         case VPU_PP: {
1671                 base = hw_info->base_pp;
1672         } break;
1673         case VPU_DEC_PP: {
1674                 base = hw_info->base_dec_pp;
1675         } break;
1676         default: {
1677                 vpu_err("error: copy reg to user with unknown type %d\n",
1678                         reg->type);
1679                 return -EFAULT;
1680         } break;
1681         }
1682
1683         if (copy_to_user(dst, &reg->reg[base], size)) {
1684                 vpu_err("error: return_reg copy_to_user failed\n");
1685                 return -EFAULT;
1686         }
1687
1688         reg_deinit(data, reg);
1689         vpu_debug_leave();
1690         return 0;
1691 }
1692
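     /*
      * Main ioctl entry point. The expected user-space flow is: set the
      * client type once, queue register sets with VPU_IOC_SET_REG and
      * collect results with VPU_IOC_GET_REG, which blocks until a task
      * completes or VPU_TIMEOUT_DELAY expires.
      *
      * Illustrative sketch only -- the device node name and the register
      * buffer size are assumptions, the request layout follows struct
      * vpu_request:
      *
      *   int fd = open("/dev/vpu_service", O_RDWR);
      *   u32 regs[64];                          // hypothetical size
      *   struct vpu_request req = { .req = regs, .size = sizeof(regs) };
      *
      *   ioctl(fd, VPU_IOC_SET_CLIENT_TYPE, VPU_DEC);
      *   ioctl(fd, VPU_IOC_SET_REG, &req);      // queue the task
      *   ioctl(fd, VPU_IOC_GET_REG, &req);      // wait and read back
      */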
1693 static long vpu_service_ioctl(struct file *filp, unsigned int cmd,
1694                               unsigned long arg)
1695 {
1696         struct vpu_subdev_data *data =
1697                 container_of(filp->f_path.dentry->d_inode->i_cdev,
1698                              struct vpu_subdev_data, cdev);
1699         struct vpu_service_info *pservice = data->pservice;
1700         struct vpu_session *session = (struct vpu_session *)filp->private_data;
1701
1702         vpu_debug_enter();
1703         if (NULL == session)
1704                 return -EINVAL;
1705
1706         switch (cmd) {
1707         case VPU_IOC_SET_CLIENT_TYPE: {
1708                 session->type = (enum VPU_CLIENT_TYPE)arg;
1709                 vpu_debug(DEBUG_IOCTL, "pid %d set client type %d\n",
1710                           session->pid, session->type);
1711         } break;
1712         case VPU_IOC_GET_HW_FUSE_STATUS: {
1713                 struct vpu_request req;
1714
1715                 vpu_debug(DEBUG_IOCTL, "pid %d get hw status %d\n",
1716                           session->pid, session->type);
1717                 if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
1718                         vpu_err("error: get hw status copy_from_user failed\n");
1719                         return -EFAULT;
1720                 } else {
1721                         void *config = (session->type != VPU_ENC) ?
1722                                        ((void *)&pservice->dec_config) :
1723                                        ((void *)&pservice->enc_config);
1724                         size_t size = (session->type != VPU_ENC) ?
1725                                       (sizeof(struct vpu_dec_config)) :
1726                                       (sizeof(struct vpu_enc_config));
1727                         if (copy_to_user((void __user *)req.req,
1728                                          config, size)) {
1729                                 vpu_err("error: get hw status copy_to_user failed type %d\n",
1730                                         session->type);
1731                                 return -EFAULT;
1732                         }
1733                 }
1734         } break;
1735         case VPU_IOC_SET_REG: {
1736                 struct vpu_request req;
1737                 struct vpu_reg *reg;
1738
1739                 vpu_debug(DEBUG_IOCTL, "pid %d set reg type %d\n",
1740                           session->pid, session->type);
1741                 if (copy_from_user(&req, (void __user *)arg,
1742                                    sizeof(struct vpu_request))) {
1743                         vpu_err("error: set reg copy_from_user failed\n");
1744                         return -EFAULT;
1745                 }
1746                 reg = reg_init(data, session, (void __user *)req.req, req.size);
1747                 if (NULL == reg) {
1748                         return -EFAULT;
1749                 } else {
1750                         mutex_lock(&pservice->lock);
1751                         try_set_reg(data);
1752                         mutex_unlock(&pservice->lock);
1753                 }
1754         } break;
1755         case VPU_IOC_GET_REG: {
1756                 struct vpu_request req;
1757                 struct vpu_reg *reg;
1758                 int ret;
1759
1760                 vpu_debug(DEBUG_IOCTL, "pid %d get reg type %d\n",
1761                           session->pid, session->type);
1762                 if (copy_from_user(&req, (void __user *)arg,
1763                                    sizeof(struct vpu_request))) {
1764                         vpu_err("error: get reg copy_from_user failed\n");
1765                         return -EFAULT;
1766                 }
1767
1768                 ret = wait_event_timeout(session->wait,
1769                                          !list_empty(&session->done),
1770                                          VPU_TIMEOUT_DELAY);
1771
1772                 if (!list_empty(&session->done)) {
1773                         if (ret < 0)
1774                                 vpu_err("warning: pid %d wait task error ret %d\n",
1775                                         session->pid, ret);
1776                         ret = 0;
1777                 } else {
1778                         if (unlikely(ret < 0)) {
1779                                 vpu_err("error: pid %d wait task ret %d\n",
1780                                         session->pid, ret);
1781                         } else if (ret == 0) {
1782                                 vpu_err("error: pid %d wait %d task done timeout\n",
1783                                         session->pid,
1784                                         atomic_read(&session->task_running));
1785                                 ret = -ETIMEDOUT;
1786                         }
1787                 }
1788
1789                 if (ret < 0) {
1790                         int task_running = atomic_read(&session->task_running);
1791
1792                         mutex_lock(&pservice->lock);
1793                         vpu_service_dump(pservice);
1794                         if (task_running) {
1795                                 atomic_set(&session->task_running, 0);
1796                                 atomic_sub(task_running,
1797                                            &pservice->total_running);
1798                                 pr_err("%d task(s) still running but not returned, resetting hardware...",
1799                                        task_running);
1800                                 vpu_reset(data);
1801                                 pr_err("done\n");
1802                         }
1803                         vpu_service_session_clear(data, session);
1804                         mutex_unlock(&pservice->lock);
1805                         return ret;
1806                 }
1807
1808                 mutex_lock(&pservice->lock);
1809                 reg = list_entry(session->done.next,
1810                                  struct vpu_reg, session_link);
1811                 return_reg(data, reg, (u32 __user *)req.req);
1812                 mutex_unlock(&pservice->lock);
1813         } break;
1814         case VPU_IOC_PROBE_IOMMU_STATUS: {
1815                 int iommu_enable = 1;
1816
1817                 vpu_debug(DEBUG_IOCTL, "VPU_IOC_PROBE_IOMMU_STATUS\n");
1818
1819                 if (copy_to_user((void __user *)arg,
1820                                  &iommu_enable, sizeof(int))) {
1821                         vpu_err("error: iommu status copy_to_user failed\n");
1822                         return -EFAULT;
1823                 }
1824         } break;
1825         default: {
1826                 vpu_err("error: unknown vpu service ioctl cmd %x\n", cmd);
1827         } break;
1828         }
1829         vpu_debug_leave();
1830         return 0;
1831 }
1832
1833 #ifdef CONFIG_COMPAT
1834 static long compat_vpu_service_ioctl(struct file *filp, unsigned int cmd,
1835                                      unsigned long arg)
1836 {
1837         struct vpu_subdev_data *data =
1838                 container_of(filp->f_path.dentry->d_inode->i_cdev,
1839                              struct vpu_subdev_data, cdev);
1840         struct vpu_service_info *pservice = data->pservice;
1841         struct vpu_session *session = (struct vpu_session *)filp->private_data;
1842
1843         vpu_debug_enter();
1844         vpu_debug(3, "cmd %x, COMPAT_VPU_IOC_SET_CLIENT_TYPE %x\n", cmd,
1845                   (u32)COMPAT_VPU_IOC_SET_CLIENT_TYPE);
1846         if (NULL == session)
1847                 return -EINVAL;
1848
1849         switch (cmd) {
1850         case COMPAT_VPU_IOC_SET_CLIENT_TYPE: {
1851                 session->type = (enum VPU_CLIENT_TYPE)arg;
1852                 vpu_debug(DEBUG_IOCTL, "compat set client type %d\n",
1853                           session->type);
1854         } break;
1855         case COMPAT_VPU_IOC_GET_HW_FUSE_STATUS: {
1856                 struct compat_vpu_request req;
1857
1858                 vpu_debug(DEBUG_IOCTL, "compat get hw status %d\n",
1859                           session->type);
1860                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1861                                    sizeof(struct compat_vpu_request))) {
1862                         vpu_err("error: compat get hw status copy_from_user failed\n");
1863                         return -EFAULT;
1864                 } else {
1865                         void *config = (session->type != VPU_ENC) ?
1866                                        ((void *)&pservice->dec_config) :
1867                                        ((void *)&pservice->enc_config);
1868                         size_t size = (session->type != VPU_ENC) ?
1869                                       (sizeof(struct vpu_dec_config)) :
1870                                       (sizeof(struct vpu_enc_config));
1871
1872                         if (copy_to_user(compat_ptr((compat_uptr_t)req.req),
1873                                          config, size)) {
1874                                 vpu_err("error: compat get hw status copy_to_user failed type %d\n",
1875                                         session->type);
1876                                 return -EFAULT;
1877                         }
1878                 }
1879         } break;
1880         case COMPAT_VPU_IOC_SET_REG: {
1881                 struct compat_vpu_request req;
1882                 struct vpu_reg *reg;
1883
1884                 vpu_debug(DEBUG_IOCTL, "compat set reg type %d\n",
1885                           session->type);
1886                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1887                                    sizeof(struct compat_vpu_request))) {
1888                         vpu_err("compat set_reg copy_from_user failed\n");
1889                         return -EFAULT;
1890                 }
1891                 reg = reg_init(data, session,
1892                                compat_ptr((compat_uptr_t)req.req), req.size);
1893                 if (NULL == reg) {
1894                         return -EFAULT;
1895                 } else {
1896                         mutex_lock(&pservice->lock);
1897                         try_set_reg(data);
1898                         mutex_unlock(&pservice->lock);
1899                 }
1900         } break;
1901         case COMPAT_VPU_IOC_GET_REG: {
1902                 struct compat_vpu_request req;
1903                 struct vpu_reg *reg;
1904                 int ret;
1905
1906                 vpu_debug(DEBUG_IOCTL, "compat get reg type %d\n",
1907                           session->type);
1908                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1909                                    sizeof(struct compat_vpu_request))) {
1910                         vpu_err("compat get reg copy_from_user failed\n");
1911                         return -EFAULT;
1912                 }
1913
1914                 ret = wait_event_timeout(session->wait,
1915                                          !list_empty(&session->done),
1916                                          VPU_TIMEOUT_DELAY);
1917
1918                 if (!list_empty(&session->done)) {
1919                         if (ret < 0)
1920                                 vpu_err("warning: pid %d wait task error ret %d\n",
1921                                         session->pid, ret);
1922                         ret = 0;
1923                 } else {
1924                         if (unlikely(ret < 0)) {
1925                                 vpu_err("error: pid %d wait task ret %d\n",
1926                                         session->pid, ret);
1927                         } else if (ret == 0) {
1928                                 vpu_err("error: pid %d wait %d task done timeout\n",
1929                                         session->pid,
1930                                         atomic_read(&session->task_running));
1931                                 ret = -ETIMEDOUT;
1932                         }
1933                 }
1934
1935                 if (ret < 0) {
1936                         int task_running = atomic_read(&session->task_running);
1937
1938                         mutex_lock(&pservice->lock);
1939                         vpu_service_dump(pservice);
1940                         if (task_running) {
1941                                 atomic_set(&session->task_running, 0);
1942                                 atomic_sub(task_running,
1943                                            &pservice->total_running);
1944                                 pr_err("%d task(s) still running but not returned, resetting hardware...",
1945                                        task_running);
1946                                 vpu_reset(data);
1947                                 pr_err("done\n");
1948                         }
1949                         vpu_service_session_clear(data, session);
1950                         mutex_unlock(&pservice->lock);
1951                         return ret;
1952                 }
1953
1954                 mutex_lock(&pservice->lock);
1955                 reg = list_entry(session->done.next,
1956                                  struct vpu_reg, session_link);
1957                 return_reg(data, reg, compat_ptr((compat_uptr_t)req.req));
1958                 mutex_unlock(&pservice->lock);
1959         } break;
1960         case COMPAT_VPU_IOC_PROBE_IOMMU_STATUS: {
1961                 int iommu_enable = 1;
1962
1963                 vpu_debug(DEBUG_IOCTL, "COMPAT_VPU_IOC_PROBE_IOMMU_STATUS\n");
1964
1965                 if (copy_to_user(compat_ptr((compat_uptr_t)arg),
1966                                  &iommu_enable, sizeof(int))) {
1967                         vpu_err("error: VPU_IOC_PROBE_IOMMU_STATUS copy_to_user failed\n");
1968                         return -EFAULT;
1969                 }
1970         } break;
1971         default: {
1972                 vpu_err("error: unknown vpu service ioctl cmd %x\n", cmd);
1973         } break;
1974         }
1975         vpu_debug_leave();
1976         return 0;
1977 }
1978 #endif
1979
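     /*
      * Identify the hardware: the ID sits in the upper 16 bits of the
      * first register of the block and is matched against
      * vcodec_info_set[] to select the register layout, task table and
      * translation table for this sub-device.
      */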
1980 static int vpu_service_check_hw(struct vpu_subdev_data *data)
1981 {
1982         int ret = -EINVAL, i = 0;
1983         u32 hw_id = readl_relaxed(data->regs);
1984
1985         hw_id = (hw_id >> 16) & 0xFFFF;
1986         pr_info("checking hw id %x\n", hw_id);
1987         data->hw_info = NULL;
1988         for (i = 0; i < ARRAY_SIZE(vcodec_info_set); i++) {
1989                 struct vcodec_info *info = &vcodec_info_set[i];
1990
1991                 if (hw_id == info->hw_id) {
1992                         data->hw_id = info->hw_id;
1993                         data->hw_info = info->hw_info;
1994                         data->task_info = info->task_info;
1995                         data->trans_info = info->trans_info;
1996                         ret = 0;
1997                         break;
1998                 }
1999         }
2000         return ret;
2001 }
2002
2003 static int vpu_service_open(struct inode *inode, struct file *filp)
2004 {
2005         struct vpu_subdev_data *data = container_of(
2006                         inode->i_cdev, struct vpu_subdev_data, cdev);
2007         struct vpu_service_info *pservice = data->pservice;
2008         struct vpu_session *session = kmalloc(sizeof(*session), GFP_KERNEL);
2009
2010         vpu_debug_enter();
2011
2012         if (NULL == session) {
2013                 vpu_err("error: unable to allocate memory for vpu_session\n");
2014                 return -ENOMEM;
2015         }
2016
2017         session->type   = VPU_TYPE_BUTT;
2018         session->pid    = current->pid;
2019         INIT_LIST_HEAD(&session->waiting);
2020         INIT_LIST_HEAD(&session->running);
2021         INIT_LIST_HEAD(&session->done);
2022         INIT_LIST_HEAD(&session->list_session);
2023         init_waitqueue_head(&session->wait);
2024         atomic_set(&session->task_running, 0);
2025         mutex_lock(&pservice->lock);
2026         list_add_tail(&session->list_session, &pservice->session);
2027         filp->private_data = (void *)session;
2028         mutex_unlock(&pservice->lock);
2029
2030         pr_debug("dev opened\n");
2031         vpu_debug_leave();
2032         return nonseekable_open(inode, filp);
2033 }
2034
2035 static int vpu_service_release(struct inode *inode, struct file *filp)
2036 {
2037         struct vpu_subdev_data *data = container_of(
2038                         inode->i_cdev, struct vpu_subdev_data, cdev);
2039         struct vpu_service_info *pservice = data->pservice;
2040         int task_running;
2041         struct vpu_session *session = (struct vpu_session *)filp->private_data;
2042
2043         vpu_debug_enter();
2044         if (NULL == session)
2045                 return -EINVAL;
2046
2047         task_running = atomic_read(&session->task_running);
2048         if (task_running) {
2049                 pr_err("error: session %d still has %d task(s) running when closing\n",
2050                        session->pid, task_running);
2051                 msleep(50);
2052         }
2053         wake_up(&session->wait);
2054
2055         mutex_lock(&pservice->lock);
2056         /* remove this filp from the asynchronously notified filps */
2057         list_del_init(&session->list_session);
2058         vpu_service_session_clear(data, session);
2059         kfree(session);
2060         filp->private_data = NULL;
2061         mutex_unlock(&pservice->lock);
2062
2063         pr_debug("dev closed\n");
2064         vpu_debug_leave();
2065         return 0;
2066 }
2067
2068 static const struct file_operations vpu_service_fops = {
2069         .unlocked_ioctl = vpu_service_ioctl,
2070         .open           = vpu_service_open,
2071         .release        = vpu_service_release,
2072 #ifdef CONFIG_COMPAT
2073         .compat_ioctl   = compat_vpu_service_ioctl,
2074 #endif
2075 };
2076
2077 static irqreturn_t vdpu_irq(int irq, void *dev_id);
2078 static irqreturn_t vdpu_isr(int irq, void *dev_id);
2079 static irqreturn_t vepu_irq(int irq, void *dev_id);
2080 static irqreturn_t vepu_isr(int irq, void *dev_id);
2081 static void get_hw_info(struct vpu_subdev_data *data);
2082
2083 static struct device *rockchip_get_sysmmu_dev(const char *compt)
2084 {
2085         struct device_node *dn = NULL;
2086         struct platform_device *pd = NULL;
2087         struct device *ret = NULL;
2088
2089         dn = of_find_compatible_node(NULL, NULL, compt);
2090         if (!dn) {
2091                 pr_err("can't find device node %s\n", compt);
2092                 return NULL;
2093         }
2094
2095         pd = of_find_device_by_node(dn);
2096         if (!pd) {
2097                 pr_err("can't find platform device in device node %s\n", compt);
2098                 return  NULL;
2099         }
2100         ret = &pd->dev;
2101
2102         return ret;
2103 }
2104
2105 #ifdef CONFIG_IOMMU_API
2106 static inline void platform_set_sysmmu(struct device *iommu,
2107                                        struct device *dev)
2108 {
2109         dev->archdata.iommu = iommu;
2110 }
2111 #else
2112 static inline void platform_set_sysmmu(struct device *iommu,
2113                                        struct device *dev)
2114 {
2115 }
2116 #endif
2117
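     /*
      * IOMMU page-fault handler. If a fault hits while a decode task owns
      * the hardware, dump that task's mapped memory regions and the live
      * decoder registers for post-mortem analysis, then reset the
      * hardware so the service can recover.
      */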
2118 int vcodec_sysmmu_fault_hdl(struct device *dev,
2119                             enum rk_iommu_inttype itype,
2120                             unsigned long pgtable_base,
2121                             unsigned long fault_addr, unsigned int status)
2122 {
2123         struct platform_device *pdev;
2124         struct vpu_service_info *pservice;
2125         struct vpu_subdev_data *data;
2126
2127         vpu_debug_enter();
2128
2129         if (dev == NULL) {
2130                 pr_err("invalid NULL dev\n");
2131                 return 0;
2132         }
2133
2134         pdev = container_of(dev, struct platform_device, dev);
2135         if (pdev == NULL) {
2136                 pr_err("invalid NULL platform_device\n");
2137                 return 0;
2138         }
2139
2140         data = platform_get_drvdata(pdev);
2141         if (data == NULL) {
2142                 pr_err("invalid NULL vpu_subdev_data\n");
2143                 return 0;
2144         }
2145
2146         pservice = data->pservice;
2147         if (pservice == NULL) {
2148                 pr_err("invalid NULL vpu_service_info\n");
2149                 return 0;
2150         }
2151
2152         if (pservice->reg_codec) {
2153                 struct vpu_reg *reg = pservice->reg_codec;
2154                 struct vcodec_mem_region *mem, *n;
2155                 int i = 0;
2156
2157                 pr_err("vcodec, fault addr 0x%08lx\n", fault_addr);
2158                 if (!list_empty(&reg->mem_region_list)) {
2159                         list_for_each_entry_safe(mem, n, &reg->mem_region_list,
2160                                                  reg_lnk) {
2161                                 pr_err("vcodec, reg[%02u] mem region [%02d] 0x%lx %lx\n",
2162                                        mem->reg_idx, i, mem->iova, mem->len);
2163                                 i++;
2164                         }
2165                 } else {
2166                         pr_err("no memory region mapped\n");
2167                 }
2168
2169                 if (reg->data) {
2170                         struct vpu_subdev_data *data = reg->data;
2171                         u32 *base = (u32 *)data->dec_dev.regs;
2172                         u32 len = data->hw_info->dec_reg_num;
2173
2174                         pr_err("current error register set:\n");
2175
2176                         for (i = 0; i < len; i++)
2177                                 pr_err("reg[%02d] %08x\n",
2178                                        i, readl_relaxed(base + i));
2179                 }
2180
2181                 pr_alert("vcodec, page fault occur, reset hw\n");
2182
2183                 /* reg->reg[101] = 1; */
2184                 vpu_reset(data);
2185         }
2186
2187         return 0;
2188 }
2189
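     /*
      * Probe one sub-device (vpu / hevc / rkvdec): map the register block
      * (or reuse the combo base), identify the hardware, request the
      * encoder/decoder interrupts, optionally attach the IOMMU and its
      * fault handler, and finally expose a character device plus a
      * debugfs "regs" dump for this instance.
      */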
2190 static int vcodec_subdev_probe(struct platform_device *pdev,
2191                                struct vpu_service_info *pservice)
2192 {
2193         int ret = 0;
2194         struct resource *res = NULL;
2195         u32 ioaddr = 0;
2196         u8 *regs = NULL;
2197         struct vpu_hw_info *hw_info = NULL;
2198         struct device *dev = &pdev->dev;
2199         char *name = (char *)dev_name(dev);
2200         struct device_node *np = pdev->dev.of_node;
2201         struct vpu_subdev_data *data =
2202                 devm_kzalloc(dev, sizeof(struct vpu_subdev_data), GFP_KERNEL);
2203         u32 iommu_en = 0;
2204         char mmu_dev_dts_name[40];
2205
2206         of_property_read_u32(np, "iommu_enabled", &iommu_en);
2207
2208         pr_info("probe device %s\n", dev_name(dev));
2209
2210         data->pservice = pservice;
2211         data->dev = dev;
2212
2213         of_property_read_string(np, "name", (const char **)&name);
2214         of_property_read_u32(np, "dev_mode", (u32 *)&data->mode);
2215
2216         if (pservice->reg_base == 0) {
2217                 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2218                 data->regs = devm_ioremap_resource(dev, res);
2219                 if (IS_ERR(data->regs)) {
2220                         ret = PTR_ERR(data->regs);
2221                         goto err;
2222                 }
2223                 ioaddr = res->start;
2224         } else {
2225                 data->regs = pservice->reg_base;
2226                 ioaddr = pservice->ioaddr;
2227         }
2228
2229         clear_bit(MMU_ACTIVATED, &data->state);
2230         vcodec_enter_mode(data);
2231         ret = vpu_service_check_hw(data);
2232         if (ret < 0) {
2233                 vpu_err("error: hw info check failed\n");
2234                 goto err;
2235         }
2236
2237         hw_info = data->hw_info;
2238         regs = (u8 *)data->regs;
2239
2240         if (hw_info->dec_reg_num) {
2241                 data->dec_dev.iosize = hw_info->dec_io_size;
2242                 data->dec_dev.regs = (u32 *)(regs + hw_info->dec_offset);
2243         }
2244
2245         if (hw_info->enc_reg_num) {
2246                 data->enc_dev.iosize = hw_info->enc_io_size;
2247                 data->enc_dev.regs = (u32 *)(regs + hw_info->enc_offset);
2248         }
2249
2250         data->reg_size = max(hw_info->dec_io_size, hw_info->enc_io_size);
2251
2252         data->irq_enc = platform_get_irq_byname(pdev, "irq_enc");
2253         if (data->irq_enc > 0) {
2254                 ret = devm_request_threaded_irq(dev, data->irq_enc,
2255                                                 vepu_irq, vepu_isr,
2256                                                 IRQF_SHARED, dev_name(dev),
2257                                                 (void *)data);
2258                 if (ret) {
2259                         dev_err(dev, "error: can't request vepu irq %d\n",
2260                                 data->irq_enc);
2261                         goto err;
2262                 }
2263         }
2264         data->irq_dec = platform_get_irq_byname(pdev, "irq_dec");
2265         if (data->irq_dec > 0) {
2266                 ret = devm_request_threaded_irq(dev, data->irq_dec,
2267                                                 vdpu_irq, vdpu_isr,
2268                                                 IRQF_SHARED, dev_name(dev),
2269                                                 (void *)data);
2270                 if (ret) {
2271                         dev_err(dev, "error: can't request vdpu irq %d\n",
2272                                 data->irq_dec);
2273                         goto err;
2274                 }
2275         }
2276         atomic_set(&data->dec_dev.irq_count_codec, 0);
2277         atomic_set(&data->dec_dev.irq_count_pp, 0);
2278         atomic_set(&data->enc_dev.irq_count_codec, 0);
2279         atomic_set(&data->enc_dev.irq_count_pp, 0);
2280
2281         if (iommu_en) {
2282                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
2283                         sprintf(mmu_dev_dts_name,
2284                                 HEVC_IOMMU_COMPATIBLE_NAME);
2285                 else if (data->mode == VCODEC_RUNNING_MODE_VPU)
2286                         sprintf(mmu_dev_dts_name,
2287                                 VPU_IOMMU_COMPATIBLE_NAME);
2288                 else if (data->mode == VCODEC_RUNNING_MODE_RKVDEC)
2289                         sprintf(mmu_dev_dts_name, VDEC_IOMMU_COMPATIBLE_NAME);
2290                 else
2291                         sprintf(mmu_dev_dts_name,
2292                                 HEVC_IOMMU_COMPATIBLE_NAME);
2293
2294                 data->mmu_dev =
2295                         rockchip_get_sysmmu_dev(mmu_dev_dts_name);
2296
2297                 if (data->mmu_dev)
2298                         platform_set_sysmmu(data->mmu_dev, dev);
2299
2300                 rockchip_iovmm_set_fault_handler(dev, vcodec_sysmmu_fault_hdl);
2301         }
2302
2303         get_hw_info(data);
2304         pservice->auto_freq = true;
2305
2306         vcodec_exit_mode(data);
2307         /* create device node */
2308         ret = alloc_chrdev_region(&data->dev_t, 0, 1, name);
2309         if (ret) {
2310                 dev_err(dev, "alloc dev_t failed\n");
2311                 goto err;
2312         }
2313
2314         cdev_init(&data->cdev, &vpu_service_fops);
2315
2316         data->cdev.owner = THIS_MODULE;
2317         data->cdev.ops = &vpu_service_fops;
2318
2319         ret = cdev_add(&data->cdev, data->dev_t, 1);
2320
2321         if (ret) {
2322                 dev_err(dev, "add dev_t failed\n");
2323                 goto err;
2324         }
2325
2326         data->cls = class_create(THIS_MODULE, name);
2327
2328         if (IS_ERR(data->cls)) {
2329                 ret = PTR_ERR(data->cls);
2330                 dev_err(dev, "class_create err:%d\n", ret);
2331                 goto err;
2332         }
2333
2334         data->child_dev = device_create(data->cls, dev,
2335                 data->dev_t, NULL, name);
2336
2337         platform_set_drvdata(pdev, data);
2338
2339         INIT_LIST_HEAD(&data->lnk_service);
2340         list_add_tail(&data->lnk_service, &pservice->subdev_list);
2341
2342 #ifdef CONFIG_DEBUG_FS
2343         data->debugfs_dir = vcodec_debugfs_create_device_dir(name, parent);
2344         if (!IS_ERR_OR_NULL(data->debugfs_dir))
2345                 data->debugfs_file_regs =
2346                         debugfs_create_file("regs", 0664, data->debugfs_dir,
2347                                         data, &debug_vcodec_fops);
2348         else
2349                 vpu_err("create debugfs dir %s failed\n", name);
2350 #endif
2351         return 0;
2352 err:
2353         if (data->child_dev) {
2354                 device_destroy(data->cls, data->dev_t);
2355                 cdev_del(&data->cdev);
2356                 unregister_chrdev_region(data->dev_t, 1);
2357         }
2358
2359         if (data->cls)
2360                 class_destroy(data->cls);
2361         return -1;
2362 }
2363
2364 static void vcodec_subdev_remove(struct vpu_subdev_data *data)
2365 {
2366         struct vpu_service_info *pservice = data->pservice;
2367
2368         mutex_lock(&pservice->lock);
2369         cancel_delayed_work_sync(&pservice->power_off_work);
2370         vpu_service_power_off(pservice);
2371         mutex_unlock(&pservice->lock);
2372
2373         device_destroy(data->cls, data->dev_t);
2374         class_destroy(data->cls);
2375         cdev_del(&data->cdev);
2376         unregister_chrdev_region(data->dev_t, 1);
2377
2378 #ifdef CONFIG_DEBUG_FS
2379         if (!IS_ERR_OR_NULL(data->debugfs_dir))
2380                 debugfs_remove_recursive(data->debugfs_dir);
2381 #endif
2382 }
2383
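     /*
      * Read the service configuration from the device tree: the number of
      * sub-devices ("subcnt"), the combo mode switch bit/register when
      * there is more than one, the GRF regmap (with a legacy RK_GRF_VIRT
      * fallback on ARM), the optional reset controls and the device name.
      */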
2384 static void vcodec_read_property(struct device_node *np,
2385                                  struct vpu_service_info *pservice)
2386 {
2387         pservice->mode_bit = 0;
2388         pservice->mode_ctrl = 0;
2389         pservice->subcnt = 0;
2390         pservice->grf_base = NULL;
2391
2392         of_property_read_u32(np, "subcnt", &pservice->subcnt);
2393
2394         if (pservice->subcnt > 1) {
2395                 of_property_read_u32(np, "mode_bit", &pservice->mode_bit);
2396                 of_property_read_u32(np, "mode_ctrl", &pservice->mode_ctrl);
2397         }
2398 #ifdef CONFIG_MFD_SYSCON
2399         pservice->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
2400         if (IS_ERR_OR_NULL(pservice->grf)) {
2401                 pservice->grf = NULL;
2402 #ifdef CONFIG_ARM
2403                 pservice->grf_base = RK_GRF_VIRT;
2404 #else
2405                 vpu_err("can't find vpu grf property\n");
2406                 return;
2407 #endif
2408         }
2409 #else
2410 #ifdef CONFIG_ARM
2411         pservice->grf_base = RK_GRF_VIRT;
2412 #else
2413         vpu_err("can't find vpu grf property\n");
2414         return;
2415 #endif
2416 #endif
2417
2418 #ifdef CONFIG_RESET_CONTROLLER
2419         pservice->rst_a = devm_reset_control_get(pservice->dev, "video_a");
2420         pservice->rst_h = devm_reset_control_get(pservice->dev, "video_h");
2421         pservice->rst_v = devm_reset_control_get(pservice->dev, "video");
2422
2423         if (IS_ERR_OR_NULL(pservice->rst_a)) {
2424                 pr_warn("No aclk reset resource defined\n");
2425                 pservice->rst_a = NULL;
2426         }
2427
2428         if (IS_ERR_OR_NULL(pservice->rst_h)) {
2429                 pr_warn("No hclk reset resource defined\n");
2430                 pservice->rst_h = NULL;
2431         }
2432
2433         if (IS_ERR_OR_NULL(pservice->rst_v)) {
2434                 pr_warn("No core reset resource defined\n");
2435                 pservice->rst_v = NULL;
2436         }
2437 #endif
2438
2439         of_property_read_string(np, "name", (const char **)&pservice->name);
2440 }
2441
2442 static void vcodec_init_drvdata(struct vpu_service_info *pservice)
2443 {
2444         pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2445         pservice->curr_mode = -1;
2446
2447         wake_lock_init(&pservice->wake_lock, WAKE_LOCK_SUSPEND, "vpu");
2448         INIT_LIST_HEAD(&pservice->waiting);
2449         INIT_LIST_HEAD(&pservice->running);
2450         mutex_init(&pservice->lock);
2451
2452         INIT_LIST_HEAD(&pservice->done);
2453         INIT_LIST_HEAD(&pservice->session);
2454         INIT_LIST_HEAD(&pservice->subdev_list);
2455
2456         pservice->reg_pproc     = NULL;
2457         atomic_set(&pservice->total_running, 0);
2458         atomic_set(&pservice->enabled,       0);
2459         atomic_set(&pservice->power_on_cnt,  0);
2460         atomic_set(&pservice->power_off_cnt, 0);
2461         atomic_set(&pservice->reset_request, 0);
2462
2463         INIT_DELAYED_WORK(&pservice->power_off_work, vpu_power_off_work);
2464         pservice->last.tv64 = 0;
2465
2466         pservice->ion_client = rockchip_ion_client_create("vpu");
2467         if (IS_ERR(pservice->ion_client)) {
2468                 vpu_err("failed to create ion client for vcodec ret %ld\n",
2469                         PTR_ERR(pservice->ion_client));
2470         } else {
2471                 vpu_debug(DEBUG_IOMMU, "vcodec ion client create success!\n");
2472         }
2473 }
2474
2475 static int vcodec_probe(struct platform_device *pdev)
2476 {
2477         int i;
2478         int ret = 0;
2479         struct resource *res = NULL;
2480         struct device *dev = &pdev->dev;
2481         struct device_node *np = pdev->dev.of_node;
2482         struct vpu_service_info *pservice =
2483                 devm_kzalloc(dev, sizeof(struct vpu_service_info), GFP_KERNEL);
2484
2485         pservice->dev = dev;
2486
2487         vcodec_read_property(np, pservice);
2488         vcodec_init_drvdata(pservice);
2489
2490         if (strncmp(pservice->name, "hevc_service", 12) == 0)
2491                 pservice->dev_id = VCODEC_DEVICE_ID_HEVC;
2492         else if (strncmp(pservice->name, "vpu_service", 11) == 0)
2493                 pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2494         else if (strncmp(pservice->name, "rkvdec", 6) == 0)
2495                 pservice->dev_id = VCODEC_DEVICE_ID_RKVDEC;
2496         else
2497                 pservice->dev_id = VCODEC_DEVICE_ID_COMBO;
2498
2499         ret = vpu_get_clk(pservice);
2500         if (ret < 0)
                     goto err;
2501
2502         vpu_service_power_on(pservice);
2503
2504         if (of_property_read_bool(np, "reg")) {
2505                 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2506
2507                 pservice->reg_base = devm_ioremap_resource(pservice->dev, res);
2508                 if (IS_ERR(pservice->reg_base)) {
2509                         vpu_err("ioremap registers base failed\n");
2510                         ret = PTR_ERR(pservice->reg_base);
2511                         goto err;
2512                 }
2513                 pservice->ioaddr = res->start;
2514         } else {
2515                 pservice->reg_base = 0;
2516         }
2517
2518         if (of_property_read_bool(np, "subcnt")) {
2519                 for (i = 0; i < pservice->subcnt; i++) {
2520                         struct device_node *sub_np;
2521                         struct platform_device *sub_pdev;
2522
2523                         sub_np = of_parse_phandle(np, "rockchip,sub", i);
2524                         sub_pdev = of_find_device_by_node(sub_np);
2525
2526                         vcodec_subdev_probe(sub_pdev, pservice);
2527                 }
2528         } else {
2529                 vcodec_subdev_probe(pdev, pservice);
2530         }
2531
2532         vpu_service_power_off(pservice);
2533
2534         pr_info("init success\n");
2535
2536         return 0;
2537
2538 err:
2539         pr_info("init failed\n");
2540         vpu_service_power_off(pservice);
2541         vpu_put_clk(pservice);
2542         wake_lock_destroy(&pservice->wake_lock);
2543
2544         return ret;
2545 }
2546
2547 static int vcodec_remove(struct platform_device *pdev)
2548 {
2549         struct vpu_subdev_data *data = platform_get_drvdata(pdev);
2550
2551         vcodec_subdev_remove(data);
2552         return 0;
2553 }
2554
2555 #if defined(CONFIG_OF)
2556 static const struct of_device_id vcodec_service_dt_ids[] = {
2557         {.compatible = "rockchip,vpu_service",},
2558         {.compatible = "rockchip,hevc_service",},
2559         {.compatible = "rockchip,vpu_combo",},
2560         {.compatible = "rockchip,rkvdec",},
2561         {},
2562 };
2563 #endif
2564
2565 static struct platform_driver vcodec_driver = {
2566         .probe = vcodec_probe,
2567         .remove = vcodec_remove,
2568         .driver = {
2569                 .name = "vcodec",
2570                 .owner = THIS_MODULE,
2571 #if defined(CONFIG_OF)
2572                 .of_match_table = of_match_ptr(vcodec_service_dt_ids),
2573 #endif
2574         },
2575 };
2576
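     /*
      * Fill the decoder/encoder capability tables that are reported
      * through VPU_IOC_GET_HW_FUSE_STATUS, based on the SoC (maximum
      * decode width) and the running mode. Auto frequency scaling is
      * enabled for the VPU and rkvdec modes and kept off for the HEVC
      * block.
      */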
2577 static void get_hw_info(struct vpu_subdev_data *data)
2578 {
2579         struct vpu_service_info *pservice = data->pservice;
2580         struct vpu_dec_config *dec = &pservice->dec_config;
2581         struct vpu_enc_config *enc = &pservice->enc_config;
2582
2583         if (cpu_is_rk2928() || cpu_is_rk3036() ||
2584             cpu_is_rk30xx() || cpu_is_rk312x() ||
2585             cpu_is_rk3188())
2586                 dec->max_dec_pic_width = 1920;
2587         else
2588                 dec->max_dec_pic_width = 4096;
2589
2590         if (data->mode == VCODEC_RUNNING_MODE_VPU) {
2591                 dec->h264_support = 3;
2592                 dec->jpeg_support = 1;
2593                 dec->mpeg4_support = 2;
2594                 dec->vc1_support = 3;
2595                 dec->mpeg2_support = 1;
2596                 dec->pp_support = 1;
2597                 dec->sorenson_support = 1;
2598                 dec->ref_buf_support = 3;
2599                 dec->vp6_support = 1;
2600                 dec->vp7_support = 1;
2601                 dec->vp8_support = 1;
2602                 dec->avs_support = 1;
2603                 dec->jpeg_ext_support = 0;
2604                 dec->custom_mpeg4_support = 1;
2605                 dec->reserve = 0;
2606                 dec->mvc_support = 1;
2607
2608                 if (!cpu_is_rk3036()) {
2609                         u32 config_reg = readl_relaxed(data->enc_dev.regs + 63);
2610
2611                         enc->max_encoded_width = config_reg & ((1 << 11) - 1);
2612                         enc->h264_enabled = 1;
2613                         enc->mpeg4_enabled = (config_reg >> 26) & 1;
2614                         enc->jpeg_enabled = 1;
2615                         enc->vs_enabled = (config_reg >> 24) & 1;
2616                         enc->rgb_enabled = (config_reg >> 28) & 1;
2617                         enc->reg_size = data->reg_size;
2618                         enc->reserv[0] = 0;
2619                         enc->reserv[1] = 0;
2620                 }
2621
2622                 pservice->auto_freq = true;
2623                 vpu_debug(DEBUG_EXTRA_INFO, "vpu_service set to auto frequency mode\n");
2624                 atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
2625
2626                 pservice->bug_dec_addr = cpu_is_rk30xx();
2627         } else if (data->mode == VCODEC_RUNNING_MODE_RKVDEC) {
2628                 pservice->auto_freq = true;
2629                 atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
2630         } else {
2631                 /* disable frequency switching in hevc. */
2632                 pservice->auto_freq = false;
2633         }
2634 }
2635
2636 static bool check_irq_err(struct vpu_task_info *task, u32 irq_status)
2637 {
2638         vpu_debug(DEBUG_IRQ_CHECK, "task %s status %08x mask %08x\n",
2639                   task->name, irq_status, task->error_mask);
2640
2641         return (task->error_mask & irq_status) ? true : false;
2642 }
2643
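     /*
      * Decoder interrupt top half: latch and acknowledge the decoder (and,
      * on hardware that has one, the post-processor) status, request a
      * reset when a bit from task->error_mask is set, and defer the
      * completion handling to the threaded handler via IRQ_WAKE_THREAD.
      */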
2644 static irqreturn_t vdpu_irq(int irq, void *dev_id)
2645 {
2646         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2647         struct vpu_service_info *pservice = data->pservice;
2648         struct vpu_task_info *task = NULL;
2649         struct vpu_device *dev = &data->dec_dev;
2650         u32 hw_id = data->hw_info->hw_id;
2651         u32 raw_status;
2652         u32 dec_status;
2653
2654         task = &data->task_info[TASK_DEC];
2655
2656         raw_status = readl_relaxed(dev->regs + task->reg_irq);
2657         dec_status = raw_status;
2658
2659         vpu_debug(DEBUG_TASK_INFO, "vdpu_irq reg %d status %x mask: irq %x ready %x error %0x\n",
2660                   task->reg_irq, dec_status,
2661                   task->irq_mask, task->ready_mask, task->error_mask);
2662
2663         if (dec_status & task->irq_mask) {
2664                 time_record(task, 1);
2665                 vpu_debug(DEBUG_IRQ_STATUS, "vdpu_irq dec status %08x\n",
2666                           dec_status);
2667                 if ((dec_status & 0x40001) == 0x40001) {
2668                         do {
2669                                 dec_status =
2670                                         readl_relaxed(dev->regs +
2671                                                 task->reg_irq);
2672                         } while ((dec_status & 0x40001) == 0x40001);
2673                 }
2674
2675                 if (check_irq_err(task, dec_status))
2676                         atomic_add(1, &pservice->reset_request);
2677
2678                 writel_relaxed(0, dev->regs + task->reg_irq);
2679
2680                 /*
2681                  * NOTE: rkvdec needs a reset after each task to avoid a timeout
2682                  *       error when switching from H.264 to H.265
2683                  */
2684                 if (data->mode == VCODEC_RUNNING_MODE_RKVDEC)
2685                         writel(0x100000, dev->regs + task->reg_irq);
2686
2687                 /* set clock gating to save power */
2688                 writel(task->gating_mask, dev->regs + task->reg_irq);
2689
2690                 atomic_add(1, &dev->irq_count_codec);
2691                 time_diff(task);
2692         }
2693
2694         task = &data->task_info[TASK_PP];
2695         if (hw_id != HEVC_ID && hw_id != RKV_DEC_ID) {
2696                 u32 pp_status = readl_relaxed(dev->regs + task->irq_mask);
2697
2698                 if (pp_status & task->irq_mask) {
2699                         time_record(task, 1);
2700                         vpu_debug(DEBUG_IRQ_STATUS, "vdpu_irq pp status %08x\n",
2701                                   pp_status);
2702
2703                         if (check_irq_err(task, dec_status))
2704                                 atomic_add(1, &pservice->reset_request);
2705
2706                         /* clear pp IRQ */
2707                         writel_relaxed(pp_status & (~task->reg_irq),
2708                                        dev->regs + task->irq_mask);
2709                         atomic_add(1, &dev->irq_count_pp);
2710                         time_diff(task);
2711                 }
2712         }
2713
2714         pservice->irq_status = raw_status;
2715
2716         if (atomic_read(&dev->irq_count_pp) ||
2717             atomic_read(&dev->irq_count_codec))
2718                 return IRQ_WAKE_THREAD;
2719         else
2720                 return IRQ_NONE;
2721 }
2722
2723 static irqreturn_t vdpu_isr(int irq, void *dev_id)
2724 {
2725         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2726         struct vpu_service_info *pservice = data->pservice;
2727         struct vpu_device *dev = &data->dec_dev;
2728
2729         mutex_lock(&pservice->lock);
2730         if (atomic_read(&dev->irq_count_codec)) {
2731                 atomic_sub(1, &dev->irq_count_codec);
2732                 if (pservice->reg_codec == NULL) {
2733                         vpu_err("error: dec isr with no task waiting\n");
2734                 } else {
2735                         reg_from_run_to_done(data, pservice->reg_codec);
2736                         /* avoid a vpu timeout that cannot be recovered from */
2737                         VDPU_SOFT_RESET(data->regs);
2738                 }
2739         }
2740
2741         if (atomic_read(&dev->irq_count_pp)) {
2742                 atomic_sub(1, &dev->irq_count_pp);
2743                 if (pservice->reg_pproc == NULL)
2744                         vpu_err("error: pp isr with no task waiting\n");
2745                 else
2746                         reg_from_run_to_done(data, pservice->reg_pproc);
2747         }
2748         try_set_reg(data);
2749         mutex_unlock(&pservice->lock);
2750         return IRQ_HANDLED;
2751 }
2752
2753 static irqreturn_t vepu_irq(int irq, void *dev_id)
2754 {
2755         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2756         struct vpu_service_info *pservice = data->pservice;
2757         struct vpu_task_info *task = &data->task_info[TASK_ENC];
2758         struct vpu_device *dev = &data->enc_dev;
2759         u32 irq_status;
2760
2761         irq_status = readl_relaxed(dev->regs + task->reg_irq);
2762
2763         vpu_debug(DEBUG_TASK_INFO, "vepu_irq reg %d status %x mask: irq %x ready %x error %0x\n",
2764                   task->reg_irq, irq_status,
2765                   task->irq_mask, task->ready_mask, task->error_mask);
2766
2767         vpu_debug(DEBUG_IRQ_STATUS, "vepu_irq enc status %08x\n", irq_status);
2768
2769         if (likely(irq_status & task->irq_mask)) {
2770                 time_record(task, 1);
2771
2772                 if (check_irq_err(task, irq_status))
2773                         atomic_add(1, &pservice->reset_request);
2774
2775                 /* clear enc IRQ */
2776                 writel_relaxed(irq_status & (~task->irq_mask),
2777                                dev->regs + task->reg_irq);
2778
2779                 atomic_add(1, &dev->irq_count_codec);
2780                 time_diff(task);
2781         }
2782
2783         pservice->irq_status = irq_status;
2784
2785         if (atomic_read(&dev->irq_count_codec))
2786                 return IRQ_WAKE_THREAD;
2787         else
2788                 return IRQ_NONE;
2789 }
2790
2791 static irqreturn_t vepu_isr(int irq, void *dev_id)
2792 {
2793         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2794         struct vpu_service_info *pservice = data->pservice;
2795         struct vpu_device *dev = &data->enc_dev;
2796
2797         mutex_lock(&pservice->lock);
2798         if (atomic_read(&dev->irq_count_codec)) {
2799                 atomic_sub(1, &dev->irq_count_codec);
2800                 if (NULL == pservice->reg_codec)
2801                         vpu_err("error: enc isr with no task waiting\n");
2802                 else
2803                         reg_from_run_to_done(data, pservice->reg_codec);
2804         }
2805         try_set_reg(data);
2806         mutex_unlock(&pservice->lock);
2807         return IRQ_HANDLED;
2808 }
2809
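/*
 * The top/threaded handler pairs above are expected to be registered at
 * probe time with the threaded-IRQ API, roughly as sketched below (the
 * real registration lives elsewhere in this file; the irq number, flags
 * and name shown here are illustrative placeholders, not taken from this
 * section):
 *
 *   ret = devm_request_threaded_irq(dev, irq_enc, vepu_irq, vepu_isr,
 *                                   IRQF_SHARED, "vepu", (void *)data);
 *
 * With this pattern the top half (vepu_irq) runs in hard-IRQ context and
 * only acknowledges the hardware, while the heavier completion work runs
 * in the threaded handler (vepu_isr), which is why it may take a mutex.
 */
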
2810 static int __init vcodec_service_init(void)
2811 {
2812         int ret = platform_driver_register(&vcodec_driver);
2813
2814         if (ret) {
2815                 vpu_err("Platform driver register failed (%d).\n", ret);
2816                 return ret;
2817         }
2818
2819 #ifdef CONFIG_DEBUG_FS
2820         vcodec_debugfs_init();
2821 #endif
2822
2823         return ret;
2824 }
2825
2826 static void __exit vcodec_service_exit(void)
2827 {
2828 #ifdef CONFIG_DEBUG_FS
2829         vcodec_debugfs_exit();
2830 #endif
2831
2832         platform_driver_unregister(&vcodec_driver);
2833 }
2834
2835 module_init(vcodec_service_init);
2836 module_exit(vcodec_service_exit);
2837 MODULE_LICENSE("GPL v2");
2838
2839 #ifdef CONFIG_DEBUG_FS
2840 #include <linux/seq_file.h>
2841
2842 static int vcodec_debugfs_init(void)
2843 {
2844         parent = debugfs_create_dir("vcodec", NULL);
2845         if (!parent)
2846                 return -ENOMEM;
2847
2848         return 0;
2849 }
2850
2851 static void vcodec_debugfs_exit(void)
2852 {
2853         debugfs_remove(parent);
2854 }
2855
2856 static struct dentry *vcodec_debugfs_create_device_dir(
2857                 char *dirname, struct dentry *parent)
2858 {
2859         return debugfs_create_dir(dirname, parent);
2860 }
2861
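/*
 * debugfs show callback: powers the hardware on, dumps the encoder and
 * decoder register files and each session's waiting/running/done
 * register-set lists, then powers the hardware back off.
 */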
2862 static int debug_vcodec_show(struct seq_file *s, void *unused)
2863 {
2864         struct vpu_subdev_data *data = s->private;
2865         struct vpu_service_info *pservice = data->pservice;
2866         unsigned int i, n;
2867         struct vpu_reg *reg, *reg_tmp;
2868         struct vpu_session *session, *session_tmp;
2869
2870         mutex_lock(&pservice->lock);
2871         vpu_service_power_on(pservice);
2872         if (data->hw_info->hw_id != HEVC_ID) {
2873                 seq_puts(s, "\nENC Registers:\n");
2874                 n = data->enc_dev.iosize >> 2;
2875
2876                 for (i = 0; i < n; i++)
2877                         seq_printf(s, "\tswreg%d = %08X\n", i,
2878                                    readl_relaxed(data->enc_dev.regs + i));
2879         }
2880
2881         seq_puts(s, "\nDEC Registers:\n");
2882
2883         n = data->dec_dev.iosize >> 2;
2884         for (i = 0; i < n; i++)
2885                 seq_printf(s, "\tswreg%d = %08X\n", i,
2886                            readl_relaxed(data->dec_dev.regs + i));
2887
2888         seq_puts(s, "\nvpu service status:\n");
2889
2890         list_for_each_entry_safe(session, session_tmp,
2891                                  &pservice->session, list_session) {
2892                 seq_printf(s, "session pid %d type %d:\n",
2893                            session->pid, session->type);
2894
2895                 list_for_each_entry_safe(reg, reg_tmp,
2896                                          &session->waiting, session_link) {
2897                         seq_printf(s, "waiting register set %p\n", reg);
2898                 }
2899                 list_for_each_entry_safe(reg, reg_tmp,
2900                                          &session->running, session_link) {
2901                         seq_printf(s, "running register set %p\n", reg);
2902                 }
2903                 list_for_each_entry_safe(reg, reg_tmp,
2904                                          &session->done, session_link) {
2905                         seq_printf(s, "done    register set %p\n", reg);
2906                 }
2907         }
2908
2909         seq_printf(s, "\npower counter: on %d off %d\n",
2910                    atomic_read(&pservice->power_on_cnt),
2911                    atomic_read(&pservice->power_off_cnt));
2912
2913         mutex_unlock(&pservice->lock);
2914         vpu_service_power_off(pservice);
2915
2916         return 0;
2917 }
2918
2919 static int debug_vcodec_open(struct inode *inode, struct file *file)
2920 {
2921         return single_open(file, debug_vcodec_show, inode->i_private);
2922 }
2923
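/*
 * The per-device debugfs node is expected to be wired up at probe time
 * roughly as sketched below (the file name, directory pointer and fops
 * variable are illustrative placeholders, not taken from this section):
 *
 *   static const struct file_operations debug_vcodec_fops = {
 *           .open    = debug_vcodec_open,
 *           .read    = seq_read,
 *           .llseek  = seq_lseek,
 *           .release = single_release,
 *   };
 *
 *   debugfs_create_file("state", 0444, parent_dir, data,
 *                       &debug_vcodec_fops);
 */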
2924 #endif
2925