1 /**
2  * Copyright (C) 2015 Fuzhou Rockchip Electronics Co., Ltd
3  * author: chenhengming chm@rock-chips.com
4  *         Alpha Lin, alpha.lin@rock-chips.com
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include <linux/clk.h>
20 #include <linux/compat.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/module.h>
25 #include <linux/fs.h>
26 #include <linux/mm.h>
27 #include <linux/platform_device.h>
28 #include <linux/reset.h>
29 #include <linux/sched.h>
30 #include <linux/slab.h>
31 #include <linux/wakelock.h>
32 #include <linux/cdev.h>
33 #include <linux/of.h>
34 #include <linux/of_platform.h>
35 #include <linux/of_irq.h>
36 #include <linux/regmap.h>
37 #include <linux/mfd/syscon.h>
38 #include <linux/uaccess.h>
39 #include <linux/debugfs.h>
40 #include <linux/pm_runtime.h>
41
42 #include <linux/rockchip/cpu.h>
43 #include <linux/rockchip/cru.h>
44 #include <linux/rockchip/pmu.h>
45 #include <linux/rockchip/grf.h>
46
47 #if defined(CONFIG_ION_ROCKCHIP)
48 #include <linux/rockchip_ion.h>
49 #endif
50
51 #include <linux/rockchip-iovmm.h>
52 #include <linux/dma-buf.h>
53
54 #include "vcodec_hw_info.h"
55 #include "vcodec_hw_vpu.h"
56 #include "vcodec_hw_rkv.h"
57 #include "vcodec_hw_vpu2.h"
58
59 #include "vcodec_service.h"
60
61 /*
62  * debug flag usage:
63  * +------+-------------------+
64  * | 8bit |      24bit        |
65  * +------+-------------------+
 66  * bits  0~23 select the information type
 67  * bits 24~31 select the print format
68  */
69
70 #define DEBUG_POWER                             0x00000001
71 #define DEBUG_CLOCK                             0x00000002
72 #define DEBUG_IRQ_STATUS                        0x00000004
73 #define DEBUG_IOMMU                             0x00000008
74 #define DEBUG_IOCTL                             0x00000010
75 #define DEBUG_FUNCTION                          0x00000020
76 #define DEBUG_REGISTER                          0x00000040
77 #define DEBUG_EXTRA_INFO                        0x00000080
78 #define DEBUG_TIMING                            0x00000100
79 #define DEBUG_TASK_INFO                         0x00000200
80
81 #define DEBUG_SET_REG                           0x00001000
82 #define DEBUG_GET_REG                           0x00002000
83 #define DEBUG_PPS_FILL                          0x00004000
84 #define DEBUG_IRQ_CHECK                         0x00008000
85 #define DEBUG_CACHE_32B                         0x00010000
86
87 #define PRINT_FUNCTION                          0x80000000
88 #define PRINT_LINE                              0x40000000
89
90 static int debug;
91 module_param(debug, int, S_IRUGO | S_IWUSR);
92 MODULE_PARM_DESC(debug, "bit switch for vcodec_service debug information");
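
/*
 * Example (illustrative only): combine one information-type bit with one
 * print-format bit, e.g. DEBUG_IOMMU | PRINT_FUNCTION = 0x80000008, and
 * write it to the module parameter at runtime. The exact sysfs path depends
 * on how the module is named when built:
 *
 *   echo 0x80000008 > /sys/module/<module-name>/parameters/debug
 */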
93
94 #define VCODEC_CLOCK_ENABLE     1
95
96 /*
97  * hardware information organization
98  *
 99  * In order to support multiple hardware blocks with different versions, the
100  * hardware information is organized as follows:
101  *
102  * 1. First, index the hardware by register size / position.
103  *    This information is fixed for each hardware block and does not relate
104  *    to the runtime work flow; it is only used for resource allocation.
105  *    Descriptor: struct vpu_hw_info
106  *
107  * 2. Then, index the hardware by runtime configuration.
108  *    This information covers the runtime setting behavior, including the
109  *    enable register, the irq register and other key control flags.
110  *    Descriptor: struct vpu_task_info
111  *
112  * 3. Finally, in the iommu case, fd translation is required.
113  *    Descriptor: struct vpu_trans_info
114  */
115
116 enum VPU_FREQ {
117         VPU_FREQ_200M,
118         VPU_FREQ_266M,
119         VPU_FREQ_300M,
120         VPU_FREQ_400M,
121         VPU_FREQ_500M,
122         VPU_FREQ_600M,
123         VPU_FREQ_DEFAULT,
124         VPU_FREQ_BUT,
125 };
126
127 struct extra_info_elem {
128         u32 index;
129         u32 offset;
130 };
131
132 #define EXTRA_INFO_MAGIC        0x4C4A46
133
134 struct extra_info_for_iommu {
135         u32 magic;
136         u32 cnt;
137         struct extra_info_elem elem[20];
138 };
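
/*
 * Illustrative layout (not a new ABI, just restating how reg_init() below
 * consumes this structure): userspace may append it directly after the
 * register words it submits, e.g.
 *
 *   [ u32 reg[0 .. reg_size/4 - 1] ][ magic | cnt | elem[0] ... elem[cnt-1] ]
 *
 * When magic == EXTRA_INFO_MAGIC, each elem adds elem[i].offset to
 * reg[elem[i].index] after the fd in that register has been translated to
 * an iova.
 */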
139
140 #define MHZ                                     (1000*1000)
141 #define SIZE_REG(reg)                           ((reg)*4)
142
143 static struct vcodec_info vcodec_info_set[] = {
144         [0] = {
145                 .hw_id          = VPU_ID_8270,
146                 .hw_info        = &hw_vpu_8270,
147                 .task_info      = task_vpu,
148                 .trans_info     = trans_vpu,
149         },
150         [1] = {
151                 .hw_id          = VPU_ID_4831,
152                 .hw_info        = &hw_vpu_4831,
153                 .task_info      = task_vpu,
154                 .trans_info     = trans_vpu,
155         },
156         [2] = {
157                 .hw_id          = VPU_DEC_ID_9190,
158                 .hw_info        = &hw_vpu_9190,
159                 .task_info      = task_vpu,
160                 .trans_info     = trans_vpu,
161         },
162         [3] = {
163                 .hw_id          = HEVC_ID,
164                 .hw_info        = &hw_rkhevc,
165                 .task_info      = task_rkv,
166                 .trans_info     = trans_rkv,
167         },
168         [4] = {
169                 .hw_id          = RKV_DEC_ID,
170                 .hw_info        = &hw_rkvdec,
171                 .task_info      = task_rkv,
172                 .trans_info     = trans_rkv,
173         },
174         [5] = {
175                 .hw_id          = VPU2_ID,
176                 .hw_info        = &hw_vpu2,
177                 .task_info      = task_vpu2,
178                 .trans_info     = trans_vpu2,
179         },
180 };
181
182 #define DEBUG
183 #ifdef DEBUG
184 #define vpu_debug_func(type, fmt, args...)                      \
185         do {                                                    \
186                 if (unlikely(debug & type)) {                   \
187                         pr_info("%s:%d: " fmt,                  \
188                                  __func__, __LINE__, ##args);   \
189                 }                                               \
190         } while (0)
191 #define vpu_debug(type, fmt, args...)                           \
192         do {                                                    \
193                 if (unlikely(debug & type)) {                   \
194                         pr_info(fmt, ##args);                   \
195                 }                                               \
196         } while (0)
197 #else
198 #define vpu_debug_func(level, fmt, args...)
199 #define vpu_debug(level, fmt, args...)
200 #endif
201
202 #define vpu_debug_enter() vpu_debug_func(DEBUG_FUNCTION, "enter\n")
203 #define vpu_debug_leave() vpu_debug_func(DEBUG_FUNCTION, "leave\n")
204
205 #define vpu_err(fmt, args...)                           \
206                 pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
207
208 enum VPU_DEC_FMT {
209         VPU_DEC_FMT_H264,
210         VPU_DEC_FMT_MPEG4,
211         VPU_DEC_FMT_H263,
212         VPU_DEC_FMT_JPEG,
213         VPU_DEC_FMT_VC1,
214         VPU_DEC_FMT_MPEG2,
215         VPU_DEC_FMT_MPEG1,
216         VPU_DEC_FMT_VP6,
217         VPU_DEC_FMT_RESERV0,
218         VPU_DEC_FMT_VP7,
219         VPU_DEC_FMT_VP8,
220         VPU_DEC_FMT_AVS,
221         VPU_DEC_FMT_RES
222 };
223
224 /**
225  * struct for process session which connect to vpu
226  *
227  * @author ChenHengming (2011-5-3)
228  */
229 struct vpu_session {
230         enum VPU_CLIENT_TYPE type;
231         /* a linked list of data so we can access them for debugging */
232         struct list_head list_session;
233         /* a linked list of register data waiting for process */
234         struct list_head waiting;
235         /* a linked list of register data in processing */
236         struct list_head running;
237         /* a linked list of register data processed */
238         struct list_head done;
239         wait_queue_head_t wait;
240         pid_t pid;
241         atomic_t task_running;
242 };
243
244 /**
245  * struct for process register set
246  *
247  * @author ChenHengming (2011-5-4)
248  */
249 struct vpu_reg {
250         enum VPU_CLIENT_TYPE type;
251         enum VPU_FREQ freq;
252         struct vpu_session *session;
253         struct vpu_subdev_data *data;
254         struct vpu_task_info *task;
255         const struct vpu_trans_info *trans;
256
257         /* link to vpu service session */
258         struct list_head session_link;
259         /* link to register set list */
260         struct list_head status_link;
261
262         unsigned long size;
263         struct list_head mem_region_list;
264         u32 dec_base;
265         u32 *reg;
266 };
267
268 struct vpu_device {
269         atomic_t irq_count_codec;
270         atomic_t irq_count_pp;
271         unsigned int iosize;
272         u32 *regs;
273 };
274
275 enum vcodec_device_id {
276         VCODEC_DEVICE_ID_VPU,
277         VCODEC_DEVICE_ID_HEVC,
278         VCODEC_DEVICE_ID_COMBO,
279         VCODEC_DEVICE_ID_RKVDEC,
280         VCODEC_DEVICE_ID_BUTT
281 };
282
283 enum VCODEC_RUNNING_MODE {
284         VCODEC_RUNNING_MODE_NONE = -1,
285         VCODEC_RUNNING_MODE_VPU,
286         VCODEC_RUNNING_MODE_HEVC,
287         VCODEC_RUNNING_MODE_RKVDEC
288 };
289
290 struct vcodec_mem_region {
291         struct list_head srv_lnk;
292         struct list_head reg_lnk;
293         struct list_head session_lnk;
294         unsigned long iova;     /* virtual address for iommu */
295         unsigned long len;
296         u32 reg_idx;
297         struct ion_handle *hdl;
298 };
299
300 enum vpu_ctx_state {
301         MMU_ACTIVATED   = BIT(0)
302 };
303
304 struct vpu_subdev_data {
305         struct cdev cdev;
306         dev_t dev_t;
307         struct class *cls;
308         struct device *child_dev;
309
310         int irq_enc;
311         int irq_dec;
312         struct vpu_service_info *pservice;
313
314         u32 *regs;
315         enum VCODEC_RUNNING_MODE mode;
316         struct list_head lnk_service;
317
318         struct device *dev;
319
320         struct vpu_device enc_dev;
321         struct vpu_device dec_dev;
322
323         enum VPU_HW_ID hw_id;
324         struct vpu_hw_info *hw_info;
325         struct vpu_task_info *task_info;
326         const struct vpu_trans_info *trans_info;
327
328         u32 reg_size;
329         unsigned long state;
330
331 #ifdef CONFIG_DEBUG_FS
332         struct dentry *debugfs_dir;
333         struct dentry *debugfs_file_regs;
334 #endif
335
336         struct device *mmu_dev;
337 };
338
339 struct vpu_service_info {
340         struct wake_lock wake_lock;
341         struct delayed_work power_off_work;
342         ktime_t last; /* record previous power-on time */
343         /* vpu service structure global lock */
344         struct mutex lock;
345         /* link to link_reg in struct vpu_reg */
346         struct list_head waiting;
347         /* link to link_reg in struct vpu_reg */
348         struct list_head running;
349         /* link to link_reg in struct vpu_reg */
350         struct list_head done;
351         /* link to list_session in struct vpu_session */
352         struct list_head session;
353         atomic_t total_running;
354         atomic_t enabled;
355         atomic_t power_on_cnt;
356         atomic_t power_off_cnt;
357         atomic_t service_on;
358         struct mutex shutdown_lock;
359         struct vpu_reg *reg_codec;
360         struct vpu_reg *reg_pproc;
361         struct vpu_reg *reg_resev;
362         struct vpu_dec_config dec_config;
363         struct vpu_enc_config enc_config;
364
365         bool auto_freq;
366         bool bug_dec_addr;
367         atomic_t freq_status;
368
369         struct clk *aclk_vcodec;
370         struct clk *hclk_vcodec;
371         struct clk *clk_core;
372         struct clk *clk_cabac;
373         struct clk *pd_video;
374
375 #ifdef CONFIG_RESET_CONTROLLER
376         struct reset_control *rst_a;
377         struct reset_control *rst_h;
378         struct reset_control *rst_v;
379 #endif
380         struct device *dev;
381
382         u32 irq_status;
383         atomic_t reset_request;
384         struct ion_client *ion_client;
385         struct list_head mem_region_list;
386
387         enum vcodec_device_id dev_id;
388
389         enum VCODEC_RUNNING_MODE curr_mode;
390         u32 prev_mode;
391
392         struct delayed_work simulate_work;
393
394         u32 mode_bit;
395         u32 mode_ctrl;
396         u32 *reg_base;
397         u32 ioaddr;
398         struct regmap *grf;
399         u32 *grf_base;
400
401         char *name;
402
403         u32 subcnt;
404         struct list_head subdev_list;
405 };
406
407 struct vpu_request {
408         u32 *req;
409         u32 size;
410 };
411
412 #ifdef CONFIG_COMPAT
413 struct compat_vpu_request {
414         compat_uptr_t req;
415         u32 size;
416 };
417 #endif
418
419 /* debugfs root directory for all devices (vpu, hevc). */
420 static struct dentry *parent;
421
422 #ifdef CONFIG_DEBUG_FS
423 static int vcodec_debugfs_init(void);
424 static void vcodec_debugfs_exit(void);
425 static struct dentry *vcodec_debugfs_create_device_dir(
426                 char *dirname, struct dentry *parent);
427 static int debug_vcodec_open(struct inode *inode, struct file *file);
428
429 static const struct file_operations debug_vcodec_fops = {
430         .open = debug_vcodec_open,
431         .read = seq_read,
432         .llseek = seq_lseek,
433         .release = single_release,
434 };
435 #endif
436
437 #define VDPU_SOFT_RESET_REG     101
438 #define VDPU_CLEAN_CACHE_REG    516
439 #define VEPU_CLEAN_CACHE_REG    772
440 #define HEVC_CLEAN_CACHE_REG    260
441
442 #define VPU_REG_ENABLE(base, reg)       writel_relaxed(1, base + reg)
443
444 #define VDPU_SOFT_RESET(base)   VPU_REG_ENABLE(base, VDPU_SOFT_RESET_REG)
445 #define VDPU_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, VDPU_CLEAN_CACHE_REG)
446 #define VEPU_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, VEPU_CLEAN_CACHE_REG)
447 #define HEVC_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, HEVC_CLEAN_CACHE_REG)
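
/*
 * Note: the *_REG values above are u32 word indices, not byte offsets; the
 * pointer arithmetic on the u32 *base in VPU_REG_ENABLE() scales them by 4
 * automatically.
 */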
448
449 #define VPU_POWER_OFF_DELAY             (4 * HZ) /* 4s */
450 #define VPU_TIMEOUT_DELAY               (2 * HZ) /* 2s */
451
452 static void time_record(struct vpu_task_info *task, int is_end)
453 {
454         if (unlikely(debug & DEBUG_TIMING) && task)
455                 do_gettimeofday((is_end) ? (&task->end) : (&task->start));
456 }
457
458 static void time_diff(struct vpu_task_info *task)
459 {
460         vpu_debug(DEBUG_TIMING, "%s task: %ld ms\n", task->name,
461                   (task->end.tv_sec  - task->start.tv_sec)  * 1000 +
462                   (task->end.tv_usec - task->start.tv_usec) / 1000);
463 }
464
465 static void vcodec_enter_mode(struct vpu_subdev_data *data)
466 {
467         int bits;
468         u32 raw = 0;
469         struct vpu_service_info *pservice = data->pservice;
470         struct vpu_subdev_data *subdata, *n;
471
472         if (pservice->subcnt < 2) {
473                 if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
474                         set_bit(MMU_ACTIVATED, &data->state);
475                         if (atomic_read(&pservice->enabled))
476                                 rockchip_iovmm_activate(data->dev);
477                         else
478                                 BUG_ON(!atomic_read(&pservice->enabled));
479                 }
480                 return;
481         }
482
483         if (pservice->curr_mode == data->mode)
484                 return;
485
486         vpu_debug(DEBUG_IOMMU, "vcodec enter mode %d\n", data->mode);
487         list_for_each_entry_safe(subdata, n,
488                                  &pservice->subdev_list, lnk_service) {
489                 if (data != subdata && subdata->mmu_dev &&
490                     test_bit(MMU_ACTIVATED, &subdata->state)) {
491                         clear_bit(MMU_ACTIVATED, &subdata->state);
492                         rockchip_iovmm_deactivate(subdata->dev);
493                 }
494         }
495         bits = 1 << pservice->mode_bit;
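        /*
         * Rockchip GRF registers take a write-enable mask in the upper 16
         * bits: writing (bits << 16) along with the value makes only the
         * selected mode bit take effect and leaves the other bits unchanged.
         */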
496 #ifdef CONFIG_MFD_SYSCON
497         if (pservice->grf) {
498                 regmap_read(pservice->grf, pservice->mode_ctrl, &raw);
499
500                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
501                         regmap_write(pservice->grf, pservice->mode_ctrl,
502                                      raw | bits | (bits << 16));
503                 else
504                         regmap_write(pservice->grf, pservice->mode_ctrl,
505                                      (raw & (~bits)) | (bits << 16));
506         } else if (pservice->grf_base) {
507                 u32 *grf_base = pservice->grf_base;
508
509                 raw = readl_relaxed(grf_base + pservice->mode_ctrl / 4);
510                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
511                         writel_relaxed(raw | bits | (bits << 16),
512                                        grf_base + pservice->mode_ctrl / 4);
513                 else
514                         writel_relaxed((raw & (~bits)) | (bits << 16),
515                                        grf_base + pservice->mode_ctrl / 4);
516         } else {
517                 vpu_err("no grf resource define, switch decoder failed\n");
518                 return;
519         }
520 #else
521         if (pservice->grf_base) {
522                 u32 *grf_base = pservice->grf_base;
523
524                 raw = readl_relaxed(grf_base + pservice->mode_ctrl / 4);
525                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
526                         writel_relaxed(raw | bits | (bits << 16),
527                                        grf_base + pservice->mode_ctrl / 4);
528                 else
529                         writel_relaxed((raw & (~bits)) | (bits << 16),
530                                        grf_base + pservice->mode_ctrl / 4);
531         } else {
532                 vpu_err("no grf resource define, switch decoder failed\n");
533                 return;
534         }
535 #endif
536         if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
537                 set_bit(MMU_ACTIVATED, &data->state);
538                 if (atomic_read(&pservice->enabled))
539                         rockchip_iovmm_activate(data->dev);
540                 else
541                         BUG_ON(!atomic_read(&pservice->enabled));
542         }
543
544         pservice->prev_mode = pservice->curr_mode;
545         pservice->curr_mode = data->mode;
546 }
547
548 static void vcodec_exit_mode(struct vpu_subdev_data *data)
549 {
550         if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
551                 clear_bit(MMU_ACTIVATED, &data->state);
552                 rockchip_iovmm_deactivate(data->dev);
553         }
554         /*
555          * In the VPU Combo case, the hardware must switch its running mode
556          * before the other hardware component starts work. Setting the current
557          * mode to none ensures the hardware switches to its required mode.
558          */
559         data->pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
560 }
561
562 static int vpu_get_clk(struct vpu_service_info *pservice)
563 {
564 #if VCODEC_CLOCK_ENABLE
565         struct device *dev = pservice->dev;
566
567         switch (pservice->dev_id) {
568         case VCODEC_DEVICE_ID_HEVC:
569                 pservice->pd_video = devm_clk_get(dev, "pd_hevc");
570                 if (IS_ERR(pservice->pd_video)) {
571                         dev_err(dev, "failed on clk_get pd_hevc\n");
572                         return -1;
573                 }
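                /* fall through: hevc also needs the cabac/core and vpu clocks */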
574         case VCODEC_DEVICE_ID_COMBO:
575         case VCODEC_DEVICE_ID_RKVDEC:
576                 pservice->clk_cabac = devm_clk_get(dev, "clk_cabac");
577                 if (IS_ERR(pservice->clk_cabac)) {
578                         dev_err(dev, "failed on clk_get clk_cabac\n");
579                         pservice->clk_cabac = NULL;
580                 }
581                 pservice->clk_core = devm_clk_get(dev, "clk_core");
582                 if (IS_ERR(pservice->clk_core)) {
583                         dev_err(dev, "failed on clk_get clk_core\n");
584                         return -1;
585                 }
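                /* fall through: these devices also need the common aclk/hclk below */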
586         case VCODEC_DEVICE_ID_VPU:
587                 pservice->aclk_vcodec = devm_clk_get(dev, "aclk_vcodec");
588                 if (IS_ERR(pservice->aclk_vcodec)) {
589                         dev_err(dev, "failed on clk_get aclk_vcodec\n");
590                         return -1;
591                 }
592
593                 pservice->hclk_vcodec = devm_clk_get(dev, "hclk_vcodec");
594                 if (IS_ERR(pservice->hclk_vcodec)) {
595                         dev_err(dev, "failed on clk_get hclk_vcodec\n");
596                         return -1;
597                 }
598                 if (pservice->pd_video == NULL) {
599                         pservice->pd_video = devm_clk_get(dev, "pd_video");
600                         if (IS_ERR(pservice->pd_video)) {
601                                 pservice->pd_video = NULL;
602                                 dev_info(dev, "do not have pd_video\n");
603                         }
604                 }
605                 break;
606         default:
607                 break;
608         }
609
610         return 0;
611 #else
612         return 0;
613 #endif
614 }
615
616 static void vpu_put_clk(struct vpu_service_info *pservice)
617 {
618 #if VCODEC_CLOCK_ENABLE
619         if (pservice->pd_video)
620                 devm_clk_put(pservice->dev, pservice->pd_video);
621         if (pservice->aclk_vcodec)
622                 devm_clk_put(pservice->dev, pservice->aclk_vcodec);
623         if (pservice->hclk_vcodec)
624                 devm_clk_put(pservice->dev, pservice->hclk_vcodec);
625         if (pservice->clk_core)
626                 devm_clk_put(pservice->dev, pservice->clk_core);
627         if (pservice->clk_cabac)
628                 devm_clk_put(pservice->dev, pservice->clk_cabac);
629 #endif
630 }
631
632 static void vpu_reset(struct vpu_subdev_data *data)
633 {
634         struct vpu_service_info *pservice = data->pservice;
635         enum pmu_idle_req type = IDLE_REQ_VIDEO;
636
637         if (pservice->dev_id == VCODEC_DEVICE_ID_HEVC)
638                 type = IDLE_REQ_HEVC;
639
640         pr_info("%s: resetting...", dev_name(pservice->dev));
641
642 #if defined(CONFIG_ARCH_RK29)
643         clk_disable(aclk_ddr_vepu);
644         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, true);
645         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, true);
646         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, true);
647         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, true);
648         mdelay(10);
649         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, false);
650         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, false);
651         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, false);
652         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, false);
653         clk_enable(aclk_ddr_vepu);
654 #elif defined(CONFIG_ARCH_RK30)
655         pmu_set_idle_request(IDLE_REQ_VIDEO, true);
656         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
657         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, true);
658         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
659         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, true);
660         mdelay(1);
661         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, false);
662         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
663         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, false);
664         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
665         pmu_set_idle_request(IDLE_REQ_VIDEO, false);
666 #else
667 #endif
668         WARN_ON(pservice->reg_codec != NULL);
669         WARN_ON(pservice->reg_pproc != NULL);
670         WARN_ON(pservice->reg_resev != NULL);
671         pservice->reg_codec = NULL;
672         pservice->reg_pproc = NULL;
673         pservice->reg_resev = NULL;
674
675         pr_info("for 3288/3368...");
676 #ifdef CONFIG_RESET_CONTROLLER
677         if (pservice->rst_a && pservice->rst_h) {
678                 pr_info("reset in\n");
679                 if (pservice->rst_v)
680                         reset_control_assert(pservice->rst_v);
681                 reset_control_assert(pservice->rst_a);
682                 reset_control_assert(pservice->rst_h);
683                 udelay(5);
684                 reset_control_deassert(pservice->rst_h);
685                 reset_control_deassert(pservice->rst_a);
686                 if (pservice->rst_v)
687                         reset_control_deassert(pservice->rst_v);
688         }
689 #endif
690
691         if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
692                 clear_bit(MMU_ACTIVATED, &data->state);
693                 if (atomic_read(&pservice->enabled))
694                         rockchip_iovmm_deactivate(data->dev);
695                 else
696                         BUG_ON(!atomic_read(&pservice->enabled));
697         }
698
699         atomic_set(&pservice->reset_request, 0);
700         pr_info("done\n");
701 }
702
703 static void reg_deinit(struct vpu_subdev_data *data, struct vpu_reg *reg);
704 static void vpu_service_session_clear(struct vpu_subdev_data *data,
705                                       struct vpu_session *session)
706 {
707         struct vpu_reg *reg, *n;
708
709         list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
710                 reg_deinit(data, reg);
711         }
712         list_for_each_entry_safe(reg, n, &session->running, session_link) {
713                 reg_deinit(data, reg);
714         }
715         list_for_each_entry_safe(reg, n, &session->done, session_link) {
716                 reg_deinit(data, reg);
717         }
718 }
719
720 static void vpu_service_clear(struct vpu_subdev_data *data)
721 {
722         struct vpu_reg *reg, *n;
723         struct vpu_session *session, *s;
724         struct vpu_service_info *pservice = data->pservice;
725
726         list_for_each_entry_safe(reg, n, &pservice->waiting, status_link) {
727                 reg_deinit(data, reg);
728         }
729
730         /* wake up the session wait events to prevent a timeout hw reset
731          * during the reboot procedure.
732          */
733         list_for_each_entry_safe(session, s,
734                                  &pservice->session, list_session)
735                 wake_up(&session->wait);
736 }
737
738 static void vpu_service_dump(struct vpu_service_info *pservice)
739 {
740 }
741
742
743 static void vpu_service_power_off(struct vpu_service_info *pservice)
744 {
745         int total_running;
746         struct vpu_subdev_data *data = NULL, *n;
747         int ret = atomic_add_unless(&pservice->enabled, -1, 0);
748
749         if (!ret)
750                 return;
751
752         total_running = atomic_read(&pservice->total_running);
753         if (total_running) {
754                 pr_alert("alert: power off when %d task running!!\n",
755                          total_running);
756                 mdelay(50);
757                 pr_alert("alert: delay 50 ms for running task\n");
758                 vpu_service_dump(pservice);
759         }
760
761         pr_info("%s: power off...", dev_name(pservice->dev));
762
763         udelay(5);
764
765         list_for_each_entry_safe(data, n, &pservice->subdev_list, lnk_service) {
766                 if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
767                         clear_bit(MMU_ACTIVATED, &data->state);
768                         rockchip_iovmm_deactivate(data->dev);
769                 }
770         }
771         pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
772
773 #if VCODEC_CLOCK_ENABLE
774         if (pservice->pd_video)
775                 clk_disable_unprepare(pservice->pd_video);
776         if (pservice->hclk_vcodec)
777                 clk_disable_unprepare(pservice->hclk_vcodec);
778         if (pservice->aclk_vcodec)
779                 clk_disable_unprepare(pservice->aclk_vcodec);
780         if (pservice->clk_core)
781                 clk_disable_unprepare(pservice->clk_core);
782         if (pservice->clk_cabac)
783                 clk_disable_unprepare(pservice->clk_cabac);
784 #endif
785         pm_runtime_put(pservice->dev);
786
787         atomic_add(1, &pservice->power_off_cnt);
788         wake_unlock(&pservice->wake_lock);
789         pr_info("done\n");
790 }
791
792 static inline void vpu_queue_power_off_work(struct vpu_service_info *pservice)
793 {
794         queue_delayed_work(system_wq, &pservice->power_off_work,
795                            VPU_POWER_OFF_DELAY);
796 }
797
798 static void vpu_power_off_work(struct work_struct *work_s)
799 {
800         struct delayed_work *dlwork = container_of(work_s,
801                         struct delayed_work, work);
802         struct vpu_service_info *pservice = container_of(dlwork,
803                         struct vpu_service_info, power_off_work);
804
805         if (mutex_trylock(&pservice->lock)) {
806                 vpu_service_power_off(pservice);
807                 mutex_unlock(&pservice->lock);
808         } else {
809                 /* Come back later if the device is busy... */
810                 vpu_queue_power_off_work(pservice);
811         }
812 }
813
814 static void vpu_service_power_on(struct vpu_service_info *pservice)
815 {
816         int ret;
817         ktime_t now = ktime_get();
818
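        /*
         * Re-arm the delayed power-off at most once per second while new
         * requests keep arriving, so the hardware is not powered down
         * between back-to-back tasks.
         */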
819         if (ktime_to_ns(ktime_sub(now, pservice->last)) > NSEC_PER_SEC) {
820                 cancel_delayed_work_sync(&pservice->power_off_work);
821                 vpu_queue_power_off_work(pservice);
822                 pservice->last = now;
823         }
824         ret = atomic_add_unless(&pservice->enabled, 1, 1);
825         if (!ret)
826                 return;
827
828         pr_info("%s: power on\n", dev_name(pservice->dev));
829
830 #define BIT_VCODEC_CLK_SEL      (1<<10)
831         if (cpu_is_rk312x())
832                 writel_relaxed(readl_relaxed(RK_GRF_VIRT + RK312X_GRF_SOC_CON1)
833                         | BIT_VCODEC_CLK_SEL | (BIT_VCODEC_CLK_SEL << 16),
834                         RK_GRF_VIRT + RK312X_GRF_SOC_CON1);
835
836 #if VCODEC_CLOCK_ENABLE
837         if (pservice->aclk_vcodec)
838                 clk_prepare_enable(pservice->aclk_vcodec);
839         if (pservice->hclk_vcodec)
840                 clk_prepare_enable(pservice->hclk_vcodec);
841         if (pservice->clk_core)
842                 clk_prepare_enable(pservice->clk_core);
843         if (pservice->clk_cabac)
844                 clk_prepare_enable(pservice->clk_cabac);
845         if (pservice->pd_video)
846                 clk_prepare_enable(pservice->pd_video);
847 #endif
848         pm_runtime_get_sync(pservice->dev);
849
850         udelay(5);
851         atomic_add(1, &pservice->power_on_cnt);
852         wake_lock(&pservice->wake_lock);
853 }
854
855 static inline bool reg_check_interlace(struct vpu_reg *reg)
856 {
857         u32 type = (reg->reg[3] & (1 << 23));
858
859         return (type > 0);
860 }
861
862 static inline enum VPU_DEC_FMT reg_check_fmt(struct vpu_reg *reg)
863 {
864         enum VPU_DEC_FMT type = (enum VPU_DEC_FMT)((reg->reg[3] >> 28) & 0xf);
865
866         return type;
867 }
868
869 static inline int reg_probe_width(struct vpu_reg *reg)
870 {
871         int width_in_mb = reg->reg[4] >> 23;
872
873         return width_in_mb * 16;
874 }
875
876 static inline int reg_probe_hevc_y_stride(struct vpu_reg *reg)
877 {
878         int y_virstride = reg->reg[8];
879
880         return y_virstride;
881 }
882
883 static int vcodec_fd_to_iova(struct vpu_subdev_data *data,
884                              struct vpu_reg *reg, int fd)
885 {
886         struct vpu_service_info *pservice = data->pservice;
887         struct ion_handle *hdl;
888         int ret = 0;
889         struct vcodec_mem_region *mem_region;
890
891         hdl = ion_import_dma_buf(pservice->ion_client, fd);
892         if (IS_ERR(hdl)) {
893                 vpu_err("import dma-buf from fd %d failed\n", fd);
894                 return PTR_ERR(hdl);
895         }
896         mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
897
898         if (mem_region == NULL) {
899                 vpu_err("allocate memory for iommu memory region failed\n");
900                 ion_free(pservice->ion_client, hdl);
901                 return -ENOMEM;
902         }
903
904         mem_region->hdl = hdl;
905         if (data->mmu_dev)
906                 ret = ion_map_iommu(data->dev, pservice->ion_client,
907                                     mem_region->hdl, &mem_region->iova,
908                                     &mem_region->len);
909         else
910                 ret = ion_phys(pservice->ion_client,
911                                mem_region->hdl,
912                                (ion_phys_addr_t *)&mem_region->iova,
913                                (size_t *)&mem_region->len);
914
915         if (ret < 0) {
916                 vpu_err("fd %d ion map iommu failed\n", fd);
917                 kfree(mem_region);
918                 ion_free(pservice->ion_client, hdl);
919                 return -EFAULT;
920         }
921         INIT_LIST_HEAD(&mem_region->reg_lnk);
922         list_add_tail(&mem_region->reg_lnk, &reg->mem_region_list);
923         return mem_region->iova;
924 }
925
926 /*
927  * NOTE: rkvdec/rkhevc put the scaling list address in the pps buffer; the
928  * hardware reads it by pps id from the video stream data.
929  *
930  * So in the iommu case we need to translate that address. The address data
931  * also uses the 10bit fd + 22bit offset format.
932  * Because the userspace decoder does not give the pps id in the register
933  * file set, the kernel driver needs to translate every scaling list address
934  * in the pps buffer, which means 256 pps entries for H.264 and 64 for H.265.
935  *
936  * To optimize performance, the kernel driver asks the userspace decoder to
937  * set all scaling list addresses in the pps buffer to the same one, the one
938  * used by the current decoding task. The kernel driver then only has to
939  * translate the first address and copy it to the whole pps buffer.
940  */
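
/*
 * Worked example (hypothetical numbers) of the translation done by
 * fill_scaling_list_addr_in_pps() below: for H.264 the pps buffer holds
 * 256 entries of 32 bytes each, and the scaling list address sits at byte
 * offset 23 of every entry. If that 32bit little-endian value is 0x00010005
 * (fd = 5, offset = 0x40 after the >> 10), and fd 5 maps to iova 0x10000000,
 * then 0x10000040 is written back to bytes 23..26 of all 256 entries.
 */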
941 static int fill_scaling_list_addr_in_pps(
942                 struct vpu_subdev_data *data,
943                 struct vpu_reg *reg,
944                 char *pps,
945                 int pps_info_count,
946                 int pps_info_size,
947                 int scaling_list_addr_offset)
948 {
949         int base = scaling_list_addr_offset;
950         int scaling_fd = 0;
951         u32 scaling_offset;
952
953         scaling_offset  = (u32)pps[base + 0];
954         scaling_offset += (u32)pps[base + 1] << 8;
955         scaling_offset += (u32)pps[base + 2] << 16;
956         scaling_offset += (u32)pps[base + 3] << 24;
957
958         scaling_fd = scaling_offset & 0x3ff;
959         scaling_offset = scaling_offset >> 10;
960
961         if (scaling_fd > 0) {
962                 int i = 0;
963                 u32 tmp = vcodec_fd_to_iova(data, reg, scaling_fd);
964
965                 if (IS_ERR_VALUE(tmp))
966                         return -1;
967                 tmp += scaling_offset;
968
969                 for (i = 0; i < pps_info_count; i++, base += pps_info_size) {
970                         pps[base + 0] = (tmp >>  0) & 0xff;
971                         pps[base + 1] = (tmp >>  8) & 0xff;
972                         pps[base + 2] = (tmp >> 16) & 0xff;
973                         pps[base + 3] = (tmp >> 24) & 0xff;
974                 }
975         }
976
977         return 0;
978 }
979
980 static int vcodec_bufid_to_iova(struct vpu_subdev_data *data, const u8 *tbl,
981                                 int size, struct vpu_reg *reg,
982                                 struct extra_info_for_iommu *ext_inf)
983 {
984         struct vpu_service_info *pservice = data->pservice;
985         struct vpu_task_info *task = reg->task;
986         enum FORMAT_TYPE type;
987         struct ion_handle *hdl;
988         int ret = 0;
989         struct vcodec_mem_region *mem_region;
990         int i;
991         int offset = 0;
992
993         if (tbl == NULL || size <= 0) {
994                 dev_err(pservice->dev, "invalid input arguments\n");
995                 return -1;
996         }
997
998         if (task->get_fmt)
999                 type = task->get_fmt(reg->reg);
1000         else {
1001                 pr_err("invalid task with NULL get_fmt\n");
1002                 return -1;
1003         }
1004
1005         for (i = 0; i < size; i++) {
1006                 int usr_fd = reg->reg[tbl[i]] & 0x3FF;
1007
1008                 /* if userspace did not set an fd in this register, skip it */
1009                 if (usr_fd == 0)
1010                         continue;
1011
1012                 /*
1013                  * special offset scale case
1014                  *
1015                  * This handles the fd + offset translation.
1016                  * One register has 32 bits. We need to transfer both the
1017                  * buffer file handle and the start address offset, so we pack
1018                  * the file handle and the offset together in the format below.
1019                  *
1020                  *  0~9  bit: buffer file handle, range 0 ~ 1023
1021                  * 10~31 bit: offset, range 0 ~ 4M
1022                  *
1023                  * But in the 4K case the offset can be larger than 4M.
1024                  * So for H.264 4K on the vpu/vpu2 decoder we scale the offset
1025                  * by 16. MPEG4 uses the same register for colmv and does not
1026                  * need the scaling.
1027                  *
1028                  * RKVdec does not have this issue.
1029                  */
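
                /*
                 * Worked example with hypothetical values: a register word
                 * of 0x00400005 carries fd = 5 (bits 0~9) and offset =
                 * 0x1000 bytes (bits 10~31). For the scaled H.264 dir_mv
                 * case the effective offset becomes
                 * (0x00400005 >> 10) << 4 = 0x10000 bytes.
                 */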
1030                 if ((type == FMT_H264D || type == FMT_VP9D) &&
1031                     task->reg_dir_mv > 0 && task->reg_dir_mv == tbl[i])
1032                         offset = reg->reg[tbl[i]] >> 10 << 4;
1033                 else
1034                         offset = reg->reg[tbl[i]] >> 10;
1035
1036                 vpu_debug(DEBUG_IOMMU, "pos %3d fd %3d offset %10d\n",
1037                           tbl[i], usr_fd, offset);
1038
1039                 hdl = ion_import_dma_buf(pservice->ion_client, usr_fd);
1040                 if (IS_ERR(hdl)) {
1041                         dev_err(pservice->dev,
1042                                 "import dma-buf from fd %d failed, reg[%d]\n",
1043                                 usr_fd, tbl[i]);
1044                         return PTR_ERR(hdl);
1045                 }
1046
1047                 if (task->reg_pps > 0 && task->reg_pps == tbl[i]) {
1048                         int pps_info_offset;
1049                         int pps_info_count;
1050                         int pps_info_size;
1051                         int scaling_list_addr_offset;
1052
1053                         switch (type) {
1054                         case FMT_H264D: {
1055                                 pps_info_offset = offset;
1056                                 pps_info_count = 256;
1057                                 pps_info_size = 32;
1058                                 scaling_list_addr_offset = 23;
1059                         } break;
1060                         case FMT_H265D: {
1061                                 pps_info_offset = 0;
1062                                 pps_info_count = 64;
1063                                 pps_info_size = 80;
1064                                 scaling_list_addr_offset = 74;
1065                         } break;
1066                         default: {
1067                                 pps_info_offset = 0;
1068                                 pps_info_count = 0;
1069                                 pps_info_size = 0;
1070                                 scaling_list_addr_offset = 0;
1071                         } break;
1072                         }
1073
1074                         vpu_debug(DEBUG_PPS_FILL,
1075                                   "scaling list filling parameter:\n");
1076                         vpu_debug(DEBUG_PPS_FILL,
1077                                   "pps_info_offset %d\n", pps_info_offset);
1078                         vpu_debug(DEBUG_PPS_FILL,
1079                                   "pps_info_count  %d\n", pps_info_count);
1080                         vpu_debug(DEBUG_PPS_FILL,
1081                                   "pps_info_size   %d\n", pps_info_size);
1082                         vpu_debug(DEBUG_PPS_FILL,
1083                                   "scaling_list_addr_offset %d\n",
1084                                   scaling_list_addr_offset);
1085
1086                         if (pps_info_count) {
1087                                 char *pps = (char *)ion_map_kernel(
1088                                                 pservice->ion_client, hdl);
1089                                 vpu_debug(DEBUG_PPS_FILL,
1090                                           "scaling list setting pps %p\n", pps);
1091                                 pps += pps_info_offset;
1092
1093                                 if (fill_scaling_list_addr_in_pps(
1094                                                 data, reg, pps,
1095                                                 pps_info_count,
1096                                                 pps_info_size,
1097                                                 scaling_list_addr_offset) < 0) {
1098                                         ion_free(pservice->ion_client, hdl);
1099                                         return -1;
1100                                 }
1101                         }
1102                 }
1103
1104                 mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
1105
1106                 if (!mem_region) {
1107                         ion_free(pservice->ion_client, hdl);
1108                         return -ENOMEM;
1109                 }
1110
1111                 mem_region->hdl = hdl;
1112                 mem_region->reg_idx = tbl[i];
1113
1114                 if (data->mmu_dev)
1115                         ret = ion_map_iommu(data->dev,
1116                                             pservice->ion_client,
1117                                             mem_region->hdl,
1118                                             &mem_region->iova,
1119                                             &mem_region->len);
1120                 else
1121                         ret = ion_phys(pservice->ion_client,
1122                                        mem_region->hdl,
1123                                        (ion_phys_addr_t *)&mem_region->iova,
1124                                        (size_t *)&mem_region->len);
1125
1126                 if (ret < 0) {
1127                         dev_err(pservice->dev, "reg %d fd %d ion map iommu failed\n",
1128                                 tbl[i], usr_fd);
1129                         kfree(mem_region);
1130                         ion_free(pservice->ion_client, hdl);
1131                         return ret;
1132                 }
1133
1134                 /*
1135                  * special for vpu dec num 12: record decoded length
1136                  * hacking for decoded length
1137                  * NOTE: not a perfect fix, the fd is not recorded
1138                  */
1139                 if (task->reg_len > 0 && task->reg_len == tbl[i]) {
1140                         reg->dec_base = mem_region->iova + offset;
1141                         vpu_debug(DEBUG_REGISTER, "dec_set %08x\n",
1142                                   reg->dec_base);
1143                 }
1144
1145                 reg->reg[tbl[i]] = mem_region->iova + offset;
1146                 INIT_LIST_HEAD(&mem_region->reg_lnk);
1147                 list_add_tail(&mem_region->reg_lnk, &reg->mem_region_list);
1148         }
1149
1150         if (ext_inf != NULL && ext_inf->magic == EXTRA_INFO_MAGIC) {
1151                 for (i = 0; i < ext_inf->cnt; i++) {
1152                         vpu_debug(DEBUG_IOMMU, "reg[%d] + offset %d\n",
1153                                   ext_inf->elem[i].index,
1154                                   ext_inf->elem[i].offset);
1155                         reg->reg[ext_inf->elem[i].index] +=
1156                                 ext_inf->elem[i].offset;
1157                 }
1158         }
1159
1160         return 0;
1161 }
1162
1163 static int vcodec_reg_address_translate(struct vpu_subdev_data *data,
1164                                         struct vpu_reg *reg,
1165                                         struct extra_info_for_iommu *ext_inf)
1166 {
1167         enum FORMAT_TYPE type = reg->task->get_fmt(reg->reg);
1168
1169         if (type < FMT_TYPE_BUTT) {
1170                 const struct vpu_trans_info *info = &reg->trans[type];
1171                 const u8 *tbl = info->table;
1172                 int size = info->count;
1173
1174                 return vcodec_bufid_to_iova(data, tbl, size, reg, ext_inf);
1175         }
1176         pr_err("found invalid format type!\n");
1177         return -1;
1178 }
1179
1180 static void get_reg_freq(struct vpu_subdev_data *data, struct vpu_reg *reg)
1181 {
1182
1183         if (!soc_is_rk2928g()) {
1184                 if (reg->type == VPU_DEC || reg->type == VPU_DEC_PP) {
1185                         if (reg_check_fmt(reg) == VPU_DEC_FMT_H264) {
1186                                 if (reg_probe_width(reg) > 3200) {
1187                                         /*raise frequency for 4k avc.*/
1188                                         reg->freq = VPU_FREQ_600M;
1189                                 }
1190                         } else {
1191                                 if (reg_check_interlace(reg))
1192                                         reg->freq = VPU_FREQ_400M;
1193                         }
1194                 }
1195                 if (data->hw_id == HEVC_ID) {
1196                         if (reg_probe_hevc_y_stride(reg) > 60000)
1197                                 reg->freq = VPU_FREQ_400M;
1198                 }
1199                 if (reg->type == VPU_PP)
1200                         reg->freq = VPU_FREQ_400M;
1201         }
1202 }
1203
1204 static struct vpu_reg *reg_init(struct vpu_subdev_data *data,
1205                                 struct vpu_session *session,
1206                                 void __user *src, u32 size)
1207 {
1208         struct vpu_service_info *pservice = data->pservice;
1209         int extra_size = 0;
1210         struct extra_info_for_iommu extra_info;
1211         struct vpu_reg *reg = kzalloc(sizeof(*reg) + data->reg_size,
1212                                       GFP_KERNEL);
1213
1214         vpu_debug_enter();
1215
1216         if (NULL == reg) {
1217                 vpu_err("error: kzalloc failed\n");
1218                 return NULL;
1219         }
1220
1221         if (size > data->reg_size) {
1222                 extra_size = min_t(u32, size - data->reg_size, sizeof(extra_info));
1223                 size = data->reg_size;
1224         }
1225         reg->session = session;
1226         reg->data = data;
1227         reg->type = session->type;
1228         reg->size = size;
1229         reg->freq = VPU_FREQ_DEFAULT;
1230         reg->task = &data->task_info[session->type];
1231         reg->trans = data->trans_info;
1232         reg->reg = (u32 *)&reg[1];
1233         INIT_LIST_HEAD(&reg->session_link);
1234         INIT_LIST_HEAD(&reg->status_link);
1235
1236         INIT_LIST_HEAD(&reg->mem_region_list);
1237
1238         if (copy_from_user(&reg->reg[0], (void __user *)src, size)) {
1239                 vpu_err("error: copy_from_user failed\n");
1240                 kfree(reg);
1241                 return NULL;
1242         }
1243
1244         if (copy_from_user(&extra_info, (u8 *)src + size, extra_size)) {
1245                 vpu_err("error: copy_from_user failed\n");
1246                 kfree(reg);
1247                 return NULL;
1248         }
1249
1250         if (0 > vcodec_reg_address_translate(data, reg, &extra_info)) {
1251                 int i = 0;
1252
1253                 vpu_err("error: translate reg address failed, dumping regs\n");
1254                 for (i = 0; i < size >> 2; i++)
1255                         pr_err("reg[%02d]: %08x\n", i, *((u32 *)src + i));
1256
1257                 kfree(reg);
1258                 return NULL;
1259         }
1260
1261         mutex_lock(&pservice->lock);
1262         list_add_tail(&reg->status_link, &pservice->waiting);
1263         list_add_tail(&reg->session_link, &session->waiting);
1264         mutex_unlock(&pservice->lock);
1265
1266         if (pservice->auto_freq)
1267                 get_reg_freq(data, reg);
1268
1269         vpu_debug_leave();
1270         return reg;
1271 }
1272
1273 static void reg_deinit(struct vpu_subdev_data *data, struct vpu_reg *reg)
1274 {
1275         struct vpu_service_info *pservice = data->pservice;
1276         struct vcodec_mem_region *mem_region = NULL, *n;
1277
1278         list_del_init(&reg->session_link);
1279         list_del_init(&reg->status_link);
1280         if (reg == pservice->reg_codec)
1281                 pservice->reg_codec = NULL;
1282         if (reg == pservice->reg_pproc)
1283                 pservice->reg_pproc = NULL;
1284
1285         /* release the memory regions attached to this register table */
1286         list_for_each_entry_safe(mem_region, n,
1287                         &reg->mem_region_list, reg_lnk) {
1288                 ion_free(pservice->ion_client, mem_region->hdl);
1289                 list_del_init(&mem_region->reg_lnk);
1290                 kfree(mem_region);
1291         }
1292
1293         kfree(reg);
1294 }
1295
1296 static void reg_from_wait_to_run(struct vpu_service_info *pservice,
1297                                  struct vpu_reg *reg)
1298 {
1299         vpu_debug_enter();
1300         list_del_init(&reg->status_link);
1301         list_add_tail(&reg->status_link, &pservice->running);
1302
1303         list_del_init(&reg->session_link);
1304         list_add_tail(&reg->session_link, &reg->session->running);
1305         vpu_debug_leave();
1306 }
1307
1308 static void reg_copy_from_hw(struct vpu_reg *reg, u32 *src, u32 count)
1309 {
1310         int i;
1311         u32 *dst = reg->reg;
1312
1313         vpu_debug_enter();
1314         for (i = 0; i < count; i++, src++)
1315                 *dst++ = readl_relaxed(src);
1316
1317         dst = (u32 *)&reg->reg[0];
1318         for (i = 0; i < count; i++)
1319                 vpu_debug(DEBUG_GET_REG, "get reg[%02d] %08x\n", i, dst[i]);
1320
1321         vpu_debug_leave();
1322 }
1323
1324 static void reg_from_run_to_done(struct vpu_subdev_data *data,
1325                                  struct vpu_reg *reg)
1326 {
1327         struct vpu_service_info *pservice = data->pservice;
1328         struct vpu_hw_info *hw_info = data->hw_info;
1329         struct vpu_task_info *task = reg->task;
1330
1331         vpu_debug_enter();
1332
1333         list_del_init(&reg->status_link);
1334         list_add_tail(&reg->status_link, &pservice->done);
1335
1336         list_del_init(&reg->session_link);
1337         list_add_tail(&reg->session_link, &reg->session->done);
1338
1339         switch (reg->type) {
1340         case VPU_ENC: {
1341                 pservice->reg_codec = NULL;
1342                 reg_copy_from_hw(reg, data->enc_dev.regs, hw_info->enc_reg_num);
1343                 reg->reg[task->reg_irq] = pservice->irq_status;
1344         } break;
1345         case VPU_DEC: {
1346                 pservice->reg_codec = NULL;
1347                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1348
1349                 /* revert hack for decoded length */
1350                 if (task->reg_len > 0) {
1351                         int reg_len = task->reg_len;
1352                         u32 dec_get = reg->reg[reg_len];
1353                         s32 dec_length = dec_get - reg->dec_base;
1354
1355                         vpu_debug(DEBUG_REGISTER,
1356                                   "dec_get %08x dec_length %d\n",
1357                                   dec_get, dec_length);
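                        /*
                         * Shift the byte length into bits 10..31 so it lands
                         * in the offset field of the packed fd + offset
                         * format that userspace submitted.
                         */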
1358                         reg->reg[reg_len] = dec_length << 10;
1359                 }
1360
1361                 reg->reg[task->reg_irq] = pservice->irq_status;
1362         } break;
1363         case VPU_PP: {
1364                 pservice->reg_pproc = NULL;
1365                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1366                 writel_relaxed(0, data->dec_dev.regs + task->reg_irq);
1367         } break;
1368         case VPU_DEC_PP: {
1369                 u32 pipe_mode;
1370                 u32 *regs = data->dec_dev.regs;
1371
1372                 pservice->reg_codec = NULL;
1373                 pservice->reg_pproc = NULL;
1374
1375                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1376
1377                 /* NOTE: remove pp pipeline mode flag first */
1378                 pipe_mode = readl_relaxed(regs + task->reg_pipe);
1379                 pipe_mode &= ~task->pipe_mask;
1380                 writel_relaxed(pipe_mode, regs + task->reg_pipe);
1381
1382                 /* revert hack for decoded length */
1383                 if (task->reg_len > 0) {
1384                         int reg_len = task->reg_len;
1385                         u32 dec_get = reg->reg[reg_len];
1386                         s32 dec_length = dec_get - reg->dec_base;
1387
1388                         vpu_debug(DEBUG_REGISTER,
1389                                   "dec_get %08x dec_length %d\n",
1390                                   dec_get, dec_length);
1391                         reg->reg[reg_len] = dec_length << 10;
1392                 }
1393
1394                 reg->reg[task->reg_irq] = pservice->irq_status;
1395         } break;
1396         default: {
1397                 vpu_err("error: copy reg from hw with unknown type %d\n",
1398                         reg->type);
1399         } break;
1400         }
1401         vcodec_exit_mode(data);
1402
1403         atomic_sub(1, &reg->session->task_running);
1404         atomic_sub(1, &pservice->total_running);
1405         wake_up(&reg->session->wait);
1406
1407         vpu_debug_leave();
1408 }
1409
1410 static void vpu_service_set_freq(struct vpu_service_info *pservice,
1411                                  struct vpu_reg *reg)
1412 {
1413         enum VPU_FREQ curr = atomic_read(&pservice->freq_status);
1414
1415         if (curr == reg->freq)
1416                 return;
1417
1418         atomic_set(&pservice->freq_status, reg->freq);
1419         switch (reg->freq) {
1420         case VPU_FREQ_200M: {
1421                 clk_set_rate(pservice->aclk_vcodec, 200*MHZ);
1422         } break;
1423         case VPU_FREQ_266M: {
1424                 clk_set_rate(pservice->aclk_vcodec, 266*MHZ);
1425         } break;
1426         case VPU_FREQ_300M: {
1427                 clk_set_rate(pservice->aclk_vcodec, 300*MHZ);
1428         } break;
1429         case VPU_FREQ_400M: {
1430                 clk_set_rate(pservice->aclk_vcodec, 400*MHZ);
1431         } break;
1432         case VPU_FREQ_500M: {
1433                 clk_set_rate(pservice->aclk_vcodec, 500*MHZ);
1434         } break;
1435         case VPU_FREQ_600M: {
1436                 clk_set_rate(pservice->aclk_vcodec, 600*MHZ);
1437         } break;
1438         default: {
1439                 unsigned long rate = 300*MHZ;
1440
1441                 if (soc_is_rk2928g())
1442                         rate = 400*MHZ;
1443
1444                 clk_set_rate(pservice->aclk_vcodec, rate);
1445         } break;
1446         }
1447 }
1448
1449 static void reg_copy_to_hw(struct vpu_subdev_data *data, struct vpu_reg *reg)
1450 {
1451         struct vpu_service_info *pservice = data->pservice;
1452         struct vpu_task_info *task = reg->task;
1453         struct vpu_hw_info *hw_info = data->hw_info;
1454         int i;
1455         u32 *src = (u32 *)&reg->reg[0];
1456         u32 enable_mask = task->enable_mask;
1457         u32 gating_mask = task->gating_mask;
1458         u32 reg_en = task->reg_en;
1459
1460         vpu_debug_enter();
1461
1462         atomic_add(1, &pservice->total_running);
1463         atomic_add(1, &reg->session->task_running);
1464
1465         if (pservice->auto_freq)
1466                 vpu_service_set_freq(pservice, reg);
1467
1468         vcodec_enter_mode(data);
1469
1470         switch (reg->type) {
1471         case VPU_ENC: {
1472                 u32 *dst = data->enc_dev.regs;
1473                 u32 base = 0;
1474                 u32 end  = hw_info->enc_reg_num;
1475                 /* u32 reg_gating = task->reg_gating; */
1476
1477                 pservice->reg_codec = reg;
1478
1479                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1480                           base, end, reg_en, enable_mask, gating_mask);
1481
1482                 VEPU_CLEAN_CACHE(dst);
1483
1484                 if (debug & DEBUG_SET_REG)
1485                         for (i = base; i < end; i++)
1486                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1487                                           i, src[i]);
1488
1489                 /*
1490                  * NOTE: the encoder needs its mode set up first
1491                  */
1492                 writel_relaxed(src[reg_en] & enable_mask, dst + reg_en);
1493
1494                 /* NOTE: encoder gating is not on enable register */
1495                 /* src[reg_gating] |= gating_mask; */
1496
1497                 for (i = base; i < end; i++) {
1498                         if (i != reg_en)
1499                                 writel_relaxed(src[i], dst + i);
1500                 }
1501
1502                 writel(src[reg_en], dst + reg_en);
1503                 dsb(sy);
1504
1505                 time_record(reg->task, 0);
1506         } break;
1507         case VPU_DEC: {
1508                 u32 *dst = data->dec_dev.regs;
1509                 u32 len = hw_info->dec_reg_num;
1510                 u32 base = hw_info->base_dec;
1511                 u32 end  = hw_info->end_dec;
1512
1513                 pservice->reg_codec = reg;
1514
1515                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1516                           base, end, reg_en, enable_mask, gating_mask);
1517
1518                 VDPU_CLEAN_CACHE(dst);
1519
1520                 /* on rkvdec set cache size to 64 bytes */
1521                 if (pservice->dev_id == VCODEC_DEVICE_ID_RKVDEC) {
1522                         u32 *cache_base = dst + 0x100;
1523                         u32 val = (debug & DEBUG_CACHE_32B) ? (0x3) : (0x13);
1524                         writel_relaxed(val, cache_base + 0x07);
1525                         writel_relaxed(val, cache_base + 0x17);
1526                 }
1527
1528                 if (debug & DEBUG_SET_REG)
1529                         for (i = 0; i < len; i++)
1530                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1531                                           i, src[i]);
1532
1533                 /*
1534                  * NOTE: The end register is invalid; do NOT write to it.
1535                  *       The base register, however, must be written.
1536                  */
1537                 for (i = base; i < end; i++) {
1538                         if (i != reg_en)
1539                                 writel_relaxed(src[i], dst + i);
1540                 }
1541
1542                 writel(src[reg_en] | gating_mask, dst + reg_en);
1543                 dsb(sy);
1544
1545                 time_record(reg->task, 0);
1546         } break;
1547         case VPU_PP: {
1548                 u32 *dst = data->dec_dev.regs;
1549                 u32 base = hw_info->base_pp;
1550                 u32 end  = hw_info->end_pp;
1551
1552                 pservice->reg_pproc = reg;
1553
1554                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1555                           base, end, reg_en, enable_mask, gating_mask);
1556
1557                 if (debug & DEBUG_SET_REG)
1558                         for (i = base; i < end; i++)
1559                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1560                                           i, src[i]);
1561
1562                 for (i = base; i < end; i++) {
1563                         if (i != reg_en)
1564                                 writel_relaxed(src[i], dst + i);
1565                 }
1566
1567                 writel(src[reg_en] | gating_mask, dst + reg_en);
1568                 dsb(sy);
1569
1570                 time_record(reg->task, 0);
1571         } break;
1572         case VPU_DEC_PP: {
1573                 u32 *dst = data->dec_dev.regs;
1574                 u32 base = hw_info->base_dec_pp;
1575                 u32 end  = hw_info->end_dec_pp;
1576
1577                 pservice->reg_codec = reg;
1578                 pservice->reg_pproc = reg;
1579
1580                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1581                           base, end, reg_en, enable_mask, gating_mask);
1582
1583                 /* VDPU_SOFT_RESET(dst); */
1584                 VDPU_CLEAN_CACHE(dst);
1585
1586                 if (debug & DEBUG_SET_REG)
1587                         for (i = base; i < end; i++)
1588                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1589                                           i, src[i]);
1590
1591                 for (i = base; i < end; i++) {
1592                         if (i != reg_en)
1593                                 writel_relaxed(src[i], dst + i);
1594                 }
1595
1596                 /* NOTE: dec output must be disabled */
1597
1598                 writel(src[reg_en] | gating_mask, dst + reg_en);
1599                 dsb(sy);
1600
1601                 time_record(reg->task, 0);
1602         } break;
1603         default: {
1604                 vpu_err("error: unsupported session type %d\n", reg->type);
1605                 atomic_sub(1, &pservice->total_running);
1606                 atomic_sub(1, &reg->session->task_running);
1607         } break;
1608         }
1609
1610         vpu_debug_leave();
1611 }
1612
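/*
 * try_set_reg - dispatch the head of the waiting list if the hardware is free.
 *
 * Returns early when the service has been shut down (service_on cleared under
 * shutdown_lock). A task is started only when the block(s) it needs are idle,
 * with extra restrictions when automatic frequency scaling is enabled. A
 * pending reset request is honoured first, but only while both the codec and
 * the post-processor are idle.
 */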
1613 static void try_set_reg(struct vpu_subdev_data *data)
1614 {
1615         struct vpu_service_info *pservice = data->pservice;
1616
1617         vpu_debug_enter();
1618
1619         mutex_lock(&pservice->shutdown_lock);
1620         if (atomic_read(&pservice->service_on) == 0) {
1621                 mutex_unlock(&pservice->shutdown_lock);
1622                 return;
1623         }
1624         if (!list_empty(&pservice->waiting)) {
1625                 struct vpu_reg *reg_codec = pservice->reg_codec;
1626                 struct vpu_reg *reg_pproc = pservice->reg_pproc;
1627                 int can_set = 0;
1628                 bool change_able = (reg_codec == NULL) && (reg_pproc == NULL);
1629                 int reset_request = atomic_read(&pservice->reset_request);
1630                 struct vpu_reg *reg = list_entry(pservice->waiting.next,
1631                                 struct vpu_reg, status_link);
1632
1633                 vpu_service_power_on(pservice);
1634
1635                 if (change_able || !reset_request) {
1636                         switch (reg->type) {
1637                         case VPU_ENC: {
1638                                 if (change_able)
1639                                         can_set = 1;
1640                         } break;
1641                         case VPU_DEC: {
1642                                 if (reg_codec == NULL)
1643                                         can_set = 1;
1644                                 if (pservice->auto_freq && (reg_pproc != NULL))
1645                                         can_set = 0;
1646                         } break;
1647                         case VPU_PP: {
1648                                 if (reg_codec == NULL) {
1649                                         if (reg_pproc == NULL)
1650                                                 can_set = 1;
1651                                 } else {
1652                                         if ((reg_codec->type == VPU_DEC) &&
1653                                             (reg_pproc == NULL))
1654                                                 can_set = 1;
1655
1656                                         /*
1657                                          * NOTE:
1658                                          * cannot change the frequency
1659                                          * while the vpu is working
1660                                          */
1661                                         if (pservice->auto_freq)
1662                                                 can_set = 0;
1663                                 }
1664                         } break;
1665                         case VPU_DEC_PP: {
1666                                 if (change_able)
1667                                         can_set = 1;
1668                         } break;
1669                         default: {
1670                                 pr_err("undefined reg type %d\n", reg->type);
1671                         } break;
1672                         }
1673                 }
1674
1675                 /* then check reset request */
1676                 if (reset_request && !change_able)
1677                         reset_request = 0;
1678
1679                 /* do reset before setting registers */
1680                 if (reset_request)
1681                         vpu_reset(data);
1682
1683                 if (can_set) {
1684                         reg_from_wait_to_run(pservice, reg);
1685                         reg_copy_to_hw(reg->data, reg);
1686                 }
1687         }
1688
1689         mutex_unlock(&pservice->shutdown_lock);
1690         vpu_debug_leave();
1691 }
1692
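/*
 * return_reg - copy a finished task's registers back to userspace.
 *
 * Selects the register window by task type (encoder from 0, decoder and
 * dec+pp from base_dec_pp, post-processor from base_pp), copies reg->size
 * bytes to the user buffer and then releases the vpu_reg.
 */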
1693 static int return_reg(struct vpu_subdev_data *data,
1694                       struct vpu_reg *reg, u32 __user *dst)
1695 {
1696         struct vpu_hw_info *hw_info = data->hw_info;
1697         size_t size = reg->size;
1698         u32 base;
1699
1700         vpu_debug_enter();
1701         switch (reg->type) {
1702         case VPU_ENC: {
1703                 base = 0;
1704         } break;
1705         case VPU_DEC: {
1706                 base = hw_info->base_dec_pp;
1707         } break;
1708         case VPU_PP: {
1709                 base = hw_info->base_pp;
1710         } break;
1711         case VPU_DEC_PP: {
1712                 base = hw_info->base_dec_pp;
1713         } break;
1714         default: {
1715                 vpu_err("error: copy reg to user with unknown type %d\n",
1716                         reg->type);
1717                 return -EFAULT;
1718         } break;
1719         }
1720
1721         if (copy_to_user(dst, &reg->reg[base], size)) {
1722                 vpu_err("error: copy_to_user failed\n");
1723                 return -EFAULT;
1724         }
1725
1726         reg_deinit(data, reg);
1727         vpu_debug_leave();
1728         return 0;
1729 }
1730
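/*
 * Ioctl interface of the vcodec char device:
 *
 *   VPU_IOC_SET_CLIENT_TYPE     select encoder/decoder/pp for this session
 *   VPU_IOC_GET_HW_FUSE_STATUS  copy vpu_dec_config/vpu_enc_config to user
 *   VPU_IOC_SET_REG             queue the register set described by a
 *                               struct vpu_request (req, size) and try to
 *                               start it
 *   VPU_IOC_GET_REG             wait up to VPU_TIMEOUT_DELAY for a finished
 *                               task and copy its registers back; on timeout
 *                               the hardware is reset and the session cleared
 *   VPU_IOC_PROBE_IOMMU_STATUS  always reports the IOMMU as enabled
 *
 * Illustrative userspace flow (a sketch only, not part of this driver; the
 * device node name and the register layout are platform specific, and "regs"
 * stands for the register array prepared by the userspace codec library):
 *
 *     int fd = open("/dev/vpu_service", O_RDWR);
 *     ioctl(fd, VPU_IOC_SET_CLIENT_TYPE, VPU_DEC);
 *     struct vpu_request req = { .req = regs, .size = sizeof(regs) };
 *     ioctl(fd, VPU_IOC_SET_REG, &req);     queues one decode task
 *     ioctl(fd, VPU_IOC_GET_REG, &req);     waits for its completion
 */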
1731 static long vpu_service_ioctl(struct file *filp, unsigned int cmd,
1732                               unsigned long arg)
1733 {
1734         struct vpu_subdev_data *data =
1735                 container_of(filp->f_path.dentry->d_inode->i_cdev,
1736                              struct vpu_subdev_data, cdev);
1737         struct vpu_service_info *pservice = data->pservice;
1738         struct vpu_session *session = (struct vpu_session *)filp->private_data;
1739
1740         vpu_debug_enter();
1741         if (NULL == session)
1742                 return -EINVAL;
1743
1744         switch (cmd) {
1745         case VPU_IOC_SET_CLIENT_TYPE: {
1746                 session->type = (enum VPU_CLIENT_TYPE)arg;
1747                 vpu_debug(DEBUG_IOCTL, "pid %d set client type %d\n",
1748                           session->pid, session->type);
1749         } break;
1750         case VPU_IOC_GET_HW_FUSE_STATUS: {
1751                 struct vpu_request req;
1752
1753                 vpu_debug(DEBUG_IOCTL, "pid %d get hw status %d\n",
1754                           session->pid, session->type);
1755                 if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
1756                         vpu_err("error: get hw status copy_from_user failed\n");
1757                         return -EFAULT;
1758                 } else {
1759                         void *config = (session->type != VPU_ENC) ?
1760                                        ((void *)&pservice->dec_config) :
1761                                        ((void *)&pservice->enc_config);
1762                         size_t size = (session->type != VPU_ENC) ?
1763                                       (sizeof(struct vpu_dec_config)) :
1764                                       (sizeof(struct vpu_enc_config));
1765                         if (copy_to_user((void __user *)req.req,
1766                                          config, size)) {
1767                                 vpu_err("error: get hw status copy_to_user failed type %d\n",
1768                                         session->type);
1769                                 return -EFAULT;
1770                         }
1771                 }
1772         } break;
1773         case VPU_IOC_SET_REG: {
1774                 struct vpu_request req;
1775                 struct vpu_reg *reg;
1776
1777                 vpu_debug(DEBUG_IOCTL, "pid %d set reg type %d\n",
1778                           session->pid, session->type);
1779                 if (copy_from_user(&req, (void __user *)arg,
1780                                    sizeof(struct vpu_request))) {
1781                         vpu_err("error: set reg copy_from_user failed\n");
1782                         return -EFAULT;
1783                 }
1784
1785                 reg = reg_init(data, session, (void __user *)req.req, req.size);
1786                 if (NULL == reg) {
1787                         return -EFAULT;
1788                 } else {
1789                         mutex_lock(&pservice->lock);
1790                         try_set_reg(data);
1791                         mutex_unlock(&pservice->lock);
1792                 }
1793         } break;
1794         case VPU_IOC_GET_REG: {
1795                 struct vpu_request req;
1796                 struct vpu_reg *reg;
1797                 int ret;
1798
1799                 vpu_debug(DEBUG_IOCTL, "pid %d get reg type %d\n",
1800                           session->pid, session->type);
1801                 if (copy_from_user(&req, (void __user *)arg,
1802                                    sizeof(struct vpu_request))) {
1803                         vpu_err("error: get reg copy_from_user failed\n");
1804                         return -EFAULT;
1805                 }
1806
1807                 ret = wait_event_timeout(session->wait,
1808                                          !list_empty(&session->done),
1809                                          VPU_TIMEOUT_DELAY);
1810
1811                 if (!list_empty(&session->done)) {
1812                         if (ret < 0)
1813                                 vpu_err("warning: pid %d wait task error ret %d\n",
1814                                         session->pid, ret);
1815                         ret = 0;
1816                 } else {
1817                         if (unlikely(ret < 0)) {
1818                                 vpu_err("error: pid %d wait task ret %d\n",
1819                                         session->pid, ret);
1820                         } else if (ret == 0) {
1821                                 vpu_err("error: pid %d wait %d task done timeout\n",
1822                                         session->pid,
1823                                         atomic_read(&session->task_running));
1824                                 ret = -ETIMEDOUT;
1825                         }
1826                 }
1827
1828                 if (ret < 0) {
1829                         int task_running = atomic_read(&session->task_running);
1830
1831                         mutex_lock(&pservice->lock);
1832                         vpu_service_dump(pservice);
1833                         if (task_running) {
1834                                 atomic_set(&session->task_running, 0);
1835                                 atomic_sub(task_running,
1836                                            &pservice->total_running);
1837                                 pr_err("%d task(s) still running but not returned, resetting hardware...",
1838                                        task_running);
1839                                 vpu_reset(data);
1840                                 pr_err("done\n");
1841                         }
1842                         vpu_service_session_clear(data, session);
1843                         mutex_unlock(&pservice->lock);
1844                         return ret;
1845                 }
1846
1847                 mutex_lock(&pservice->lock);
1848                 reg = list_entry(session->done.next,
1849                                  struct vpu_reg, session_link);
1850                 return_reg(data, reg, (u32 __user *)req.req);
1851                 mutex_unlock(&pservice->lock);
1852         } break;
1853         case VPU_IOC_PROBE_IOMMU_STATUS: {
1854                 int iommu_enable = 1;
1855
1856                 vpu_debug(DEBUG_IOCTL, "VPU_IOC_PROBE_IOMMU_STATUS\n");
1857
1858                 if (copy_to_user((void __user *)arg,
1859                                  &iommu_enable, sizeof(int))) {
1860                         vpu_err("error: iommu status copy_to_user failed\n");
1861                         return -EFAULT;
1862                 }
1863         } break;
1864         default: {
1865                 vpu_err("error: unknown vpu service ioctl cmd %x\n", cmd);
1866         } break;
1867         }
1868         vpu_debug_leave();
1869         return 0;
1870 }
1871
1872 #ifdef CONFIG_COMPAT
1873 static long compat_vpu_service_ioctl(struct file *filp, unsigned int cmd,
1874                                      unsigned long arg)
1875 {
1876         struct vpu_subdev_data *data =
1877                 container_of(filp->f_path.dentry->d_inode->i_cdev,
1878                              struct vpu_subdev_data, cdev);
1879         struct vpu_service_info *pservice = data->pservice;
1880         struct vpu_session *session = (struct vpu_session *)filp->private_data;
1881
1882         vpu_debug_enter();
1883         vpu_debug(3, "cmd %x, COMPAT_VPU_IOC_SET_CLIENT_TYPE %x\n", cmd,
1884                   (u32)COMPAT_VPU_IOC_SET_CLIENT_TYPE);
1885         if (NULL == session)
1886                 return -EINVAL;
1887
1888         switch (cmd) {
1889         case COMPAT_VPU_IOC_SET_CLIENT_TYPE: {
1890                 session->type = (enum VPU_CLIENT_TYPE)arg;
1891                 vpu_debug(DEBUG_IOCTL, "compat set client type %d\n",
1892                           session->type);
1893         } break;
1894         case COMPAT_VPU_IOC_GET_HW_FUSE_STATUS: {
1895                 struct compat_vpu_request req;
1896
1897                 vpu_debug(DEBUG_IOCTL, "compat get hw status %d\n",
1898                           session->type);
1899                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1900                                    sizeof(struct compat_vpu_request))) {
1901                         vpu_err("error: compat get hw status copy_from_user failed\n");
1902                         return -EFAULT;
1903                 } else {
1904                         void *config = (session->type != VPU_ENC) ?
1905                                        ((void *)&pservice->dec_config) :
1906                                        ((void *)&pservice->enc_config);
1907                         size_t size = (session->type != VPU_ENC) ?
1908                                       (sizeof(struct vpu_dec_config)) :
1909                                       (sizeof(struct vpu_enc_config));
1910
1911                         if (copy_to_user(compat_ptr((compat_uptr_t)req.req),
1912                                          config, size)) {
1913                                 vpu_err("error: compat get hw status copy_to_user failed type %d\n",
1914                                         session->type);
1915                                 return -EFAULT;
1916                         }
1917                 }
1918         } break;
1919         case COMPAT_VPU_IOC_SET_REG: {
1920                 struct compat_vpu_request req;
1921                 struct vpu_reg *reg;
1922
1923                 vpu_debug(DEBUG_IOCTL, "compat set reg type %d\n",
1924                           session->type);
1925                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1926                                    sizeof(struct compat_vpu_request))) {
1927                         vpu_err("compat set_reg copy_from_user failed\n");
1928                         return -EFAULT;
1929                 }
1930                 reg = reg_init(data, session,
1931                                compat_ptr((compat_uptr_t)req.req), req.size);
1932                 if (NULL == reg) {
1933                         return -EFAULT;
1934                 } else {
1935                         mutex_lock(&pservice->lock);
1936                         try_set_reg(data);
1937                         mutex_unlock(&pservice->lock);
1938                 }
1939         } break;
1940         case COMPAT_VPU_IOC_GET_REG: {
1941                 struct compat_vpu_request req;
1942                 struct vpu_reg *reg;
1943                 int ret;
1944
1945                 vpu_debug(DEBUG_IOCTL, "compat get reg type %d\n",
1946                           session->type);
1947                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1948                                    sizeof(struct compat_vpu_request))) {
1949                         vpu_err("compat get reg copy_from_user failed\n");
1950                         return -EFAULT;
1951                 }
1952
1953                 ret = wait_event_timeout(session->wait,
1954                                          !list_empty(&session->done),
1955                                          VPU_TIMEOUT_DELAY);
1956
1957                 if (!list_empty(&session->done)) {
1958                         if (ret < 0)
1959                                 vpu_err("warning: pid %d wait task error ret %d\n",
1960                                         session->pid, ret);
1961                         ret = 0;
1962                 } else {
1963                         if (unlikely(ret < 0)) {
1964                                 vpu_err("error: pid %d wait task ret %d\n",
1965                                         session->pid, ret);
1966                         } else if (ret == 0) {
1967                                 vpu_err("error: pid %d wait %d task done timeout\n",
1968                                         session->pid,
1969                                         atomic_read(&session->task_running));
1970                                 ret = -ETIMEDOUT;
1971                         }
1972                 }
1973
1974                 if (ret < 0) {
1975                         int task_running = atomic_read(&session->task_running);
1976
1977                         mutex_lock(&pservice->lock);
1978                         vpu_service_dump(pservice);
1979                         if (task_running) {
1980                                 atomic_set(&session->task_running, 0);
1981                                 atomic_sub(task_running,
1982                                            &pservice->total_running);
1983                                 pr_err("%d task(s) still running but not returned, resetting hardware...",
1984                                        task_running);
1985                                 vpu_reset(data);
1986                                 pr_err("done\n");
1987                         }
1988                         vpu_service_session_clear(data, session);
1989                         mutex_unlock(&pservice->lock);
1990                         return ret;
1991                 }
1992
1993                 mutex_lock(&pservice->lock);
1994                 reg = list_entry(session->done.next,
1995                                  struct vpu_reg, session_link);
1996                 return_reg(data, reg, compat_ptr((compat_uptr_t)req.req));
1997                 mutex_unlock(&pservice->lock);
1998         } break;
1999         case COMPAT_VPU_IOC_PROBE_IOMMU_STATUS: {
2000                 int iommu_enable = 1;
2001
2002                 vpu_debug(DEBUG_IOCTL, "COMPAT_VPU_IOC_PROBE_IOMMU_STATUS\n");
2003
2004                 if (copy_to_user(compat_ptr((compat_uptr_t)arg),
2005                                  &iommu_enable, sizeof(int))) {
2006                         vpu_err("error: VPU_IOC_PROBE_IOMMU_STATUS copy_to_user failed\n");
2007                         return -EFAULT;
2008                 }
2009         } break;
2010         default: {
2011                 vpu_err("error: unknown vpu service ioctl cmd %x\n", cmd);
2012         } break;
2013         }
2014         vpu_debug_leave();
2015         return 0;
2016 }
2017 #endif
2018
2019 static int vpu_service_check_hw(struct vpu_subdev_data *data)
2020 {
2021         int ret = -EINVAL, i = 0;
2022         u32 hw_id = readl_relaxed(data->regs);
2023
2024         hw_id = (hw_id >> 16) & 0xFFFF;
2025         pr_info("checking hw id %x\n", hw_id);
2026         data->hw_info = NULL;
2027         for (i = 0; i < ARRAY_SIZE(vcodec_info_set); i++) {
2028                 struct vcodec_info *info = &vcodec_info_set[i];
2029
2030                 if (hw_id == info->hw_id) {
2031                         data->hw_id = info->hw_id;
2032                         data->hw_info = info->hw_info;
2033                         data->task_info = info->task_info;
2034                         data->trans_info = info->trans_info;
2035                         ret = 0;
2036                         break;
2037                 }
2038         }
2039         return ret;
2040 }
2041
2042 static int vpu_service_open(struct inode *inode, struct file *filp)
2043 {
2044         struct vpu_subdev_data *data = container_of(
2045                         inode->i_cdev, struct vpu_subdev_data, cdev);
2046         struct vpu_service_info *pservice = data->pservice;
2047         struct vpu_session *session = kmalloc(sizeof(*session), GFP_KERNEL);
2048
2049         vpu_debug_enter();
2050
2051         if (NULL == session) {
2052                 vpu_err("error: unable to allocate memory for vpu_session\n");
2053                 return -ENOMEM;
2054         }
2055
2056         session->type   = VPU_TYPE_BUTT;
2057         session->pid    = current->pid;
2058         INIT_LIST_HEAD(&session->waiting);
2059         INIT_LIST_HEAD(&session->running);
2060         INIT_LIST_HEAD(&session->done);
2061         INIT_LIST_HEAD(&session->list_session);
2062         init_waitqueue_head(&session->wait);
2063         atomic_set(&session->task_running, 0);
2064         mutex_lock(&pservice->lock);
2065         list_add_tail(&session->list_session, &pservice->session);
2066         filp->private_data = (void *)session;
2067         mutex_unlock(&pservice->lock);
2068
2069         pr_debug("dev opened\n");
2070         vpu_debug_leave();
2071         return nonseekable_open(inode, filp);
2072 }
2073
2074 static int vpu_service_release(struct inode *inode, struct file *filp)
2075 {
2076         struct vpu_subdev_data *data = container_of(
2077                         inode->i_cdev, struct vpu_subdev_data, cdev);
2078         struct vpu_service_info *pservice = data->pservice;
2079         int task_running;
2080         struct vpu_session *session = (struct vpu_session *)filp->private_data;
2081
2082         vpu_debug_enter();
2083         if (NULL == session)
2084                 return -EINVAL;
2085
2086         task_running = atomic_read(&session->task_running);
2087         if (task_running) {
2088                 pr_err("error: session %d still has %d task running when closing\n",
2089                        session->pid, task_running);
2090                 msleep(50);
2091         }
2092         wake_up(&session->wait);
2093
2094         mutex_lock(&pservice->lock);
2095         /* remove this filp from the asynchronously notified filps */
2096         list_del_init(&session->list_session);
2097         vpu_service_session_clear(data, session);
2098         kfree(session);
2099         filp->private_data = NULL;
2100         mutex_unlock(&pservice->lock);
2101
2102         pr_debug("dev closed\n");
2103         vpu_debug_leave();
2104         return 0;
2105 }
2106
2107 static const struct file_operations vpu_service_fops = {
2108         .unlocked_ioctl = vpu_service_ioctl,
2109         .open           = vpu_service_open,
2110         .release        = vpu_service_release,
2111 #ifdef CONFIG_COMPAT
2112         .compat_ioctl   = compat_vpu_service_ioctl,
2113 #endif
2114 };
2115
2116 static irqreturn_t vdpu_irq(int irq, void *dev_id);
2117 static irqreturn_t vdpu_isr(int irq, void *dev_id);
2118 static irqreturn_t vepu_irq(int irq, void *dev_id);
2119 static irqreturn_t vepu_isr(int irq, void *dev_id);
2120 static void get_hw_info(struct vpu_subdev_data *data);
2121
2122 static struct device *rockchip_get_sysmmu_dev(const char *compt)
2123 {
2124         struct device_node *dn = NULL;
2125         struct platform_device *pd = NULL;
2126         struct device *ret = NULL;
2127
2128         dn = of_find_compatible_node(NULL, NULL, compt);
2129         if (!dn) {
2130                 pr_err("can't find device node %s\n", compt);
2131                 return NULL;
2132         }
2133
2134         pd = of_find_device_by_node(dn);
2135         if (!pd) {
2136                 pr_err("can't find platform device in device node %s\n", compt);
2137                 return NULL;
2138         }
2139         ret = &pd->dev;
2140
2141         return ret;
2142 }
2143
2144 #ifdef CONFIG_IOMMU_API
2145 static inline void platform_set_sysmmu(struct device *iommu,
2146                                        struct device *dev)
2147 {
2148         dev->archdata.iommu = iommu;
2149 }
2150 #else
2151 static inline void platform_set_sysmmu(struct device *iommu,
2152                                        struct device *dev)
2153 {
2154 }
2155 #endif
2156
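/*
 * IOMMU page fault handler. When a fault arrives while a codec task is
 * active, dump the task's mapped memory regions and the current decoder
 * registers, then reset the hardware so the service can recover.
 */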
2157 int vcodec_sysmmu_fault_hdl(struct device *dev,
2158                             enum rk_iommu_inttype itype,
2159                             unsigned long pgtable_base,
2160                             unsigned long fault_addr, unsigned int status)
2161 {
2162         struct platform_device *pdev;
2163         struct vpu_service_info *pservice;
2164         struct vpu_subdev_data *data;
2165
2166         vpu_debug_enter();
2167
2168         if (dev == NULL) {
2169                 pr_err("invalid NULL dev\n");
2170                 return 0;
2171         }
2172
2173         pdev = container_of(dev, struct platform_device, dev);
2174         if (pdev == NULL) {
2175                 pr_err("invalid NULL platform_device\n");
2176                 return 0;
2177         }
2178
2179         data = platform_get_drvdata(pdev);
2180         if (data == NULL) {
2181                 pr_err("invalid NULL vpu_subdev_data\n");
2182                 return 0;
2183         }
2184
2185         pservice = data->pservice;
2186         if (pservice == NULL) {
2187                 pr_err("invalid NULL vpu_service_info\n");
2188                 return 0;
2189         }
2190
2191         if (pservice->reg_codec) {
2192                 struct vpu_reg *reg = pservice->reg_codec;
2193                 struct vcodec_mem_region *mem, *n;
2194                 int i = 0;
2195
2196                 pr_err("vcodec, fault addr 0x%08lx\n", fault_addr);
2197                 if (!list_empty(&reg->mem_region_list)) {
2198                         list_for_each_entry_safe(mem, n, &reg->mem_region_list,
2199                                                  reg_lnk) {
2200                                 pr_err("vcodec, reg[%02u] mem region [%02d] 0x%lx %lx\n",
2201                                        mem->reg_idx, i, mem->iova, mem->len);
2202                                 i++;
2203                         }
2204                 } else {
2205                         pr_err("no memory region mapped\n");
2206                 }
2207
2208                 if (reg->data) {
2209                         struct vpu_subdev_data *data = reg->data;
2210                         u32 *base = (u32 *)data->dec_dev.regs;
2211                         u32 len = data->hw_info->dec_reg_num;
2212
2213                         pr_err("current error register set:\n");
2214
2215                         for (i = 0; i < len; i++)
2216                                 pr_err("reg[%02d] %08x\n",
2217                                        i, readl_relaxed(base + i));
2218                 }
2219
2220                 pr_alert("vcodec, page fault occurred, resetting hw\n");
2221
2222                 /* reg->reg[101] = 1; */
2223                 vpu_reset(data);
2224         }
2225
2226         return 0;
2227 }
2228
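/*
 * Probe one vcodec sub-device: map (or reuse the combo parent's) register
 * window, identify the hardware from its ID register, request the encoder
 * and decoder interrupts, optionally attach the matching IOMMU, then expose
 * the block as a character device and create its debugfs entries.
 */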
2229 static int vcodec_subdev_probe(struct platform_device *pdev,
2230                                struct vpu_service_info *pservice)
2231 {
2232         int ret = 0;
2233         struct resource *res = NULL;
2234         u32 ioaddr = 0;
2235         u8 *regs = NULL;
2236         struct vpu_hw_info *hw_info = NULL;
2237         struct device *dev = &pdev->dev;
2238         char *name = (char *)dev_name(dev);
2239         struct device_node *np = pdev->dev.of_node;
2240         struct vpu_subdev_data *data =
2241                 devm_kzalloc(dev, sizeof(struct vpu_subdev_data), GFP_KERNEL);
2242         u32 iommu_en = 0;
2243         char mmu_dev_dts_name[40];
2244
2245         of_property_read_u32(np, "iommu_enabled", &iommu_en);
2246
2247         pr_info("probe device %s\n", dev_name(dev));
2248
2249         data->pservice = pservice;
2250         data->dev = dev;
2251
2252         of_property_read_string(np, "name", (const char **)&name);
2253         of_property_read_u32(np, "dev_mode", (u32 *)&data->mode);
2254
2255         if (pservice->reg_base == 0) {
2256                 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2257                 data->regs = devm_ioremap_resource(dev, res);
2258                 if (IS_ERR(data->regs)) {
2259                         ret = PTR_ERR(data->regs);
2260                         goto err;
2261                 }
2262                 ioaddr = res->start;
2263         } else {
2264                 data->regs = pservice->reg_base;
2265                 ioaddr = pservice->ioaddr;
2266         }
2267
2268         clear_bit(MMU_ACTIVATED, &data->state);
2269         vcodec_enter_mode(data);
2270
2271         vpu_service_power_on(pservice);
2272         ret = vpu_service_check_hw(data);
2273         if (ret < 0) {
2274                 vpu_err("error: hw info check failed\n");
2275                 goto err;
2276         }
2277
2278         hw_info = data->hw_info;
2279         regs = (u8 *)data->regs;
2280
2281         if (hw_info->dec_reg_num) {
2282                 data->dec_dev.iosize = hw_info->dec_io_size;
2283                 data->dec_dev.regs = (u32 *)(regs + hw_info->dec_offset);
2284         }
2285
2286         if (hw_info->enc_reg_num) {
2287                 data->enc_dev.iosize = hw_info->enc_io_size;
2288                 data->enc_dev.regs = (u32 *)(regs + hw_info->enc_offset);
2289         }
2290
2291         data->reg_size = max(hw_info->dec_io_size, hw_info->enc_io_size);
2292
2293         data->irq_enc = platform_get_irq_byname(pdev, "irq_enc");
2294         if (data->irq_enc > 0) {
2295                 ret = devm_request_threaded_irq(dev, data->irq_enc,
2296                                                 vepu_irq, vepu_isr,
2297                                                 IRQF_SHARED, dev_name(dev),
2298                                                 (void *)data);
2299                 if (ret) {
2300                         dev_err(dev, "error: can't request vepu irq %d\n",
2301                                 data->irq_enc);
2302                         goto err;
2303                 }
2304         }
2305         data->irq_dec = platform_get_irq_byname(pdev, "irq_dec");
2306         if (data->irq_dec > 0) {
2307                 ret = devm_request_threaded_irq(dev, data->irq_dec,
2308                                                 vdpu_irq, vdpu_isr,
2309                                                 IRQF_SHARED, dev_name(dev),
2310                                                 (void *)data);
2311                 if (ret) {
2312                         dev_err(dev, "error: can't request vdpu irq %d\n",
2313                                 data->irq_dec);
2314                         goto err;
2315                 }
2316         }
2317         atomic_set(&data->dec_dev.irq_count_codec, 0);
2318         atomic_set(&data->dec_dev.irq_count_pp, 0);
2319         atomic_set(&data->enc_dev.irq_count_codec, 0);
2320         atomic_set(&data->enc_dev.irq_count_pp, 0);
2321
2322         if (iommu_en) {
2323                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
2324                         sprintf(mmu_dev_dts_name,
2325                                 HEVC_IOMMU_COMPATIBLE_NAME);
2326                 else if (data->mode == VCODEC_RUNNING_MODE_VPU)
2327                         sprintf(mmu_dev_dts_name,
2328                                 VPU_IOMMU_COMPATIBLE_NAME);
2329                 else if (data->mode == VCODEC_RUNNING_MODE_RKVDEC)
2330                         sprintf(mmu_dev_dts_name, VDEC_IOMMU_COMPATIBLE_NAME);
2331                 else
2332                         sprintf(mmu_dev_dts_name,
2333                                 HEVC_IOMMU_COMPATIBLE_NAME);
2334
2335                 data->mmu_dev =
2336                         rockchip_get_sysmmu_dev(mmu_dev_dts_name);
2337
2338                 if (data->mmu_dev)
2339                         platform_set_sysmmu(data->mmu_dev, dev);
2340
2341                 rockchip_iovmm_set_fault_handler(dev, vcodec_sysmmu_fault_hdl);
2342         }
2343
2344         get_hw_info(data);
2345         pservice->auto_freq = true;
2346
2347         vcodec_exit_mode(data);
2348         /* create device node */
2349         ret = alloc_chrdev_region(&data->dev_t, 0, 1, name);
2350         if (ret) {
2351                 dev_err(dev, "alloc dev_t failed\n");
2352                 goto err;
2353         }
2354
2355         cdev_init(&data->cdev, &vpu_service_fops);
2356
2357         data->cdev.owner = THIS_MODULE;
2358         data->cdev.ops = &vpu_service_fops;
2359
2360         ret = cdev_add(&data->cdev, data->dev_t, 1);
2361
2362         if (ret) {
2363                 dev_err(dev, "add dev_t failed\n");
2364                 goto err;
2365         }
2366
2367         data->cls = class_create(THIS_MODULE, name);
2368
2369         if (IS_ERR(data->cls)) {
2370                 ret = PTR_ERR(data->cls);
2371                 dev_err(dev, "class_create err:%d\n", ret);
2372                 goto err;
2373         }
2374
2375         data->child_dev = device_create(data->cls, dev,
2376                 data->dev_t, NULL, name);
2377
2378         platform_set_drvdata(pdev, data);
2379
2380         INIT_LIST_HEAD(&data->lnk_service);
2381         list_add_tail(&data->lnk_service, &pservice->subdev_list);
2382
2383 #ifdef CONFIG_DEBUG_FS
2384         data->debugfs_dir = vcodec_debugfs_create_device_dir(name, parent);
2385         if (!IS_ERR_OR_NULL(data->debugfs_dir))
2386                 data->debugfs_file_regs =
2387                         debugfs_create_file("regs", 0664, data->debugfs_dir,
2388                                         data, &debug_vcodec_fops);
2389         else
2390                 vpu_err("create debugfs dir %s failed\n", name);
2391 #endif
2392         return 0;
2393 err:
2394         if (data->child_dev) {
2395                 device_destroy(data->cls, data->dev_t);
2396                 cdev_del(&data->cdev);
2397                 unregister_chrdev_region(data->dev_t, 1);
2398         }
2399
2400         if (data->cls)
2401                 class_destroy(data->cls);
2402         return -1;
2403 }
2404
2405 static void vcodec_subdev_remove(struct vpu_subdev_data *data)
2406 {
2407         struct vpu_service_info *pservice = data->pservice;
2408
2409         mutex_lock(&pservice->lock);
2410         cancel_delayed_work_sync(&pservice->power_off_work);
2411         vpu_service_power_off(pservice);
2412         mutex_unlock(&pservice->lock);
2413
2414         device_destroy(data->cls, data->dev_t);
2415         class_destroy(data->cls);
2416         cdev_del(&data->cdev);
2417         unregister_chrdev_region(data->dev_t, 1);
2418
2419 #ifdef CONFIG_DEBUG_FS
2420         if (!IS_ERR_OR_NULL(data->debugfs_dir))
2421                 debugfs_remove_recursive(data->debugfs_dir);
2422 #endif
2423 }
2424
2425 static void vcodec_read_property(struct device_node *np,
2426                                  struct vpu_service_info *pservice)
2427 {
2428         pservice->mode_bit = 0;
2429         pservice->mode_ctrl = 0;
2430         pservice->subcnt = 0;
2431         pservice->grf_base = NULL;
2432
2433         of_property_read_u32(np, "subcnt", &pservice->subcnt);
2434
2435         if (pservice->subcnt > 1) {
2436                 of_property_read_u32(np, "mode_bit", &pservice->mode_bit);
2437                 of_property_read_u32(np, "mode_ctrl", &pservice->mode_ctrl);
2438         }
2439 #ifdef CONFIG_MFD_SYSCON
2440         pservice->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
2441         if (IS_ERR_OR_NULL(pservice->grf)) {
2442                 pservice->grf = NULL;
2443 #ifdef CONFIG_ARM
2444                 pservice->grf_base = RK_GRF_VIRT;
2445 #else
2446                 vpu_err("can't find vpu grf property\n");
2447                 return;
2448 #endif
2449         }
2450 #else
2451 #ifdef CONFIG_ARM
2452         pservice->grf_base = RK_GRF_VIRT;
2453 #else
2454         vpu_err("can't find vpu grf property\n");
2455         return;
2456 #endif
2457 #endif
2458
2459 #ifdef CONFIG_RESET_CONTROLLER
2460         pservice->rst_a = devm_reset_control_get(pservice->dev, "video_a");
2461         pservice->rst_h = devm_reset_control_get(pservice->dev, "video_h");
2462         pservice->rst_v = devm_reset_control_get(pservice->dev, "video");
2463
2464         if (IS_ERR_OR_NULL(pservice->rst_a)) {
2465                 pr_warn("No aclk reset resource defined\n");
2466                 pservice->rst_a = NULL;
2467         }
2468
2469         if (IS_ERR_OR_NULL(pservice->rst_h)) {
2470                 pr_warn("No hclk reset resource defined\n");
2471                 pservice->rst_h = NULL;
2472         }
2473
2474         if (IS_ERR_OR_NULL(pservice->rst_v)) {
2475                 pr_warn("No core reset resource defined\n");
2476                 pservice->rst_v = NULL;
2477         }
2478 #endif
2479
2480         of_property_read_string(np, "name", (const char **)&pservice->name);
2481 }
2482
2483 static void vcodec_init_drvdata(struct vpu_service_info *pservice)
2484 {
2485         pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2486         pservice->curr_mode = -1;
2487
2488         wake_lock_init(&pservice->wake_lock, WAKE_LOCK_SUSPEND, "vpu");
2489         INIT_LIST_HEAD(&pservice->waiting);
2490         INIT_LIST_HEAD(&pservice->running);
2491         mutex_init(&pservice->lock);
2492         mutex_init(&pservice->shutdown_lock);
2493         atomic_set(&pservice->service_on, 1);
2494
2495         INIT_LIST_HEAD(&pservice->done);
2496         INIT_LIST_HEAD(&pservice->session);
2497         INIT_LIST_HEAD(&pservice->subdev_list);
2498
2499         pservice->reg_pproc     = NULL;
2500         atomic_set(&pservice->total_running, 0);
2501         atomic_set(&pservice->enabled,       0);
2502         atomic_set(&pservice->power_on_cnt,  0);
2503         atomic_set(&pservice->power_off_cnt, 0);
2504         atomic_set(&pservice->reset_request, 0);
2505
2506         INIT_DELAYED_WORK(&pservice->power_off_work, vpu_power_off_work);
2507         pservice->last.tv64 = 0;
2508
2509         pservice->ion_client = rockchip_ion_client_create("vpu");
2510         if (IS_ERR(pservice->ion_client)) {
2511                 vpu_err("failed to create ion client for vcodec ret %ld\n",
2512                         PTR_ERR(pservice->ion_client));
2513         } else {
2514                 vpu_debug(DEBUG_IOMMU, "vcodec ion client create success!\n");
2515         }
2516 }
2517
2518 static int vcodec_probe(struct platform_device *pdev)
2519 {
2520         int i;
2521         int ret = 0;
2522         struct resource *res = NULL;
2523         struct device *dev = &pdev->dev;
2524         struct device_node *np = pdev->dev.of_node;
2525         struct vpu_service_info *pservice =
2526                 devm_kzalloc(dev, sizeof(struct vpu_service_info), GFP_KERNEL);
2527
2528         pservice->dev = dev;
2529
2530         vcodec_read_property(np, pservice);
2531         vcodec_init_drvdata(pservice);
2532
2533         if (strncmp(pservice->name, "hevc_service", 12) == 0)
2534                 pservice->dev_id = VCODEC_DEVICE_ID_HEVC;
2535         else if (strncmp(pservice->name, "vpu_service", 11) == 0)
2536                 pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2537         else if (strncmp(pservice->name, "rkvdec", 6) == 0)
2538                 pservice->dev_id = VCODEC_DEVICE_ID_RKVDEC;
2539         else
2540                 pservice->dev_id = VCODEC_DEVICE_ID_COMBO;
2541
2542         ret = vpu_get_clk(pservice);
2543         if (ret < 0)
                goto err;
2544
2545         if (of_property_read_bool(np, "reg")) {
2546                 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2547
2548                 pservice->reg_base = devm_ioremap_resource(pservice->dev, res);
2549                 if (IS_ERR(pservice->reg_base)) {
2550                         vpu_err("ioremap registers base failed\n");
2551                         ret = PTR_ERR(pservice->reg_base);
2552                         goto err;
2553                 }
2554                 pservice->ioaddr = res->start;
2555         } else {
2556                 pservice->reg_base = 0;
2557         }
2558
2559         pm_runtime_enable(dev);
2560
2561         if (of_property_read_bool(np, "subcnt")) {
2562                 for (i = 0; i < pservice->subcnt; i++) {
2563                         struct device_node *sub_np;
2564                         struct platform_device *sub_pdev;
2565
2566                         sub_np = of_parse_phandle(np, "rockchip,sub", i);
2567                         sub_pdev = of_find_device_by_node(sub_np);
2568
2569                         vcodec_subdev_probe(sub_pdev, pservice);
2570                 }
2571         } else {
2572                 vcodec_subdev_probe(pdev, pservice);
2573         }
2574
2575         vpu_service_power_off(pservice);
2576
2577         pr_info("init success\n");
2578
2579         return 0;
2580
2581 err:
2582         pr_info("init failed\n");
2583         vpu_service_power_off(pservice);
2584         vpu_put_clk(pservice);
2585         wake_lock_destroy(&pservice->wake_lock);
2586
2587         return ret;
2588 }
2589
2590 static int vcodec_remove(struct platform_device *pdev)
2591 {
2592         struct vpu_subdev_data *data = platform_get_drvdata(pdev);
2593
2594         vcodec_subdev_remove(data);
2595
2596         pm_runtime_disable(data->pservice->dev);
2597
2598         return 0;
2599 }
2600
2601 static void vcodec_shutdown(struct platform_device *pdev)
2602 {
2603         struct vpu_subdev_data *data = platform_get_drvdata(pdev);
2604         struct vpu_service_info *pservice = data->pservice;
2605
2606         dev_info(&pdev->dev, "%s IN\n", __func__);
2607
2608         mutex_lock(&pservice->shutdown_lock);
2609         atomic_set(&pservice->service_on, 0);
2610         mutex_unlock(&pservice->shutdown_lock);
2611
2612         vcodec_exit_mode(data);
2613
2614         vpu_service_clear(data);
2615         vcodec_subdev_remove(data);
2616
2617         pm_runtime_disable(&pdev->dev);
2618 }
2619
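/*
 * Device tree properties consumed by this driver (see vcodec_probe(),
 * vcodec_read_property() and vcodec_subdev_probe()):
 *
 *   compatible              one of the strings in vcodec_service_dt_ids[]
 *   reg                     register window of the block
 *   interrupts/-names       "irq_enc" and/or "irq_dec"
 *   name                    node name used for the char device and class
 *   dev_mode                running mode of the sub-device
 *   iommu_enabled           non-zero to attach the matching IOMMU
 *   subcnt / rockchip,sub   count and phandles of sub-devices (combo only)
 *   mode_bit / mode_ctrl    read when more than one sub-device is present
 *   rockchip,grf            phandle to the GRF syscon
 *   resets / reset-names    "video_a", "video_h" and "video" reset lines
 */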
2620 #if defined(CONFIG_OF)
2621 static const struct of_device_id vcodec_service_dt_ids[] = {
2622         {.compatible = "rockchip,vpu_service",},
2623         {.compatible = "rockchip,hevc_service",},
2624         {.compatible = "rockchip,vpu_combo",},
2625         {.compatible = "rockchip,rkvdec",},
2626         {},
2627 };
2628 #endif
2629
2630 static struct platform_driver vcodec_driver = {
2631         .probe = vcodec_probe,
2632         .remove = vcodec_remove,
2633         .shutdown = vcodec_shutdown,
2634         .driver = {
2635                 .name = "vcodec",
2636                 .owner = THIS_MODULE,
2637 #if defined(CONFIG_OF)
2638                 .of_match_table = of_match_ptr(vcodec_service_dt_ids),
2639 #endif
2640         },
2641 };
2642
2643 static void get_hw_info(struct vpu_subdev_data *data)
2644 {
2645         struct vpu_service_info *pservice = data->pservice;
2646         struct vpu_dec_config *dec = &pservice->dec_config;
2647         struct vpu_enc_config *enc = &pservice->enc_config;
2648
2649         if (cpu_is_rk2928() || cpu_is_rk3036() ||
2650             cpu_is_rk30xx() || cpu_is_rk312x() ||
2651             cpu_is_rk3188())
2652                 dec->max_dec_pic_width = 1920;
2653         else
2654                 dec->max_dec_pic_width = 4096;
2655
2656         if (data->mode == VCODEC_RUNNING_MODE_VPU) {
2657                 dec->h264_support = 3;
2658                 dec->jpeg_support = 1;
2659                 dec->mpeg4_support = 2;
2660                 dec->vc1_support = 3;
2661                 dec->mpeg2_support = 1;
2662                 dec->pp_support = 1;
2663                 dec->sorenson_support = 1;
2664                 dec->ref_buf_support = 3;
2665                 dec->vp6_support = 1;
2666                 dec->vp7_support = 1;
2667                 dec->vp8_support = 1;
2668                 dec->avs_support = 1;
2669                 dec->jpeg_ext_support = 0;
2670                 dec->custom_mpeg4_support = 1;
2671                 dec->reserve = 0;
2672                 dec->mvc_support = 1;
2673
2674                 if (!cpu_is_rk3036()) {
2675                         u32 config_reg = readl_relaxed(data->enc_dev.regs + 63);
2676
2677                         enc->max_encoded_width = config_reg & ((1 << 11) - 1);
2678                         enc->h264_enabled = 1;
2679                         enc->mpeg4_enabled = (config_reg >> 26) & 1;
2680                         enc->jpeg_enabled = 1;
2681                         enc->vs_enabled = (config_reg >> 24) & 1;
2682                         enc->rgb_enabled = (config_reg >> 28) & 1;
2683                         enc->reg_size = data->reg_size;
2684                         enc->reserv[0] = 0;
2685                         enc->reserv[1] = 0;
2686                 }
2687
2688                 pservice->auto_freq = true;
2689                 vpu_debug(DEBUG_EXTRA_INFO, "vpu_service set to auto frequency mode\n");
2690                 atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
2691
2692                 pservice->bug_dec_addr = cpu_is_rk30xx();
2693         } else if (data->mode == VCODEC_RUNNING_MODE_RKVDEC) {
2694                 pservice->auto_freq = true;
2695                 atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
2696         } else {
2697                 /* disable frequency switching for hevc */
2698                 pservice->auto_freq = false;
2699         }
2700 }
2701
2702 static bool check_irq_err(struct vpu_task_info *task, u32 irq_status)
2703 {
2704         vpu_debug(DEBUG_IRQ_CHECK, "task %s status %08x mask %08x\n",
2705                   task->name, irq_status, task->error_mask);
2706
2707         return (task->error_mask & irq_status) ? true : false;
2708 }
2709
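/*
 * Hard-IRQ half of the decoder interrupt. Reads and acknowledges the decoder
 * (and, on VPU-style hardware, the post-processor) interrupt status, asks for
 * a reset when an error bit is set, and returns IRQ_WAKE_THREAD so the
 * threaded handler vdpu_isr() can complete the task under pservice->lock.
 */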
2710 static irqreturn_t vdpu_irq(int irq, void *dev_id)
2711 {
2712         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2713         struct vpu_service_info *pservice = data->pservice;
2714         struct vpu_task_info *task = NULL;
2715         struct vpu_device *dev = &data->dec_dev;
2716         u32 hw_id = data->hw_info->hw_id;
2717         u32 raw_status;
2718         u32 dec_status;
2719
2720         task = &data->task_info[TASK_DEC];
2721
2722         raw_status = readl_relaxed(dev->regs + task->reg_irq);
2723         dec_status = raw_status;
2724
2725         vpu_debug(DEBUG_TASK_INFO, "vdpu_irq reg %d status %x mask: irq %x ready %x error %0x\n",
2726                   task->reg_irq, dec_status,
2727                   task->irq_mask, task->ready_mask, task->error_mask);
2728
2729         if (dec_status & task->irq_mask) {
2730                 time_record(task, 1);
2731                 vpu_debug(DEBUG_IRQ_STATUS, "vdpu_irq dec status %08x\n",
2732                           dec_status);
2733                 if ((dec_status & 0x40001) == 0x40001) {
2734                         do {
2735                                 dec_status =
2736                                         readl_relaxed(dev->regs +
2737                                                 task->reg_irq);
2738                         } while ((dec_status & 0x40001) == 0x40001);
2739                 }
2740
2741                 if (check_irq_err(task, dec_status))
2742                         atomic_add(1, &pservice->reset_request);
2743
2744                 writel_relaxed(0, dev->regs + task->reg_irq);
2745
2746                 /*
2747                  * NOTE: rkvdec needs a reset after each task to avoid a
2748                  *       timeout error when switching from H.264 to H.265
2749                  */
2750                 if (data->mode == VCODEC_RUNNING_MODE_RKVDEC)
2751                         writel(0x100000, dev->regs + task->reg_irq);
2752
2753                 /* set clock gating to save power */
2754                 writel(task->gating_mask, dev->regs + task->reg_en);
2755
2756                 atomic_add(1, &dev->irq_count_codec);
2757                 time_diff(task);
2758         }
2759
2760         task = &data->task_info[TASK_PP];
2761         if (hw_id != HEVC_ID && hw_id != RKV_DEC_ID) {
2762                 u32 pp_status = readl_relaxed(dev->regs + task->irq_mask);
2763
2764                 if (pp_status & task->irq_mask) {
2765                         time_record(task, 1);
2766                         vpu_debug(DEBUG_IRQ_STATUS, "vdpu_irq pp status %08x\n",
2767                                   pp_status);
2768
2769                         if (check_irq_err(task, pp_status))
2770                                 atomic_add(1, &pservice->reset_request);
2771
2772                         /* clear pp IRQ */
2773                         writel_relaxed(pp_status & (~task->reg_irq),
2774                                        dev->regs + task->irq_mask);
2775                         atomic_add(1, &dev->irq_count_pp);
2776                         time_diff(task);
2777                 }
2778         }
2779
2780         pservice->irq_status = raw_status;
2781
2782         if (atomic_read(&dev->irq_count_pp) ||
2783             atomic_read(&dev->irq_count_codec))
2784                 return IRQ_WAKE_THREAD;
2785         else
2786                 return IRQ_NONE;
2787 }
2788
2789 static irqreturn_t vdpu_isr(int irq, void *dev_id)
2790 {
2791         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2792         struct vpu_service_info *pservice = data->pservice;
2793         struct vpu_device *dev = &data->dec_dev;
2794
2795         mutex_lock(&pservice->lock);
2796         if (atomic_read(&dev->irq_count_codec)) {
2797                 atomic_sub(1, &dev->irq_count_codec);
2798                 if (pservice->reg_codec == NULL) {
2799                         vpu_err("error: dec isr with no task waiting\n");
2800                 } else {
2801                         reg_from_run_to_done(data, pservice->reg_codec);
2802                         /* avoid the case where the vpu times out and cannot recover */
2803                         VDPU_SOFT_RESET(data->regs);
2804                 }
2805         }
2806
2807         if (atomic_read(&dev->irq_count_pp)) {
2808                 atomic_sub(1, &dev->irq_count_pp);
2809                 if (pservice->reg_pproc == NULL)
2810                         vpu_err("error: pp isr with no task waiting\n");
2811                 else
2812                         reg_from_run_to_done(data, pservice->reg_pproc);
2813         }
2814         try_set_reg(data);
2815         mutex_unlock(&pservice->lock);
2816         return IRQ_HANDLED;
2817 }
2818
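     /*
      * Encoder hard IRQ handler: read and clear the encoder interrupt
      * status, request a reset on error and wake the threaded handler
      * (vepu_isr) once a task has completed.
      */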
2819 static irqreturn_t vepu_irq(int irq, void *dev_id)
2820 {
2821         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2822         struct vpu_service_info *pservice = data->pservice;
2823         struct vpu_task_info *task = &data->task_info[TASK_ENC];
2824         struct vpu_device *dev = &data->enc_dev;
2825         u32 irq_status;
2826
2827         irq_status = readl_relaxed(dev->regs + task->reg_irq);
2828
2829         vpu_debug(DEBUG_TASK_INFO, "vepu_irq reg %d status %x mask: irq %x ready %x error %0x\n",
2830                   task->reg_irq, irq_status,
2831                   task->irq_mask, task->ready_mask, task->error_mask);
2832
2833         vpu_debug(DEBUG_IRQ_STATUS, "vepu_irq enc status %08x\n", irq_status);
2834
2835         if (likely(irq_status & task->irq_mask)) {
2836                 time_record(task, 1);
2837
2838                 if (check_irq_err(task, irq_status))
2839                         atomic_add(1, &pservice->reset_request);
2840
2841                 /* clear enc IRQ */
2842                 writel_relaxed(irq_status & (~task->irq_mask),
2843                                dev->regs + task->reg_irq);
2844
2845                 atomic_add(1, &dev->irq_count_codec);
2846                 time_diff(task);
2847         }
2848
2849         pservice->irq_status = irq_status;
2850
2851         if (atomic_read(&dev->irq_count_codec))
2852                 return IRQ_WAKE_THREAD;
2853         else
2854                 return IRQ_NONE;
2855 }
2856
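     /*
      * Threaded half of the encoder interrupt: complete the current
      * register set and schedule the next one under pservice->lock.
      */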
2857 static irqreturn_t vepu_isr(int irq, void *dev_id)
2858 {
2859         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2860         struct vpu_service_info *pservice = data->pservice;
2861         struct vpu_device *dev = &data->enc_dev;
2862
2863         mutex_lock(&pservice->lock);
2864         if (atomic_read(&dev->irq_count_codec)) {
2865                 atomic_sub(1, &dev->irq_count_codec);
2866                 if (pservice->reg_codec == NULL)
2867                         vpu_err("error: enc isr with no task waiting\n");
2868                 else
2869                         reg_from_run_to_done(data, pservice->reg_codec);
2870         }
2871         try_set_reg(data);
2872         mutex_unlock(&pservice->lock);
2873         return IRQ_HANDLED;
2874 }
2875
2876 static int __init vcodec_service_init(void)
2877 {
2878         int ret = platform_driver_register(&vcodec_driver);
2879
2880         if (ret) {
2881                 vpu_err("Platform driver register failed (%d).\n", ret);
2882                 return ret;
2883         }
2884
2885 #ifdef CONFIG_DEBUG_FS
2886         vcodec_debugfs_init();
2887 #endif
2888
2889         return ret;
2890 }
2891
2892 static void __exit vcodec_service_exit(void)
2893 {
2894 #ifdef CONFIG_DEBUG_FS
2895         vcodec_debugfs_exit();
2896 #endif
2897
2898         platform_driver_unregister(&vcodec_driver);
2899 }
2900
2901 module_init(vcodec_service_init);
2902 module_exit(vcodec_service_exit);
2903 MODULE_LICENSE("GPL v2");
2904
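     /*
      * debugfs support: a "vcodec" directory is created at the debugfs root
      * and the register/session dump below is exposed through it.
      */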
2905 #ifdef CONFIG_DEBUG_FS
2906 #include <linux/seq_file.h>
2907
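     /* the driver-wide debugfs root ("vcodec") is kept in the file-scope "parent" dentry */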
2908 static int vcodec_debugfs_init(void)
2909 {
2910         parent = debugfs_create_dir("vcodec", NULL);
2911         if (!parent)
2912                 return -1;
2913
2914         return 0;
2915 }
2916
2917 static void vcodec_debugfs_exit(void)
2918 {
2919         debugfs_remove(parent);
2920 }
2921
2922 static struct dentry *vcodec_debugfs_create_device_dir(
2923                 char *dirname, struct dentry *parent)
2924 {
2925         return debugfs_create_dir(dirname, parent);
2926 }
2927
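     /*
      * Dump every encoder/decoder register together with the waiting,
      * running and done queues of each open session. The hardware is
      * powered on around the register reads.
      */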
2928 static int debug_vcodec_show(struct seq_file *s, void *unused)
2929 {
2930         struct vpu_subdev_data *data = s->private;
2931         struct vpu_service_info *pservice = data->pservice;
2932         unsigned int i, n;
2933         struct vpu_reg *reg, *reg_tmp;
2934         struct vpu_session *session, *session_tmp;
2935
2936         mutex_lock(&pservice->lock);
2937         vpu_service_power_on(pservice);
2938         if (data->hw_info->hw_id != HEVC_ID) {
2939                 seq_puts(s, "\nENC Registers:\n");
2940                 n = data->enc_dev.iosize >> 2;
2941
2942                 for (i = 0; i < n; i++)
2943                         seq_printf(s, "\tswreg%d = %08X\n", i,
2944                                    readl_relaxed(data->enc_dev.regs + i));
2945         }
2946
2947         seq_puts(s, "\nDEC Registers:\n");
2948
2949         n = data->dec_dev.iosize >> 2;
2950         for (i = 0; i < n; i++)
2951                 seq_printf(s, "\tswreg%d = %08X\n", i,
2952                            readl_relaxed(data->dec_dev.regs + i));
2953
2954         seq_puts(s, "\nvpu service status:\n");
2955
2956         list_for_each_entry_safe(session, session_tmp,
2957                                  &pservice->session, list_session) {
2958                 seq_printf(s, "session pid %d type %d:\n",
2959                            session->pid, session->type);
2960
2961                 list_for_each_entry_safe(reg, reg_tmp,
2962                                          &session->waiting, session_link) {
2963                         seq_printf(s, "waiting register set %p\n", reg);
2964                 }
2965                 list_for_each_entry_safe(reg, reg_tmp,
2966                                          &session->running, session_link) {
2967                         seq_printf(s, "running register set %p\n", reg);
2968                 }
2969                 list_for_each_entry_safe(reg, reg_tmp,
2970                                          &session->done, session_link) {
2971                         seq_printf(s, "done    register set %p\n", reg);
2972                 }
2973         }
2974
2975         seq_printf(s, "\npower counter: on %d off %d\n",
2976                    atomic_read(&pservice->power_on_cnt),
2977                    atomic_read(&pservice->power_off_cnt));
2978
2979         mutex_unlock(&pservice->lock);
2980         vpu_service_power_off(pservice);
2981
2982         return 0;
2983 }
2984
2985 static int debug_vcodec_open(struct inode *inode, struct file *file)
2986 {
2987         return single_open(file, debug_vcodec_show, inode->i_private);
2988 }
2989
2990 #endif
2991