drivers/video/rockchip/vcodec/vcodec_service.c
1 /**
2  * Copyright (C) 2015 Fuzhou Rockchip Electronics Co., Ltd
3  * author: chenhengming, chm@rock-chips.com
4  *         Alpha Lin, alpha.lin@rock-chips.com
5  *         Jung Zhao, jung.zhao@rock-chips.com
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/clk.h>
21 #include <linux/compat.h>
22 #include <linux/delay.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/module.h>
26 #include <linux/fs.h>
27 #include <linux/mm.h>
28 #include <linux/platform_device.h>
29 #include <linux/reset.h>
30 #include <linux/sched.h>
31 #include <linux/slab.h>
32 #include <linux/wakelock.h>
33 #include <linux/cdev.h>
34 #include <linux/of.h>
35 #include <linux/of_platform.h>
36 #include <linux/of_irq.h>
37 #include <linux/regmap.h>
38 #include <linux/mfd/syscon.h>
39 #include <linux/uaccess.h>
40 #include <linux/debugfs.h>
41 #include <linux/pm_runtime.h>
42
43 #include <linux/rockchip/cru.h>
44 #include <linux/rockchip/pmu.h>
45 #include <linux/rockchip/grf.h>
46
47 #include <linux/dma-buf.h>
48 #include <linux/rockchip-iovmm.h>
49
50 #include "vcodec_hw_info.h"
51 #include "vcodec_hw_vpu.h"
52 #include "vcodec_hw_rkv.h"
53 #include "vcodec_hw_vpu2.h"
54
55 #include "vcodec_service.h"
56
57 #include "vcodec_iommu_ops.h"
58
59 /*
60  * debug flag usage:
61  * +------+-------------------+
62  * | 8bit |      24bit        |
63  * +------+-------------------+
64  *  bits  0~23 select the information type
65  *  bits 24~31 select the print format
66  */
67
68 #define DEBUG_POWER                             0x00000001
69 #define DEBUG_CLOCK                             0x00000002
70 #define DEBUG_IRQ_STATUS                        0x00000004
71 #define DEBUG_IOMMU                             0x00000008
72 #define DEBUG_IOCTL                             0x00000010
73 #define DEBUG_FUNCTION                          0x00000020
74 #define DEBUG_REGISTER                          0x00000040
75 #define DEBUG_EXTRA_INFO                        0x00000080
76 #define DEBUG_TIMING                            0x00000100
77 #define DEBUG_TASK_INFO                         0x00000200
78
79 #define DEBUG_SET_REG                           0x00001000
80 #define DEBUG_GET_REG                           0x00002000
81 #define DEBUG_PPS_FILL                          0x00004000
82 #define DEBUG_IRQ_CHECK                         0x00008000
83 #define DEBUG_CACHE_32B                         0x00010000
84
85 #define PRINT_FUNCTION                          0x80000000
86 #define PRINT_LINE                              0x40000000
87
88 #define MHZ                                     (1000 * 1000)
89 #define SIZE_REG(reg)                           ((reg) * 4)
90
91 #define VCODEC_CLOCK_ENABLE     1
92 #define EXTRA_INFO_MAGIC        0x4C4A46
93
94 static int debug;
95 module_param(debug, int, S_IRUGO | S_IWUSR);
96 MODULE_PARM_DESC(debug, "bit switch for vcodec_service debug information");
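/*
 * Example (illustrative): to trace iommu and ioctl activity, combine the
 * type bits above, e.g. debug = DEBUG_IOMMU | DEBUG_IOCTL (0x00000018).
 * The value can be given at module load time or, since the parameter is
 * writable, changed at runtime (typically via
 * /sys/module/<module>/parameters/debug, the exact path depending on the
 * module name actually built).
 */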
97 /*
98  * hardware information organization
99  *
100  * In order to support multiple hardware blocks with different versions the
101  * hardware information is organized as follows:
102  *
103  * 1. First, index the hardware by register size / position.
104  *    This information is fixed for each hardware block and does not relate
105  *    to the runtime work flow; it is only used for resource allocation.
106  *    Descriptor: struct vpu_hw_info
107  *
108  * 2. Then, index the hardware by runtime configuration.
109  *    This information describes runtime behaviour, including the enable
110  *    register, irq register and other key control flags.
111  *    Descriptor: struct vpu_task_info
112  *
113  * 3. Finally, in the iommu case fd translation is required.
114  *    Descriptor: struct vpu_trans_info
115  */
116
117 enum VPU_FREQ {
118         VPU_FREQ_200M,
119         VPU_FREQ_266M,
120         VPU_FREQ_300M,
121         VPU_FREQ_400M,
122         VPU_FREQ_500M,
123         VPU_FREQ_600M,
124         VPU_FREQ_DEFAULT,
125         VPU_FREQ_BUT,
126 };
127
128 struct extra_info_elem {
129         u32 index;
130         u32 offset;
131 };
132
133
134 struct extra_info_for_iommu {
135         u32 magic;
136         u32 cnt;
137         struct extra_info_elem elem[20];
138 };
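/*
 * Usage note (see reg_init() and vcodec_bufid_to_iova() below): userspace may
 * append this structure directly after the register array it submits. When
 * 'magic' equals EXTRA_INFO_MAGIC, each elem[i].offset is added to
 * reg->reg[elem[i].index] after the fd-to-iova translation.
 */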
139
140 static const struct vcodec_info vcodec_info_set[] = {
141         {
142                 .hw_id          = VPU_ID_8270,
143                 .hw_info        = &hw_vpu_8270,
144                 .task_info      = task_vpu,
145                 .trans_info     = trans_vpu,
146         },
147         {
148                 .hw_id          = VPU_ID_4831,
149                 .hw_info        = &hw_vpu_4831,
150                 .task_info      = task_vpu,
151                 .trans_info     = trans_vpu,
152         },
153         {
154                 .hw_id          = VPU_DEC_ID_9190,
155                 .hw_info        = &hw_vpu_9190,
156                 .task_info      = task_vpu,
157                 .trans_info     = trans_vpu,
158         },
159         {
160                 .hw_id          = HEVC_ID,
161                 .hw_info        = &hw_rkhevc,
162                 .task_info      = task_rkv,
163                 .trans_info     = trans_rkv,
164         },
165         {
166                 .hw_id          = RKV_DEC_ID,
167                 .hw_info        = &hw_rkvdec,
168                 .task_info      = task_rkv,
169                 .trans_info     = trans_rkv,
170         },
171         {
172                 .hw_id          = VPU2_ID,
173                 .hw_info        = &hw_vpu2,
174                 .task_info      = task_vpu2,
175                 .trans_info     = trans_vpu2,
176         },
177 };
178
179 /* Both VPU1 and VPU2 */
180 static const struct vcodec_device_info vpu_device_info = {
181         .device_type = VCODEC_DEVICE_TYPE_VPUX,
182         .name = "vpu-service",
183 };
184
185 static const struct vcodec_device_info vpu_combo_device_info = {
186         .device_type = VCODEC_DEVICE_TYPE_VPUC,
187         .name = "vpu-combo",
188 };
189
190 static const struct vcodec_device_info hevc_device_info = {
191         .device_type = VCODEC_DEVICE_TYPE_HEVC,
192         .name = "hevc-service",
193 };
194
195 static const struct vcodec_device_info rkvd_device_info = {
196         .device_type = VCODEC_DEVICE_TYPE_RKVD,
197         .name = "rkvdec",
198 };
199
200 #define DEBUG
201 #ifdef DEBUG
202 #define vpu_debug_func(type, fmt, args...)                      \
203         do {                                                    \
204                 if (unlikely(debug & type)) {                   \
205                         pr_info("%s:%d: " fmt,                  \
206                                  __func__, __LINE__, ##args);   \
207                 }                                               \
208         } while (0)
209 #define vpu_debug(type, fmt, args...)                           \
210         do {                                                    \
211                 if (unlikely(debug & type)) {                   \
212                         pr_info(fmt, ##args);                   \
213                 }                                               \
214         } while (0)
215 #else
216 #define vpu_debug_func(level, fmt, args...)
217 #define vpu_debug(level, fmt, args...)
218 #endif
219
220 #define vpu_debug_enter() vpu_debug_func(DEBUG_FUNCTION, "enter\n")
221 #define vpu_debug_leave() vpu_debug_func(DEBUG_FUNCTION, "leave\n")
222
223 #define vpu_err(fmt, args...)                           \
224                 pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
225
226 enum VPU_DEC_FMT {
227         VPU_DEC_FMT_H264,
228         VPU_DEC_FMT_MPEG4,
229         VPU_DEC_FMT_H263,
230         VPU_DEC_FMT_JPEG,
231         VPU_DEC_FMT_VC1,
232         VPU_DEC_FMT_MPEG2,
233         VPU_DEC_FMT_MPEG1,
234         VPU_DEC_FMT_VP6,
235         VPU_DEC_FMT_RESERV0,
236         VPU_DEC_FMT_VP7,
237         VPU_DEC_FMT_VP8,
238         VPU_DEC_FMT_AVS,
239         VPU_DEC_FMT_RES
240 };
241
242 /**
243  * struct for a process session which connects to the vpu
244  *
245  * @author ChenHengming (2011-5-3)
246  */
247 struct vpu_session {
248         enum VPU_CLIENT_TYPE type;
249         /* a linked list of data so we can access them for debugging */
250         struct list_head list_session;
251         /* a linked list of register data waiting for process */
252         struct list_head waiting;
253         /* a linked list of register data in processing */
254         struct list_head running;
255         /* a linked list of register data processed */
256         struct list_head done;
257         wait_queue_head_t wait;
258         pid_t pid;
259         atomic_t task_running;
260 };
261
262 /**
263  * struct for process register set
264  *
265  * @author ChenHengming (2011-5-4)
266  */
267 struct vpu_reg {
268         enum VPU_CLIENT_TYPE type;
269         enum VPU_FREQ freq;
270         struct vpu_session *session;
271         struct vpu_subdev_data *data;
272         struct vpu_task_info *task;
273         const struct vpu_trans_info *trans;
274
275         /* link to vpu service session */
276         struct list_head session_link;
277         /* link to register set list */
278         struct list_head status_link;
279
280         unsigned long size;
281         struct list_head mem_region_list;
282         u32 dec_base;
283         u32 *reg;
284 };
285
286 struct vpu_device {
287         atomic_t irq_count_codec;
288         atomic_t irq_count_pp;
289         unsigned int iosize;
290         u32 *regs;
291 };
292
293 enum vcodec_device_id {
294         VCODEC_DEVICE_ID_VPU,
295         VCODEC_DEVICE_ID_HEVC,
296         VCODEC_DEVICE_ID_COMBO,
297         VCODEC_DEVICE_ID_RKVDEC,
298         VCODEC_DEVICE_ID_BUTT
299 };
300
301 enum VCODEC_RUNNING_MODE {
302         VCODEC_RUNNING_MODE_NONE = -1,
303         VCODEC_RUNNING_MODE_VPU,
304         VCODEC_RUNNING_MODE_HEVC,
305         VCODEC_RUNNING_MODE_RKVDEC
306 };
307
308 struct vcodec_mem_region {
309         struct list_head srv_lnk;
310         struct list_head reg_lnk;
311         struct list_head session_lnk;
312         unsigned long iova;     /* virtual address for iommu */
313         unsigned long len;
314         u32 reg_idx;
315         int hdl;
316 };
317
318 enum vpu_ctx_state {
319         MMU_ACTIVATED   = BIT(0)
320 };
321
322 struct vpu_subdev_data {
323         struct cdev cdev;
324         dev_t dev_t;
325         struct class *cls;
326         struct device *child_dev;
327
328         int irq_enc;
329         int irq_dec;
330         struct vpu_service_info *pservice;
331
332         u32 *regs;
333         enum VCODEC_RUNNING_MODE mode;
334         struct list_head lnk_service;
335
336         struct device *dev;
337
338         struct vpu_device enc_dev;
339         struct vpu_device dec_dev;
340
341         enum VPU_HW_ID hw_id;
342         struct vpu_hw_info *hw_info;
343         struct vpu_task_info *task_info;
344         const struct vpu_trans_info *trans_info;
345
346         u32 reg_size;
347         unsigned long state;
348
349 #ifdef CONFIG_DEBUG_FS
350         struct dentry *debugfs_dir;
351         struct dentry *debugfs_file_regs;
352 #endif
353
354         struct device *mmu_dev;
355         struct vcodec_iommu_info *iommu_info;
356 };
357
358 struct vpu_service_info {
359         struct wake_lock wake_lock;
360         struct delayed_work power_off_work;
361         ktime_t last; /* record previous power-on time */
362         /* vpu service structure global lock */
363         struct mutex lock;
364         /* link to link_reg in struct vpu_reg */
365         struct list_head waiting;
366         /* link to link_reg in struct vpu_reg */
367         struct list_head running;
368         /* link to link_reg in struct vpu_reg */
369         struct list_head done;
370         /* link to list_session in struct vpu_session */
371         struct list_head session;
372         atomic_t total_running;
373         atomic_t enabled;
374         atomic_t power_on_cnt;
375         atomic_t power_off_cnt;
376         atomic_t service_on;
377         struct mutex shutdown_lock;
378         struct vpu_reg *reg_codec;
379         struct vpu_reg *reg_pproc;
380         struct vpu_reg *reg_resev;
381         struct vpu_dec_config dec_config;
382         struct vpu_enc_config enc_config;
383
384         bool auto_freq;
385         bool bug_dec_addr;
386         atomic_t freq_status;
387
388         struct clk *aclk_vcodec;
389         struct clk *hclk_vcodec;
390         struct clk *clk_core;
391         struct clk *clk_cabac;
392         struct clk *pd_video;
393
394 #ifdef CONFIG_RESET_CONTROLLER
395         struct reset_control *rst_a;
396         struct reset_control *rst_h;
397         struct reset_control *rst_v;
398 #endif
399         struct device *dev;
400
401         u32 irq_status;
402         atomic_t reset_request;
403         struct list_head mem_region_list;
404
405         enum vcodec_device_id dev_id;
406
407         enum VCODEC_RUNNING_MODE curr_mode;
408         u32 prev_mode;
409
410         struct delayed_work simulate_work;
411
412         u32 mode_bit;
413         u32 mode_ctrl;
414         u32 *reg_base;
415         u32 ioaddr;
416         struct regmap *grf;
417         u32 *grf_base;
418
419         char *name;
420
421         u32 subcnt;
422         struct list_head subdev_list;
423
424         u32 alloc_type;
425 };
426
427 struct vpu_request {
428         u32 *req;
429         u32 size;
430 };
431
432 #ifdef CONFIG_COMPAT
433 struct compat_vpu_request {
434         compat_uptr_t req;
435         u32 size;
436 };
437 #endif
438
439 #define VDPU_SOFT_RESET_REG     101
440 #define VDPU_CLEAN_CACHE_REG    516
441 #define VEPU_CLEAN_CACHE_REG    772
442 #define HEVC_CLEAN_CACHE_REG    260
443
444 #define VPU_REG_ENABLE(base, reg)       writel_relaxed(1, base + reg)
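/*
 * Note: 'base' is a u32 *, so 'base + reg' addresses register index 'reg',
 * i.e. byte offset SIZE_REG(reg) from the block base.
 */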
445
446 #define VDPU_SOFT_RESET(base)   VPU_REG_ENABLE(base, VDPU_SOFT_RESET_REG)
447 #define VDPU_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, VDPU_CLEAN_CACHE_REG)
448 #define VEPU_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, VEPU_CLEAN_CACHE_REG)
449 #define HEVC_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, HEVC_CLEAN_CACHE_REG)
450
451 #define VPU_POWER_OFF_DELAY             (4 * HZ) /* 4s */
452 #define VPU_TIMEOUT_DELAY               (2 * HZ) /* 2s */
453
454 static void *vcodec_get_drv_data(struct platform_device *pdev);
455
456 static void vpu_service_power_on(struct vpu_subdev_data *data,
457                                  struct vpu_service_info *pservice);
458
459 static void time_record(struct vpu_task_info *task, int is_end)
460 {
461         if (unlikely(debug & DEBUG_TIMING) && task)
462                 do_gettimeofday((is_end) ? (&task->end) : (&task->start));
463 }
464
465 static void time_diff(struct vpu_task_info *task)
466 {
467         vpu_debug(DEBUG_TIMING, "%s task: %ld ms\n", task->name,
468                   (task->end.tv_sec  - task->start.tv_sec)  * 1000 +
469                   (task->end.tv_usec - task->start.tv_usec) / 1000);
470 }
471
472 static void vcodec_enter_mode(struct vpu_subdev_data *data)
473 {
474         int bits;
475         u32 raw = 0;
476         struct vpu_service_info *pservice = data->pservice;
477         struct vpu_subdev_data *subdata, *n;
478
479         if (pservice->subcnt < 2)
480                 return;
481
482         if (pservice->curr_mode == data->mode)
483                 return;
484
485         vpu_debug(DEBUG_IOMMU, "vcodec enter mode %d\n", data->mode);
486         list_for_each_entry_safe(subdata, n,
487                                  &pservice->subdev_list, lnk_service) {
488                 if (data != subdata && subdata->mmu_dev &&
489                     test_bit(MMU_ACTIVATED, &subdata->state)) {
490                         clear_bit(MMU_ACTIVATED, &subdata->state);
491                 }
492         }
493         bits = 1 << pservice->mode_bit;
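        /*
         * Rockchip GRF registers use their upper 16 bits as a write-enable
         * mask: only bits whose mask bit is set are modified by a write.
         * So (raw | bits | (bits << 16)) sets the mode bit and
         * ((raw & ~bits) | (bits << 16)) clears it, leaving the other bits
         * untouched.
         */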
494 #ifdef CONFIG_MFD_SYSCON
495         if (pservice->grf) {
496                 regmap_read(pservice->grf, pservice->mode_ctrl, &raw);
497
498                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
499                         regmap_write(pservice->grf, pservice->mode_ctrl,
500                                      raw | bits | (bits << 16));
501                 else
502                         regmap_write(pservice->grf, pservice->mode_ctrl,
503                                      (raw & (~bits)) | (bits << 16));
504         } else if (pservice->grf_base) {
505                 u32 *grf_base = pservice->grf_base;
506
507                 raw = readl_relaxed(grf_base + pservice->mode_ctrl / 4);
508                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
509                         writel_relaxed(raw | bits | (bits << 16),
510                                        grf_base + pservice->mode_ctrl / 4);
511                 else
512                         writel_relaxed((raw & (~bits)) | (bits << 16),
513                                        grf_base + pservice->mode_ctrl / 4);
514         } else {
515                 vpu_err("no grf resource define, switch decoder failed\n");
516                 return;
517         }
518 #else
519         if (pservice->grf_base) {
520                 u32 *grf_base = pservice->grf_base;
521
522                 raw = readl_relaxed(grf_base + pservice->mode_ctrl / 4);
523                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
524                         writel_relaxed(raw | bits | (bits << 16),
525                                        grf_base + pservice->mode_ctrl / 4);
526                 else
527                         writel_relaxed((raw & (~bits)) | (bits << 16),
528                                        grf_base + pservice->mode_ctrl / 4);
529         } else {
530                 vpu_err("no grf resource define, switch decoder failed\n");
531                 return;
532         }
533 #endif
534         if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
535                 set_bit(MMU_ACTIVATED, &data->state);
536                 if (!atomic_read(&pservice->enabled))
537                         /* FIXME: BUG_ON should not be used in mass production */
538                         BUG_ON(!atomic_read(&pservice->enabled));
539         }
540
541         pservice->prev_mode = pservice->curr_mode;
542         pservice->curr_mode = data->mode;
543 }
544
545 static void vcodec_exit_mode(struct vpu_subdev_data *data)
546 {
547         /*
548          * In the VPU Combo case the hardware must switch its running mode
549          * before the other hardware component starts work. Setting the current
550          * running mode to none ensures the hardware switches to its required mode.
551          */
552         data->pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
553 }
554
555 static int vpu_get_clk(struct vpu_service_info *pservice)
556 {
557 #if VCODEC_CLOCK_ENABLE
558         struct device *dev = pservice->dev;
559
560         switch (pservice->dev_id) {
561         case VCODEC_DEVICE_ID_HEVC:
562                 pservice->pd_video = devm_clk_get(dev, "pd_hevc");
563                 if (IS_ERR(pservice->pd_video)) {
564                         pservice->pd_video = NULL;
565                         dev_info(dev, "failed on clk_get pd_hevc\n");
566                 }
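                /* fall through */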
567         case VCODEC_DEVICE_ID_COMBO:
568         case VCODEC_DEVICE_ID_RKVDEC:
569                 pservice->clk_cabac = devm_clk_get(dev, "clk_cabac");
570                 if (IS_ERR(pservice->clk_cabac)) {
571                         dev_err(dev, "failed on clk_get clk_cabac\n");
572                         pservice->clk_cabac = NULL;
573                 }
574                 pservice->clk_core = devm_clk_get(dev, "clk_core");
575                 if (IS_ERR(pservice->clk_core)) {
576                         dev_err(dev, "failed on clk_get clk_core\n");
577                         pservice->clk_core = NULL;
578                         return -1;
579                 }
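                /* fall through */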
580         case VCODEC_DEVICE_ID_VPU:
581                 pservice->aclk_vcodec = devm_clk_get(dev, "aclk_vcodec");
582                 if (IS_ERR(pservice->aclk_vcodec)) {
583                         dev_err(dev, "failed on clk_get aclk_vcodec\n");
584                         pservice->aclk_vcodec = NULL;
585                         return -1;
586                 }
587
588                 pservice->hclk_vcodec = devm_clk_get(dev, "hclk_vcodec");
589                 if (IS_ERR(pservice->hclk_vcodec)) {
590                         dev_err(dev, "failed on clk_get hclk_vcodec\n");
591                         pservice->hclk_vcodec = NULL;
592                         return -1;
593                 }
594                 if (pservice->pd_video == NULL) {
595                         pservice->pd_video = devm_clk_get(dev, "pd_video");
596                         if (IS_ERR(pservice->pd_video)) {
597                                 pservice->pd_video = NULL;
598                                 dev_info(dev, "do not have pd_video\n");
599                         }
600                 }
601                 break;
602         default:
603                 break;
604         }
605
606         return 0;
607 #else
608         return 0;
609 #endif
610 }
611
612 static void _vpu_reset(struct vpu_subdev_data *data)
613 {
614         struct vpu_service_info *pservice = data->pservice;
615         enum pmu_idle_req type = IDLE_REQ_VIDEO;
616
617         if (pservice->dev_id == VCODEC_DEVICE_ID_HEVC)
618                 type = IDLE_REQ_HEVC;
619
620         dev_info(pservice->dev, "resetting...\n");
621         WARN_ON(pservice->reg_codec != NULL);
622         WARN_ON(pservice->reg_pproc != NULL);
623         WARN_ON(pservice->reg_resev != NULL);
624         pservice->reg_codec = NULL;
625         pservice->reg_pproc = NULL;
626         pservice->reg_resev = NULL;
627
628 #ifdef CONFIG_RESET_CONTROLLER
629         dev_info(pservice->dev, "for 3288/3368...");
630         if (of_machine_is_compatible("rockchip,rk3288"))
631                 rockchip_pmu_idle_request(pservice->dev, true);
632         if (pservice->rst_a && pservice->rst_h) {
633                 dev_info(pservice->dev, "vpu reset in\n");
634
635                 if (pservice->rst_v)
636                         reset_control_assert(pservice->rst_v);
637                 reset_control_assert(pservice->rst_a);
638                 reset_control_assert(pservice->rst_h);
639                 udelay(5);
640
641                 reset_control_deassert(pservice->rst_h);
642                 reset_control_deassert(pservice->rst_a);
643                 if (pservice->rst_v)
644                         reset_control_deassert(pservice->rst_v);
645         } else if (pservice->rst_v) {
646                 dev_info(pservice->dev, "hevc reset in\n");
647                 reset_control_assert(pservice->rst_v);
648                 udelay(5);
649
650                 reset_control_deassert(pservice->rst_v);
651         }
652         if (of_machine_is_compatible("rockchip,rk3288"))
653                 rockchip_pmu_idle_request(pservice->dev, false);
654 #endif
655 }
656
657 static void vpu_reset(struct vpu_subdev_data *data)
658 {
659         struct vpu_service_info *pservice = data->pservice;
660
661         _vpu_reset(data);
662         if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
663                 if (atomic_read(&pservice->enabled)) {
664                         /* Need to reset iommu */
665                         vcodec_iommu_detach(data->iommu_info);
666                         vcodec_iommu_attach(data->iommu_info);
667                 } else {
668                         /* FIXME: BUG_ON should not be used in mass production */
669                         BUG_ON(!atomic_read(&pservice->enabled));
670                 }
671         }
672
673         atomic_set(&pservice->reset_request, 0);
674         dev_info(pservice->dev, "reset done\n");
675 }
676
677 static void reg_deinit(struct vpu_subdev_data *data, struct vpu_reg *reg);
678 static void vpu_service_session_clear(struct vpu_subdev_data *data,
679                                       struct vpu_session *session)
680 {
681         struct vpu_reg *reg, *n;
682
683         list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
684                 reg_deinit(data, reg);
685         }
686         list_for_each_entry_safe(reg, n, &session->running, session_link) {
687                 reg_deinit(data, reg);
688         }
689         list_for_each_entry_safe(reg, n, &session->done, session_link) {
690                 reg_deinit(data, reg);
691         }
692 }
693
694 static void vpu_service_clear(struct vpu_subdev_data *data)
695 {
696         struct vpu_reg *reg, *n;
697         struct vpu_session *session, *s;
698         struct vpu_service_info *pservice = data->pservice;
699
700         list_for_each_entry_safe(reg, n, &pservice->waiting, status_link) {
701                 reg_deinit(data, reg);
702         }
703
704         /* wake up session wait event to prevent the timeout hw reset
705          * during reboot procedure.
706          */
707         list_for_each_entry_safe(session, s,
708                                  &pservice->session, list_session)
709                 wake_up(&session->wait);
710 }
711
712 static void vpu_service_dump(struct vpu_service_info *pservice)
713 {
714 }
715
716
717 static void vpu_service_power_off(struct vpu_service_info *pservice)
718 {
719         int total_running;
720         struct vpu_subdev_data *data = NULL, *n;
721         int ret = atomic_add_unless(&pservice->enabled, -1, 0);
722
723         if (!ret)
724                 return;
725
726         total_running = atomic_read(&pservice->total_running);
727         if (total_running) {
728                 pr_alert("alert: power off when %d task running!!\n",
729                          total_running);
730                 mdelay(50);
731                 pr_alert("alert: delay 50 ms for running task\n");
732                 vpu_service_dump(pservice);
733         }
734
735         dev_dbg(pservice->dev, "power off...\n");
736
737         udelay(5);
738
739         list_for_each_entry_safe(data, n, &pservice->subdev_list, lnk_service) {
740                 if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
741                         clear_bit(MMU_ACTIVATED, &data->state);
742                         vcodec_iommu_detach(data->iommu_info);
743                 }
744         }
745         pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
746         pm_runtime_put(pservice->dev);
747 #if VCODEC_CLOCK_ENABLE
748         if (pservice->pd_video)
749                 clk_disable_unprepare(pservice->pd_video);
750         if (pservice->hclk_vcodec)
751                 clk_disable_unprepare(pservice->hclk_vcodec);
752         if (pservice->aclk_vcodec)
753                 clk_disable_unprepare(pservice->aclk_vcodec);
754         if (pservice->clk_core)
755                 clk_disable_unprepare(pservice->clk_core);
756         if (pservice->clk_cabac)
757                 clk_disable_unprepare(pservice->clk_cabac);
758 #endif
759
760         atomic_add(1, &pservice->power_off_cnt);
761         wake_unlock(&pservice->wake_lock);
762         dev_dbg(pservice->dev, "power off done\n");
763 }
764
765 static inline void vpu_queue_power_off_work(struct vpu_service_info *pservice)
766 {
767         queue_delayed_work(system_wq, &pservice->power_off_work,
768                            VPU_POWER_OFF_DELAY);
769 }
770
771 static void vpu_power_off_work(struct work_struct *work_s)
772 {
773         struct delayed_work *dlwork = container_of(work_s,
774                         struct delayed_work, work);
775         struct vpu_service_info *pservice = container_of(dlwork,
776                         struct vpu_service_info, power_off_work);
777
778         if (mutex_trylock(&pservice->lock)) {
779                 vpu_service_power_off(pservice);
780                 mutex_unlock(&pservice->lock);
781         } else {
782                 /* Come back later if the device is busy... */
783                 vpu_queue_power_off_work(pservice);
784         }
785 }
786
787 static void vpu_service_power_on(struct vpu_subdev_data *data,
788                                  struct vpu_service_info *pservice)
789 {
790         int ret;
791         ktime_t now = ktime_get();
792
793         if (ktime_to_ns(ktime_sub(now, pservice->last)) > NSEC_PER_SEC ||
794             atomic_read(&pservice->power_on_cnt)) {
795                 /* NSEC_PER_SEC */
796                 cancel_delayed_work_sync(&pservice->power_off_work);
797                 vpu_queue_power_off_work(pservice);
798                 pservice->last = now;
799         }
800         ret = atomic_add_unless(&pservice->enabled, 1, 1);
801         if (!ret) {
802                 if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
803                         set_bit(MMU_ACTIVATED, &data->state);
804                         vcodec_iommu_attach(data->iommu_info);
805                 }
806                 return;
807         }
808
809         dev_dbg(pservice->dev, "power on\n");
810
811 #define BIT_VCODEC_CLK_SEL      (1<<10)
812         if (of_machine_is_compatible("rockchip,rk3126"))
813                 writel_relaxed(readl_relaxed(RK_GRF_VIRT + RK312X_GRF_SOC_CON1)
814                         | BIT_VCODEC_CLK_SEL | (BIT_VCODEC_CLK_SEL << 16),
815                         RK_GRF_VIRT + RK312X_GRF_SOC_CON1);
816
817 #if VCODEC_CLOCK_ENABLE
818         if (pservice->aclk_vcodec)
819                 clk_prepare_enable(pservice->aclk_vcodec);
820         if (pservice->hclk_vcodec)
821                 clk_prepare_enable(pservice->hclk_vcodec);
822         if (pservice->clk_core)
823                 clk_prepare_enable(pservice->clk_core);
824         if (pservice->clk_cabac)
825                 clk_prepare_enable(pservice->clk_cabac);
826         if (pservice->pd_video)
827                 clk_prepare_enable(pservice->pd_video);
828 #endif
829         pm_runtime_get_sync(pservice->dev);
830
831         if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
832                 set_bit(MMU_ACTIVATED, &data->state);
833                 if (atomic_read(&pservice->enabled))
834                         vcodec_iommu_attach(data->iommu_info);
835                 else
836                         /*
837                          * FIXME: BUG_ON should not be used in mass
838                          * production.
839                          */
840                         BUG_ON(!atomic_read(&pservice->enabled));
841         }
842
843         udelay(5);
844         atomic_add(1, &pservice->power_on_cnt);
845         wake_lock(&pservice->wake_lock);
846 }
847
848 static inline bool reg_check_interlace(struct vpu_reg *reg)
849 {
850         u32 type = (reg->reg[3] & (1 << 23));
851
852         return (type > 0);
853 }
854
855 static inline enum VPU_DEC_FMT reg_check_fmt(struct vpu_reg *reg)
856 {
857         enum VPU_DEC_FMT type = (enum VPU_DEC_FMT)((reg->reg[3] >> 28) & 0xf);
858
859         return type;
860 }
861
862 static inline int reg_probe_width(struct vpu_reg *reg)
863 {
864         int width_in_mb = reg->reg[4] >> 23;
865
866         return width_in_mb * 16;
867 }
868
869 static inline int reg_probe_hevc_y_stride(struct vpu_reg *reg)
870 {
871         int y_virstride = reg->reg[8];
872
873         return y_virstride;
874 }
875
876 static int vcodec_fd_to_iova(struct vpu_subdev_data *data,
877                 struct vpu_session *session,
878                 struct vpu_reg *reg,
879                 int fd)
880 {
881         int hdl;
882         int ret = 0;
883         struct vcodec_mem_region *mem_region;
884
885         hdl = vcodec_iommu_import(data->iommu_info, session, fd);
886         if (hdl < 0)
887                 return hdl;
888
889         mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
890         if (mem_region == NULL) {
891                 vpu_err("allocate memory for iommu memory region failed\n");
892                 vcodec_iommu_free(data->iommu_info, session, hdl);
893                 return -ENOMEM;
894         }
895
896         mem_region->hdl = hdl;
897         ret = vcodec_iommu_map_iommu(data->iommu_info, session, mem_region->hdl,
898                                      &mem_region->iova, &mem_region->len);
899         if (ret < 0) {
900                 vpu_err("fd %d ion map iommu failed\n", fd);
901                 kfree(mem_region);
902                 vcodec_iommu_free(data->iommu_info, session, hdl);
903
904                 return -EFAULT;
905         }
906         INIT_LIST_HEAD(&mem_region->reg_lnk);
907         list_add_tail(&mem_region->reg_lnk, &reg->mem_region_list);
908         return mem_region->iova;
909 }
910
911 /*
912  * NOTE: rkvdec/rkhevc put the scaling list address in the pps buffer; the
913  * hardware reads it by pps id from the video stream data.
914  *
915  * So we need to translate the address in the iommu case. The address data
916  * also uses the 10bit fd + 22bit offset format.
917  * Because the userspace decoder does not give the pps id in the register
918  * file set, the kernel driver needs to translate every scaling list address
919  * in the pps buffer, which means 256 pps for H.264 and 64 pps for H.265.
920  *
921  * In order to optimize performance the kernel driver asks the userspace
922  * decoder to set every scaling list address in the pps buffer to the one
923  * that will be used by the current decoding task. Then the kernel driver
924  * only needs to translate the first address and copy it to all pps entries.
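 *
 * Worked example (illustrative numbers): a packed scaling list address of
 * 0x00002803 decodes to fd = (0x00002803 & 0x3ff) = 3 and
 * offset = (0x00002803 >> 10) = 10, so after mapping fd 3 through the iommu
 * the address written back into the pps entries is iova(fd 3) + 10.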
925  */
926 static int fill_scaling_list_addr_in_pps(
927                 struct vpu_subdev_data *data,
928                 struct vpu_reg *reg,
929                 char *pps,
930                 int pps_info_count,
931                 int pps_info_size,
932                 int scaling_list_addr_offset)
933 {
934         int base = scaling_list_addr_offset;
935         int scaling_fd = 0;
936         u32 scaling_offset;
937
938         scaling_offset  = (u32)pps[base + 0];
939         scaling_offset += (u32)pps[base + 1] << 8;
940         scaling_offset += (u32)pps[base + 2] << 16;
941         scaling_offset += (u32)pps[base + 3] << 24;
942
943         scaling_fd = scaling_offset & 0x3ff;
944         scaling_offset = scaling_offset >> 10;
945
946         if (scaling_fd > 0) {
947                 int i = 0;
948                 u32 tmp = vcodec_fd_to_iova(data, reg->session, reg,
949                                             scaling_fd);
950
951                 if (IS_ERR_VALUE(tmp))
952                         return -1;
953                 tmp += scaling_offset;
954
955                 for (i = 0; i < pps_info_count; i++, base += pps_info_size) {
956                         pps[base + 0] = (tmp >>  0) & 0xff;
957                         pps[base + 1] = (tmp >>  8) & 0xff;
958                         pps[base + 2] = (tmp >> 16) & 0xff;
959                         pps[base + 3] = (tmp >> 24) & 0xff;
960                 }
961         }
962
963         return 0;
964 }
965
966 static int vcodec_bufid_to_iova(struct vpu_subdev_data *data,
967                                 struct vpu_session *session,
968                                 const u8 *tbl,
969                                 int size, struct vpu_reg *reg,
970                                 struct extra_info_for_iommu *ext_inf)
971 {
972         struct vpu_service_info *pservice = data->pservice;
973         struct vpu_task_info *task = reg->task;
974         enum FORMAT_TYPE type;
975         int hdl;
976         int ret = 0;
977         struct vcodec_mem_region *mem_region;
978         int i;
979         int offset = 0;
980
981         if (tbl == NULL || size <= 0) {
982                 dev_err(pservice->dev, "invalid input arguments\n");
983                 return -EINVAL;
984         }
985
986         if (task->get_fmt)
987                 type = task->get_fmt(reg->reg);
988         else {
989                 dev_err(pservice->dev, "invalid task with NULL get_fmt\n");
990                 return -EINVAL;
991         }
992
993         for (i = 0; i < size; i++) {
994                 int usr_fd = reg->reg[tbl[i]] & 0x3FF;
995
996                 /* if userspace did not set an fd in this register, skip it */
997                 if (usr_fd == 0)
998                         continue;
999
1000                 /*
1001                  * To avoid cache sync issues we need to map/unmap the input
1002                  * buffer every time. FIXME: drop this if it proves unnecessary.
1003                  */
1004                 if (task->reg_rlc == tbl[i])
1005                         vcodec_iommu_free_fd(data->iommu_info, session, usr_fd);
1006                 /*
1007                  * special offset scale case
1008                  *
1009                  * This is the fd + offset translation.
1010                  * One register is 32 bits wide. We need to transfer both the
1011                  * buffer file handle and the start address offset, so we pack
1012                  * the file handle and the offset together in the format below:
1013                  *
1014                  *  0~9  bit: buffer file handle, range 0 ~ 1023
1015                  * 10~31 bit: offset, range 0 ~ 4M
1016                  *
1017                  * But in the 4K case the offset can be larger than 4M, so for
1018                  * the H.264 4K vpu/vpu2 decoder we scale the offset by 16.
1019                  * MPEG4 uses the same register for colmv and does not need the
1020                  * scaling.
1021                  *
1022                  * RKVdec does not have this issue.
1023                  */
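                /*
                 * Worked example (illustrative): a register value of
                 * 0x00002803 gives usr_fd = 3 and a raw offset of 10
                 * (0x00002803 >> 10); with the x16 scaling applied below for
                 * H.264/VP9 colmv the byte offset becomes 160.
                 */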
1024                 if ((type == FMT_H264D || type == FMT_VP9D) &&
1025                     task->reg_dir_mv > 0 && task->reg_dir_mv == tbl[i])
1026                         offset = reg->reg[tbl[i]] >> 10 << 4;
1027                 else
1028                         offset = reg->reg[tbl[i]] >> 10;
1029
1030                 vpu_debug(DEBUG_IOMMU, "pos %3d fd %3d offset %10d i %d\n",
1031                           tbl[i], usr_fd, offset, i);
1032
1033                 hdl = vcodec_iommu_import(data->iommu_info, session, usr_fd);
1034
1035                 if (task->reg_pps > 0 && task->reg_pps == tbl[i]) {
1036                         int pps_info_offset;
1037                         int pps_info_count;
1038                         int pps_info_size;
1039                         int scaling_list_addr_offset;
1040
1041                         switch (type) {
1042                         case FMT_H264D: {
1043                                 pps_info_offset = offset;
1044                                 pps_info_count = 256;
1045                                 pps_info_size = 32;
1046                                 scaling_list_addr_offset = 23;
1047                         } break;
1048                         case FMT_H265D: {
1049                                 pps_info_offset = 0;
1050                                 pps_info_count = 64;
1051                                 pps_info_size = 80;
1052                                 scaling_list_addr_offset = 74;
1053                         } break;
1054                         default: {
1055                                 pps_info_offset = 0;
1056                                 pps_info_count = 0;
1057                                 pps_info_size = 0;
1058                                 scaling_list_addr_offset = 0;
1059                         } break;
1060                         }
1061
1062                         vpu_debug(DEBUG_PPS_FILL,
1063                                   "scaling list filling parameter:\n");
1064                         vpu_debug(DEBUG_PPS_FILL,
1065                                   "pps_info_offset %d\n", pps_info_offset);
1066                         vpu_debug(DEBUG_PPS_FILL,
1067                                   "pps_info_count  %d\n", pps_info_count);
1068                         vpu_debug(DEBUG_PPS_FILL,
1069                                   "pps_info_size   %d\n", pps_info_size);
1070                         vpu_debug(DEBUG_PPS_FILL,
1071                                   "scaling_list_addr_offset %d\n",
1072                                   scaling_list_addr_offset);
1073
1074                         if (pps_info_count) {
1075                                 u8 *pps;
1076
1077                                 mutex_lock(&pservice->lock);
1078
1079                                 pps = vcodec_iommu_map_kernel
1080                                         (data->iommu_info, session, hdl);
1081
1082                                 vpu_debug(DEBUG_PPS_FILL,
1083                                           "scaling list setting pps %p\n", pps);
1084                                 pps += pps_info_offset;
1085
1086                                 fill_scaling_list_addr_in_pps
1087                                         (data, reg, pps, pps_info_count,
1088                                          pps_info_size,
1089                                          scaling_list_addr_offset);
1090
1091                                 vcodec_iommu_unmap_kernel
1092                                         (data->iommu_info, session, hdl);
1093                                 mutex_unlock(&pservice->lock);
1094                         }
1095                 }
1096
1097                 mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
1098
1099                 if (!mem_region) {
1100                         vcodec_iommu_free(data->iommu_info, session, hdl);
1101                         return -ENOMEM;
1102                 }
1103
1104                 mem_region->hdl = hdl;
1105                 mem_region->reg_idx = tbl[i];
1106
1107                 ret = vcodec_iommu_map_iommu(data->iommu_info, session,
1108                                              mem_region->hdl, &mem_region->iova,
1109                                              &mem_region->len);
1110                 if (ret < 0) {
1111                         dev_err(pservice->dev,
1112                                 "reg %d fd %d ion map iommu failed\n",
1113                                 tbl[i], usr_fd);
1114                         kfree(mem_region);
1115                         vcodec_iommu_free(data->iommu_info, session, hdl);
1116                         return ret;
1117                 }
1118
1119                 /*
1120                  * special for vpu dec num 12: record the decoded length
1121                  * base address (hack for the decoded length report)
1122                  * NOTE: not a perfect fix, the fd is not recorded
1123                  */
1124                 if (task->reg_len > 0 && task->reg_len == tbl[i]) {
1125                         reg->dec_base = mem_region->iova + offset;
1126                         vpu_debug(DEBUG_REGISTER, "dec_set %08x\n",
1127                                   reg->dec_base);
1128                 }
1129
1130                 reg->reg[tbl[i]] = mem_region->iova + offset;
1131                 INIT_LIST_HEAD(&mem_region->reg_lnk);
1132                 list_add_tail(&mem_region->reg_lnk, &reg->mem_region_list);
1133         }
1134
1135         if (ext_inf != NULL && ext_inf->magic == EXTRA_INFO_MAGIC) {
1136                 for (i = 0; i < ext_inf->cnt; i++) {
1137                         vpu_debug(DEBUG_IOMMU, "reg[%d] + offset %d\n",
1138                                   ext_inf->elem[i].index,
1139                                   ext_inf->elem[i].offset);
1140                         reg->reg[ext_inf->elem[i].index] +=
1141                                 ext_inf->elem[i].offset;
1142                 }
1143         }
1144
1145         return 0;
1146 }
1147
1148 static int vcodec_reg_address_translate(struct vpu_subdev_data *data,
1149                                         struct vpu_session *session,
1150                                         struct vpu_reg *reg,
1151                                         struct extra_info_for_iommu *ext_inf)
1152 {
1153         struct vpu_service_info *pservice = data->pservice;
1154         enum FORMAT_TYPE type = reg->task->get_fmt(reg->reg);
1155
1156         if (type < FMT_TYPE_BUTT) {
1157                 const struct vpu_trans_info *info = &reg->trans[type];
1158                 const u8 *tbl = info->table;
1159                 int size = info->count;
1160
1161                 return vcodec_bufid_to_iova(data, session, tbl, size, reg,
1162                                             ext_inf);
1163         }
1164
1165         dev_err(pservice->dev, "found invalid format type!\n");
1166         return -EINVAL;
1167 }
1168
1169 static void get_reg_freq(struct vpu_subdev_data *data, struct vpu_reg *reg)
1170 {
1171
1172         if (!of_machine_is_compatible("rockchip,rk2928g")) {
1173                 if (reg->type == VPU_DEC || reg->type == VPU_DEC_PP) {
1174                         if (reg_check_fmt(reg) == VPU_DEC_FMT_H264) {
1175                                 if (reg_probe_width(reg) > 3200) {
1176                                         /*raise frequency for 4k avc.*/
1177                                         reg->freq = VPU_FREQ_600M;
1178                                 }
1179                         } else {
1180                                 if (reg_check_interlace(reg))
1181                                         reg->freq = VPU_FREQ_400M;
1182                         }
1183                 }
1184                 if (data->hw_id == HEVC_ID) {
1185                         if (reg_probe_hevc_y_stride(reg) > 60000)
1186                                 reg->freq = VPU_FREQ_400M;
1187                 }
1188                 if (reg->type == VPU_PP)
1189                         reg->freq = VPU_FREQ_400M;
1190         }
1191 }
1192
1193 static struct vpu_reg *reg_init(struct vpu_subdev_data *data,
1194                                 struct vpu_session *session,
1195                                 void __user *src, u32 size)
1196 {
1197         struct vpu_service_info *pservice = data->pservice;
1198         int extra_size = 0;
1199         struct extra_info_for_iommu extra_info;
1200         struct vpu_reg *reg = kzalloc(sizeof(*reg) + data->reg_size,
1201                                       GFP_KERNEL);
1202
1203         vpu_debug_enter();
1204
1205         if (!reg) {
1206                 vpu_err("error: kzalloc failed\n");
1207                 return NULL;
1208         }
1209
1210         if (size > data->reg_size) {
1211                 extra_size = size - data->reg_size;
1212                 size = data->reg_size;
1213         }
1214         reg->session = session;
1215         reg->data = data;
1216         reg->type = session->type;
1217         reg->size = size;
1218         reg->freq = VPU_FREQ_DEFAULT;
1219         reg->task = &data->task_info[session->type];
1220         reg->trans = data->trans_info;
1221         reg->reg = (u32 *)&reg[1];
1222         INIT_LIST_HEAD(&reg->session_link);
1223         INIT_LIST_HEAD(&reg->status_link);
1224
1225         INIT_LIST_HEAD(&reg->mem_region_list);
1226
1227         if (copy_from_user(&reg->reg[0], (void __user *)src, size)) {
1228                 vpu_err("error: copy_from_user failed\n");
1229                 kfree(reg);
1230                 return NULL;
1231         }
1232
1233         if (copy_from_user(&extra_info, (u8 *)src + size, extra_size)) {
1234                 vpu_err("error: copy_from_user failed\n");
1235                 kfree(reg);
1236                 return NULL;
1237         }
1238
1239         if (vcodec_reg_address_translate(data, session, reg, &extra_info) < 0) {
1240                 int i = 0;
1241
1242                 vpu_err("error: translate reg address failed, dumping regs\n");
1243                 for (i = 0; i < size >> 2; i++)
1244                         dev_err(pservice->dev, "reg[%02d]: %08x\n",
1245                                 i, *((u32 *)src + i));
1246
1247                 kfree(reg);
1248                 return NULL;
1249         }
1250
1251         mutex_lock(&pservice->lock);
1252         list_add_tail(&reg->status_link, &pservice->waiting);
1253         list_add_tail(&reg->session_link, &session->waiting);
1254         mutex_unlock(&pservice->lock);
1255
1256         if (pservice->auto_freq)
1257                 get_reg_freq(data, reg);
1258
1259         vpu_debug_leave();
1260
1261         return reg;
1262 }
1263
1264 static void reg_deinit(struct vpu_subdev_data *data, struct vpu_reg *reg)
1265 {
1266         struct vpu_service_info *pservice = data->pservice;
1267         struct vcodec_mem_region *mem_region = NULL, *n;
1268
1269         list_del_init(&reg->session_link);
1270         list_del_init(&reg->status_link);
1271         if (reg == pservice->reg_codec)
1272                 pservice->reg_codec = NULL;
1273         if (reg == pservice->reg_pproc)
1274                 pservice->reg_pproc = NULL;
1275
1276         /* release the memory regions attached to this register table. */
1277         list_for_each_entry_safe(mem_region, n,
1278                         &reg->mem_region_list, reg_lnk) {
1279                 vcodec_iommu_unmap_iommu(data->iommu_info, reg->session,
1280                                          mem_region->hdl);
1281                 vcodec_iommu_free(data->iommu_info, reg->session,
1282                                   mem_region->hdl);
1283                 list_del_init(&mem_region->reg_lnk);
1284                 kfree(mem_region);
1285         }
1286
1287         kfree(reg);
1288 }
1289
1290 static void reg_from_wait_to_run(struct vpu_service_info *pservice,
1291                                  struct vpu_reg *reg)
1292 {
1293         vpu_debug_enter();
1294         list_del_init(&reg->status_link);
1295         list_add_tail(&reg->status_link, &pservice->running);
1296
1297         list_del_init(&reg->session_link);
1298         list_add_tail(&reg->session_link, &reg->session->running);
1299         vpu_debug_leave();
1300 }
1301
1302 static void reg_copy_from_hw(struct vpu_reg *reg, u32 *src, u32 count)
1303 {
1304         int i;
1305         u32 *dst = reg->reg;
1306
1307         vpu_debug_enter();
1308         for (i = 0; i < count; i++, src++)
1309                 *dst++ = readl_relaxed(src);
1310
1311         dst = (u32 *)&reg->reg[0];
1312         for (i = 0; i < count; i++)
1313                 vpu_debug(DEBUG_GET_REG, "get reg[%02d] %08x\n", i, dst[i]);
1314
1315         vpu_debug_leave();
1316 }
1317
1318 static void reg_from_run_to_done(struct vpu_subdev_data *data,
1319                                  struct vpu_reg *reg)
1320 {
1321         struct vpu_service_info *pservice = data->pservice;
1322         struct vpu_hw_info *hw_info = data->hw_info;
1323         struct vpu_task_info *task = reg->task;
1324
1325         vpu_debug_enter();
1326
1327         list_del_init(&reg->status_link);
1328         list_add_tail(&reg->status_link, &pservice->done);
1329
1330         list_del_init(&reg->session_link);
1331         list_add_tail(&reg->session_link, &reg->session->done);
1332
1333         switch (reg->type) {
1334         case VPU_ENC: {
1335                 pservice->reg_codec = NULL;
1336                 reg_copy_from_hw(reg, data->enc_dev.regs, hw_info->enc_reg_num);
1337                 reg->reg[task->reg_irq] = pservice->irq_status;
1338         } break;
1339         case VPU_DEC: {
1340                 pservice->reg_codec = NULL;
1341                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1342
1343                 /* revert hack for decoded length */
1344                 if (task->reg_len > 0) {
1345                         int reg_len = task->reg_len;
1346                         u32 dec_get = reg->reg[reg_len];
1347                         s32 dec_length = dec_get - reg->dec_base;
1348
1349                         vpu_debug(DEBUG_REGISTER,
1350                                   "dec_get %08x dec_length %d\n",
1351                                   dec_get, dec_length);
1352                         reg->reg[reg_len] = dec_length << 10;
1353                 }
1354
1355                 reg->reg[task->reg_irq] = pservice->irq_status;
1356         } break;
1357         case VPU_PP: {
1358                 pservice->reg_pproc = NULL;
1359                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1360                 writel_relaxed(0, data->dec_dev.regs + task->reg_irq);
1361         } break;
1362         case VPU_DEC_PP: {
1363                 u32 pipe_mode;
1364                 u32 *regs = data->dec_dev.regs;
1365
1366                 pservice->reg_codec = NULL;
1367                 pservice->reg_pproc = NULL;
1368
1369                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1370
1371                 /* NOTE: remove pp pipeline mode flag first */
1372                 pipe_mode = readl_relaxed(regs + task->reg_pipe);
1373                 pipe_mode &= ~task->pipe_mask;
1374                 writel_relaxed(pipe_mode, regs + task->reg_pipe);
1375
1376                 /* revert hack for decoded length */
1377                 if (task->reg_len > 0) {
1378                         int reg_len = task->reg_len;
1379                         u32 dec_get = reg->reg[reg_len];
1380                         s32 dec_length = dec_get - reg->dec_base;
1381
1382                         vpu_debug(DEBUG_REGISTER,
1383                                   "dec_get %08x dec_length %d\n",
1384                                   dec_get, dec_length);
1385                         reg->reg[reg_len] = dec_length << 10;
1386                 }
1387
1388                 reg->reg[task->reg_irq] = pservice->irq_status;
1389         } break;
1390         default: {
1391                 vpu_err("error: copy reg from hw with unknown type %d\n",
1392                         reg->type);
1393         } break;
1394         }
1395         vcodec_exit_mode(data);
1396
1397         atomic_sub(1, &reg->session->task_running);
1398         atomic_sub(1, &pservice->total_running);
1399         wake_up(&reg->session->wait);
1400
1401         vpu_debug_leave();
1402 }
1403
1404 static void vpu_service_set_freq(struct vpu_service_info *pservice,
1405                                  struct vpu_reg *reg)
1406 {
1407         enum VPU_FREQ curr = atomic_read(&pservice->freq_status);
1408
1409         if (curr == reg->freq)
1410                 return;
1411
1412         atomic_set(&pservice->freq_status, reg->freq);
1413         switch (reg->freq) {
1414         case VPU_FREQ_200M: {
1415                 clk_set_rate(pservice->aclk_vcodec, 200*MHZ);
1416         } break;
1417         case VPU_FREQ_266M: {
1418                 clk_set_rate(pservice->aclk_vcodec, 266*MHZ);
1419         } break;
1420         case VPU_FREQ_300M: {
1421                 clk_set_rate(pservice->aclk_vcodec, 300*MHZ);
1422         } break;
1423         case VPU_FREQ_400M: {
1424                 clk_set_rate(pservice->aclk_vcodec, 400*MHZ);
1425         } break;
1426         case VPU_FREQ_500M: {
1427                 clk_set_rate(pservice->aclk_vcodec, 500*MHZ);
1428         } break;
1429         case VPU_FREQ_600M: {
1430                 clk_set_rate(pservice->aclk_vcodec, 600*MHZ);
1431         } break;
1432         default: {
1433                 unsigned long rate = 300*MHZ;
1434
1435                 if (of_machine_is_compatible("rockchip,rk2928g"))
1436                         rate = 400*MHZ;
1437
1438                 clk_set_rate(pservice->aclk_vcodec, rate);
1439         } break;
1440         }
1441 }
1442
1443 static void reg_copy_to_hw(struct vpu_subdev_data *data, struct vpu_reg *reg)
1444 {
1445         struct vpu_service_info *pservice = data->pservice;
1446         struct vpu_task_info *task = reg->task;
1447         struct vpu_hw_info *hw_info = data->hw_info;
1448         int i;
1449         u32 *src = (u32 *)&reg->reg[0];
1450         u32 enable_mask = task->enable_mask;
1451         u32 gating_mask = task->gating_mask;
1452         u32 reg_en = task->reg_en;
1453
1454         vpu_debug_enter();
1455
1456         atomic_add(1, &pservice->total_running);
1457         atomic_add(1, &reg->session->task_running);
1458
1459         if (pservice->auto_freq)
1460                 vpu_service_set_freq(pservice, reg);
1461
1462         vcodec_enter_mode(data);
1463
1464         switch (reg->type) {
1465         case VPU_ENC: {
1466                 u32 *dst = data->enc_dev.regs;
1467                 u32 base = 0;
1468                 u32 end  = hw_info->enc_reg_num;
1469                 /* u32 reg_gating = task->reg_gating; */
1470
1471                 pservice->reg_codec = reg;
1472
1473                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1474                           base, end, reg_en, enable_mask, gating_mask);
1475
1476                 VEPU_CLEAN_CACHE(dst);
1477
1478                 if (debug & DEBUG_SET_REG)
1479                         for (i = base; i < end; i++)
1480                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1481                                           i, src[i]);
1482
1483                 /*
1484                  * NOTE: the encoder needs to set up its mode first
1485                  */
1486                 writel_relaxed(src[reg_en] & enable_mask, dst + reg_en);
1487
1488                 /* NOTE: encoder gating is not on enable register */
1489                 /* src[reg_gating] |= gating_mask; */
1490
1491                 for (i = base; i < end; i++) {
1492                         if (i != reg_en)
1493                                 writel_relaxed(src[i], dst + i);
1494                 }
1495
1496                 writel(src[reg_en], dst + reg_en);
1497                 dsb(sy);
1498
1499                 time_record(reg->task, 0);
1500         } break;
1501         case VPU_DEC: {
1502                 u32 *dst = data->dec_dev.regs;
1503                 u32 len = hw_info->dec_reg_num;
1504                 u32 base = hw_info->base_dec;
1505                 u32 end  = hw_info->end_dec;
1506
1507                 pservice->reg_codec = reg;
1508
1509                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1510                           base, end, reg_en, enable_mask, gating_mask);
1511
1512                 VDPU_CLEAN_CACHE(dst);
1513
1514                 /* on rkvdec set cache size to 64 bytes */
1515                 if (pservice->dev_id == VCODEC_DEVICE_ID_RKVDEC) {
1516                         u32 *cache_base = dst + 0x100;
1517                         u32 val = (debug & DEBUG_CACHE_32B) ? (0x3) : (0x13);
1518                         writel_relaxed(val, cache_base + 0x07);
1519                         writel_relaxed(val, cache_base + 0x17);
1520                 }
1521
1522                 if (debug & DEBUG_SET_REG)
1523                         for (i = 0; i < len; i++)
1524                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1525                                           i, src[i]);
1526                 /*
1527                  * NOTE: The end register is invalid. Do NOT write to it
1528                  *       Also the base register must be written
1529                  */
1530                 for (i = base; i < end; i++) {
1531                         if (i != reg_en)
1532                                 writel_relaxed(src[i], dst + i);
1533                 }
1534
1535                 writel(src[reg_en] | gating_mask, dst + reg_en);
1536                 dsb(sy);
1537
1538                 time_record(reg->task, 0);
1539         } break;
1540         case VPU_PP: {
1541                 u32 *dst = data->dec_dev.regs;
1542                 u32 base = hw_info->base_pp;
1543                 u32 end  = hw_info->end_pp;
1544
1545                 pservice->reg_pproc = reg;
1546
1547                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1548                           base, end, reg_en, enable_mask, gating_mask);
1549
1550                 if (debug & DEBUG_SET_REG)
1551                         for (i = base; i < end; i++)
1552                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1553                                           i, src[i]);
1554
1555                 for (i = base; i < end; i++) {
1556                         if (i != reg_en)
1557                                 writel_relaxed(src[i], dst + i);
1558                 }
1559
1560                 writel(src[reg_en] | gating_mask, dst + reg_en);
1561                 dsb(sy);
1562
1563                 time_record(reg->task, 0);
1564         } break;
1565         case VPU_DEC_PP: {
1566                 u32 *dst = data->dec_dev.regs;
1567                 u32 base = hw_info->base_dec_pp;
1568                 u32 end  = hw_info->end_dec_pp;
1569
1570                 pservice->reg_codec = reg;
1571                 pservice->reg_pproc = reg;
1572
1573                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1574                           base, end, reg_en, enable_mask, gating_mask);
1575
1576                 /* VDPU_SOFT_RESET(dst); */
1577                 VDPU_CLEAN_CACHE(dst);
1578
1579                 if (debug & DEBUG_SET_REG)
1580                         for (i = base; i < end; i++)
1581                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1582                                           i, src[i]);
1583
1584                 for (i = base; i < end; i++) {
1585                         if (i != reg_en)
1586                                 writel_relaxed(src[i], dst + i);
1587                 }
1588
1589                 /* NOTE: dec output must be disabled */
1590
1591                 writel(src[reg_en] | gating_mask, dst + reg_en);
1592                 dsb(sy);
1593
1594                 time_record(reg->task, 0);
1595         } break;
1596         default: {
1597                 vpu_err("error: unsupported session type %d\n", reg->type);
1598                 atomic_sub(1, &pservice->total_running);
1599                 atomic_sub(1, &reg->session->task_running);
1600         } break;
1601         }
1602
1603         vpu_debug_leave();
1604 }
1605
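/*
 * try_set_reg - scheduler for the waiting list.
 *
 * Takes the head of pservice->waiting and decides whether it may run,
 * given which of reg_codec/reg_pproc are currently busy, whether a
 * reset has been requested, and the auto_freq restriction that the
 * clock must not be changed while the other unit is working.
 */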
1606 static void try_set_reg(struct vpu_subdev_data *data)
1607 {
1608         struct vpu_service_info *pservice = data->pservice;
1609
1610         vpu_debug_enter();
1611
1612         mutex_lock(&pservice->shutdown_lock);
1613         if (atomic_read(&pservice->service_on) == 0) {
1614                 mutex_unlock(&pservice->shutdown_lock);
1615                 return;
1616         }
1617         if (!list_empty(&pservice->waiting)) {
1618                 struct vpu_reg *reg_codec = pservice->reg_codec;
1619                 struct vpu_reg *reg_pproc = pservice->reg_pproc;
1620                 int can_set = 0;
1621                 bool change_able = (reg_codec == NULL) && (reg_pproc == NULL);
1622                 int reset_request = atomic_read(&pservice->reset_request);
1623                 struct vpu_reg *reg = list_entry(pservice->waiting.next,
1624                                 struct vpu_reg, status_link);
1625
1626                 if (change_able || !reset_request) {
1627                         switch (reg->type) {
1628                         case VPU_ENC: {
1629                                 if (change_able)
1630                                         can_set = 1;
1631                         } break;
1632                         case VPU_DEC: {
1633                                 if (reg_codec == NULL)
1634                                         can_set = 1;
1635                                 if (pservice->auto_freq && (reg_pproc != NULL))
1636                                         can_set = 0;
1637                         } break;
1638                         case VPU_PP: {
1639                                 if (reg_codec == NULL) {
1640                                         if (reg_pproc == NULL)
1641                                                 can_set = 1;
1642                                 } else {
1643                                         if ((reg_codec->type == VPU_DEC) &&
1644                                             (reg_pproc == NULL))
1645                                                 can_set = 1;
1646
1647                                         /*
1648                                          * NOTE:
1649                                          * cannot change frequency
1650                                          * when vpu is working
1651                                          */
1652                                         if (pservice->auto_freq)
1653                                                 can_set = 0;
1654                                 }
1655                         } break;
1656                         case VPU_DEC_PP: {
1657                                 if (change_able)
1658                                         can_set = 1;
1659                         } break;
1660                         default: {
1661                                 dev_err(pservice->dev,
1662                                         "undefined reg type %d\n",
1663                                         reg->type);
1664                         } break;
1665                         }
1666                 }
1667
1668                 /* then check reset request */
1669                 if (reset_request && !change_able)
1670                         reset_request = 0;
1671
1672                 /* do reset before setting registers */
1673                 if (reset_request)
1674                         vpu_reset(data);
1675
1676                 if (can_set) {
1677                         reg_from_wait_to_run(pservice, reg);
1678                         reg_copy_to_hw(reg->data, reg);
1679                 }
1680         }
1681
1682         mutex_unlock(&pservice->shutdown_lock);
1683         vpu_debug_leave();
1684 }
1685
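/*
 * return_reg - copy a finished register set back to user space and
 * release it. The copy starts at the base offset that matches the
 * session type (encoder uses 0, pp uses base_pp, dec and dec+pp use
 * base_dec_pp).
 */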
1686 static int return_reg(struct vpu_subdev_data *data,
1687                       struct vpu_reg *reg, u32 __user *dst)
1688 {
1689         struct vpu_hw_info *hw_info = data->hw_info;
1690         size_t size = reg->size;
1691         u32 base;
1692
1693         vpu_debug_enter();
1694         switch (reg->type) {
1695         case VPU_ENC: {
1696                 base = 0;
1697         } break;
1698         case VPU_DEC: {
1699                 base = hw_info->base_dec_pp;
1700         } break;
1701         case VPU_PP: {
1702                 base = hw_info->base_pp;
1703         } break;
1704         case VPU_DEC_PP: {
1705                 base = hw_info->base_dec_pp;
1706         } break;
1707         default: {
1708                 vpu_err("error: copy reg to user with unknown type %d\n",
1709                         reg->type);
1710                 return -EFAULT;
1711         } break;
1712         }
1713
1714         if (copy_to_user(dst, &reg->reg[base], size)) {
1715                 vpu_err("error: copy_to_user failed\n");
1716                 return -EFAULT;
1717         }
1718
1719         reg_deinit(data, reg);
1720         vpu_debug_leave();
1721         return 0;
1722 }
1723
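/*
 * vpu_service_ioctl - main user space entry point.
 *
 * Illustrative only, not part of the driver: a minimal sketch of the
 * expected call sequence from a client, assuming the vpu_request
 * layout declared in vcodec_service.h ("fd" and "regs" below are
 * placeholders chosen for the example):
 *
 *	struct vpu_request req = { .req = regs, .size = sizeof(regs) };
 *
 *	ioctl(fd, VPU_IOC_SET_CLIENT_TYPE, VPU_DEC);
 *	ioctl(fd, VPU_IOC_SET_REG, &req);	// queue one task
 *	ioctl(fd, VPU_IOC_GET_REG, &req);	// wait for it, read back regs
 */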
1724 static long vpu_service_ioctl(struct file *filp, unsigned int cmd,
1725                               unsigned long arg)
1726 {
1727         struct vpu_subdev_data *data =
1728                 container_of(filp->f_path.dentry->d_inode->i_cdev,
1729                              struct vpu_subdev_data, cdev);
1730         struct vpu_service_info *pservice = data->pservice;
1731         struct vpu_session *session = (struct vpu_session *)filp->private_data;
1732
1733         vpu_debug_enter();
1734         if (NULL == session)
1735                 return -EINVAL;
1736
1737         switch (cmd) {
1738         case VPU_IOC_SET_CLIENT_TYPE: {
1739                 session->type = (enum VPU_CLIENT_TYPE)arg;
1740                 vpu_debug(DEBUG_IOCTL, "pid %d set client type %d\n",
1741                           session->pid, session->type);
1742         } break;
1743         case VPU_IOC_GET_HW_FUSE_STATUS: {
1744                 struct vpu_request req;
1745
1746                 vpu_debug(DEBUG_IOCTL, "pid %d get hw status %d\n",
1747                           session->pid, session->type);
1748                 if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
1749                         vpu_err("error: get hw status copy_from_user failed\n");
1750                         return -EFAULT;
1751                 } else {
1752                         void *config = (session->type != VPU_ENC) ?
1753                                        ((void *)&pservice->dec_config) :
1754                                        ((void *)&pservice->enc_config);
1755                         size_t size = (session->type != VPU_ENC) ?
1756                                       (sizeof(struct vpu_dec_config)) :
1757                                       (sizeof(struct vpu_enc_config));
1758                         if (copy_to_user((void __user *)req.req,
1759                                          config, size)) {
1760                                 vpu_err("error: get hw status copy_to_user failed type %d\n",
1761                                         session->type);
1762                                 return -EFAULT;
1763                         }
1764                 }
1765         } break;
1766         case VPU_IOC_SET_REG: {
1767                 struct vpu_request req;
1768                 struct vpu_reg *reg;
1769
1770                 vpu_service_power_on(data, pservice);
1771
1772                 vpu_debug(DEBUG_IOCTL, "pid %d set reg type %d\n",
1773                           session->pid, session->type);
1774                 if (copy_from_user(&req, (void __user *)arg,
1775                                    sizeof(struct vpu_request))) {
1776                         vpu_err("error: set reg copy_from_user failed\n");
1777                         return -EFAULT;
1778                 }
1779
1780                 reg = reg_init(data, session, (void __user *)req.req, req.size);
1781                 if (NULL == reg) {
1782                         return -EFAULT;
1783                 } else {
1784                         mutex_lock(&pservice->lock);
1785                         try_set_reg(data);
1786                         mutex_unlock(&pservice->lock);
1787                 }
1788         } break;
1789         case VPU_IOC_GET_REG: {
1790                 struct vpu_request req;
1791                 struct vpu_reg *reg;
1792                 int ret;
1793
1794                 vpu_service_power_on(data, pservice);
1795
1796                 vpu_debug(DEBUG_IOCTL, "pid %d get reg type %d\n",
1797                           session->pid, session->type);
1798                 if (copy_from_user(&req, (void __user *)arg,
1799                                    sizeof(struct vpu_request))) {
1800                         vpu_err("error: get reg copy_from_user failed\n");
1801                         return -EFAULT;
1802                 }
1803
1804                 ret = wait_event_timeout(session->wait,
1805                                          !list_empty(&session->done),
1806                                          VPU_TIMEOUT_DELAY);
1807
1808                 if (!list_empty(&session->done)) {
1809                         if (ret < 0)
1810                                 vpu_err("warning: pid %d wait task error ret %d\n",
1811                                         session->pid, ret);
1812                         ret = 0;
1813                 } else {
1814                         if (unlikely(ret < 0)) {
1815                                 vpu_err("error: pid %d wait task ret %d\n",
1816                                         session->pid, ret);
1817                         } else if (ret == 0) {
1818                                 vpu_err("error: pid %d wait %d task done timeout\n",
1819                                         session->pid,
1820                                         atomic_read(&session->task_running));
1821                                 ret = -ETIMEDOUT;
1822                         }
1823                 }
1824
1825                 if (ret < 0) {
1826                         int task_running = atomic_read(&session->task_running);
1827
1828                         mutex_lock(&pservice->lock);
1829                         vpu_service_dump(pservice);
1830                         if (task_running) {
1831                                 atomic_set(&session->task_running, 0);
1832                                 atomic_sub(task_running,
1833                                            &pservice->total_running);
1834                                 dev_err(pservice->dev,
1835                                         "%d tasks are still running but have not returned, resetting hardware...",
1836                                         task_running);
1837                                 vpu_reset(data);
1838                                 dev_err(pservice->dev, "done\n");
1839                         }
1840                         vpu_service_session_clear(data, session);
1841                         mutex_unlock(&pservice->lock);
1842                         return ret;
1843                 }
1844
1845                 mutex_lock(&pservice->lock);
1846                 reg = list_entry(session->done.next,
1847                                  struct vpu_reg, session_link);
1848                 return_reg(data, reg, (u32 __user *)req.req);
1849                 mutex_unlock(&pservice->lock);
1850         } break;
1851         case VPU_IOC_PROBE_IOMMU_STATUS: {
1852                 int iommu_enable = 1;
1853
1854                 vpu_debug(DEBUG_IOCTL, "VPU_IOC_PROBE_IOMMU_STATUS\n");
1855
1856                 if (copy_to_user((void __user *)arg,
1857                                  &iommu_enable, sizeof(int))) {
1858                         vpu_err("error: iommu status copy_to_user failed\n");
1859                         return -EFAULT;
1860                 }
1861         } break;
1862         default: {
1863                 vpu_err("error: unknown vpu service ioctl cmd %x\n", cmd);
1864         } break;
1865         }
1866         vpu_debug_leave();
1867         return 0;
1868 }
1869
1870 #ifdef CONFIG_COMPAT
1871 static long compat_vpu_service_ioctl(struct file *filp, unsigned int cmd,
1872                                      unsigned long arg)
1873 {
1874         struct vpu_subdev_data *data =
1875                 container_of(filp->f_path.dentry->d_inode->i_cdev,
1876                              struct vpu_subdev_data, cdev);
1877         struct vpu_service_info *pservice = data->pservice;
1878         struct vpu_session *session = (struct vpu_session *)filp->private_data;
1879
1880         vpu_debug_enter();
1881         vpu_debug(3, "cmd %x, COMPAT_VPU_IOC_SET_CLIENT_TYPE %x\n", cmd,
1882                   (u32)COMPAT_VPU_IOC_SET_CLIENT_TYPE);
1883         if (NULL == session)
1884                 return -EINVAL;
1885
1886         switch (cmd) {
1887         case COMPAT_VPU_IOC_SET_CLIENT_TYPE: {
1888                 session->type = (enum VPU_CLIENT_TYPE)arg;
1889                 vpu_debug(DEBUG_IOCTL, "compat set client type %d\n",
1890                           session->type);
1891         } break;
1892         case COMPAT_VPU_IOC_GET_HW_FUSE_STATUS: {
1893                 struct compat_vpu_request req;
1894
1895                 vpu_debug(DEBUG_IOCTL, "compat get hw status %d\n",
1896                           session->type);
1897                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1898                                    sizeof(struct compat_vpu_request))) {
1899                         vpu_err("error: compat get hw status copy_from_user failed\n");
1900                         return -EFAULT;
1901                 } else {
1902                         void *config = (session->type != VPU_ENC) ?
1903                                        ((void *)&pservice->dec_config) :
1904                                        ((void *)&pservice->enc_config);
1905                         size_t size = (session->type != VPU_ENC) ?
1906                                       (sizeof(struct vpu_dec_config)) :
1907                                       (sizeof(struct vpu_enc_config));
1908
1909                         if (copy_to_user(compat_ptr((compat_uptr_t)req.req),
1910                                          config, size)) {
1911                                 vpu_err("error: compat get hw status copy_to_user failed type %d\n",
1912                                         session->type);
1913                                 return -EFAULT;
1914                         }
1915                 }
1916         } break;
1917         case COMPAT_VPU_IOC_SET_REG: {
1918                 struct compat_vpu_request req;
1919                 struct vpu_reg *reg;
1920
1921                 vpu_service_power_on(data, pservice);
1922
1923                 vpu_debug(DEBUG_IOCTL, "compat set reg type %d\n",
1924                           session->type);
1925                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1926                                    sizeof(struct compat_vpu_request))) {
1927                         vpu_err("compat set_reg copy_from_user failed\n");
1928                         return -EFAULT;
1929                 }
1930                 reg = reg_init(data, session,
1931                                compat_ptr((compat_uptr_t)req.req), req.size);
1932                 if (NULL == reg) {
1933                         return -EFAULT;
1934                 } else {
1935                         mutex_lock(&pservice->lock);
1936                         try_set_reg(data);
1937                         mutex_unlock(&pservice->lock);
1938                 }
1939         } break;
1940         case COMPAT_VPU_IOC_GET_REG: {
1941                 struct compat_vpu_request req;
1942                 struct vpu_reg *reg;
1943                 int ret;
1944
1945                 vpu_service_power_on(data, pservice);
1946
1947                 vpu_debug(DEBUG_IOCTL, "compat get reg type %d\n",
1948                           session->type);
1949                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1950                                    sizeof(struct compat_vpu_request))) {
1951                         vpu_err("compat get reg copy_from_user failed\n");
1952                         return -EFAULT;
1953                 }
1954
1955                 ret = wait_event_timeout(session->wait,
1956                                          !list_empty(&session->done),
1957                                          VPU_TIMEOUT_DELAY);
1958
1959                 if (!list_empty(&session->done)) {
1960                         if (ret < 0)
1961                                 vpu_err("warning: pid %d wait task error ret %d\n",
1962                                         session->pid, ret);
1963                         ret = 0;
1964                 } else {
1965                         if (unlikely(ret < 0)) {
1966                                 vpu_err("error: pid %d wait task ret %d\n",
1967                                         session->pid, ret);
1968                         } else if (ret == 0) {
1969                                 vpu_err("error: pid %d wait %d task done timeout\n",
1970                                         session->pid,
1971                                         atomic_read(&session->task_running));
1972                                 ret = -ETIMEDOUT;
1973                         }
1974                 }
1975
1976                 if (ret < 0) {
1977                         int task_running = atomic_read(&session->task_running);
1978
1979                         mutex_lock(&pservice->lock);
1980                         vpu_service_dump(pservice);
1981                         if (task_running) {
1982                                 atomic_set(&session->task_running, 0);
1983                                 atomic_sub(task_running,
1984                                            &pservice->total_running);
1985                                 dev_err(pservice->dev,
1986                                         "%d tasks are still running but have not returned, resetting hardware...",
1987                                         task_running);
1988                                 vpu_reset(data);
1989                                 dev_err(pservice->dev, "done\n");
1990                         }
1991                         vpu_service_session_clear(data, session);
1992                         mutex_unlock(&pservice->lock);
1993                         return ret;
1994                 }
1995
1996                 mutex_lock(&pservice->lock);
1997                 reg = list_entry(session->done.next,
1998                                  struct vpu_reg, session_link);
1999                 return_reg(data, reg, compat_ptr((compat_uptr_t)req.req));
2000                 mutex_unlock(&pservice->lock);
2001         } break;
2002         case COMPAT_VPU_IOC_PROBE_IOMMU_STATUS: {
2003                 int iommu_enable = 1;
2004
2005                 vpu_debug(DEBUG_IOCTL, "COMPAT_VPU_IOC_PROBE_IOMMU_STATUS\n");
2006
2007                 if (copy_to_user(compat_ptr((compat_uptr_t)arg),
2008                                  &iommu_enable, sizeof(int))) {
2009                         vpu_err("error: VPU_IOC_PROBE_IOMMU_STATUS copy_to_user failed\n");
2010                         return -EFAULT;
2011                 }
2012         } break;
2013         default: {
2014                 vpu_err("error: unknown vpu service ioctl cmd %x\n", cmd);
2015         } break;
2016         }
2017         vpu_debug_leave();
2018         return 0;
2019 }
2020 #endif
2021
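/*
 * vpu_service_check_hw - identify the hardware from its ID register.
 *
 * The upper 16 bits of register 0 are matched against vcodec_info_set
 * to pick the hw_info/task_info/trans_info tables for this sub-device.
 */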
2022 static int vpu_service_check_hw(struct vpu_subdev_data *data)
2023 {
2024         struct vpu_service_info *pservice = data->pservice;
2025         int ret = -EINVAL, i = 0;
2026         u32 hw_id = readl_relaxed(data->regs);
2027
2028         hw_id = (hw_id >> 16) & 0xFFFF;
2029         dev_info(pservice->dev, "checking hw id %x\n", hw_id);
2030         data->hw_info = NULL;
2031
2032         for (i = 0; i < ARRAY_SIZE(vcodec_info_set); i++) {
2033                 const struct vcodec_info *info = &vcodec_info_set[i];
2034
2035                 if (hw_id == info->hw_id) {
2036                         data->hw_id = info->hw_id;
2037                         data->hw_info = info->hw_info;
2038                         data->task_info = info->task_info;
2039                         data->trans_info = info->trans_info;
2040                         ret = 0;
2041                         break;
2042                 }
2043         }
2044         return ret;
2045 }
2046
2047 static int vpu_service_open(struct inode *inode, struct file *filp)
2048 {
2049         struct vpu_subdev_data *data = container_of(
2050                         inode->i_cdev, struct vpu_subdev_data, cdev);
2051         struct vpu_service_info *pservice = data->pservice;
2052         struct vpu_session *session = NULL;
2053
2054         vpu_debug_enter();
2055
2056         session = kzalloc(sizeof(*session), GFP_KERNEL);
2057         if (!session) {
2058                 vpu_err("error: unable to allocate memory for vpu_session\n");
2059                 return -ENOMEM;
2060         }
2061
2062         data->iommu_info->debug_level = debug;
2063
2064         session->type   = VPU_TYPE_BUTT;
2065         session->pid    = current->pid;
2066         INIT_LIST_HEAD(&session->waiting);
2067         INIT_LIST_HEAD(&session->running);
2068         INIT_LIST_HEAD(&session->done);
2069         INIT_LIST_HEAD(&session->list_session);
2070         init_waitqueue_head(&session->wait);
2071         atomic_set(&session->task_running, 0);
2072         mutex_lock(&pservice->lock);
2073         list_add_tail(&session->list_session, &pservice->session);
2074         filp->private_data = (void *)session;
2075         mutex_unlock(&pservice->lock);
2076
2077         dev_dbg(pservice->dev, "dev opened\n");
2078         vpu_debug_leave();
2079         return nonseekable_open(inode, filp);
2080 }
2081
2082 static int vpu_service_release(struct inode *inode, struct file *filp)
2083 {
2084         struct vpu_subdev_data *data = container_of(
2085                         inode->i_cdev, struct vpu_subdev_data, cdev);
2086         struct vpu_service_info *pservice = data->pservice;
2087         int task_running;
2088         struct vpu_session *session = (struct vpu_session *)filp->private_data;
2089
2090         vpu_debug_enter();
2091         if (NULL == session)
2092                 return -EINVAL;
2093
2094         task_running = atomic_read(&session->task_running);
2095         if (task_running) {
2096                 dev_err(pservice->dev,
2097                         "error: session %d still has %d tasks running when closing\n",
2098                         session->pid, task_running);
2099                 msleep(50);
2100         }
2101         wake_up(&session->wait);
2102
2103         vpu_service_power_on(data, pservice);
2104         mutex_lock(&pservice->lock);
2105         /* remove this filp from the asynchronously notified filps */
2106         list_del_init(&session->list_session);
2107         vpu_service_session_clear(data, session);
2108         vcodec_iommu_clear(data->iommu_info, session);
2109         kfree(session);
2110         filp->private_data = NULL;
2111         mutex_unlock(&pservice->lock);
2112
2113         dev_info(pservice->dev, "closed\n");
2114         vpu_debug_leave();
2115         return 0;
2116 }
2117
2118 static const struct file_operations vpu_service_fops = {
2119         .unlocked_ioctl = vpu_service_ioctl,
2120         .open           = vpu_service_open,
2121         .release        = vpu_service_release,
2122 #ifdef CONFIG_COMPAT
2123         .compat_ioctl   = compat_vpu_service_ioctl,
2124 #endif
2125 };
2126
2127 static irqreturn_t vdpu_irq(int irq, void *dev_id);
2128 static irqreturn_t vdpu_isr(int irq, void *dev_id);
2129 static irqreturn_t vepu_irq(int irq, void *dev_id);
2130 static irqreturn_t vepu_isr(int irq, void *dev_id);
2131 static void get_hw_info(struct vpu_subdev_data *data);
2132
2133 static struct device *rockchip_get_sysmmu_dev(const char *compt)
2134 {
2135         struct device_node *dn = NULL;
2136         struct platform_device *pd = NULL;
2137         struct device *ret = NULL;
2138
2139         dn = of_find_compatible_node(NULL, NULL, compt);
2140         if (!dn) {
2141                 pr_err("can't find device node %s\n", compt);
2142                 return NULL;
2143         }
2144
2145         pd = of_find_device_by_node(dn);
2146         if (!pd) {
2147                 pr_err("can't find platform device in device node %s\n", compt);
2148                 return  NULL;
2149         }
2150         ret = &pd->dev;
2151
2152         return ret;
2153 }
2154
2155 #ifdef CONFIG_IOMMU_API
2156 static inline void platform_set_sysmmu(struct device *iommu,
2157                                        struct device *dev)
2158 {
2159         dev->archdata.iommu = iommu;
2160 }
2161 #else
2162 static inline void platform_set_sysmmu(struct device *iommu,
2163                                        struct device *dev)
2164 {
2165 }
2166 #endif
2167
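/*
 * vcodec_sysmmu_fault_hdl - IOMMU page fault handler.
 *
 * Dumps the memory regions mapped for the currently running register
 * set and the raw decoder registers, then resets the hardware so the
 * service can recover.
 */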
2168 int vcodec_sysmmu_fault_hdl(struct device *dev,
2169                             enum rk_iommu_inttype itype,
2170                             unsigned long pgtable_base,
2171                             unsigned long fault_addr, unsigned int status)
2172 {
2173         struct platform_device *pdev;
2174         struct vpu_service_info *pservice;
2175         struct vpu_subdev_data *data;
2176
2177         vpu_debug_enter();
2178
2179         if (dev == NULL) {
2180                 pr_err("invalid NULL dev\n");
2181                 return 0;
2182         }
2183
2184         pdev = container_of(dev, struct platform_device, dev);
2185         if (pdev == NULL) {
2186                 pr_err("invalid NULL platform_device\n");
2187                 return 0;
2188         }
2189
2190         data = platform_get_drvdata(pdev);
2191         if (data == NULL) {
2192                 pr_err("invalid NULL vpu_subdev_data\n");
2193                 return 0;
2194         }
2195
2196         pservice = data->pservice;
2197         if (pservice == NULL) {
2198                 pr_err("invalid NULL vpu_service_info\n");
2199                 return 0;
2200         }
2201
2202         if (pservice->reg_codec) {
2203                 struct vpu_reg *reg = pservice->reg_codec;
2204                 struct vcodec_mem_region *mem, *n;
2205                 int i = 0;
2206
2207                 pr_err("vcodec, fault addr 0x%08lx\n", fault_addr);
2208                 if (!list_empty(&reg->mem_region_list)) {
2209                         list_for_each_entry_safe(mem, n, &reg->mem_region_list,
2210                                                  reg_lnk) {
2211                                 pr_err("vcodec, reg[%02u] mem region [%02d] 0x%lx %lx\n",
2212                                        mem->reg_idx, i, mem->iova, mem->len);
2213                                 i++;
2214                         }
2215                 } else {
2216                         pr_err("no memory region mapped\n");
2217                 }
2218
2219                 if (reg->data) {
2220                         struct vpu_subdev_data *data = reg->data;
2221                         u32 *base = (u32 *)data->dec_dev.regs;
2222                         u32 len = data->hw_info->dec_reg_num;
2223
2224                         pr_err("current error register set:\n");
2225
2226                         for (i = 0; i < len; i++)
2227                                 pr_err("reg[%02d] %08x\n",
2228                                        i, readl_relaxed(base + i));
2229                 }
2230
2231                 pr_alert("vcodec, page fault occur, reset hw\n");
2232
2233                 /* reg->reg[101] = 1; */
2234                 _vpu_reset(data);
2235         }
2236
2237         return 0;
2238 }
2239
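/*
 * vcodec_subdev_probe - set up one codec sub-device.
 *
 * Maps (or reuses) the register window, resolves the IOMMU either via
 * the "iommus" phandle or the legacy compatible-name lookup, checks the
 * hardware ID, requests the enc/dec interrupts and finally creates the
 * character device node for user space.
 */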
2240 static int vcodec_subdev_probe(struct platform_device *pdev,
2241                                struct vpu_service_info *pservice)
2242 {
2243         uint8_t *regs = NULL;
2244         int32_t ret = 0;
2245         uint32_t ioaddr = 0;
2246         struct resource *res = NULL;
2247         struct vpu_hw_info *hw_info = NULL;
2248         struct device *dev = &pdev->dev;
2249         struct device_node *np = pdev->dev.of_node;
2250         struct vpu_subdev_data *data = NULL;
2251         struct platform_device *sub_dev = NULL;
2252         struct device_node *sub_np = NULL;
2253         const char *name  = np->name;
2254         char mmu_dev_dts_name[40];
2255
2256         dev_info(dev, "probe device");
2257
2258         data = devm_kzalloc(dev, sizeof(struct vpu_subdev_data), GFP_KERNEL);
2259         if (!data)
2260                 return -ENOMEM;
2261
2262         data->pservice = pservice;
2263         data->dev = dev;
2264         of_property_read_u32(np, "dev_mode", (u32 *)&data->mode);
2265
2266         if (pservice->reg_base == 0) {
2267                 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2268                 data->regs = devm_ioremap_resource(dev, res);
2269                 if (IS_ERR(data->regs)) {
2270                         ret = PTR_ERR(data->regs);
2271                         goto err;
2272                 }
2273                 ioaddr = res->start;
2274         } else {
2275                 data->regs = pservice->reg_base;
2276                 ioaddr = pservice->ioaddr;
2277         }
2278
2279         sub_np = of_parse_phandle(np, "iommus", 0);
2280         if (sub_np) {
2281                 sub_dev = of_find_device_by_node(sub_np);
2282                 data->mmu_dev = &sub_dev->dev;
2283         }
2284
2285         /* Back to legacy iommu probe */
2286         if (!data->mmu_dev) {
2287                 switch (data->mode) {
2288                 case VCODEC_RUNNING_MODE_VPU:
2289                         sprintf(mmu_dev_dts_name,
2290                                 VPU_IOMMU_COMPATIBLE_NAME);
2291                         break;
2292                 case VCODEC_RUNNING_MODE_RKVDEC:
2293                         sprintf(mmu_dev_dts_name,
2294                                 VDEC_IOMMU_COMPATIBLE_NAME);
2295                         break;
2296                 case VCODEC_RUNNING_MODE_HEVC:
2297                 default:
2298                         sprintf(mmu_dev_dts_name,
2299                                 HEVC_IOMMU_COMPATIBLE_NAME);
2300                         break;
2301                 }
2302
2303                 data->mmu_dev =
2304                         rockchip_get_sysmmu_dev(mmu_dev_dts_name);
2305                 if (data->mmu_dev)
2306                         platform_set_sysmmu(data->mmu_dev, dev);
2307
2308                 rockchip_iovmm_set_fault_handler
2309                         (dev, vcodec_sysmmu_fault_hdl);
2310         }
2311
2312         dev_info(dev, "vpu mmu dev %p\n", data->mmu_dev);
2313
2314         clear_bit(MMU_ACTIVATED, &data->state);
2315         vpu_service_power_on(data, pservice);
2316
2317         ret = vpu_service_check_hw(data);
2318         if (ret < 0) {
2319                 vpu_err("error: hw info check failed\n");
2320                 goto err;
2321         }
2322
2323         hw_info = data->hw_info;
2324         regs = (u8 *)data->regs;
2325
2326         if (hw_info->dec_reg_num) {
2327                 data->dec_dev.iosize = hw_info->dec_io_size;
2328                 data->dec_dev.regs = (u32 *)(regs + hw_info->dec_offset);
2329         }
2330
2331         if (hw_info->enc_reg_num) {
2332                 data->enc_dev.iosize = hw_info->enc_io_size;
2333                 data->enc_dev.regs = (u32 *)(regs + hw_info->enc_offset);
2334         }
2335
2336         data->reg_size = max(hw_info->dec_io_size, hw_info->enc_io_size);
2337
2338         data->irq_enc = platform_get_irq_byname(pdev, "irq_enc");
2339         if (data->irq_enc > 0) {
2340                 ret = devm_request_threaded_irq(dev, data->irq_enc,
2341                                                 vepu_irq, vepu_isr,
2342                                                 IRQF_SHARED, dev_name(dev),
2343                                                 (void *)data);
2344                 if (ret) {
2345                         dev_err(dev, "error: can't request vepu irq %d\n",
2346                                 data->irq_enc);
2347                         goto err;
2348                 }
2349         }
2350         data->irq_dec = platform_get_irq_byname(pdev, "irq_dec");
2351         if (data->irq_dec > 0) {
2352                 ret = devm_request_threaded_irq(dev, data->irq_dec,
2353                                                 vdpu_irq, vdpu_isr,
2354                                                 IRQF_SHARED, dev_name(dev),
2355                                                 (void *)data);
2356                 if (ret) {
2357                         dev_err(dev, "error: can't request vdpu irq %d\n",
2358                                 data->irq_dec);
2359                         goto err;
2360                 }
2361         }
2362         atomic_set(&data->dec_dev.irq_count_codec, 0);
2363         atomic_set(&data->dec_dev.irq_count_pp, 0);
2364         atomic_set(&data->enc_dev.irq_count_codec, 0);
2365         atomic_set(&data->enc_dev.irq_count_pp, 0);
2366
2367         vcodec_enter_mode(data);
2368         of_property_read_u32(np, "allocator", (u32 *)&pservice->alloc_type);
2369         data->iommu_info = vcodec_iommu_info_create(dev, data->mmu_dev,
2370                                                     pservice->alloc_type);
2371         dev_info(dev, "allocator is %s\n", pservice->alloc_type == 1 ? "drm" :
2372                 (pservice->alloc_type == 2 ? "ion" : "null"));
2373         get_hw_info(data);
2374         pservice->auto_freq = true;
2375
2376         vcodec_exit_mode(data);
2377         /* create device node */
2378         ret = alloc_chrdev_region(&data->dev_t, 0, 1, name);
2379         if (ret) {
2380                 dev_err(dev, "alloc dev_t failed\n");
2381                 goto err;
2382         }
2383
2384         cdev_init(&data->cdev, &vpu_service_fops);
2385
2386         data->cdev.owner = THIS_MODULE;
2387         data->cdev.ops = &vpu_service_fops;
2388
2389         ret = cdev_add(&data->cdev, data->dev_t, 1);
2390
2391         if (ret) {
2392                 dev_err(dev, "add dev_t failed\n");
2393                 goto err;
2394         }
2395
2396         data->cls = class_create(THIS_MODULE, name);
2397
2398         if (IS_ERR(data->cls)) {
2399                 ret = PTR_ERR(data->cls);
2400                 dev_err(dev, "class_create err:%d\n", ret);
2401                 goto err;
2402         }
2403
2404         data->child_dev = device_create(data->cls, dev,
2405                 data->dev_t, "%s", name);
2406
2407         platform_set_drvdata(pdev, data);
2408
2409         INIT_LIST_HEAD(&data->lnk_service);
2410         list_add_tail(&data->lnk_service, &pservice->subdev_list);
2411
2412         return 0;
2413 err:
2414         if (data->child_dev) {
2415                 device_destroy(data->cls, data->dev_t);
2416                 cdev_del(&data->cdev);
2417                 unregister_chrdev_region(data->dev_t, 1);
2418         }
2419
2420         if (data->cls)
2421                 class_destroy(data->cls);
2422         return ret;
2423 }
2424
2425 static void vcodec_subdev_remove(struct vpu_subdev_data *data)
2426 {
2427         struct vpu_service_info *pservice = data->pservice;
2428
2429         vcodec_iommu_info_destroy(data->iommu_info);
2430         data->iommu_info = NULL;
2431
2432         mutex_lock(&pservice->lock);
2433         cancel_delayed_work_sync(&pservice->power_off_work);
2434         vpu_service_power_off(pservice);
2435         mutex_unlock(&pservice->lock);
2436
2437         device_destroy(data->cls, data->dev_t);
2438         class_destroy(data->cls);
2439         cdev_del(&data->cdev);
2440         unregister_chrdev_region(data->dev_t, 1);
2441
2442 #ifdef CONFIG_DEBUG_FS
2443         if (!IS_ERR_OR_NULL(data->debugfs_dir))
2444                 debugfs_remove_recursive(data->debugfs_dir);
2445 #endif
2446 }
2447
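/*
 * vcodec_read_property - pull the service-level settings out of the DT
 * node: sub-device count, combo mode bit/control register, the GRF
 * regmap (or the legacy static mapping on ARM) and the optional reset
 * lines.
 */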
2448 static void vcodec_read_property(struct device_node *np,
2449                                  struct vpu_service_info *pservice)
2450 {
2451         pservice->mode_bit = 0;
2452         pservice->mode_ctrl = 0;
2453         pservice->subcnt = 0;
2454         pservice->grf_base = NULL;
2455
2456         of_property_read_u32(np, "subcnt", &pservice->subcnt);
2457
2458         if (pservice->subcnt > 1) {
2459                 of_property_read_u32(np, "mode_bit", &pservice->mode_bit);
2460                 of_property_read_u32(np, "mode_ctrl", &pservice->mode_ctrl);
2461         }
2462 #ifdef CONFIG_MFD_SYSCON
2463         pservice->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
2464         if (IS_ERR_OR_NULL(pservice->grf)) {
2465                 pservice->grf = NULL;
2466 #ifdef CONFIG_ARM
2467                 pservice->grf_base = RK_GRF_VIRT;
2468 #else
2469                 vpu_err("can't find vpu grf property\n");
2470                 return;
2471 #endif
2472         }
2473 #else
2474 #ifdef CONFIG_ARM
2475         pservice->grf_base = RK_GRF_VIRT;
2476 #else
2477         vpu_err("can't find vpu grf property\n");
2478         return;
2479 #endif
2480 #endif
2481
2482 #ifdef CONFIG_RESET_CONTROLLER
2483         pservice->rst_a = devm_reset_control_get(pservice->dev, "video_a");
2484         pservice->rst_h = devm_reset_control_get(pservice->dev, "video_h");
2485         pservice->rst_v = devm_reset_control_get(pservice->dev, "video");
2486
2487         if (IS_ERR_OR_NULL(pservice->rst_a)) {
2488                 dev_warn(pservice->dev, "No aclk reset resource defined\n");
2489                 pservice->rst_a = NULL;
2490         }
2491
2492         if (IS_ERR_OR_NULL(pservice->rst_h)) {
2493                 dev_warn(pservice->dev, "No hclk reset resource defined\n");
2494                 pservice->rst_h = NULL;
2495         }
2496
2497         if (IS_ERR_OR_NULL(pservice->rst_v)) {
2498                 dev_warn(pservice->dev, "No core reset resource defined\n");
2499                 pservice->rst_v = NULL;
2500         }
2501 #endif
2502
2503         of_property_read_string(np, "name", (const char **)&pservice->name);
2504 }
2505
2506 static void vcodec_init_drvdata(struct vpu_service_info *pservice)
2507 {
2508         pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2509         pservice->curr_mode = -1;
2510
2511         wake_lock_init(&pservice->wake_lock, WAKE_LOCK_SUSPEND, "vpu");
2512         INIT_LIST_HEAD(&pservice->waiting);
2513         INIT_LIST_HEAD(&pservice->running);
2514         mutex_init(&pservice->lock);
2515         mutex_init(&pservice->shutdown_lock);
2516         atomic_set(&pservice->service_on, 1);
2517
2518         INIT_LIST_HEAD(&pservice->done);
2519         INIT_LIST_HEAD(&pservice->session);
2520         INIT_LIST_HEAD(&pservice->subdev_list);
2521
2522         pservice->reg_pproc     = NULL;
2523         atomic_set(&pservice->total_running, 0);
2524         atomic_set(&pservice->enabled,       0);
2525         atomic_set(&pservice->power_on_cnt,  0);
2526         atomic_set(&pservice->power_off_cnt, 0);
2527         atomic_set(&pservice->reset_request, 0);
2528
2529         INIT_DELAYED_WORK(&pservice->power_off_work, vpu_power_off_work);
2530         pservice->last.tv64 = 0;
2531
2532         pservice->alloc_type = 0;
2533 }
2534
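/*
 * vcodec_probe - platform driver probe.
 *
 * Reads the service properties from the device tree, initializes the
 * shared vpu_service_info, grabs clocks and the optional register
 * resource, enables runtime PM and then probes either the sub-devices
 * listed in "rockchip,sub" (when "subcnt" is present) or this device
 * itself.
 */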
2535 static int vcodec_probe(struct platform_device *pdev)
2536 {
2537         int i;
2538         int ret = 0;
2539         struct resource *res = NULL;
2540         struct device *dev = &pdev->dev;
2541         struct device_node *np = pdev->dev.of_node;
2542         struct vpu_service_info *pservice = NULL;
2543         struct vcodec_device_info *driver_data;
2544
2545         pservice = devm_kzalloc(dev, sizeof(struct vpu_service_info),
2546                                 GFP_KERNEL);
2547         if (!pservice)
2548                 return -ENOMEM;
2549         pservice->dev = dev;
2550
2551         driver_data = vcodec_get_drv_data(pdev);
2552         if (!driver_data)
2553                 return -EINVAL;
2554
2555         vcodec_read_property(np, pservice);
2556         vcodec_init_drvdata(pservice);
2557
2558         /* Underscore for label, hyphens for name */
2559         switch (driver_data->device_type) {
2560         case VCODEC_DEVICE_TYPE_VPUX:
2561                 pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2562                 break;
2563         case VCODEC_DEVICE_TYPE_VPUC:
2564                 pservice->dev_id = VCODEC_DEVICE_ID_COMBO;
2565                 break;
2566         case VCODEC_DEVICE_TYPE_HEVC:
2567                 pservice->dev_id = VCODEC_DEVICE_ID_HEVC;
2568                 break;
2569         case VCODEC_DEVICE_TYPE_RKVD:
2570                 pservice->dev_id = VCODEC_DEVICE_ID_RKVDEC;
2571                 break;
2572         default:
2573                 dev_err(dev, "unsupported device type\n");
2574                 return -ENODEV;
2575         }
2576
2577         ret = vpu_get_clk(pservice);
2578         if (ret < 0)
                     goto err;
2579
2580         if (of_property_read_bool(np, "reg")) {
2581                 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2582
2583                 pservice->reg_base = devm_ioremap_resource(pservice->dev, res);
2584                 if (IS_ERR(pservice->reg_base)) {
2585                         vpu_err("ioremap registers base failed\n");
2586                         ret = PTR_ERR(pservice->reg_base);
2587                         goto err;
2588                 }
2589                 pservice->ioaddr = res->start;
2590         } else {
2591                 pservice->reg_base = 0;
2592         }
2593
2594         pm_runtime_enable(dev);
2595
2596         if (of_property_read_bool(np, "subcnt")) {
2597                 for (i = 0; i < pservice->subcnt; i++) {
2598                         struct device_node *sub_np;
2599                         struct platform_device *sub_pdev;
2600
2601                         sub_np = of_parse_phandle(np, "rockchip,sub", i);
2602                         sub_pdev = of_find_device_by_node(sub_np);
2603
2604                         vcodec_subdev_probe(sub_pdev, pservice);
2605                 }
2606         } else {
2607                 vcodec_subdev_probe(pdev, pservice);
2608         }
2609
2610         vpu_service_power_off(pservice);
2611
2612         dev_info(dev, "init success\n");
2613
2614         return 0;
2615
2616 err:
2617         dev_info(dev, "init failed\n");
2618         vpu_service_power_off(pservice);
2619         wake_lock_destroy(&pservice->wake_lock);
2620
2621         return ret;
2622 }
2623
2624 static int vcodec_remove(struct platform_device *pdev)
2625 {
2626         struct vpu_subdev_data *data = platform_get_drvdata(pdev);
2627
2628         vcodec_subdev_remove(data);
2629
2630         pm_runtime_disable(data->pservice->dev);
2631
2632         return 0;
2633 }
2634
2635 static void vcodec_shutdown(struct platform_device *pdev)
2636 {
2637         struct vpu_subdev_data *data = platform_get_drvdata(pdev);
2638         struct vpu_service_info *pservice = data->pservice;
2639
2640         dev_info(&pdev->dev, "vcodec shutdown");
2641
2642         mutex_lock(&pservice->shutdown_lock);
2643         atomic_set(&pservice->service_on, 0);
2644         mutex_unlock(&pservice->shutdown_lock);
2645
2646         vcodec_exit_mode(data);
2647
2648         vpu_service_power_on(data, pservice);
2649         vpu_service_clear(data);
2650         vcodec_subdev_remove(data);
2651
2652         pm_runtime_disable(&pdev->dev);
2653 }
2654
2655 static const struct of_device_id vcodec_service_dt_ids[] = {
2656         {
2657                 .compatible = "rockchip,vpu_service",
2658                 .data = &vpu_device_info,
2659         },
2660         {
2661                 .compatible = "rockchip,hevc_service",
2662                 .data = &hevc_device_info,
2663         },
2664         {
2665                 .compatible = "rockchip,vpu_combo",
2666                 .data = &vpu_combo_device_info,
2667         },
2668         {
2669                 .compatible = "rockchip,rkvdec",
2670                 .data = &rkvd_device_info,
2671         },
2672         {},
2673 };
2674
2675 MODULE_DEVICE_TABLE(of, vcodec_service_dt_ids);
2676
2677 static void *vcodec_get_drv_data(struct platform_device *pdev)
2678 {
2679         struct vcodec_device_info *driver_data = NULL;
2680         const struct of_device_id *match;
2681
2682         match = of_match_node(vcodec_service_dt_ids, pdev->dev.of_node);
2683         if (match)
2684                 driver_data = (struct vcodec_device_info *)match->data;
2685
2686         return driver_data;
2687 }
2688
2689 static struct platform_driver vcodec_driver = {
2690         .probe = vcodec_probe,
2691         .remove = vcodec_remove,
2692         .shutdown = vcodec_shutdown,
2693         .driver = {
2694                 .name = "rk-vcodec",
2695                 .owner = THIS_MODULE,
2696                 .of_match_table = of_match_ptr(vcodec_service_dt_ids),
2697         },
2698 };
2699
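/*
 * get_hw_info - fill in the capability tables reported through
 * VPU_IOC_GET_HW_FUSE_STATUS and decide whether auto frequency scaling
 * is allowed for this running mode.
 */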
2700 static void get_hw_info(struct vpu_subdev_data *data)
2701 {
2702         struct vpu_service_info *pservice = data->pservice;
2703         struct vpu_dec_config *dec = &pservice->dec_config;
2704         struct vpu_enc_config *enc = &pservice->enc_config;
2705
2706         if (of_machine_is_compatible("rockchip,rk2928") ||
2707                         of_machine_is_compatible("rockchip,rk3036") ||
2708                         of_machine_is_compatible("rockchip,rk3066") ||
2709                         of_machine_is_compatible("rockchip,rk3126") ||
2710                         of_machine_is_compatible("rockchip,rk3188"))
2711                 dec->max_dec_pic_width = 1920;
2712         else
2713                 dec->max_dec_pic_width = 4096;
2714
2715         if (data->mode == VCODEC_RUNNING_MODE_VPU) {
2716                 dec->h264_support = 3;
2717                 dec->jpeg_support = 1;
2718                 dec->mpeg4_support = 2;
2719                 dec->vc1_support = 3;
2720                 dec->mpeg2_support = 1;
2721                 dec->pp_support = 1;
2722                 dec->sorenson_support = 1;
2723                 dec->ref_buf_support = 3;
2724                 dec->vp6_support = 1;
2725                 dec->vp7_support = 1;
2726                 dec->vp8_support = 1;
2727                 dec->avs_support = 1;
2728                 dec->jpeg_ext_support = 0;
2729                 dec->custom_mpeg4_support = 1;
2730                 dec->reserve = 0;
2731                 dec->mvc_support = 1;
2732
2733                 if (!of_machine_is_compatible("rockchip,rk3036")) {
2734                         u32 config_reg = readl_relaxed(data->enc_dev.regs + 63);
2735
2736                         enc->max_encoded_width = config_reg & ((1 << 11) - 1);
2737                         enc->h264_enabled = 1;
2738                         enc->mpeg4_enabled = (config_reg >> 26) & 1;
2739                         enc->jpeg_enabled = 1;
2740                         enc->vs_enabled = (config_reg >> 24) & 1;
2741                         enc->rgb_enabled = (config_reg >> 28) & 1;
2742                         enc->reg_size = data->reg_size;
2743                         enc->reserv[0] = 0;
2744                         enc->reserv[1] = 0;
2745                 }
2746
2747                 pservice->auto_freq = true;
2748                 vpu_debug(DEBUG_EXTRA_INFO, "vpu_service set to auto frequency mode\n");
2749                 atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
2750
2751                 pservice->bug_dec_addr = of_machine_is_compatible
2752                         ("rockchip,rk30xx");
2753         } else if (data->mode == VCODEC_RUNNING_MODE_RKVDEC) {
2754                 pservice->auto_freq = true;
2755                 atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
2756         } else {
2757                 /* disable frequency switch in hevc.*/
2758                 pservice->auto_freq = false;
2759         }
2760 }
2761
2762 static bool check_irq_err(struct vpu_task_info *task, u32 irq_status)
2763 {
2764         vpu_debug(DEBUG_IRQ_CHECK, "task %s status %08x mask %08x\n",
2765                   task->name, irq_status, task->error_mask);
2766
2767         return (task->error_mask & irq_status) ? true : false;
2768 }
2769
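/*
 * vdpu_irq - hard IRQ half for the decoder/post-processor.
 *
 * Reads and acknowledges the decoder and pp interrupt status, requests
 * a reset when an error bit is set and wakes the threaded handler
 * (vdpu_isr), which completes the register set and schedules the next
 * waiting task under pservice->lock.
 */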
2770 static irqreturn_t vdpu_irq(int irq, void *dev_id)
2771 {
2772         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2773         struct vpu_service_info *pservice = data->pservice;
2774         struct vpu_task_info *task = NULL;
2775         struct vpu_device *dev = &data->dec_dev;
2776         u32 hw_id = data->hw_info->hw_id;
2777         u32 raw_status;
2778         u32 dec_status;
2779
2780         task = &data->task_info[TASK_DEC];
2781
2782         raw_status = readl_relaxed(dev->regs + task->reg_irq);
2783         dec_status = raw_status;
2784
2785         vpu_debug(DEBUG_TASK_INFO, "vdpu_irq reg %d status %x mask: irq %x ready %x error %0x\n",
2786                   task->reg_irq, dec_status,
2787                   task->irq_mask, task->ready_mask, task->error_mask);
2788
2789         if (dec_status & task->irq_mask) {
2790                 time_record(task, 1);
2791                 vpu_debug(DEBUG_IRQ_STATUS, "vdpu_irq dec status %08x\n",
2792                           dec_status);
2793                 if ((dec_status & 0x40001) == 0x40001) {
2794                         do {
2795                                 dec_status =
2796                                         readl_relaxed(dev->regs +
2797                                                 task->reg_irq);
2798                         } while ((dec_status & 0x40001) == 0x40001);
2799                 }
2800
2801                 if (check_irq_err(task, dec_status))
2802                         atomic_add(1, &pservice->reset_request);
2803
2804                 writel_relaxed(0, dev->regs + task->reg_irq);
2805
2806                 /* set clock gating to save power */
2807                 writel(task->gating_mask, dev->regs + task->reg_en);
2808
2809                 atomic_add(1, &dev->irq_count_codec);
2810                 time_diff(task);
2811         }
2812
2813         task = &data->task_info[TASK_PP];
2814         if (hw_id != HEVC_ID && hw_id != RKV_DEC_ID) {
2815                 u32 pp_status = readl_relaxed(dev->regs + task->reg_irq);
2816
2817                 if (pp_status & task->irq_mask) {
2818                         time_record(task, 1);
2819                         vpu_debug(DEBUG_IRQ_STATUS, "vdpu_irq pp status %08x\n",
2820                                   pp_status);
2821
2822                         if (check_irq_err(task, pp_status))
2823                                 atomic_add(1, &pservice->reset_request);
2824
2825                         /* clear pp IRQ */
2826                         writel_relaxed(pp_status & (~task->irq_mask),
2827                                        dev->regs + task->reg_irq);
2828                         atomic_add(1, &dev->irq_count_pp);
2829                         time_diff(task);
2830                 }
2831         }
2832
2833         pservice->irq_status = raw_status;
2834
2835         if (atomic_read(&dev->irq_count_pp) ||
2836             atomic_read(&dev->irq_count_codec))
2837                 return IRQ_WAKE_THREAD;
2838         else
2839                 return IRQ_NONE;
2840 }
2841
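/*
 * Decoder threaded handler: under the service lock, move the finished
 * codec/pp register sets from the running state to done, soft-reset the
 * combined VPU to sidestep an unrecoverable timeout, and try to start
 * the next queued task.
 */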
2842 static irqreturn_t vdpu_isr(int irq, void *dev_id)
2843 {
2844         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2845         struct vpu_service_info *pservice = data->pservice;
2846         struct vpu_device *dev = &data->dec_dev;
2847
2848         mutex_lock(&pservice->lock);
2849         if (atomic_read(&dev->irq_count_codec)) {
2850                 atomic_sub(1, &dev->irq_count_codec);
2851                 if (pservice->reg_codec == NULL) {
2852                         vpu_err("error: dec isr with no task waiting\n");
2853                 } else {
2854                         reg_from_run_to_done(data, pservice->reg_codec);
2855                         /* avoid a vpu timeout we cannot recover from */
2856                         if (data->mode == VCODEC_RUNNING_MODE_VPU)
2857                                 VDPU_SOFT_RESET(data->regs);
2858                 }
2859         }
2860
2861         if (atomic_read(&dev->irq_count_pp)) {
2862                 atomic_sub(1, &dev->irq_count_pp);
2863                 if (pservice->reg_pproc == NULL)
2864                         vpu_err("error: pp isr with no task waiting\n");
2865                 else
2866                         reg_from_run_to_done(data, pservice->reg_pproc);
2867         }
2868         try_set_reg(data);
2869         mutex_unlock(&pservice->lock);
2870         return IRQ_HANDLED;
2871 }
2872
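/*
 * Encoder hard IRQ handler: read and ack the encoder interrupt status,
 * request a reset when error bits are set, then wake the threaded
 * handler.
 */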
2873 static irqreturn_t vepu_irq(int irq, void *dev_id)
2874 {
2875         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2876         struct vpu_service_info *pservice = data->pservice;
2877         struct vpu_task_info *task = &data->task_info[TASK_ENC];
2878         struct vpu_device *dev = &data->enc_dev;
2879         u32 irq_status;
2880
2881         irq_status = readl_relaxed(dev->regs + task->reg_irq);
2882
2883         vpu_debug(DEBUG_TASK_INFO, "vepu_irq reg %d status %x mask: irq %x ready %x error %x\n",
2884                   task->reg_irq, irq_status,
2885                   task->irq_mask, task->ready_mask, task->error_mask);
2886
2887         vpu_debug(DEBUG_IRQ_STATUS, "vepu_irq enc status %08x\n", irq_status);
2888
2889         if (likely(irq_status & task->irq_mask)) {
2890                 time_record(task, 1);
2891
2892                 if (check_irq_err(task, irq_status))
2893                         atomic_add(1, &pservice->reset_request);
2894
2895                 /* clear enc IRQ */
2896                 writel_relaxed(irq_status & (~task->irq_mask),
2897                                dev->regs + task->reg_irq);
2898
2899                 atomic_add(1, &dev->irq_count_codec);
2900                 time_diff(task);
2901         }
2902
2903         pservice->irq_status = irq_status;
2904
2905         if (atomic_read(&dev->irq_count_codec))
2906                 return IRQ_WAKE_THREAD;
2907         else
2908                 return IRQ_NONE;
2909 }
2910
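/*
 * Encoder threaded handler: under the service lock, complete the
 * finished encode task and try to start the next queued one.
 */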
2911 static irqreturn_t vepu_isr(int irq, void *dev_id)
2912 {
2913         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2914         struct vpu_service_info *pservice = data->pservice;
2915         struct vpu_device *dev = &data->enc_dev;
2916
2917         mutex_lock(&pservice->lock);
2918         if (atomic_read(&dev->irq_count_codec)) {
2919                 atomic_sub(1, &dev->irq_count_codec);
2920                 if (pservice->reg_codec == NULL)
2921                         vpu_err("error: enc isr with no task waiting\n");
2922                 else
2923                         reg_from_run_to_done(data, pservice->reg_codec);
2924         }
2925         try_set_reg(data);
2926         mutex_unlock(&pservice->lock);
2927         return IRQ_HANDLED;
2928 }
2929
2930 module_platform_driver(vcodec_driver);
2931 MODULE_LICENSE("GPL v2");