1 /* arch/arm/mach-rk29/vpu.c
3 * Copyright (C) 2010 ROCKCHIP, Inc.
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
16 #ifdef CONFIG_RK29_VPU_DEBUG
18 #define pr_fmt(fmt) "VPU_SERVICE: %s: " fmt, __func__
20 #define pr_fmt(fmt) "VPU_SERVICE: " fmt
24 #include <linux/clk.h>
25 #include <linux/delay.h>
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
29 #include <linux/kernel.h>
30 #include <linux/module.h>
32 #include <linux/ioport.h>
33 #include <linux/miscdevice.h>
35 #include <linux/poll.h>
36 #include <linux/platform_device.h>
37 #include <linux/workqueue.h>
38 #include <linux/slab.h>
40 #include <asm/uaccess.h>
42 #include <mach/irqs.h>
43 #include <mach/vpu_service.h>
44 #include <mach/rk29_iomap.h>
/* Word offsets of the interrupt-status registers inside each register bank. */
#define DEC_INTERRUPT_REGISTER	1
#define PP_INTERRUPT_REGISTER	60
#define ENC_INTERRUPT_REGISTER	1

/* "interrupt raised" bits within the registers above. */
#define DEC_INTERRUPT_BIT	0x100
#define PP_INTERRUPT_BIT	0x100
#define ENC_INTERRUPT_BIT	0x1

/* Register-file sizes of each engine, in 32-bit words. */
#define REG_NUM_DEC	(60)
#define REG_NUM_PP	(41)
#define REG_NUM_ENC	(96)
#define REG_NUM_DEC_PP	(REG_NUM_DEC+REG_NUM_PP)
/* word count -> byte count */
#define SIZE_REG(reg)	((reg)*4)

#define DEC_IO_SIZE	((100 + 1) * 4) /* bytes */
#define ENC_IO_SIZE	(96 * 4)	/* bytes */
/* Product IDs accepted by vpu_service_check_hw_id() (upper 16 bits of reg 0). */
static const u16 dec_hw_ids[] = { 0x8190, 0x8170, 0x9170, 0x9190, 0x6731 };
static const u16 enc_hw_ids[] = { 0x6280, 0x7280, 0x8270 };

/* Per-engine "enable" register index plus clock-gate register index/bit. */
#define VPU_REG_EN_ENC	14
#define VPU_REG_ENC_GATE	2
#define VPU_REG_ENC_GATE_BIT	(1<<4)

#define VPU_REG_EN_DEC	1
#define VPU_REG_DEC_GATE	2
#define VPU_REG_DEC_GATE_BIT	(1<<10)
#define VPU_REG_EN_PP	0
#define VPU_REG_PP_GATE	1
#define VPU_REG_PP_GATE_BIT	(1<<8)
#define VPU_REG_EN_DEC_PP	1
#define VPU_REG_DEC_PP_GATE	61
#define VPU_REG_DEC_PP_GATE_BIT	(1<<8)
/*
 * struct for process session which connect to vpu
 *
 * One instance per opened /dev/vpu_service fd; tracks the register sets
 * this client owns in each lifecycle stage.  (The client-type and pid
 * members and the closing brace are elided in this extract.)
 *
 * @author ChenHengming (2011-5-3)
 */
typedef struct vpu_session {
	/* a linked list of data so we can access them for debugging */
	struct list_head list_session;
	/* a linked list of register data waiting for process */
	struct list_head waiting;
	/* a linked list of register data in processing */
	struct list_head running;
	/* a linked list of register data processed */
	struct list_head done;
	/* VPU_IOC_GET_REG sleeps here until a set reaches `done` */
	wait_queue_head_t wait;
/*
 * struct for process register set
 *
 * One queued hardware job: the raw register image plus links into the
 * owning session's lists and the global service lists.
 *
 * @author ChenHengming (2011-5-4)
 */
typedef struct vpu_reg {
	VPU_CLIENT_TYPE type;	/* which engine (ENC/DEC/PP/DEC_PP) this set targets */
	vpu_session *session;	/* owning client session */
	struct list_head session_link;		/* link to vpu service session */
	struct list_head status_link;		/* link to register set list */
	/* register image; DEC+PP is the largest layout, so it bounds the array */
	unsigned long reg[VPU_REG_NUM_DEC_PP];
/* One memory-mapped register bank (encoder, or shared decoder/pp). */
typedef struct vpu_device {
	unsigned long iobaseaddr;	/* physical base of the register bank */
	/* (the iosize member is elided in this extract) */
	volatile u32 *hwregs;	/* ioremapped virtual base, NULL until reserved */
/* Global driver state; there is exactly one instance, `service`. */
typedef struct vpu_service_info {
	struct workqueue_struct *workqueue;	/* runs the delayed power-off work */
	struct list_head waiting;	/* link to link_reg in struct vpu_reg */
	struct list_head running;	/* link to link_reg in struct vpu_reg */
	struct list_head done;	/* link to link_reg in struct vpu_reg */
	struct list_head session;	/* link to list_session in struct vpu_session */
	atomic_t task_running;	/* jobs currently owned by the hardware */
	/* (lock, reg_codec, reg_pproc and enabled are elided in this extract) */
	VPUHwDecConfig_t dec_config;	/* decoder capabilities, filled by get_hw_info() */
	VPUHwEncConfig_t enc_config;	/* encoder capabilities, filled by get_hw_info() */
/* Userspace request descriptor used by the ioctls (body elided in this
 * extract — presumably { unsigned long *req; u32 size; }; TODO confirm
 * against mach/vpu_service.h). */
typedef struct vpu_request

/* Clock handles looked up by global name in vpu_get_clk(). */
static struct clk *aclk_vepu;
static struct clk *hclk_vepu;
static struct clk *aclk_ddr_vepu;
static struct clk *hclk_cpu_vcodec;
/* The single service instance plus the two register banks. */
static vpu_service_info service;
static vpu_device dec_dev;
static vpu_device enc_dev;

static void vpu_service_power_off_work_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(vpu_service_power_off_work, vpu_service_power_off_work_func);
/* Gate the power domain off after this much idle time. */
#define POWER_OFF_DELAY	3*HZ /* 3s */
156 static void vpu_get_clk(void)
158 aclk_vepu = clk_get(NULL, "aclk_vepu");
159 hclk_vepu = clk_get(NULL, "hclk_vepu");
160 aclk_ddr_vepu = clk_get(NULL, "aclk_ddr_vepu");
161 hclk_cpu_vcodec = clk_get(NULL, "hclk_cpu_vcodec");
164 static void vpu_put_clk(void)
168 clk_put(aclk_ddr_vepu);
169 clk_put(hclk_cpu_vcodec);
172 static void vpu_service_power_on(void)
177 printk("vpu: power on\n");
178 clk_enable(aclk_vepu);
179 clk_enable(hclk_vepu);
180 clk_enable(hclk_cpu_vcodec);
182 pmu_set_power_domain(PD_VCODEC, true);
184 clk_enable(aclk_ddr_vepu);
185 service.enabled = true;
188 static void vpu_service_power_off(void)
190 if (!service.enabled)
193 while(atomic_read(&service.task_running))
196 printk("vpu: power off\n");
197 pmu_set_power_domain(PD_VCODEC, false);
199 clk_disable(hclk_cpu_vcodec);
200 clk_disable(aclk_ddr_vepu);
201 clk_disable(hclk_vepu);
202 clk_disable(aclk_vepu);
204 service.enabled = false;
/* Delayed-work body: power the VPU down once it has been idle for
 * POWER_OFF_DELAY.  Queued/cancelled by try_set_reg() and the PM hooks. */
static void vpu_service_power_off_work_func(struct work_struct *work)
{
	vpu_service_power_off();
}
213 static vpu_reg *reg_init(vpu_session *session, void __user *src, unsigned long size)
216 vpu_reg *reg = kmalloc(sizeof(vpu_reg), GFP_KERNEL);
218 pr_err("kmalloc fail in reg_init\n");
222 reg->session = session;
223 reg->type = session->type;
225 INIT_LIST_HEAD(®->session_link);
226 INIT_LIST_HEAD(®->status_link);
228 if (copy_from_user(®->reg[0], (void __user *)src, size)) {
229 pr_err("copy_from_user failed in reg_init\n");
234 spin_lock_irqsave(&service.lock, flag);
235 list_add_tail(®->status_link, &service.waiting);
236 list_add_tail(®->session_link, &session->waiting);
237 spin_unlock_irqrestore(&service.lock, flag);
242 static void reg_deinit(vpu_reg *reg)
244 list_del_init(®->session_link);
245 list_del_init(®->status_link);
247 if (reg == service.reg_codec) service.reg_codec = NULL;
248 if (reg == service.reg_pproc) service.reg_pproc = NULL;
251 static void reg_from_wait_to_run(vpu_reg *reg)
253 list_del_init(®->status_link);
254 list_add_tail(®->status_link, &service.running);
256 list_del_init(®->session_link);
257 list_add_tail(®->session_link, ®->session->running);
259 atomic_add(1, &service.task_running);
262 static void reg_copy_from_hw(vpu_reg *reg, volatile u32 *src, u32 count)
265 u32 *dst = (u32 *)®->reg[0];
266 for (i = 0; i < count; i++)
270 static void reg_from_run_to_done(vpu_reg *reg)
272 spin_lock(&service.lock);
273 list_del_init(®->status_link);
274 list_add_tail(®->status_link, &service.done);
276 list_del_init(®->session_link);
277 list_add_tail(®->session_link, ®->session->done);
281 service.reg_codec = NULL;
282 reg_copy_from_hw(reg, enc_dev.hwregs, REG_NUM_ENC);
286 service.reg_codec = NULL;
287 reg_copy_from_hw(reg, dec_dev.hwregs, REG_NUM_DEC);
291 service.reg_pproc = NULL;
292 reg_copy_from_hw(reg, dec_dev.hwregs + PP_INTERRUPT_REGISTER, REG_NUM_PP);
293 dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
297 service.reg_codec = NULL;
298 service.reg_pproc = NULL;
299 reg_copy_from_hw(reg, dec_dev.hwregs, REG_NUM_DEC_PP);
300 dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
304 pr_err("copy reg from hw with unknown type %d\n", reg->type);
308 atomic_sub(1, &service.task_running);
309 wake_up_interruptible_sync(®->session->wait);
310 spin_unlock(&service.lock);
313 void reg_copy_to_hw(vpu_reg *reg)
316 u32 *src = (u32 *)®->reg[0];
320 u32 *dst = (u32 *)enc_dev.hwregs;
321 service.reg_codec = reg;
323 dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC] & 0x6;
325 for (i = 0; i < VPU_REG_EN_ENC; i++)
328 for (i = VPU_REG_EN_ENC + 1; i < REG_NUM_ENC; i++)
333 dst[VPU_REG_ENC_GATE] = src[VPU_REG_ENC_GATE] | VPU_REG_ENC_GATE_BIT;
334 dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC];
337 u32 *dst = (u32 *)dec_dev.hwregs;
338 service.reg_codec = reg;
340 for (i = REG_NUM_DEC - 1; i > VPU_REG_DEC_GATE; i--)
345 dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
346 dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
349 u32 *dst = (u32 *)dec_dev.hwregs + PP_INTERRUPT_REGISTER;
350 service.reg_pproc = reg;
352 dst[VPU_REG_PP_GATE] = src[VPU_REG_PP_GATE] | VPU_REG_PP_GATE_BIT;
354 for (i = VPU_REG_PP_GATE + 1; i < REG_NUM_PP; i++)
359 dst[VPU_REG_EN_PP] = src[VPU_REG_EN_PP];
362 u32 *dst = (u32 *)dec_dev.hwregs;
363 service.reg_codec = reg;
364 service.reg_pproc = reg;
366 for (i = VPU_REG_EN_DEC_PP + 1; i < REG_NUM_DEC_PP; i++)
369 dst[VPU_REG_EN_DEC_PP] = src[VPU_REG_EN_DEC_PP] | 0x2;
372 dst[VPU_REG_DEC_PP_GATE] = src[VPU_REG_DEC_PP_GATE] | VPU_REG_PP_GATE_BIT;
373 dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
374 dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
377 pr_err("unsupport session type %d", reg->type);
383 static void try_set_reg(void)
386 // first get reg from reg list
387 spin_lock_irqsave(&service.lock, flag);
388 if (!list_empty(&service.waiting)) {
389 vpu_reg *reg = list_entry(service.waiting.next, vpu_reg, status_link);
391 if (((VPU_DEC_PP == reg->type) && (NULL == service.reg_codec) && (NULL == service.reg_pproc)) ||
392 ((VPU_DEC == reg->type) && (NULL == service.reg_codec)) ||
393 ((VPU_PP == reg->type) && (NULL == service.reg_pproc)) ||
394 ((VPU_ENC == reg->type) && (NULL == service.reg_codec))) {
395 reg_from_wait_to_run(reg);
396 if (!cancel_delayed_work(&vpu_service_power_off_work)) {
397 if (!in_interrupt()) {
398 flush_delayed_work(&vpu_service_power_off_work);
400 pr_err("try_set_reg in inturrpt but cancel power off failed\n");
403 vpu_service_power_on();
406 spin_unlock_irqrestore(&service.lock, flag);
408 spin_unlock_irqrestore(&service.lock, flag);
409 queue_delayed_work(service.workqueue, &vpu_service_power_off_work, POWER_OFF_DELAY);
413 static int return_reg(vpu_reg *reg, u32 __user *dst)
418 if (copy_to_user(dst, ®->reg[0], SIZE_REG(REG_NUM_ENC)))
423 if (copy_to_user(dst, ®->reg[0], SIZE_REG(REG_NUM_DEC)))
428 if (copy_to_user(dst, ®->reg[0], SIZE_REG(REG_NUM_PP)))
433 if (copy_to_user(dst, ®->reg[0], SIZE_REG(REG_NUM_DEC_PP)))
439 pr_err("copy reg to user with unknown type %d\n", reg->type);
/*
 * Character-device ioctl entry point for /dev/vpu_service.
 *   VPU_IOC_SET_CLIENT_TYPE:    tag the session as ENC/DEC/PP/DEC_PP.
 *   VPU_IOC_GET_HW_FUSE_STATUS: copy the cached dec/enc capability struct out.
 *   VPU_IOC_SET_REG:            queue a register set (reg_init + try_set_reg).
 *   VPU_IOC_GET_REG:            wait up to 1 s for a finished set, copy it back.
 * NOTE(review): many control-flow lines (returns, breaks, braces) are
 * elided in this extract; the visible statements are kept verbatim.
 */
static long vpu_service_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
	vpu_session *session = (vpu_session *)filp->private_data;
	/* a session is attached by vpu_service_open(); reject stray fds */
	if (NULL == session) {
	case VPU_IOC_SET_CLIENT_TYPE : {
		session->type = (VPU_CLIENT_TYPE)arg;
	case VPU_IOC_GET_HW_FUSE_STATUS : {
		if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
			pr_err("VPU_IOC_GET_HW_FUSE_STATUS copy_from_user failed\n");
		/* decoder/pp clients get dec_config, encoder clients enc_config */
		if (VPU_ENC != session->type) {
			if (copy_to_user((void __user *)req.req, &service.dec_config, sizeof(VPUHwDecConfig_t))) {
				pr_err("VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
			if (copy_to_user((void __user *)req.req, &service.enc_config, sizeof(VPUHwEncConfig_t))) {
				pr_err("VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
	case VPU_IOC_SET_REG : {
		if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
			pr_err("VPU_IOC_SET_REG copy_from_user failed\n");
		/* queue the register image; the scheduler hands it to the hw */
		reg = reg_init(session, (void __user *)req.req, req.size);
	case VPU_IOC_GET_REG : {
		if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
			pr_err("VPU_IOC_GET_REG copy_from_user failed\n");
		/* block until one of this session's sets completes (1 s cap) */
		int ret = wait_event_interruptible_timeout(session->wait, !list_empty(&session->done), HZ);
		if (unlikely(ret < 0)) {
			pr_err("pid %d wait task ret %d\n", session->pid, ret);
		} else if (0 == ret) {
			pr_err("pid %d wait task done timeout\n", session->pid);
		spin_lock_irqsave(&service.lock, flag);
		reg = list_entry(session->done.next, vpu_reg, session_link);
		/* return_reg() copies the result out and frees the set */
		return_reg(reg, (u32 __user *)req.req);
		spin_unlock_irqrestore(&service.lock, flag);
	pr_err("unknow vpu service ioctl cmd %x\n", cmd);
531 static int vpu_service_check_hw_id(struct vpu_device * dev, const u16 *hwids, size_t num)
533 u32 hwid = readl(dev->hwregs);
534 pr_info("HW ID = 0x%08x\n", hwid);
536 hwid = (hwid >> 16) & 0xFFFF; /* product version only */
539 if (hwid == hwids[num]) {
540 pr_info("Compatible HW found at 0x%08lx\n", dev->iobaseaddr);
545 pr_info("No Compatible HW found at 0x%08lx\n", dev->iobaseaddr);
549 static void vpu_service_release_io(void)
552 iounmap((void *)dec_dev.hwregs);
553 release_mem_region(dec_dev.iobaseaddr, dec_dev.iosize);
556 iounmap((void *)enc_dev.hwregs);
557 release_mem_region(enc_dev.iobaseaddr, enc_dev.iosize);
560 static int vpu_service_reserve_io(void)
562 unsigned long iobaseaddr;
563 unsigned long iosize;
565 iobaseaddr = dec_dev.iobaseaddr;
566 iosize = dec_dev.iosize;
568 if (!request_mem_region(iobaseaddr, iosize, "vdpu_io")) {
569 pr_info("failed to reserve dec HW regs\n");
573 dec_dev.hwregs = (volatile u32 *)ioremap_nocache(iobaseaddr, iosize);
575 if (dec_dev.hwregs == NULL) {
576 pr_info("failed to ioremap dec HW regs\n");
580 /* check for correct HW */
581 if (!vpu_service_check_hw_id(&dec_dev, dec_hw_ids, ARRAY_SIZE(dec_hw_ids))) {
585 iobaseaddr = enc_dev.iobaseaddr;
586 iosize = enc_dev.iosize;
588 if (!request_mem_region(iobaseaddr, iosize, "hx280enc")) {
589 pr_info("failed to reserve enc HW regs\n");
593 enc_dev.hwregs = (volatile u32 *)ioremap_nocache(iobaseaddr, iosize);
595 if (enc_dev.hwregs == NULL) {
596 pr_info("failed to ioremap enc HW regs\n");
600 /* check for correct HW */
601 if (!vpu_service_check_hw_id(&enc_dev, enc_hw_ids, ARRAY_SIZE(enc_hw_ids))) {
607 vpu_service_release_io();
611 static int vpu_service_open(struct inode *inode, struct file *filp)
613 vpu_session *session = (vpu_session *)kmalloc(sizeof(vpu_session), GFP_KERNEL);
614 if (NULL == session) {
615 pr_err("unable to allocate memory for vpu_session.");
619 session->type = VPU_TYPE_BUTT;
620 session->pid = current->pid;
621 INIT_LIST_HEAD(&session->waiting);
622 INIT_LIST_HEAD(&session->running);
623 INIT_LIST_HEAD(&session->done);
624 INIT_LIST_HEAD(&session->list_session);
625 init_waitqueue_head(&session->wait);
626 /* no need to protect */
627 list_add_tail(&session->list_session, &service.session);
628 filp->private_data = (void *)session;
630 pr_debug("dev opened\n");
631 return nonseekable_open(inode, filp);
/*
 * release() for /dev/vpu_service: wake any sleeper, unlink the session
 * and, under the service lock, tear down every register set the client
 * still owns in all three stages.  The loop bodies and the final
 * session free are elided in this extract — presumably reg_deinit(reg)
 * and kfree(session); TODO confirm against the full source.
 */
static int vpu_service_release(struct inode *inode, struct file *filp)
	vpu_session *session = (vpu_session *)filp->private_data;
	/* unblock a reader stuck in VPU_IOC_GET_REG */
	wake_up_interruptible_sync(&session->wait);

	/* remove this filp from the asynchronusly notified filp's */
	//vpu_service_fasync(-1, filp, 0);
	list_del(&session->list_session);

	spin_lock_irqsave(&service.lock, flag);
	/* reclaim every register set still owned by this client */
	list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
	list_for_each_entry_safe(reg, n, &session->running, session_link) {
	list_for_each_entry_safe(reg, n, &session->done, session_link) {
	spin_unlock_irqrestore(&service.lock, flag);

	pr_debug("dev closed\n");
/* file_operations for the /dev/vpu_service misc character device. */
static const struct file_operations vpu_service_fops = {
	.unlocked_ioctl = vpu_service_ioctl,
	.open = vpu_service_open,
	.release = vpu_service_release,
	//.fasync = vpu_service_fasync,

/* dynamic-minor misc device; userspace opens /dev/vpu_service */
static struct miscdevice vpu_service_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "vpu_service",
	.fops = &vpu_service_fops,
682 static void vpu_service_shutdown(struct platform_device *pdev)
684 pr_cont("shutdown...");
685 cancel_delayed_work(&vpu_service_power_off_work);
686 vpu_service_power_off();
690 static int vpu_service_suspend(struct platform_device *pdev, pm_message_t state)
693 pr_info("suspend...");
694 cancel_delayed_work(&vpu_service_power_off_work);
695 enabled = service.enabled;
696 vpu_service_power_off();
697 service.enabled = enabled;
701 static int vpu_service_resume(struct platform_device *pdev)
703 pr_info("resume...");
704 if (service.enabled) {
705 service.enabled = false;
706 vpu_service_power_on();
/* Matching platform device/driver pair registered in vpu_service_init()
 * purely so the shutdown/suspend/resume callbacks above are invoked. */
static struct platform_device vpu_service_device = {
	.name = "vpu_service",

static struct platform_driver vpu_service_driver = {
		.name = "vpu_service",
		.owner = THIS_MODULE,
	.shutdown = vpu_service_shutdown,
	.suspend = vpu_service_suspend,
	.resume = vpu_service_resume,
/*
 * Decode the synthesis-config and fuse registers of both cores into
 * service.dec_config / service.enc_config, then clamp each advertised
 * capability by its fuse bit.  Must be called with the block powered
 * and clocked (vpu_service_init() does so).
 * NOTE(review): several braces/else-branches are elided in this
 * extract; the visible statements are kept verbatim.
 */
static void get_hw_info(void)
	VPUHwDecConfig_t *dec = &service.dec_config;
	VPUHwEncConfig_t *enc = &service.enc_config;
	u32 configReg = dec_dev.hwregs[VPU_DEC_HWCFG0];
	u32 asicID = dec_dev.hwregs[0];

	/* 1st config register: codec support bits and max decode width */
	dec->h264Support = (configReg >> DWL_H264_E) & 0x3U;
	dec->jpegSupport = (configReg >> DWL_JPEG_E) & 0x01U;
	if (dec->jpegSupport && ((configReg >> DWL_PJPEG_E) & 0x01U))
		dec->jpegSupport = JPEG_PROGRESSIVE;
	dec->mpeg4Support = (configReg >> DWL_MPEG4_E) & 0x3U;
	dec->vc1Support = (configReg >> DWL_VC1_E) & 0x3U;
	dec->mpeg2Support = (configReg >> DWL_MPEG2_E) & 0x01U;
	dec->sorensonSparkSupport = (configReg >> DWL_SORENSONSPARK_E) & 0x01U;
	dec->refBufSupport = (configReg >> DWL_REF_BUFF_E) & 0x01U;
	dec->vp6Support = (configReg >> DWL_VP6_E) & 0x01U;
	dec->maxDecPicWidth = configReg & 0x07FFU;

	/* 2nd Config register */
	configReg = dec_dev.hwregs[VPU_DEC_HWCFG1];
	if (dec->refBufSupport) {
		if ((configReg >> DWL_REF_BUFF_ILACE_E) & 0x01U)
			dec->refBufSupport |= 2;
		if ((configReg >> DWL_REF_BUFF_DOUBLE_E) & 0x01U)
			dec->refBufSupport |= 4;
	dec->customMpeg4Support = (configReg >> DWL_MPEG4_CUSTOM_E) & 0x01U;
	dec->vp7Support = (configReg >> DWL_VP7_E) & 0x01U;
	dec->vp8Support = (configReg >> DWL_VP8_E) & 0x01U;
	dec->avsSupport = (configReg >> DWL_AVS_E) & 0x01U;

	/* JPEG extensions need an 8190+ (or 6731) core */
	if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
		dec->jpegESupport = (configReg >> DWL_JPEG_EXT_E) & 0x01U;
		dec->jpegESupport = JPEG_EXT_NOT_SUPPORTED;

	/* RealVideo needs a 9170+ (or 6731) core */
	if (((asicID >> 16) >= 0x9170U) || ((asicID >> 16) == 0x6731U) ) {
		dec->rvSupport = (configReg >> DWL_RV_E) & 0x03U;
		dec->rvSupport = RV_NOT_SUPPORTED;

	dec->mvcSupport = (configReg >> DWL_MVC_E) & 0x03U;

	if (dec->refBufSupport && (asicID >> 16) == 0x6731U ) {
		dec->refBufSupport |= 8; /* enable HW support for offset */

	VPUHwFuseStatus_t hwFuseSts;
	/* Decoder fuse configuration */
	u32 fuseReg = dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];

	hwFuseSts.h264SupportFuse = (fuseReg >> DWL_H264_FUSE_E) & 0x01U;
	hwFuseSts.mpeg4SupportFuse = (fuseReg >> DWL_MPEG4_FUSE_E) & 0x01U;
	hwFuseSts.mpeg2SupportFuse = (fuseReg >> DWL_MPEG2_FUSE_E) & 0x01U;
	hwFuseSts.sorensonSparkSupportFuse = (fuseReg >> DWL_SORENSONSPARK_FUSE_E) & 0x01U;
	hwFuseSts.jpegSupportFuse = (fuseReg >> DWL_JPEG_FUSE_E) & 0x01U;
	hwFuseSts.vp6SupportFuse = (fuseReg >> DWL_VP6_FUSE_E) & 0x01U;
	hwFuseSts.vc1SupportFuse = (fuseReg >> DWL_VC1_FUSE_E) & 0x01U;
	hwFuseSts.jpegProgSupportFuse = (fuseReg >> DWL_PJPEG_FUSE_E) & 0x01U;
	hwFuseSts.rvSupportFuse = (fuseReg >> DWL_RV_FUSE_E) & 0x01U;
	hwFuseSts.avsSupportFuse = (fuseReg >> DWL_AVS_FUSE_E) & 0x01U;
	hwFuseSts.vp7SupportFuse = (fuseReg >> DWL_VP7_FUSE_E) & 0x01U;
	hwFuseSts.vp8SupportFuse = (fuseReg >> DWL_VP8_FUSE_E) & 0x01U;
	hwFuseSts.customMpeg4SupportFuse = (fuseReg >> DWL_CUSTOM_MPEG4_FUSE_E) & 0x01U;
	hwFuseSts.mvcSupportFuse = (fuseReg >> DWL_MVC_FUSE_E) & 0x01U;

	/* check max. decoder output width */
	if (fuseReg & 0x8000U)
		hwFuseSts.maxDecPicWidthFuse = 1920;
	else if (fuseReg & 0x4000U)
		hwFuseSts.maxDecPicWidthFuse = 1280;
	else if (fuseReg & 0x2000U)
		hwFuseSts.maxDecPicWidthFuse = 720;
	else if (fuseReg & 0x1000U)
		hwFuseSts.maxDecPicWidthFuse = 352;
	else /* remove warning */
		hwFuseSts.maxDecPicWidthFuse = 352;

	hwFuseSts.refBufSupportFuse = (fuseReg >> DWL_REF_BUFF_FUSE_E) & 0x01U;

	/* Pp configuration */
	configReg = dec_dev.hwregs[VPU_PP_HW_SYNTH_CFG];

	if ((configReg >> DWL_PP_E) & 0x01U) {
		dec->maxPpOutPicWidth = configReg & 0x07FFU;
		/*pHwCfg->ppConfig = (configReg >> DWL_CFG_E) & 0x0FU; */
		dec->ppConfig = configReg;
		dec->maxPpOutPicWidth = 0;

	/* check the HW versio */
	if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
		/* Pp configuration */
		configReg = dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];

		if ((configReg >> DWL_PP_E) & 0x01U) {
			/* Pp fuse configuration */
			u32 fuseRegPp = dec_dev.hwregs[VPU_PP_HW_FUSE_CFG];

			if ((fuseRegPp >> DWL_PP_FUSE_E) & 0x01U) {
				hwFuseSts.ppSupportFuse = 1;
				/* check max. pp output width */
				if (fuseRegPp & 0x8000U) hwFuseSts.maxPpOutPicWidthFuse = 1920;
				else if (fuseRegPp & 0x4000U) hwFuseSts.maxPpOutPicWidthFuse = 1280;
				else if (fuseRegPp & 0x2000U) hwFuseSts.maxPpOutPicWidthFuse = 720;
				else if (fuseRegPp & 0x1000U) hwFuseSts.maxPpOutPicWidthFuse = 352;
				else hwFuseSts.maxPpOutPicWidthFuse = 352;
				hwFuseSts.ppConfigFuse = fuseRegPp;
				hwFuseSts.ppSupportFuse = 0;
				hwFuseSts.maxPpOutPicWidthFuse = 0;
				hwFuseSts.ppConfigFuse = 0;
		hwFuseSts.ppSupportFuse = 0;
		hwFuseSts.maxPpOutPicWidthFuse = 0;
		hwFuseSts.ppConfigFuse = 0;

	/* clamp every advertised capability by its fuse */
	if (dec->maxDecPicWidth > hwFuseSts.maxDecPicWidthFuse)
		dec->maxDecPicWidth = hwFuseSts.maxDecPicWidthFuse;
	if (dec->maxPpOutPicWidth > hwFuseSts.maxPpOutPicWidthFuse)
		dec->maxPpOutPicWidth = hwFuseSts.maxPpOutPicWidthFuse;
	if (!hwFuseSts.h264SupportFuse) dec->h264Support = H264_NOT_SUPPORTED;
	if (!hwFuseSts.mpeg4SupportFuse) dec->mpeg4Support = MPEG4_NOT_SUPPORTED;
	if (!hwFuseSts.customMpeg4SupportFuse) dec->customMpeg4Support = MPEG4_CUSTOM_NOT_SUPPORTED;
	if (!hwFuseSts.jpegSupportFuse) dec->jpegSupport = JPEG_NOT_SUPPORTED;
	if ((dec->jpegSupport == JPEG_PROGRESSIVE) && !hwFuseSts.jpegProgSupportFuse)
		dec->jpegSupport = JPEG_BASELINE;
	if (!hwFuseSts.mpeg2SupportFuse) dec->mpeg2Support = MPEG2_NOT_SUPPORTED;
	if (!hwFuseSts.vc1SupportFuse) dec->vc1Support = VC1_NOT_SUPPORTED;
	if (!hwFuseSts.vp6SupportFuse) dec->vp6Support = VP6_NOT_SUPPORTED;
	if (!hwFuseSts.vp7SupportFuse) dec->vp7Support = VP7_NOT_SUPPORTED;
	if (!hwFuseSts.vp8SupportFuse) dec->vp8Support = VP8_NOT_SUPPORTED;
	if (!hwFuseSts.ppSupportFuse) dec->ppSupport = PP_NOT_SUPPORTED;

	/* check the pp config vs fuse status */
	if ((dec->ppConfig & 0xFC000000) && ((hwFuseSts.ppConfigFuse & 0xF0000000) >> 5)) {
		u32 deInterlace = ((dec->ppConfig & PP_DEINTERLACING) >> 25);
		u32 alphaBlend = ((dec->ppConfig & PP_ALPHA_BLENDING) >> 24);
		u32 deInterlaceFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_DEINTERLACING) >> 25);
		u32 alphaBlendFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_ALPHA_BLENDING) >> 24);

		if (deInterlace && !deInterlaceFuse) dec->ppConfig &= 0xFD000000;
		if (alphaBlend && !alphaBlendFuse) dec->ppConfig &= 0xFE000000;
	if (!hwFuseSts.sorensonSparkSupportFuse) dec->sorensonSparkSupport = SORENSON_SPARK_NOT_SUPPORTED;
	if (!hwFuseSts.refBufSupportFuse) dec->refBufSupport = REF_BUF_NOT_SUPPORTED;
	if (!hwFuseSts.rvSupportFuse) dec->rvSupport = RV_NOT_SUPPORTED;
	if (!hwFuseSts.avsSupportFuse) dec->avsSupport = AVS_NOT_SUPPORTED;
	if (!hwFuseSts.mvcSupportFuse) dec->mvcSupport = MVC_NOT_SUPPORTED;

	/* encoder capabilities live in encoder swreg 63 */
	configReg = enc_dev.hwregs[63];
	enc->maxEncodedWidth = configReg & ((1 << 11) - 1);
	enc->h264Enabled = (configReg >> 27) & 1;
	enc->mpeg4Enabled = (configReg >> 26) & 1;
	enc->jpegEnabled = (configReg >> 25) & 1;
	enc->vsEnabled = (configReg >> 24) & 1;
	enc->rgbEnabled = (configReg >> 28) & 1;
	enc->busType = (configReg >> 20) & 15;
	enc->synthesisLanguage = (configReg >> 16) & 15;
	enc->busWidth = (configReg >> 12) & 15;
901 static irqreturn_t vdpu_isr(int irq, void *dev_id)
903 vpu_device *dev = (vpu_device *) dev_id;
904 u32 irq_status_dec = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
905 u32 irq_status_pp = readl(dev->hwregs + PP_INTERRUPT_REGISTER);
907 pr_debug("vdpu_isr dec %x pp %x\n", irq_status_dec, irq_status_pp);
909 if (irq_status_dec & DEC_INTERRUPT_BIT) {
910 irq_status_dec = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
911 if ((irq_status_dec & 0x40001) == 0x40001)
914 irq_status_dec = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
915 } while ((irq_status_dec & 0x40001) == 0x40001);
918 writel(irq_status_dec & (~DEC_INTERRUPT_BIT), dev->hwregs + DEC_INTERRUPT_REGISTER);
919 pr_debug("DEC IRQ received!\n");
920 if (NULL == service.reg_codec) {
921 pr_err("dec isr with no task waiting\n");
923 reg_from_run_to_done(service.reg_codec);
927 if (irq_status_pp & PP_INTERRUPT_BIT) {
929 writel(irq_status_pp & (~DEC_INTERRUPT_BIT), dev->hwregs + PP_INTERRUPT_REGISTER);
930 pr_debug("PP IRQ received!\n");
932 if (NULL == service.reg_pproc) {
933 pr_err("pp isr with no task waiting\n");
935 reg_from_run_to_done(service.reg_pproc);
942 static irqreturn_t vepu_isr(int irq, void *dev_id)
944 struct vpu_device *dev = (struct vpu_device *) dev_id;
945 u32 irq_status = readl(dev->hwregs + ENC_INTERRUPT_REGISTER);
947 pr_debug("enc_isr\n");
949 if (likely(irq_status & ENC_INTERRUPT_BIT)) {
951 writel(irq_status & (~ENC_INTERRUPT_BIT), dev->hwregs + ENC_INTERRUPT_REGISTER);
952 pr_debug("ENC IRQ received!\n");
954 if (NULL == service.reg_codec) {
955 pr_err("enc isr with no task waiting\n");
957 reg_from_run_to_done(service.reg_codec);
/*
 * Module init: set up the global service state, map both register banks,
 * hook both interrupt lines, register the misc device plus the platform
 * device/driver pair, then power the block down until first use.
 * NOTE(review): the error-unwinding labels and several returns are
 * elided in this extract; the visible statements are kept verbatim.
 */
static int __init vpu_service_init(void)
	pr_debug("baseaddr = 0x%08x vdpu irq = %d vepu irq = %d\n", RK29_VCODEC_PHYS, IRQ_VDPU, IRQ_VEPU);

	/* decoder/pp bank sits 0x200 above the shared codec base */
	dec_dev.iobaseaddr = RK29_VCODEC_PHYS + 0x200;
	dec_dev.iosize = DEC_IO_SIZE;
	enc_dev.iobaseaddr = RK29_VCODEC_PHYS;
	enc_dev.iosize = ENC_IO_SIZE;

	INIT_LIST_HEAD(&service.waiting);
	INIT_LIST_HEAD(&service.running);
	INIT_LIST_HEAD(&service.done);
	INIT_LIST_HEAD(&service.session);
	spin_lock_init(&service.lock);
	service.reg_codec = NULL;
	service.reg_pproc = NULL;
	atomic_set(&service.task_running, 0);
	service.enabled = false;
	service.workqueue = create_singlethread_workqueue("vpu_service");
	if (!service.workqueue) {
		pr_err("create_singlethread_workqueue failed\n");

	/* power on so the HW-id probe (and get_hw_info) can read registers */
	vpu_service_power_on();

	ret = vpu_service_reserve_io();
	pr_err("reserve io failed\n");

	/* get the IRQ line */
	ret = request_irq(IRQ_VDPU, vdpu_isr, IRQF_SHARED, "vdpu", (void *)&dec_dev);
	pr_err("can't request vdpu irq %d\n", IRQ_VDPU);
	goto err_req_vdpu_irq;

	ret = request_irq(IRQ_VEPU, vepu_isr, IRQF_SHARED, "vepu", (void *)&enc_dev);
	pr_err("can't request vepu irq %d\n", IRQ_VEPU);
	goto err_req_vepu_irq;

	ret = misc_register(&vpu_service_misc_device);
	pr_err("misc_register failed\n");

	platform_device_register(&vpu_service_device);
	platform_driver_probe(&vpu_service_driver, NULL);
	/* stay powered down until the first job arrives */
	vpu_service_power_off();
	pr_info("init success\n");

	/* ---- error unwinding (labels elided in this extract) ---- */
	free_irq(IRQ_VEPU, (void *)&enc_dev);
	free_irq(IRQ_VDPU, (void *)&dec_dev);
	pr_info("init failed\n");
	vpu_service_power_off();
	vpu_service_release_io();
	destroy_workqueue(service.workqueue);
	pr_info("init failed\n");
1041 static void __exit vpu_service_exit(void)
1043 cancel_delayed_work(&vpu_service_power_off_work);
1044 vpu_service_power_off();
1045 destroy_workqueue(service.workqueue);
1046 platform_device_unregister(&vpu_service_device);
1047 platform_driver_unregister(&vpu_service_driver);
1048 misc_deregister(&vpu_service_misc_device);
1049 free_irq(IRQ_VEPU, (void *)&enc_dev);
1050 free_irq(IRQ_VDPU, (void *)&dec_dev);
1054 module_init(vpu_service_init);
1055 module_exit(vpu_service_exit);
1056 MODULE_LICENSE("GPL");
1058 #ifdef CONFIG_PROC_FS
1059 #include <linux/proc_fs.h>
1060 #include <linux/seq_file.h>
/*
 * /proc/vpu_service seq_file body: dump both live register banks and the
 * queue state of every session.  Keeps the block powered for the reads,
 * then re-arms the idle power-off.
 * NOTE(review): local declarations and several closing braces are
 * elided in this extract; the visible statements are kept verbatim.
 */
static int proc_vpu_service_show(struct seq_file *s, void *v)
	vpu_reg *reg, *reg_tmp;
	vpu_session *session, *session_tmp;

	/* hold power while reading live registers */
	cancel_delayed_work_sync(&vpu_service_power_off_work);
	vpu_service_power_on();
	seq_printf(s, "\nENC Registers:\n");
	n = enc_dev.iosize >> 2;
	for (i = 0; i < n; i++) {
		seq_printf(s, "\tswreg%d = %08X\n", i, readl(enc_dev.hwregs + i));
	seq_printf(s, "\nDEC Registers:\n");
	n = dec_dev.iosize >> 2;
	for (i = 0; i < n; i++) {
		seq_printf(s, "\tswreg%d = %08X\n", i, readl(dec_dev.hwregs + i));

	seq_printf(s, "\nvpu service status:\n");
	spin_lock_irqsave(&service.lock, flag);
	list_for_each_entry_safe(session, session_tmp, &service.session, list_session) {
		seq_printf(s, "session pid %d type %d:\n", session->pid, session->type);
		//seq_printf(s, "waiting reg set %d\n");
		list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
			seq_printf(s, "waiting register set\n");
		list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
			seq_printf(s, "running register set\n");
		list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
			seq_printf(s, "done register set\n");
	spin_unlock_irqrestore(&service.lock, flag);
	/* re-arm the idle power-off we cancelled above */
	queue_delayed_work(service.workqueue, &vpu_service_power_off_work, POWER_OFF_DELAY);
1103 static int proc_vpu_service_open(struct inode *inode, struct file *file)
1105 return single_open(file, proc_vpu_service_show, NULL);
/* /proc/vpu_service file_operations (the .read hook — presumably
 * seq_read — is elided in this extract; TODO confirm). */
static const struct file_operations proc_vpu_service_fops = {
	.open = proc_vpu_service_open,
	.llseek = seq_lseek,
	.release = single_release,
1115 static int __init vpu_service_proc_init(void)
1117 proc_create("vpu_service", 0, NULL, &proc_vpu_service_fops);
1121 late_initcall(vpu_service_proc_init);
1122 #endif /* CONFIG_PROC_FS */