1 /* arch/arm/mach-rk29/vpu.c
3 * Copyright (C) 2010 ROCKCHIP, Inc.
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
16 #ifdef CONFIG_RK29_VPU_DEBUG
18 #define pr_fmt(fmt) "VPU_SERVICE: %s: " fmt, __func__
20 #define pr_fmt(fmt) "VPU_SERVICE: " fmt
24 #include <linux/clk.h>
25 #include <linux/delay.h>
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
29 #include <linux/kernel.h>
30 #include <linux/module.h>
32 #include <linux/ioport.h>
33 #include <linux/miscdevice.h>
35 #include <linux/poll.h>
36 #include <linux/platform_device.h>
37 #include <linux/workqueue.h>
39 #include <asm/uaccess.h>
41 #include <mach/irqs.h>
42 #include <mach/vpu_service.h>
43 #include <mach/rk29_iomap.h>
/* Interrupt status register indices (32-bit word offsets into the mapped
 * register file) and their status bits, for decoder, post-processor and
 * encoder. Note DEC and PP share the same bit value (0x100) but live in
 * different registers. */
48 #define DEC_INTERRUPT_REGISTER 1
49 #define PP_INTERRUPT_REGISTER 60
50 #define ENC_INTERRUPT_REGISTER 1
52 #define DEC_INTERRUPT_BIT 0x100
53 #define PP_INTERRUPT_BIT 0x100
54 #define ENC_INTERRUPT_BIT 0x1
/* Register-file lengths in 32-bit words; SIZE_REG() converts a word count
 * to bytes for copy_to_user in return_reg(). */
56 #define REG_NUM_DEC (60)
57 #define REG_NUM_PP (41)
58 #define REG_NUM_ENC (96)
59 #define REG_NUM_DEC_PP (REG_NUM_DEC+REG_NUM_PP)
60 #define SIZE_REG(reg) ((reg)*4)
62 #define DEC_IO_SIZE ((100 + 1) * 4) /* bytes */
63 #define ENC_IO_SIZE (96 * 4) /* bytes */
/* Hardware product IDs accepted by vpu_service_check_hw_id(). */
64 static const u16 dec_hw_ids[] = { 0x8190, 0x8170, 0x9170, 0x9190, 0x6731 };
65 static const u16 enc_hw_ids[] = { 0x6280, 0x7280, 0x8270 };
/* Per-client enable-register indices and clock-gate bits; these drive the
 * register write ordering in reg_copy_to_hw() (gates forced on before the
 * enable register is written last). */
67 #define VPU_REG_EN_ENC 14
68 #define VPU_REG_ENC_GATE 2
69 #define VPU_REG_ENC_GATE_BIT (1<<4)
71 #define VPU_REG_EN_DEC 1
72 #define VPU_REG_DEC_GATE 2
73 #define VPU_REG_DEC_GATE_BIT (1<<10)
74 #define VPU_REG_EN_PP 0
75 #define VPU_REG_PP_GATE 1
76 #define VPU_REG_PP_GATE_BIT (1<<8)
77 #define VPU_REG_EN_DEC_PP 1
78 #define VPU_REG_DEC_PP_GATE 61
79 #define VPU_REG_DEC_PP_GATE_BIT (1<<8)
83 * struct for process session which connect to vpu
85 * @author ChenHengming (2011-5-3)
/*
 * vpu_session: per-open-file state, one per process using /dev/vpu_service.
 * NOTE(review): this listing elides lines; the `type` and `pid` members and
 * the closing "} vpu_session;" are referenced elsewhere in the file but are
 * not visible here.
 */
87 typedef struct vpu_session {
89 /* a linked list of data so we can access them for debugging */
90 struct list_head list_session;
91 /* a linked list of register data waiting for process */
92 struct list_head waiting;
93 /* a linked list of register data in processing */
94 struct list_head running;
95 /* a linked list of register data processed */
96 struct list_head done;
/* woken by reg_from_run_to_done() when a register set completes */
97 wait_queue_head_t wait;
102 * struct for process register set
104 * @author ChenHengming (2011-5-4)
/*
 * vpu_reg: one queued hardware register set, copied from user space in
 * reg_init() and written to the block in reg_copy_to_hw().
 * NOTE(review): closing "} vpu_reg;" elided from this listing.
 */
106 typedef struct vpu_reg {
107 VPU_CLIENT_TYPE type;
/* owning session; its wait queue is signalled on completion */
108 vpu_session *session;
109 struct list_head session_link; /* link to vpu service session */
110 struct list_head status_link; /* link to register set list */
/* register image; sized for the largest client (DEC+PP combined) */
112 unsigned long reg[VPU_REG_NUM_DEC_PP];
/*
 * vpu_device: one memory-mapped hardware block (decoder or encoder).
 * NOTE(review): an `iosize` member is used elsewhere (reserve/release_io,
 * proc show) but its declaration line is elided from this listing.
 */
115 typedef struct vpu_device {
116 unsigned long iobaseaddr;
/* ioremap'ed register file; volatile because it is device memory */
118 volatile u32 *hwregs;
/*
 * vpu_service_info: global driver state (single instance `service`).
 * NOTE(review): members `lock`, `reg_codec`, `reg_pproc` and `enabled`
 * are used throughout the file but their declaration lines are elided
 * from this listing.
 */
121 typedef struct vpu_service_info {
/* runs the delayed power-off work */
123 struct workqueue_struct *workqueue;
124 struct list_head waiting; /* link to link_reg in struct vpu_reg */
125 struct list_head running; /* link to link_reg in struct vpu_reg */
126 struct list_head done; /* link to link_reg in struct vpu_reg */
127 struct list_head session; /* link to list_session in struct vpu_session */
/* count of register sets currently on hardware; power-off spins on it */
128 atomic_t task_running;
/* cached hardware capability info filled in by get_hw_info() */
133 VPUHwDecConfig_t dec_config;
134 VPUHwEncConfig_t enc_config;
/* vpu_request: ioctl argument carrying a user pointer (`req`) and a byte
 * size (`size`); body elided from this listing — see usage in
 * vpu_service_ioctl(). */
137 typedef struct vpu_request
/* Clocks feeding the VCODEC power domain, acquired in vpu_get_clk(). */
143 static struct clk *aclk_vepu;
144 static struct clk *hclk_vepu;
145 static struct clk *aclk_ddr_vepu;
146 static struct clk *hclk_cpu_vcodec;
/* Singleton driver state and the two hardware blocks. */
147 static vpu_service_info service;
148 static vpu_device dec_dev;
149 static vpu_device enc_dev;
/* Delayed power-off: re-armed after each burst of work so the hardware is
 * shut down only after POWER_OFF_DELAY of idleness. */
151 static void vpu_service_power_off_work_func(struct work_struct *work);
152 static DECLARE_DELAYED_WORK(vpu_service_power_off_work, vpu_service_power_off_work_func);
153 #define POWER_OFF_DELAY 3*HZ /* 3s */
/* Look up the four clocks used by the VPU power sequencing.
 * NOTE(review): return values are not checked here; clk_get() failures
 * would surface later in clk_enable(). */
155 static void vpu_get_clk(void)
157 aclk_vepu = clk_get(NULL, "aclk_vepu");
158 hclk_vepu = clk_get(NULL, "hclk_vepu");
159 aclk_ddr_vepu = clk_get(NULL, "aclk_ddr_vepu");
160 hclk_cpu_vcodec = clk_get(NULL, "hclk_cpu_vcodec");
/* Release the clock references taken in vpu_get_clk().
 * NOTE(review): the clk_put lines for aclk_vepu/hclk_vepu are elided
 * from this listing. */
163 static void vpu_put_clk(void)
167 clk_put(aclk_ddr_vepu);
168 clk_put(hclk_cpu_vcodec);
/* Power up the VCODEC domain: bus/peripheral clocks first, then the power
 * domain, then the DDR-side clock; marks service.enabled so power_off knows
 * there is something to undo. (Early-return when already enabled is in an
 * elided line.) */
171 static void vpu_service_power_on(void)
176 printk("vpu: power on\n");
177 clk_enable(aclk_vepu);
178 clk_enable(hclk_vepu);
179 clk_enable(hclk_cpu_vcodec);
181 pmu_set_power_domain(PD_VCODEC, true);
183 clk_enable(aclk_ddr_vepu);
184 service.enabled = true;
/* Power down the VCODEC domain. Waits (busy-spin; delay body elided) for
 * in-flight register sets to drain before cutting power, then disables
 * clocks in reverse order of power_on. */
187 static void vpu_service_power_off(void)
189 if (!service.enabled)
/* don't yank power while an ISR has yet to complete a task */
192 while(atomic_read(&service.task_running))
195 printk("vpu: power off\n");
196 pmu_set_power_domain(PD_VCODEC, false);
198 clk_disable(hclk_cpu_vcodec);
199 clk_disable(aclk_ddr_vepu);
200 clk_disable(hclk_vepu);
201 clk_disable(aclk_vepu);
203 service.enabled = false;
/* Delayed-work trampoline: powers the hardware off after POWER_OFF_DELAY
 * of idleness (armed in try_set_reg / proc show). */
206 static void vpu_service_power_off_work_func(struct work_struct *work)
209 vpu_service_power_off();
/* Allocate a vpu_reg, copy the user-supplied register image into it and
 * queue it on both the global and the session waiting lists (under
 * service.lock). Returns the new reg, or NULL on alloc/copy failure
 * (error-path return lines elided from this listing).
 * NOTE(review): `size` comes from userspace — presumably validated against
 * sizeof(reg->reg) in an elided line; verify. */
212 static vpu_reg *reg_init(vpu_session *session, void __user *src, unsigned long size)
215 vpu_reg *reg = kmalloc(sizeof(vpu_reg), GFP_KERNEL);
217 pr_err("kmalloc fail in reg_init\n");
221 reg->session = session;
222 reg->type = session->type;
224 INIT_LIST_HEAD(&reg->session_link);
225 INIT_LIST_HEAD(&reg->status_link);
227 if (copy_from_user(&reg->reg[0], (void __user *)src, size)) {
228 pr_err("copy_from_user failed in reg_init\n");
/* publish on both queues atomically so try_set_reg sees a consistent view */
233 spin_lock_irqsave(&service.lock, flag);
234 list_add_tail(&reg->status_link, &service.waiting);
235 list_add_tail(&reg->session_link, &session->waiting);
236 spin_unlock_irqrestore(&service.lock, flag);
/* Unlink a register set from whatever lists it is on and clear any global
 * "currently on hardware" pointers that still reference it; the kfree is
 * in an elided line. Caller is expected to hold service.lock. */
241 static void reg_deinit(vpu_reg *reg)
243 list_del_init(&reg->session_link);
244 list_del_init(&reg->status_link);
246 if (reg == service.reg_codec) service.reg_codec = NULL;
247 if (reg == service.reg_pproc) service.reg_pproc = NULL;
/* Move a register set from the waiting lists to the running lists (both the
 * global and the per-session one) and bump the in-flight counter that
 * vpu_service_power_off() spins on. Caller holds service.lock. */
250 static void reg_from_wait_to_run(vpu_reg *reg)
252 list_del_init(&reg->status_link);
253 list_add_tail(&reg->status_link, &service.running);
255 list_del_init(&reg->session_link);
256 list_add_tail(&reg->session_link, &reg->session->running);
258 atomic_add(1, &service.task_running);
/* Read back `count` 32-bit words from the hardware register file into the
 * reg's image so return_reg() can hand results to userspace (per-word copy
 * body elided from this listing). */
261 static void reg_copy_from_hw(vpu_reg *reg, volatile u32 *src, u32 count)
264 u32 *dst = (u32 *)&reg->reg[0];
265 for (i = 0; i < count; i++)
/* Called from ISR context when hardware finishes: move the reg to the done
 * lists, snapshot the result registers per client type, clear the matching
 * "on hardware" pointer(s), and wake the owning session's waiter.
 * NOTE(review): the switch(reg->type) framing lines are elided; the case
 * bodies below are ENC, DEC, PP and DEC_PP in order. */
269 static void reg_from_run_to_done(vpu_reg *reg)
271 spin_lock(&service.lock);
272 list_del_init(&reg->status_link);
273 list_add_tail(&reg->status_link, &service.done);
275 list_del_init(&reg->session_link);
276 list_add_tail(&reg->session_link, &reg->session->done);
/* VPU_ENC: */
280 service.reg_codec = NULL;
281 reg_copy_from_hw(reg, enc_dev.hwregs, REG_NUM_ENC);
/* VPU_DEC: */
285 service.reg_codec = NULL;
286 reg_copy_from_hw(reg, dec_dev.hwregs, REG_NUM_DEC);
/* VPU_PP: PP registers start at word PP_INTERRUPT_REGISTER in the dec file */
290 service.reg_pproc = NULL;
291 reg_copy_from_hw(reg, dec_dev.hwregs + PP_INTERRUPT_REGISTER, REG_NUM_PP);
292 dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
/* VPU_DEC_PP: combined run owns both hardware slots */
296 service.reg_codec = NULL;
297 service.reg_pproc = NULL;
298 reg_copy_from_hw(reg, dec_dev.hwregs, REG_NUM_DEC_PP);
299 dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
303 pr_err("copy reg from hw with unknown type %d\n", reg->type);
/* pair of atomic_add in reg_from_wait_to_run; unblocks power-off spin */
307 atomic_sub(1, &service.task_running);
308 wake_up_interruptible_sync(&reg->session->wait);
309 spin_unlock(&service.lock);
/* Program a register set into the hardware. For each client type the
 * ordering matters: clock-gate bits are forced on and the enable register
 * is written LAST so the block only starts once everything else is loaded.
 * NOTE(review): the switch(reg->type) framing lines are elided; case
 * bodies below are ENC, DEC, PP and DEC_PP in order. */
312 void reg_copy_to_hw(vpu_reg *reg)
315 u32 *src = (u32 *)&reg->reg[0];
/* VPU_ENC: */
319 u32 *dst = (u32 *)enc_dev.hwregs;
320 service.reg_codec = reg;
/* pre-write enable reg with run bits masked off (0x6 keeps mode bits) */
322 dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC] & 0x6;
324 for (i = 0; i < VPU_REG_EN_ENC; i++)
327 for (i = VPU_REG_EN_ENC + 1; i < REG_NUM_ENC; i++)
332 dst[VPU_REG_ENC_GATE] = src[VPU_REG_ENC_GATE] | VPU_REG_ENC_GATE_BIT;
333 dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC];
/* VPU_DEC: write high-to-low so the enable/gate words go last */
336 u32 *dst = (u32 *)dec_dev.hwregs;
337 service.reg_codec = reg;
339 for (i = REG_NUM_DEC - 1; i > VPU_REG_DEC_GATE; i--)
344 dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
345 dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
/* VPU_PP: PP file lives at word offset PP_INTERRUPT_REGISTER */
348 u32 *dst = (u32 *)dec_dev.hwregs + PP_INTERRUPT_REGISTER;
349 service.reg_pproc = reg;
351 dst[VPU_REG_PP_GATE] = src[VPU_REG_PP_GATE] | VPU_REG_PP_GATE_BIT;
353 for (i = VPU_REG_PP_GATE + 1; i < REG_NUM_PP; i++)
358 dst[VPU_REG_EN_PP] = src[VPU_REG_EN_PP];
/* VPU_DEC_PP: combined run claims both hardware slots */
361 u32 *dst = (u32 *)dec_dev.hwregs;
362 service.reg_codec = reg;
363 service.reg_pproc = reg;
365 for (i = VPU_REG_EN_DEC_PP + 1; i < REG_NUM_DEC_PP; i++)
/* 0x2 presumably forces PP-pipeline mode in the dec enable word — confirm */
368 dst[VPU_REG_EN_DEC_PP] = src[VPU_REG_EN_DEC_PP] | 0x2;
371 dst[VPU_REG_DEC_PP_GATE] = src[VPU_REG_DEC_PP_GATE] | VPU_REG_PP_GATE_BIT;
372 dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
373 dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
376 pr_err("unsupport session type %d", reg->type);
/* Dispatch the head of the waiting queue to hardware if the required
 * hardware slot(s) are free, powering the block on first. Cancels (or, in
 * process context, flushes) the pending delayed power-off, and re-arms it
 * before returning so the hardware idles off after POWER_OFF_DELAY.
 * NOTE(review): the reg_copy_to_hw() call after power-on is in an elided
 * line. */
382 static void try_set_reg(void)
385 // first get reg from reg list
386 spin_lock_irqsave(&service.lock, flag)
387 if (!list_empty(&service.waiting)) {
388 vpu_reg *reg = list_entry(service.waiting.next, vpu_reg, status_link);
/* a reg may run only if its hardware slot(s) are idle */
390 if (((VPU_DEC_PP == reg->type) && (NULL == service.reg_codec) && (NULL == service.reg_pproc)) ||
391 ((VPU_DEC == reg->type) && (NULL == service.reg_codec)) ||
392 ((VPU_PP == reg->type) && (NULL == service.reg_pproc)) ||
393 ((VPU_ENC == reg->type) && (NULL == service.reg_codec))) {
394 reg_from_wait_to_run(reg);
/* if the power-off work already started we must wait for it, but
 * flushing is illegal in interrupt context — hence the split */
395 if (!cancel_delayed_work(&vpu_service_power_off_work)) {
396 if (!in_interrupt()) {
397 flush_delayed_work(&vpu_service_power_off_work);
399 pr_err("try_set_reg in inturrpt but cancel power off failed\n");
402 vpu_service_power_on();
405 spin_unlock_irqrestore(&service.lock, flag);
407 spin_unlock_irqrestore(&service.lock, flag);
408 queue_delayed_work(service.workqueue, &vpu_service_power_off_work, POWER_OFF_DELAY);
/* Copy a completed register image back to userspace; the number of words
 * returned depends on the client type. Error-return and reg_deinit lines
 * are elided from this listing. */
412 static int return_reg(vpu_reg *reg, u32 __user *dst)
417 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_ENC)))
422 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_DEC)))
427 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_PP)))
432 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_DEC_PP)))
438 pr_err("copy reg to user with unknown type %d\n", reg->type);
/* ioctl entry point. Commands:
 *   SET_CLIENT_TYPE    - tag the session (ENC/DEC/PP/DEC_PP)
 *   GET_HW_FUSE_STATUS - copy cached dec/enc capability structs to user
 *   SET_REG            - queue a register set and kick the scheduler
 *   GET_REG            - wait (1s timeout) for a completed set and return it
 * NOTE(review): switch framing, try_set_reg() call after reg_init, and
 * several return statements are elided from this listing. */
446 static long vpu_service_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
448 vpu_session *session = (vpu_session *)filp->private_data;
449 if (NULL == session) {
454 case VPU_IOC_SET_CLIENT_TYPE : {
455 session->type = (VPU_CLIENT_TYPE)arg;
458 case VPU_IOC_GET_HW_FUSE_STATUS : {
460 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
461 pr_err("VPU_IOC_GET_HW_FUSE_STATUS copy_from_user failed\n");
/* every non-encoder client gets the decoder capability block */
464 if (VPU_ENC != session->type) {
465 if (copy_to_user((void __user *)req.req, &service.dec_config, sizeof(VPUHwDecConfig_t))) {
466 pr_err("VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
470 if (copy_to_user((void __user *)req.req, &service.enc_config, sizeof(VPUHwEncConfig_t))) {
471 pr_err("VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
479 case VPU_IOC_SET_REG : {
482 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
483 pr_err("VPU_IOC_SET_REG copy_from_user failed\n");
487 reg = reg_init(session, (void __user *)req.req, req.size);
496 case VPU_IOC_GET_REG : {
499 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
500 pr_err("VPU_IOC_GET_REG copy_from_user failed\n");
503 int ret = wait_event_interruptible_timeout(session->wait, !list_empty(&session->done), HZ);
504 if (unlikely(ret < 0)) {
505 pr_err("pid %d wait task ret %d\n", session->pid, ret);
507 } else if (0 == ret) {
508 pr_err("pid %d wait task done timeout\n", session->pid);
/* hand back the oldest completed set for this session */
514 spin_lock_irqsave(&service.lock, flag);
515 reg = list_entry(session->done.next, vpu_reg, session_link);
516 return_reg(reg, (u32 __user *)req.req);
517 spin_unlock_irqrestore(&service.lock, flag);
522 pr_err("unknow vpu service ioctl cmd %x\n", cmd);
/* Compare the block's product ID (high 16 bits of register 0) against a
 * table of supported IDs; returns nonzero on match (return lines elided).
 * NOTE(review): `hwids[num]` is valid here assuming the elided loop is the
 * `while (num--)` idiom — confirm against the full source. */
530 static int vpu_service_check_hw_id(struct vpu_device * dev, const u16 *hwids, size_t num)
532 u32 hwid = readl(dev->hwregs);
533 pr_info("HW ID = 0x%08x\n", hwid);
535 hwid = (hwid >> 16) & 0xFFFF; /* product version only */
538 if (hwid == hwids[num]) {
539 pr_info("Compatible HW found at 0x%08lx\n", dev->iobaseaddr);
544 pr_info("No Compatible HW found at 0x%08lx\n", dev->iobaseaddr);
/* Undo vpu_service_reserve_io(): unmap and release both register regions
 * (the NULL guards around each pair are in elided lines). */
548 static void vpu_service_release_io(void)
551 iounmap((void *)dec_dev.hwregs);
552 release_mem_region(dec_dev.iobaseaddr, dec_dev.iosize);
555 iounmap((void *)enc_dev.hwregs);
556 release_mem_region(enc_dev.iobaseaddr, enc_dev.iosize);
/* Reserve and ioremap the decoder then encoder register regions and verify
 * each block's hardware ID. On any failure falls through to the cleanup
 * label which calls vpu_service_release_io() (goto/return lines elided). */
559 static int vpu_service_reserve_io(void)
561 unsigned long iobaseaddr;
562 unsigned long iosize;
564 iobaseaddr = dec_dev.iobaseaddr;
565 iosize = dec_dev.iosize;
567 if (!request_mem_region(iobaseaddr, iosize, "vdpu_io")) {
568 pr_info("failed to reserve dec HW regs\n");
572 dec_dev.hwregs = (volatile u32 *)ioremap_nocache(iobaseaddr, iosize);
574 if (dec_dev.hwregs == NULL) {
575 pr_info("failed to ioremap dec HW regs\n");
579 /* check for correct HW */
580 if (!vpu_service_check_hw_id(&dec_dev, dec_hw_ids, ARRAY_SIZE(dec_hw_ids))) {
584 iobaseaddr = enc_dev.iobaseaddr;
585 iosize = enc_dev.iosize;
587 if (!request_mem_region(iobaseaddr, iosize, "hx280enc")) {
588 pr_info("failed to reserve enc HW regs\n");
592 enc_dev.hwregs = (volatile u32 *)ioremap_nocache(iobaseaddr, iosize);
594 if (enc_dev.hwregs == NULL) {
595 pr_info("failed to ioremap enc HW regs\n");
599 /* check for correct HW */
600 if (!vpu_service_check_hw_id(&enc_dev, enc_hw_ids, ARRAY_SIZE(enc_hw_ids))) {
/* error path: */
606 vpu_service_release_io();
/* open(): allocate a session, initialise its queues/wait queue, link it on
 * the global session list and stash it in filp->private_data. */
610 static int vpu_service_open(struct inode *inode, struct file *filp)
612 vpu_session *session = (vpu_session *)kmalloc(sizeof(vpu_session), GFP_KERNEL);
613 if (NULL == session) {
614 pr_err("unable to allocate memory for vpu_session.");
/* type is set later via VPU_IOC_SET_CLIENT_TYPE */
618 session->type = VPU_TYPE_BUTT;
619 session->pid = current->pid;
620 INIT_LIST_HEAD(&session->waiting);
621 INIT_LIST_HEAD(&session->running);
622 INIT_LIST_HEAD(&session->done);
623 INIT_LIST_HEAD(&session->list_session);
624 init_waitqueue_head(&session->wait);
625 /* no need to protect */
626 list_add_tail(&session->list_session, &service.session);
627 filp->private_data = (void *)session;
629 pr_debug("dev opened\n");
630 return nonseekable_open(inode, filp);
/* release(): wake any waiter, unlink the session and, under service.lock,
 * tear down every register set still queued on it (the reg_deinit calls in
 * each loop body and the final kfree(session) are in elided lines). */
633 static int vpu_service_release(struct inode *inode, struct file *filp)
636 vpu_session *session = (vpu_session *)filp->private_data;
640 wake_up_interruptible_sync(&session->wait);
643 /* remove this filp from the asynchronusly notified filp's */
644 //vpu_service_fasync(-1, filp, 0);
645 list_del(&session->list_session);
647 spin_lock_irqsave(&service.lock, flag);
650 list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
653 list_for_each_entry_safe(reg, n, &session->running, session_link) {
656 list_for_each_entry_safe(reg, n, &session->done, session_link) {
660 spin_unlock_irqrestore(&service.lock, flag);
664 pr_debug("dev closed\n");
/* Character-device operations for /dev/vpu_service. */
668 static const struct file_operations vpu_service_fops = {
669 .unlocked_ioctl = vpu_service_ioctl,
670 .open = vpu_service_open,
671 .release = vpu_service_release,
672 //.fasync = vpu_service_fasync,
/* Misc device node: /dev/vpu_service with a dynamically assigned minor. */
675 static struct miscdevice vpu_service_misc_device = {
676 .minor = MISC_DYNAMIC_MINOR,
677 .name = "vpu_service",
678 .fops = &vpu_service_fops,
/* Platform shutdown hook: cancel the pending delayed power-off and power
 * the block down immediately. */
681 static void vpu_service_shutdown(struct platform_device *pdev)
683 pr_cont("shutdown...");
684 cancel_delayed_work(&vpu_service_power_off_work);
685 vpu_service_power_off();
/* Suspend: power off but preserve service.enabled so resume knows whether
 * to power back on (power_off clears the flag, hence the save/restore). */
689 static int vpu_service_suspend(struct platform_device *pdev, pm_message_t state)
692 pr_info("suspend...");
693 cancel_delayed_work(&vpu_service_power_off_work);
694 enabled = service.enabled;
695 vpu_service_power_off();
696 service.enabled = enabled;
/* Resume: if the block was enabled at suspend time, clear the flag (so
 * power_on does not early-return) and power it back up. */
700 static int vpu_service_resume(struct platform_device *pdev)
702 pr_info("resume...");
703 if (service.enabled) {
704 service.enabled = false;
705 vpu_service_power_on();
/* Platform device registered in vpu_service_init(); pairs with the driver
 * below by name. */
710 static struct platform_device vpu_service_device = {
711 .name = "vpu_service",
/* Platform driver: only PM-related callbacks; probe is NULL (bound via
 * platform_driver_probe in init). */
715 static struct platform_driver vpu_service_driver = {
717 .name = "vpu_service",
718 .owner = THIS_MODULE,
720 .shutdown = vpu_service_shutdown,
721 .suspend = vpu_service_suspend,
722 .resume = vpu_service_resume,
/* Read the decoder/encoder synthesis-config and fuse registers and fill
 * service.dec_config / service.enc_config with the capabilities this
 * silicon actually supports (config bits masked down by fuse bits).
 * Must be called with the hardware powered on. */
725 static void get_hw_info(void)
727 VPUHwDecConfig_t *dec = &service.dec_config;
728 VPUHwEncConfig_t *enc = &service.enc_config;
729 u32 configReg = dec_dev.hwregs[VPU_DEC_HWCFG0];
730 u32 asicID = dec_dev.hwregs[0];
/* decode capability bits from synthesis config register 0 */
732 dec->h264Support = (configReg >> DWL_H264_E) & 0x3U;
733 dec->jpegSupport = (configReg >> DWL_JPEG_E) & 0x01U;
734 if (dec->jpegSupport && ((configReg >> DWL_PJPEG_E) & 0x01U))
735 dec->jpegSupport = JPEG_PROGRESSIVE;
736 dec->mpeg4Support = (configReg >> DWL_MPEG4_E) & 0x3U;
737 dec->vc1Support = (configReg >> DWL_VC1_E) & 0x3U;
738 dec->mpeg2Support = (configReg >> DWL_MPEG2_E) & 0x01U;
739 dec->sorensonSparkSupport = (configReg >> DWL_SORENSONSPARK_E) & 0x01U;
740 dec->refBufSupport = (configReg >> DWL_REF_BUFF_E) & 0x01U;
741 dec->vp6Support = (configReg >> DWL_VP6_E) & 0x01U;
742 dec->maxDecPicWidth = configReg & 0x07FFU;
744 /* 2nd Config register */
745 configReg = dec_dev.hwregs[VPU_DEC_HWCFG1];
746 if (dec->refBufSupport) {
747 if ((configReg >> DWL_REF_BUFF_ILACE_E) & 0x01U)
748 dec->refBufSupport |= 2;
749 if ((configReg >> DWL_REF_BUFF_DOUBLE_E) & 0x01U)
750 dec->refBufSupport |= 4;
752 dec->customMpeg4Support = (configReg >> DWL_MPEG4_CUSTOM_E) & 0x01U;
753 dec->vp7Support = (configReg >> DWL_VP7_E) & 0x01U;
754 dec->vp8Support = (configReg >> DWL_VP8_E) & 0x01U;
755 dec->avsSupport = (configReg >> DWL_AVS_E) & 0x01U;
/* JPEG extensions only on 8190+ (or the 6731 derivative) */
758 if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
759 dec->jpegESupport = (configReg >> DWL_JPEG_EXT_E) & 0x01U;
761 dec->jpegESupport = JPEG_EXT_NOT_SUPPORTED;
/* RealVideo only on 9170+ (or 6731) */
764 if (((asicID >> 16) >= 0x9170U) || ((asicID >> 16) == 0x6731U) ) {
765 dec->rvSupport = (configReg >> DWL_RV_E) & 0x03U;
767 dec->rvSupport = RV_NOT_SUPPORTED;
770 dec->mvcSupport = (configReg >> DWL_MVC_E) & 0x03U;
772 if (dec->refBufSupport && (asicID >> 16) == 0x6731U ) {
773 dec->refBufSupport |= 8; /* enable HW support for offset */
777 VPUHwFuseStatus_t hwFuseSts;
778 /* Decoder fuse configuration */
779 u32 fuseReg = dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];
781 hwFuseSts.h264SupportFuse = (fuseReg >> DWL_H264_FUSE_E) & 0x01U;
782 hwFuseSts.mpeg4SupportFuse = (fuseReg >> DWL_MPEG4_FUSE_E) & 0x01U;
783 hwFuseSts.mpeg2SupportFuse = (fuseReg >> DWL_MPEG2_FUSE_E) & 0x01U;
784 hwFuseSts.sorensonSparkSupportFuse = (fuseReg >> DWL_SORENSONSPARK_FUSE_E) & 0x01U;
785 hwFuseSts.jpegSupportFuse = (fuseReg >> DWL_JPEG_FUSE_E) & 0x01U;
786 hwFuseSts.vp6SupportFuse = (fuseReg >> DWL_VP6_FUSE_E) & 0x01U;
787 hwFuseSts.vc1SupportFuse = (fuseReg >> DWL_VC1_FUSE_E) & 0x01U;
788 hwFuseSts.jpegProgSupportFuse = (fuseReg >> DWL_PJPEG_FUSE_E) & 0x01U;
789 hwFuseSts.rvSupportFuse = (fuseReg >> DWL_RV_FUSE_E) & 0x01U;
790 hwFuseSts.avsSupportFuse = (fuseReg >> DWL_AVS_FUSE_E) & 0x01U;
791 hwFuseSts.vp7SupportFuse = (fuseReg >> DWL_VP7_FUSE_E) & 0x01U;
792 hwFuseSts.vp8SupportFuse = (fuseReg >> DWL_VP8_FUSE_E) & 0x01U;
793 hwFuseSts.customMpeg4SupportFuse = (fuseReg >> DWL_CUSTOM_MPEG4_FUSE_E) & 0x01U;
794 hwFuseSts.mvcSupportFuse = (fuseReg >> DWL_MVC_FUSE_E) & 0x01U;
796 /* check max. decoder output width */
798 if (fuseReg & 0x8000U)
799 hwFuseSts.maxDecPicWidthFuse = 1920;
800 else if (fuseReg & 0x4000U)
801 hwFuseSts.maxDecPicWidthFuse = 1280;
802 else if (fuseReg & 0x2000U)
803 hwFuseSts.maxDecPicWidthFuse = 720;
804 else if (fuseReg & 0x1000U)
805 hwFuseSts.maxDecPicWidthFuse = 352;
806 else /* remove warning */
807 hwFuseSts.maxDecPicWidthFuse = 352;
809 hwFuseSts.refBufSupportFuse = (fuseReg >> DWL_REF_BUFF_FUSE_E) & 0x01U;
811 /* Pp configuration */
812 configReg = dec_dev.hwregs[VPU_PP_HW_SYNTH_CFG];
814 if ((configReg >> DWL_PP_E) & 0x01U) {
816 dec->maxPpOutPicWidth = configReg & 0x07FFU;
817 /*pHwCfg->ppConfig = (configReg >> DWL_CFG_E) & 0x0FU; */
818 dec->ppConfig = configReg;
821 dec->maxPpOutPicWidth = 0;
825 /* check the HW version */
826 if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
827 /* Pp configuration */
828 configReg = dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];
830 if ((configReg >> DWL_PP_E) & 0x01U) {
831 /* Pp fuse configuration */
832 u32 fuseRegPp = dec_dev.hwregs[VPU_PP_HW_FUSE_CFG];
834 if ((fuseRegPp >> DWL_PP_FUSE_E) & 0x01U) {
835 hwFuseSts.ppSupportFuse = 1;
836 /* check max. pp output width */
837 if (fuseRegPp & 0x8000U) hwFuseSts.maxPpOutPicWidthFuse = 1920;
838 else if (fuseRegPp & 0x4000U) hwFuseSts.maxPpOutPicWidthFuse = 1280;
839 else if (fuseRegPp & 0x2000U) hwFuseSts.maxPpOutPicWidthFuse = 720;
840 else if (fuseRegPp & 0x1000U) hwFuseSts.maxPpOutPicWidthFuse = 352;
841 else hwFuseSts.maxPpOutPicWidthFuse = 352;
842 hwFuseSts.ppConfigFuse = fuseRegPp;
844 hwFuseSts.ppSupportFuse = 0;
845 hwFuseSts.maxPpOutPicWidthFuse = 0;
846 hwFuseSts.ppConfigFuse = 0;
849 hwFuseSts.ppSupportFuse = 0;
850 hwFuseSts.maxPpOutPicWidthFuse = 0;
851 hwFuseSts.ppConfigFuse = 0;
/* clamp synthesis-config capabilities by what the fuses actually allow */
854 if (dec->maxDecPicWidth > hwFuseSts.maxDecPicWidthFuse)
855 dec->maxDecPicWidth = hwFuseSts.maxDecPicWidthFuse;
856 if (dec->maxPpOutPicWidth > hwFuseSts.maxPpOutPicWidthFuse)
857 dec->maxPpOutPicWidth = hwFuseSts.maxPpOutPicWidthFuse;
858 if (!hwFuseSts.h264SupportFuse) dec->h264Support = H264_NOT_SUPPORTED;
859 if (!hwFuseSts.mpeg4SupportFuse) dec->mpeg4Support = MPEG4_NOT_SUPPORTED;
860 if (!hwFuseSts.customMpeg4SupportFuse) dec->customMpeg4Support = MPEG4_CUSTOM_NOT_SUPPORTED;
861 if (!hwFuseSts.jpegSupportFuse) dec->jpegSupport = JPEG_NOT_SUPPORTED;
862 if ((dec->jpegSupport == JPEG_PROGRESSIVE) && !hwFuseSts.jpegProgSupportFuse)
863 dec->jpegSupport = JPEG_BASELINE;
864 if (!hwFuseSts.mpeg2SupportFuse) dec->mpeg2Support = MPEG2_NOT_SUPPORTED;
865 if (!hwFuseSts.vc1SupportFuse) dec->vc1Support = VC1_NOT_SUPPORTED;
866 if (!hwFuseSts.vp6SupportFuse) dec->vp6Support = VP6_NOT_SUPPORTED;
867 if (!hwFuseSts.vp7SupportFuse) dec->vp7Support = VP7_NOT_SUPPORTED;
868 if (!hwFuseSts.vp8SupportFuse) dec->vp8Support = VP8_NOT_SUPPORTED;
869 if (!hwFuseSts.ppSupportFuse) dec->ppSupport = PP_NOT_SUPPORTED;
871 /* check the pp config vs fuse status */
872 if ((dec->ppConfig & 0xFC000000) && ((hwFuseSts.ppConfigFuse & 0xF0000000) >> 5)) {
873 u32 deInterlace = ((dec->ppConfig & PP_DEINTERLACING) >> 25);
874 u32 alphaBlend = ((dec->ppConfig & PP_ALPHA_BLENDING) >> 24);
875 u32 deInterlaceFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_DEINTERLACING) >> 25);
876 u32 alphaBlendFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_ALPHA_BLENDING) >> 24);
878 if (deInterlace && !deInterlaceFuse) dec->ppConfig &= 0xFD000000;
879 if (alphaBlend && !alphaBlendFuse) dec->ppConfig &= 0xFE000000;
881 if (!hwFuseSts.sorensonSparkSupportFuse) dec->sorensonSparkSupport = SORENSON_SPARK_NOT_SUPPORTED;
882 if (!hwFuseSts.refBufSupportFuse) dec->refBufSupport = REF_BUF_NOT_SUPPORTED;
883 if (!hwFuseSts.rvSupportFuse) dec->rvSupport = RV_NOT_SUPPORTED;
884 if (!hwFuseSts.avsSupportFuse) dec->avsSupport = AVS_NOT_SUPPORTED;
885 if (!hwFuseSts.mvcSupportFuse) dec->mvcSupport = MVC_NOT_SUPPORTED;
/* encoder capabilities come from encoder swreg 63 */
888 configReg = enc_dev.hwregs[63];
889 enc->maxEncodedWidth = configReg & ((1 << 11) - 1);
890 enc->h264Enabled = (configReg >> 27) & 1;
891 enc->mpeg4Enabled = (configReg >> 26) & 1;
892 enc->jpegEnabled = (configReg >> 25) & 1;
893 enc->vsEnabled = (configReg >> 24) & 1;
894 enc->rgbEnabled = (configReg >> 28) & 1;
895 enc->busType = (configReg >> 20) & 15;
896 enc->synthesisLanguage = (configReg >> 16) & 15;
897 enc->busWidth = (configReg >> 12) & 15;
/* Shared interrupt handler for the decoder block, which hosts both the
 * decoder (word 1) and the post-processor (word 60) interrupt registers.
 * For each source that fired: acknowledge by clearing its status bit in
 * hardware and complete the corresponding in-flight register set.
 * NOTE(review): the do/while framing around the 0x40001 re-read (a
 * hardware quirk workaround — decoder still busy while IRQ asserted) and
 * the IRQ_HANDLED/IRQ_NONE returns are elided from this listing. */
900 static irqreturn_t vdpu_isr(int irq, void *dev_id)
902 vpu_device *dev = (vpu_device *) dev_id;
903 u32 irq_status_dec = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
904 u32 irq_status_pp = readl(dev->hwregs + PP_INTERRUPT_REGISTER);
906 pr_debug("vdpu_isr dec %x pp %x\n", irq_status_dec, irq_status_pp);
908 if (irq_status_dec & DEC_INTERRUPT_BIT) {
909 irq_status_dec = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
910 if ((irq_status_dec & 0x40001) == 0x40001)
913 irq_status_dec = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
914 } while ((irq_status_dec & 0x40001) == 0x40001);
/* ack: clear only the dec interrupt bit, preserve the other status bits */
917 writel(irq_status_dec & (~DEC_INTERRUPT_BIT), dev->hwregs + DEC_INTERRUPT_REGISTER);
918 pr_debug("DEC IRQ received!\n");
919 if (NULL == service.reg_codec) {
920 pr_err("dec isr with no task waiting\n");
922 reg_from_run_to_done(service.reg_codec);
926 if (irq_status_pp & PP_INTERRUPT_BIT) {
/* ack the PP interrupt with its own bit (was ~DEC_INTERRUPT_BIT; the two
 * constants happen to share the value 0x100, but masking with the DEC bit
 * here was semantically wrong and fragile if either constant changes) */
928 writel(irq_status_pp & (~PP_INTERRUPT_BIT), dev->hwregs + PP_INTERRUPT_REGISTER);
929 pr_debug("PP IRQ received!\n");
931 if (NULL == service.reg_pproc) {
932 pr_err("pp isr with no task waiting\n");
934 reg_from_run_to_done(service.reg_pproc);
/* Interrupt handler for the encoder block: acknowledge by clearing the
 * encoder IRQ bit and complete the in-flight codec register set (the
 * IRQ_HANDLED/IRQ_NONE returns are in elided lines). */
941 static irqreturn_t vepu_isr(int irq, void *dev_id)
943 struct vpu_device *dev = (struct vpu_device *) dev_id;
944 u32 irq_status = readl(dev->hwregs + ENC_INTERRUPT_REGISTER);
946 pr_debug("enc_isr\n");
948 if (likely(irq_status & ENC_INTERRUPT_BIT)) {
/* ack: clear only the enc interrupt bit */
950 writel(irq_status & (~ENC_INTERRUPT_BIT), dev->hwregs + ENC_INTERRUPT_REGISTER);
951 pr_debug("ENC IRQ received!\n");
953 if (NULL == service.reg_codec) {
954 pr_err("enc isr with no task waiting\n");
956 reg_from_run_to_done(service.reg_codec);
/* Module init: set up device addresses and service state, power on, map
 * I/O, hook both IRQs, register the misc device and platform device/driver,
 * then power back off (delayed power management takes over from here).
 * Error paths unwind in reverse via the labels at the bottom (goto and
 * label lines are elided from this listing).
 * NOTE(review): decoder registers sit 0x200 into the shared VCODEC block;
 * the encoder starts at its base. */
963 static int __init vpu_service_init(void)
967 pr_debug("baseaddr = 0x%08x vdpu irq = %d vepu irq = %d\n", RK29_VCODEC_PHYS, IRQ_VDPU, IRQ_VEPU);
969 dec_dev.iobaseaddr = RK29_VCODEC_PHYS + 0x200;
970 dec_dev.iosize = DEC_IO_SIZE;
971 enc_dev.iobaseaddr = RK29_VCODEC_PHYS;
972 enc_dev.iosize = ENC_IO_SIZE;
974 INIT_LIST_HEAD(&service.waiting);
975 INIT_LIST_HEAD(&service.running);
976 INIT_LIST_HEAD(&service.done);
977 INIT_LIST_HEAD(&service.session);
978 spin_lock_init(&service.lock);
979 service.reg_codec = NULL;
980 service.reg_pproc = NULL;
981 atomic_set(&service.task_running, 0);
982 service.enabled = false;
983 service.workqueue = create_singlethread_workqueue("vpu_service");
984 if (!service.workqueue) {
985 pr_err("create_singlethread_workqueue failed\n");
/* hardware must be powered to probe IDs and read capability registers */
990 vpu_service_power_on();
992 ret = vpu_service_reserve_io();
994 pr_err("reserve io failed\n");
998 /* get the IRQ line */
999 ret = request_irq(IRQ_VDPU, vdpu_isr, IRQF_SHARED, "vdpu", (void *)&dec_dev);
1001 pr_err("can't request vdpu irq %d\n", IRQ_VDPU);
1002 goto err_req_vdpu_irq;
1005 ret = request_irq(IRQ_VEPU, vepu_isr, IRQF_SHARED, "vepu", (void *)&enc_dev);
1007 pr_err("can't request vepu irq %d\n", IRQ_VEPU);
1008 goto err_req_vepu_irq;
1011 ret = misc_register(&vpu_service_misc_device);
1013 pr_err("misc_register failed\n");
1017 platform_device_register(&vpu_service_device);
1018 platform_driver_probe(&vpu_service_driver, NULL);
/* idle until the first client shows up */
1020 vpu_service_power_off();
1021 pr_info("init success\n");
/* error unwind: */
1026 free_irq(IRQ_VEPU, (void *)&enc_dev);
1028 free_irq(IRQ_VDPU, (void *)&dec_dev);
1030 pr_info("init failed\n");
1032 vpu_service_power_off();
1033 vpu_service_release_io();
1035 destroy_workqueue(service.workqueue);
1036 pr_info("init failed\n");
/* Module exit: stop delayed work, power down, and unregister everything in
 * reverse order of init. */
1040 static void __exit vpu_service_exit(void)
1042 cancel_delayed_work(&vpu_service_power_off_work);
1043 vpu_service_power_off();
1044 destroy_workqueue(service.workqueue);
1045 platform_device_unregister(&vpu_service_device);
1046 platform_driver_unregister(&vpu_service_driver);
1047 misc_deregister(&vpu_service_misc_device);
1048 free_irq(IRQ_VEPU, (void *)&enc_dev);
1049 free_irq(IRQ_VDPU, (void *)&dec_dev);
/* Module entry/exit registration and license. */
1053 module_init(vpu_service_init);
1054 module_exit(vpu_service_exit);
1055 MODULE_LICENSE("GPL");
1057 #ifdef CONFIG_PROC_FS
1058 #include <linux/proc_fs.h>
1059 #include <linux/seq_file.h>
/* /proc/vpu_service dump: powers the hardware on (after flushing any
 * pending power-off), prints every enc/dec register and the per-session
 * queue state, then re-arms the delayed power-off. */
1061 static int proc_vpu_service_show(struct seq_file *s, void *v)
1065 vpu_reg *reg, *reg_tmp;
1066 vpu_session *session, *session_tmp;
/* readl below requires the block powered; sync-cancel the off-work first */
1068 cancel_delayed_work_sync(&vpu_service_power_off_work);
1069 vpu_service_power_on();
1070 seq_printf(s, "\nENC Registers:\n");
1071 n = enc_dev.iosize >> 2;
1072 for (i = 0; i < n; i++) {
1073 seq_printf(s, "\tswreg%d = %08X\n", i, readl(enc_dev.hwregs + i));
1075 seq_printf(s, "\nDEC Registers:\n");
1076 n = dec_dev.iosize >> 2;
1077 for (i = 0; i < n; i++) {
1078 seq_printf(s, "\tswreg%d = %08X\n", i, readl(dec_dev.hwregs + i));
1081 seq_printf(s, "\nvpu service status:\n");
1082 spin_lock_irqsave(&service.lock, flag);
1083 list_for_each_entry_safe(session, session_tmp, &service.session, list_session) {
1084 seq_printf(s, "session pid %d type %d:\n", session->pid, session->type);
1085 //seq_printf(s, "waiting reg set %d\n");
1086 list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
1087 seq_printf(s, "waiting register set\n");
1089 list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
1090 seq_printf(s, "running register set\n");
1092 list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
1093 seq_printf(s, "done register set\n");
1096 spin_unlock_irqrestore(&service.lock, flag);
1097 queue_delayed_work(service.workqueue, &vpu_service_power_off_work, POWER_OFF_DELAY);
/* seq_file open hook for /proc/vpu_service. */
1102 static int proc_vpu_service_open(struct inode *inode, struct file *file)
1104 return single_open(file, proc_vpu_service_show, NULL);
/* File operations for the proc entry (the .read = seq_read line is elided
 * from this listing). */
1107 static const struct file_operations proc_vpu_service_fops = {
1108 .open = proc_vpu_service_open,
1110 .llseek = seq_lseek,
1111 .release = single_release,
/* Create /proc/vpu_service (read-only dump); registered via late_initcall
 * so it runs after the main module init. */
1114 static int __init vpu_service_proc_init(void)
1116 proc_create("vpu_service", 0, NULL, &proc_vpu_service_fops);
1120 late_initcall(vpu_service_proc_init);
1121 #endif /* CONFIG_PROC_FS */