2 * Copyright (C) 2013 ROCKCHIP, Inc.
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
15 #include <linux/clk.h>
16 #include <linux/delay.h>
17 #include <linux/slab.h>
19 #include <linux/sched.h>
20 #include <linux/uaccess.h>
21 #include <linux/platform_device.h>
22 #include <linux/interrupt.h>
23 #include <linux/kthread.h>
24 #include <linux/poll.h>
25 #include <linux/dma-mapping.h>
27 #include <linux/rk_fb.h>
28 #include <linux/wakelock.h>
30 #include <linux/of_platform.h>
32 #include <linux/module.h>
33 #include <linux/pm_runtime.h>
34 #include <linux/rockchip/cpu.h>
35 #include <linux/rockchip/cru.h>
36 #include <asm/cacheflush.h>
38 #include "hw_iep_reg.h"
39 #include "iep_iommu_ops.h"
/* Build-time switch: when defined, this driver manages the IEP clocks and
 * runtime PM itself (see iep_power_on/iep_power_off). */
42 #define IEP_CLK_ENABLE
43 /*#define IEP_TEST_CASE*/
/* "debug" module parameter: verbosity knob, readable by all, writable by
 * owner (the int itself is declared on an elided line above). */
46 module_param(debug, int, S_IRUGO | S_IWUSR);
47 MODULE_PARM_DESC(debug,
48 "Debug level - higher value produces more verbose messages");
/* Register window size and jiffies timeouts: 2 s task-completion wait,
 * 4 s of idleness before the delayed power-off fires. */
50 #define RK_IEP_SIZE 0x1000
51 #define IEP_TIMEOUT_DELAY 2*HZ
52 #define IEP_POWER_OFF_DELAY 4*HZ
/* NOTE(review): the enclosing "struct iep_drvdata {" line is elided from this
 * view; the fields below appear to be its members — confirm against the full
 * source. */
55 struct miscdevice miscdev;
/* Extra VIO AXI clock handle (usage not visible in this chunk). */
62 struct clk *aclk_vio1;
66 /* direct path interface mode. true: enable, false: disable */
/* Delayed work that powers the block down after IEP_POWER_OFF_DELAY idle. */
69 struct delayed_work power_off_work;
71 /* clk enable or disable */
/* Suspend-blocking wake lock held while the IEP is powered (see iep_power_on). */
73 struct wake_lock wake_lock;
76 atomic_t mmu_page_fault;
77 atomic_t mmu_bus_error;
79 /* capability for this iep device */
/* Singleton driver state, filled in by iep_drv_probe(). */
84 struct iep_drvdata *iep_drvdata1 = NULL;
85 iep_service_info iep_service;
/* Tear down one register-set job: unmap and free every IOMMU memory region
 * attached to it, then unlink it from its session and status lists. */
87 static void iep_reg_deinit(struct iep_reg *reg)
89 struct iep_mem_region *mem_region = NULL, *n;
90 /* release memory region attach to this registers table.*/
91 if (iep_service.iommu_dev) {
92 list_for_each_entry_safe(mem_region, n, ®->mem_region_list,
94 iep_iommu_unmap_iommu(iep_service.iommu_info,
95 reg->session, mem_region->hdl)
96 iep_iommu_free(iep_service.iommu_info,
97 reg->session, mem_region->hdl);
98 list_del_init(&mem_region->reg_lnk);
/* Detach the job from both the per-session list and the service-wide
 * status list. */
103 list_del_init(®->session_link);
104 list_del_init(®->status_link);
/* Move a job from the waiting queues to the ready queues (both the
 * service-wide status list and the owning session's list). */
108 static void iep_reg_from_wait_to_ready(struct iep_reg *reg)
110 list_del_init(®->status_link);
111 list_add_tail(®->status_link, &iep_service.ready);
113 list_del_init(®->session_link);
114 list_add_tail(®->session_link, ®->session->ready);
/* Move a job from the ready queues to the running queues (service-wide and
 * per-session), mirroring iep_reg_from_wait_to_ready(). */
117 static void iep_reg_from_ready_to_running(struct iep_reg *reg)
119 list_del_init(®->status_link);
120 list_add_tail(®->status_link, &iep_service.running);
122 list_del_init(®->session_link);
123 list_add_tail(®->session_link, ®->session->running);
/* Retire every job on the running list: drop the per-session and global
 * running counters, and when a session has nothing left waiting, mark it
 * done and wake any waiter (poll/wait_event in iep_get_result_sync). */
126 static void iep_del_running_list(void)
131 mutex_lock(&iep_service.lock);
133 while (!list_empty(&iep_service.running)) {
135 reg = list_entry(iep_service.running.next,
136 struct iep_reg, status_link);
138 atomic_dec(®->session->task_running);
139 atomic_dec(&iep_service.total_running);
141 if (list_empty(®->session->waiting)) {
142 atomic_set(®->session->done, 1);
143 atomic_inc(®->session->num_done);
144 wake_up(®->session->wait);
151 mutex_unlock(&iep_service.lock);
/* Debug helper: print the IEP module status word and hex-dump the first
 * 0x40 words of the register window, four per row. */
154 static void iep_dump(void)
156 struct iep_status sts;
158 sts = iep_get_status(iep_drvdata1->iep_base);
160 IEP_INFO("scl_sts: %u, dil_sts %u, wyuv_sts %u, ryuv_sts %u, wrgb_sts %u, rrgb_sts %u, voi_sts %u\n",
161 sts.scl_sts, sts.dil_sts, sts.wyuv_sts, sts.ryuv_sts, sts.wrgb_sts, sts.rrgb_sts, sts.voi_sts); {
162 int *reg = (int *)iep_drvdata1->iep_base;
165 /* could not read validate data from address after base+0x40 */
166 for (i = 0; i < 0x40; i++) {
167 IEP_INFO("%08x ", reg[i]);
169 if ((i + 1) % 4 == 0) {
178 /* Caller must hold iep_service.lock */
/* Timeout variant of iep_del_running_list(): retires running jobs after a
 * wait timed out. Unlike the normal path it does not bump num_done.
 * NOTE(review): despite the comment above, this function also takes
 * iep_service.lock itself — confirm the locking contract in the full source. */
179 static void iep_del_running_list_timeout(void)
183 mutex_lock(&iep_service.lock);
185 while (!list_empty(&iep_service.running)) {
186 reg = list_entry(iep_service.running.next, struct iep_reg, status_link);
188 atomic_dec(®->session->task_running);
189 atomic_dec(&iep_service.total_running);
191 /* iep_soft_rst(iep_drvdata1->iep_base); */
195 if (list_empty(®->session->waiting)) {
196 atomic_set(®->session->done, 1);
197 wake_up(®->session->wait);
203 mutex_unlock(&iep_service.lock);
/* (Re)arm the delayed power-off on the system workqueue; fires after
 * IEP_POWER_OFF_DELAY (4*HZ) unless cancelled by renewed activity. */
206 static inline void iep_queue_power_off_work(void)
208 queue_delayed_work(system_wq, &iep_drvdata1->power_off_work, IEP_POWER_OFF_DELAY);
/* Power the IEP block up: push the idle power-off deadline forward, and if
 * not already enabled, turn on runtime PM + clocks, grab the wake lock,
 * attach the IOMMU and mark the service enabled. */
211 static void iep_power_on(void)
214 ktime_t now = ktime_get();
/* Throttle the cancel/requeue of the power-off work to once per second. */
215 if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) {
216 cancel_delayed_work_sync(&iep_drvdata1->power_off_work);
217 iep_queue_power_off_work();
/* Already powered: nothing more to do. */
221 if (iep_service.enable)
224 IEP_INFO("IEP Power ON\n");
226 /* iep_soft_rst(iep_drvdata1->iep_base); */
228 #ifdef IEP_CLK_ENABLE
229 pm_runtime_get_sync(iep_drvdata1->dev);
230 if (iep_drvdata1->pd_iep)
231 clk_prepare_enable(iep_drvdata1->pd_iep);
232 clk_prepare_enable(iep_drvdata1->aclk_iep);
233 clk_prepare_enable(iep_drvdata1->hclk_iep);
/* Block system suspend while the IEP is active. */
236 wake_lock(&iep_drvdata1->wake_lock);
238 iep_iommu_attach(iep_service.iommu_info);
240 iep_service.enable = true;
/* Power the IEP block down: warn (and briefly delay — delay on an elided
 * line) if tasks are still running, detach the IOMMU, gate clocks, drop
 * runtime PM and the wake lock, and mark the service disabled. */
243 static void iep_power_off(void)
247 if (!iep_service.enable) {
251 IEP_INFO("IEP Power OFF\n");
253 total_running = atomic_read(&iep_service.total_running);
255 IEP_WARNING("power off when %d task running!!\n", total_running);
257 IEP_WARNING("delay 50 ms for running task\n");
261 if (iep_service.iommu_dev) {
262 iep_iommu_detach(iep_service.iommu_info);
/* Disable clocks in the reverse order of iep_power_on(). */
265 #ifdef IEP_CLK_ENABLE
266 clk_disable_unprepare(iep_drvdata1->aclk_iep);
267 clk_disable_unprepare(iep_drvdata1->hclk_iep);
268 if (iep_drvdata1->pd_iep)
269 clk_disable_unprepare(iep_drvdata1->pd_iep);
270 pm_runtime_put(iep_drvdata1->dev);
273 wake_unlock(&iep_drvdata1->wake_lock);
274 iep_service.enable = false;
/* Delayed-work handler for idle power-off. Only proceeds when the service
 * lock is free (trylock); otherwise the device is busy, so re-arm the work
 * and try again later. The actual power-off call sits on an elided line
 * inside the dpi_mode check. */
277 static void iep_power_off_work(struct work_struct *work)
279 if (mutex_trylock(&iep_service.lock)) {
280 if (!iep_drvdata1->dpi_mode) {
281 IEP_INFO("iep dpi mode inactivity\n");
284 mutex_unlock(&iep_service.lock);
286 /* Come back later if the device is busy... */
287 iep_queue_power_off_work();
/* Hooks into the rk_fb framebuffer driver used by the direct-path (DPI)
 * mode below; defined in the rk_fb subsystem, not in this file. */
291 extern void rk_direct_fb_show(struct fb_info *fbi);
292 extern struct fb_info* rk_get_fb(int fb_id);
293 extern bool rk_fb_poll_wait_frame_complete(void);
294 extern int rk_fb_dpi_open(bool open);
295 extern int rk_fb_dpi_win_sel(int layer_id);
/* Configure the LCD controller for direct-path output: select the window,
 * translate the IEP pixel format into a HAL format plus fb_var bitfield
 * layout, program geometry/offsets, then display via rk_direct_fb_show().
 * NOTE(review): fb is obtained on an elided line (presumably rk_get_fb) —
 * confirm. */
297 static void iep_config_lcdc(struct iep_reg *reg)
/* Window select: layer 0 maps to window 0, anything else to window 1. */
303 fbi = reg->layer == 0 ? 0 : 1;
305 rk_fb_dpi_win_sel(fbi);
309 switch (reg->format) {
310 case IEP_FORMAT_ARGB_8888:
311 case IEP_FORMAT_ABGR_8888:
312 fmt = HAL_PIXEL_FORMAT_RGBA_8888;
313 fb->var.bits_per_pixel = 32;
315 fb->var.red.length = 8;
316 fb->var.red.offset = 16;
317 fb->var.red.msb_right = 0;
319 fb->var.green.length = 8;
320 fb->var.green.offset = 8;
321 fb->var.green.msb_right = 0;
323 fb->var.blue.length = 8;
324 fb->var.blue.offset = 0;
325 fb->var.blue.msb_right = 0;
327 fb->var.transp.length = 8;
328 fb->var.transp.offset = 24;
329 fb->var.transp.msb_right = 0;
332 case IEP_FORMAT_BGRA_8888:
333 fmt = HAL_PIXEL_FORMAT_BGRA_8888;
334 fb->var.bits_per_pixel = 32;
/* RGB565: 5-6-5 bitfield layout, red in the high bits. */
336 case IEP_FORMAT_RGB_565:
337 fmt = HAL_PIXEL_FORMAT_RGB_565;
338 fb->var.bits_per_pixel = 16;
340 fb->var.red.length = 5;
341 fb->var.red.offset = 11;
342 fb->var.red.msb_right = 0;
344 fb->var.green.length = 6;
345 fb->var.green.offset = 5;
346 fb->var.green.msb_right = 0;
348 fb->var.blue.length = 5;
349 fb->var.blue.offset = 0;
350 fb->var.blue.msb_right = 0;
353 case IEP_FORMAT_YCbCr_422_SP:
354 fmt = HAL_PIXEL_FORMAT_YCbCr_422_SP;
355 fb->var.bits_per_pixel = 16;
357 case IEP_FORMAT_YCbCr_420_SP:
358 fmt = HAL_PIXEL_FORMAT_YCrCb_NV12;
359 fb->var.bits_per_pixel = 16;
/* Formats the direct path cannot display. */
361 case IEP_FORMAT_YCbCr_422_P:
362 case IEP_FORMAT_YCrCb_422_SP:
363 case IEP_FORMAT_YCrCb_422_P:
364 case IEP_FORMAT_YCrCb_420_SP:
365 case IEP_FORMAT_YCbCr_420_P:
366 case IEP_FORMAT_YCrCb_420_P:
367 case IEP_FORMAT_RGBA_8888:
368 case IEP_FORMAT_BGR_565:
369 /* unsupported format */
370 IEP_ERR("unsupported format %d\n", reg->format);
/* Geometry: active size, plus packed nonstd word carrying y/x offsets
 * (12 bits each) and the HAL format in the low byte. */
378 fb->var.xres = reg->act_width;
379 fb->var.yres = reg->act_height;
380 fb->var.xres_virtual = reg->act_width;
381 fb->var.yres_virtual = reg->act_height;
382 fb->var.nonstd = ((reg->off_y & 0xFFF) << 20) +
383 ((reg->off_x & 0xFFF) << 8) + (fmt & 0xFF);
385 ((reg->vir_height & 0xFFF) << 20) +
386 ((reg->vir_width & 0xFFF) << 8) + 0;/*win0 xsize & ysize*/
388 rk_direct_fb_show(fb);
/* Toggle the LCDC direct-path mode to match this job: open DPI and program
 * the LCDC when entering, or close DPI and wait for the last frame when
 * leaving. The enter/leave decision comes from an elided condition on
 * reg->dpi_en (or similar) — confirm against the full source. */
391 static int iep_switch_dpi(struct iep_reg *reg)
394 if (!iep_drvdata1->dpi_mode) {
396 rk_fb_dpi_open(true);
397 iep_drvdata1->dpi_mode = true;
399 iep_config_lcdc(reg);
401 if (iep_drvdata1->dpi_mode) {
403 /* wait_lcdc_dpi_close(); */
405 rk_fb_dpi_open(false);
/* Block until the frame in flight has been fully scanned out. */
406 status = rk_fb_poll_wait_frame_complete();
408 iep_drvdata1->dpi_mode = false;
409 IEP_INFO("%s %d, iep dpi inactivated\n",
/* Write a job's shadow register image into the hardware register window,
 * in three groups: configuration, command, and address registers. */
417 static void iep_reg_copy_to_hw(struct iep_reg *reg)
421 u32 *pbase = (u32 *)iep_drvdata1->iep_base;
423 /* config registers */
424 for (i = 0; i < IEP_CNF_REG_LEN; i++)
425 pbase[IEP_CNF_REG_BASE + i] = reg->reg[IEP_CNF_REG_BASE + i];
427 /* command registers */
428 for (i = 0; i < IEP_CMD_REG_LEN; i++)
429 pbase[IEP_CMD_REG_BASE + i] = reg->reg[IEP_CMD_REG_BASE + i];
431 /* address registers */
432 for (i = 0; i < IEP_ADD_REG_LEN; i++)
433 pbase[IEP_ADD_REG_BASE + i] = reg->reg[IEP_ADD_REG_BASE + i];
435 /* dmac_flush_range(&pbase[0], &pbase[IEP_REG_LEN]); */
436 /* outer_flush_range(virt_to_phys(&pbase[0]),virt_to_phys(&pbase[IEP_REG_LEN])); */
441 /** switch fields order before the next lcdc frame start
/* Flip the deinterlace top/bottom field selection in hardware (I4O1B<->I4O1T,
 * I2O1B<->I2O1T) and re-show the frame, so alternate fields are output on
 * successive LCDC frames in DPI mode. */
443 static void iep_switch_fields_order(void)
445 void *pbase = (void *)iep_drvdata1->iep_base;
446 int mode = iep_get_deinterlace_mode(pbase);
450 case dein_mode_I4O1B:
451 iep_set_deinterlace_mode(dein_mode_I4O1T, pbase);
453 case dein_mode_I4O1T:
454 iep_set_deinterlace_mode(dein_mode_I4O1B, pbase);
456 case dein_mode_I2O1B:
457 iep_set_deinterlace_mode(dein_mode_I2O1T, pbase);
459 case dein_mode_I2O1T:
460 iep_set_deinterlace_mode(dein_mode_I2O1B, pbase);
/* NOTE(review): fb comes from an elided line (presumably rk_get_fb) —
 * confirm before relying on it. */
467 rk_direct_fb_show(fb);
469 /*iep_switch_input_address(pbase);*/
472 /* Caller must hold iep_service.lock */
/* If nothing is ready and something is waiting, promote the head waiting
 * job to ready, copy its registers to hardware, and in DPI mode flip the
 * field order for the next LCDC frame.
 * NOTE(review): despite the comment above, the function takes
 * iep_service.lock itself — confirm the locking contract. */
473 static void iep_try_set_reg(void)
477 mutex_lock(&iep_service.lock);
479 if (list_empty(&iep_service.ready)) {
480 if (!list_empty(&iep_service.waiting)) {
481 reg = list_entry(iep_service.waiting.next, struct iep_reg, status_link);
486 iep_reg_from_wait_to_ready(reg);
487 atomic_dec(&iep_service.waitcnt);
489 /*iep_soft_rst(iep_drvdata1->iep_base);*/
491 iep_reg_copy_to_hw(reg);
494 if (iep_drvdata1->dpi_mode)
495 iep_switch_fields_order();
498 mutex_unlock(&iep_service.lock);
/* If nothing is running and a job is ready, move it to running, enable the
 * frame-end interrupt, commit the config, bump the running counters, and
 * kick off the frame. */
501 static void iep_try_start_frm(void)
505 mutex_lock(&iep_service.lock);
507 if (list_empty(&iep_service.running)) {
508 if (!list_empty(&iep_service.ready)) {
509 reg = list_entry(iep_service.ready.next, struct iep_reg, status_link);
513 iep_reg_from_ready_to_running(reg);
514 iep_config_frame_end_int_en(iep_drvdata1->iep_base);
515 iep_config_done(iep_drvdata1->iep_base);
518 atomic_inc(®->session->task_running);
519 atomic_inc(&iep_service.total_running);
520 iep_config_frm_start(iep_drvdata1->iep_base);
524 mutex_unlock(&iep_service.lock);
/* Threaded IRQ bottom half (paired with iep_irq): when the hard handler has
 * flagged a frame-end interrupt via iep_int, retire finished work. In DPI
 * mode with an empty waiting queue, just flip field order; otherwise drain
 * the running list (further scheduling is on elided lines). */
527 static irqreturn_t iep_isr(int irq, void *dev_id)
529 if (atomic_read(&iep_drvdata1->iep_int) > 0) {
530 if (iep_service.enable) {
531 if (list_empty(&iep_service.waiting)) {
532 if (iep_drvdata1->dpi_mode) {
533 iep_switch_fields_order();
536 iep_del_running_list();
/* Consume the event flagged by iep_irq(). */
542 atomic_dec(&iep_drvdata1->iep_int);
/* Hard IRQ top half: if the IEP raised the frame-end interrupt, acknowledge
 * it, count the event, and defer processing to the iep_isr thread. */
548 static irqreturn_t iep_irq(int irq, void *dev_id)
551 void *pbase = (void *)iep_drvdata1->iep_base;
553 if (iep_probe_int(pbase)) {
554 iep_config_frame_end_int_clr(pbase);
555 atomic_inc(&iep_drvdata1->iep_int);
558 return IRQ_WAKE_THREAD;
/* Drop every job a session still owns — waiting, ready, and running — on
 * session teardown. The per-entry cleanup call inside each loop (presumably
 * iep_reg_deinit) sits on elided lines — confirm. */
561 static void iep_service_session_clear(iep_session *session)
563 struct iep_reg *reg, *n;
565 list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
569 list_for_each_entry_safe(reg, n, &session->ready, session_link) {
573 list_for_each_entry_safe(reg, n, &session->running, session_link) {
/* Character-device open: allocate and initialize a per-process iep_session
 * (queues, wait queue, counters), register it with the service, and stash
 * it in filp->private_data. Returns -ENOMEM (on an elided line) when the
 * allocation fails. */
578 static int iep_open(struct inode *inode, struct file *filp)
580 //DECLARE_WAITQUEUE(wait, current);
581 iep_session *session = (iep_session *)kzalloc(sizeof(iep_session),
583 if (NULL == session) {
584 IEP_ERR("unable to allocate memory for iep_session.\n");
588 session->pid = current->pid;
589 INIT_LIST_HEAD(&session->waiting);
590 INIT_LIST_HEAD(&session->ready);
591 INIT_LIST_HEAD(&session->running);
592 INIT_LIST_HEAD(&session->list_session);
593 init_waitqueue_head(&session->wait);
594 /*add_wait_queue(&session->wait, wait);*/
595 /* no need to protect */
596 mutex_lock(&iep_service.lock);
597 list_add_tail(&session->list_session, &iep_service.session);
598 mutex_unlock(&iep_service.lock);
599 atomic_set(&session->task_running, 0);
600 atomic_set(&session->num_done, 0);
602 filp->private_data = (void *)session;
/* The device is not seekable. */
604 return nonseekable_open(inode, filp);
/* Character-device release: warn if the session still has tasks running,
 * wake any waiter, then unregister the session, free its queued jobs and
 * IOMMU allocations (kfree of the session itself is on an elided line). */
607 static int iep_release(struct inode *inode, struct file *filp)
610 iep_session *session = (iep_session *)filp->private_data;
615 task_running = atomic_read(&session->task_running);
618 IEP_ERR("iep_service session %d still "
619 "has %d task running when closing\n",
620 session->pid, task_running);
625 wake_up(&session->wait);
627 mutex_lock(&iep_service.lock);
628 list_del(&session->list_session);
629 iep_service_session_clear(session);
630 iep_iommu_clear(iep_service.iommu_info, session);
632 mutex_unlock(&iep_service.lock);
/* poll()/select() support: readable once the session's "done" flag is set
 * by the completion paths (iep_del_running_list and friends). */
637 static unsigned int iep_poll(struct file *filp, poll_table *wait)
640 iep_session *session = (iep_session *)filp->private_data;
643 poll_wait(filp, &session->wait, wait);
644 if (atomic_read(&session->done))
645 mask |= POLL_IN | POLLRDNORM;
/* Block until the session's work completes or IEP_TIMEOUT_DELAY (2 s)
 * expires. On error, drain the running list; on timeout, use the timeout
 * variant. The error-code returns live on elided lines. */
650 static int iep_get_result_sync(iep_session *session)
656 ret = wait_event_timeout(session->wait,
657 atomic_read(&session->done), IEP_TIMEOUT_DELAY);
659 if (unlikely(ret < 0)) {
660 IEP_ERR("sync pid %d wait task ret %d\n", session->pid, ret);
661 iep_del_running_list();
663 } else if (0 == ret) {
664 IEP_ERR("sync pid %d wait %d task done timeout\n",
665 session->pid, atomic_read(&session->task_running));
666 iep_del_running_list_timeout();
/* Asynchronous result retrieval for IEP_GET_RESULT_ASYNC; the body is not
 * visible in this view — see the full source. */
675 static void iep_get_result_async(iep_session *session)
/* Main ioctl entry point, serialized by iep_service.mutex. Commands:
 * SET_PARAMETER (copy an IEP_MSG from userspace and queue it, rejecting
 * when >= 10 jobs wait), GET_RESULT_SYNC/ASYNC, RELEASE_CURRENT_TASK,
 * GET_IOMMU_STATE, and QUERY_CAP (case label elided). Error returns and
 * kfree(msg) live on elided lines. */
681 static long iep_ioctl(struct file *filp, uint32_t cmd, unsigned long arg)
684 iep_session *session = (iep_session *)filp->private_data;
686 if (NULL == session) {
687 IEP_ERR("%s [%d] iep thread session is null\n",
688 __FUNCTION__, __LINE__);
692 mutex_lock(&iep_service.mutex);
695 case IEP_SET_PARAMETER:
698 msg = (struct IEP_MSG *)kzalloc(sizeof(struct IEP_MSG),
701 if (copy_from_user(msg, (struct IEP_MSG *)arg,
702 sizeof(struct IEP_MSG))) {
703 IEP_ERR("copy_from_user failure\n");
/* Back-pressure: refuse new work once 10 jobs are already waiting. */
709 if (atomic_read(&iep_service.waitcnt) < 10) {
711 iep_config(session, msg);
712 atomic_inc(&iep_service.waitcnt);
714 IEP_ERR("iep task queue full\n");
719 /** REGISTER CONFIG must accord to Timing When DPI mode
/* Outside DPI mode the job can be pushed to hardware immediately
 * (the call itself is on an elided line). */
721 if (!iep_drvdata1->dpi_mode)
726 case IEP_GET_RESULT_SYNC:
727 if (0 > iep_get_result_sync(session)) {
731 case IEP_GET_RESULT_ASYNC:
732 iep_get_result_async(session);
734 case IEP_RELEASE_CURRENT_TASK:
735 iep_del_running_list_timeout();
739 case IEP_GET_IOMMU_STATE:
741 int iommu_enable = 0;
743 iommu_enable = iep_service.iommu_dev ? 1 : 0;
745 if (copy_to_user((void __user *)arg, &iommu_enable,
747 IEP_ERR("error: copy_to_user failed\n");
/* IEP_QUERY_CAP (case label elided): report device capabilities. */
753 if (copy_to_user((void __user *)arg, &iep_drvdata1->cap,
754 sizeof(struct IEP_CAP))) {
755 IEP_ERR("error: copy_to_user failed\n");
760 IEP_ERR("unknown ioctl cmd!\n");
763 mutex_unlock(&iep_service.mutex);
/* 32-bit compat ioctl path; mirrors iep_ioctl() but takes the user pointer
 * through compat_ptr(). Same commands and same 10-job back-pressure; error
 * returns and kfree(msg) live on elided lines. */
769 static long compat_iep_ioctl(struct file *filp, uint32_t cmd,
773 iep_session *session = (iep_session *)filp->private_data;
775 if (NULL == session) {
776 IEP_ERR("%s [%d] iep thread session is null\n",
781 mutex_lock(&iep_service.mutex);
784 case COMPAT_IEP_SET_PARAMETER:
788 msg = kzalloc(sizeof(*msg), GFP_KERNEL);
792 (msg, compat_ptr((compat_uptr_t)arg),
793 sizeof(struct IEP_MSG))) {
794 IEP_ERR("copy_from_user failure\n");
800 if (atomic_read(&iep_service.waitcnt) < 10) {
802 iep_config(session, msg);
803 atomic_inc(&iep_service.waitcnt);
805 IEP_ERR("iep task queue full\n");
810 /** REGISTER CONFIG must accord to Timing When DPI mode
812 if (!iep_drvdata1->dpi_mode)
817 case COMPAT_IEP_GET_RESULT_SYNC:
818 if (0 > iep_get_result_sync(session))
821 case COMPAT_IEP_GET_RESULT_ASYNC:
822 iep_get_result_async(session);
824 case COMPAT_IEP_RELEASE_CURRENT_TASK:
825 iep_del_running_list_timeout();
829 case COMPAT_IEP_GET_IOMMU_STATE:
831 int iommu_enable = 0;
833 iommu_enable = iep_service.iommu_dev ? 1 : 0;
835 if (copy_to_user((void __user *)arg, &iommu_enable,
837 IEP_ERR("error: copy_to_user failed\n");
842 case COMPAT_IEP_QUERY_CAP:
843 if (copy_to_user((void __user *)arg, &iep_drvdata1->cap,
844 sizeof(struct IEP_CAP))) {
845 IEP_ERR("error: copy_to_user failed\n");
850 IEP_ERR("unknown ioctl cmd!\n");
853 mutex_unlock(&iep_service.mutex);
/* Character-device entry points (open/poll fields are on elided lines);
 * exposed to userspace through the misc device below. */
859 struct file_operations iep_fops = {
860 .owner = THIS_MODULE,
862 .release = iep_release,
864 .unlocked_ioctl = iep_ioctl,
866 .compat_ioctl = compat_iep_ioctl,
/* Misc device registered in iep_drv_probe(); name/minor fields elided. */
870 static struct miscdevice iep_dev = {
/* Look up the system-MMU platform device by its DT compatible string;
 * returns NULL (with a console message) when the node or platform device
 * cannot be found. The success-path return is on an elided line. */
876 static struct device* rockchip_get_sysmmu_device_by_compatible(
879 struct device_node *dn = NULL;
880 struct platform_device *pd = NULL;
881 struct device *ret = NULL;
883 dn = of_find_compatible_node(NULL, NULL, compt);
885 printk("can't find device node %s \r\n", compt);
889 pd = of_find_device_by_node(dn);
891 printk("can't find platform device in device node %s \r\n",
900 #ifdef CONFIG_IOMMU_API
/* Attach the sysmmu device to the IEP device's archdata when the IOMMU API
 * is built in; the #else stub (empty body) is partially elided below. */
901 static inline void platform_set_sysmmu(struct device *iommu,
904 dev->archdata.iommu = iommu;
907 static inline void platform_set_sysmmu(struct device *iommu,
/* IOMMU fault callback: log the faulting address and every memory region of
 * the current running job, then drain the running list so the client is
 * woken instead of hanging. Return value lines are elided.
 * NOTE(review): list_entry on running.next is unguarded here — an empty
 * running list at fault time would yield a bogus pointer; confirm the
 * elided lines guard this. */
913 static int iep_sysmmu_fault_handler(struct device *dev,
914 enum rk_iommu_inttype itype,
915 unsigned long pgtable_base,
916 unsigned long fault_addr, unsigned int status)
918 struct iep_reg *reg = list_entry(iep_service.running.next,
919 struct iep_reg, status_link);
921 struct iep_mem_region *mem, *n;
923 pr_info("iep, fault addr 0x%08x\n", (u32)fault_addr);
924 list_for_each_entry_safe(mem, n,
925 ®->mem_region_list,
927 pr_info("iep, mem region [%02d] 0x%08x %ld\n",
928 i, (u32)mem->iova, mem->len);
932 pr_alert("iep, page fault occur\n");
934 iep_del_running_list();
/* Platform-driver probe: allocate driver state, initialize the service
 * queues and locks, acquire clocks, map the register window, hook the
 * threaded IRQ, fill the capability table from the DT "version" property,
 * register the misc device, wire up the IOMMU, and create the IOMMU info.
 * Error-unwind labels and several assignments sit on elided lines. */
940 static int iep_drv_probe(struct platform_device *pdev)
942 struct iep_drvdata *data;
944 struct resource *res = NULL;
946 struct device_node *np = pdev->dev.of_node;
947 struct platform_device *sub_dev = NULL;
948 struct device_node *sub_np = NULL;
950 struct device *mmu_dev = NULL;
951 of_property_read_u32(np, "iommu_enabled", &iommu_en);
953 data = (struct iep_drvdata *)devm_kzalloc(&pdev->dev,
954 sizeof(struct iep_drvdata), GFP_KERNEL);
956 IEP_ERR("failed to allocate driver data.\n");
/* Service-wide queues, counters and locks. */
962 INIT_LIST_HEAD(&iep_service.waiting);
963 INIT_LIST_HEAD(&iep_service.ready);
964 INIT_LIST_HEAD(&iep_service.running);
965 INIT_LIST_HEAD(&iep_service.done);
966 INIT_LIST_HEAD(&iep_service.session);
967 atomic_set(&iep_service.waitcnt, 0);
968 mutex_init(&iep_service.lock);
969 atomic_set(&iep_service.total_running, 0);
970 iep_service.enable = false;
/* Clock handles: power-domain (optional on some SoCs), AXI, and AHB. */
972 #ifdef IEP_CLK_ENABLE
973 data->pd_iep = devm_clk_get(&pdev->dev, "pd_iep");
974 if (IS_ERR(data->pd_iep)) {
975 IEP_ERR("failed to find iep power down clock source.\n");
979 data->aclk_iep = devm_clk_get(&pdev->dev, "aclk_iep");
980 if (IS_ERR(data->aclk_iep)) {
981 IEP_ERR("failed to find iep axi clock source.\n");
986 data->hclk_iep = devm_clk_get(&pdev->dev, "hclk_iep");
987 if (IS_ERR(data->hclk_iep)) {
988 IEP_ERR("failed to find iep ahb clock source.\n");
994 iep_service.enable = false;
995 INIT_DELAYED_WORK(&data->power_off_work, iep_power_off_work);
996 wake_lock_init(&data->wake_lock, WAKE_LOCK_SUSPEND, "iep");
/* Register window. */
998 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1000 data->iep_base = (void *)devm_ioremap_resource(&pdev->dev, res);
/* NOTE(review): devm_ioremap_resource returns ERR_PTR on failure, so a
 * NULL check alone may miss errors — confirm against the full source. */
1001 if (data->iep_base == NULL) {
1002 IEP_ERR("iep ioremap failed\n");
1007 atomic_set(&data->iep_int, 0);
1008 atomic_set(&data->mmu_page_fault, 0);
1009 atomic_set(&data->mmu_bus_error, 0);
1012 data->irq0 = platform_get_irq(pdev, 0);
1013 if (data->irq0 <= 0) {
1014 IEP_ERR("failed to get iep irq resource (%d).\n", data->irq0);
1019 /* request the IRQ */
/* iep_irq runs in hard-IRQ context, iep_isr as the threaded handler. */
1020 ret = devm_request_threaded_irq(&pdev->dev, data->irq0, iep_irq,
1021 iep_isr, IRQF_SHARED, dev_name(&pdev->dev), pdev);
1023 IEP_ERR("iep request_irq failed (%d).\n", ret);
1027 mutex_init(&iep_service.mutex);
/* Capability table, keyed on the DT "version" property. */
1029 if (of_property_read_u32(np, "version", &version)) {
1033 data->cap.scaling_supported = 0;
1034 data->cap.i4_deinterlace_supported = 1;
1035 data->cap.i2_deinterlace_supported = 1;
1036 data->cap.compression_noise_reduction_supported = 1;
1037 data->cap.sampling_noise_reduction_supported = 1;
1038 data->cap.hsb_enhancement_supported = 1;
1039 data->cap.cg_enhancement_supported = 1;
1040 data->cap.direct_path_supported = 1;
1041 data->cap.max_dynamic_width = 1920;
1042 data->cap.max_dynamic_height = 1088;
1043 data->cap.max_static_width = 8192;
1044 data->cap.max_static_height = 8192;
1045 data->cap.max_enhance_radius = 3;
1049 data->cap.scaling_supported = 1;
1052 data->cap.compression_noise_reduction_supported = 0;
1053 data->cap.sampling_noise_reduction_supported = 0;
1054 if (soc_is_rk3126b()) {
1055 data->cap.i4_deinterlace_supported = 0;
1056 data->cap.hsb_enhancement_supported = 0;
1057 data->cap.cg_enhancement_supported = 0;
1061 data->cap.max_dynamic_width = 4096;
1062 data->cap.max_dynamic_height = 2340;
1063 data->cap.max_enhance_radius = 2;
1069 platform_set_drvdata(pdev, data);
1071 ret = misc_register(&iep_dev);
1073 IEP_ERR("cannot register miscdev (%d)\n", ret);
1074 goto err_misc_register;
1077 data->dev = &pdev->dev;
1078 #ifdef IEP_CLK_ENABLE
1079 pm_runtime_enable(data->dev);
/* Prefer the "iommus" DT phandle; fall back to the legacy compatible-string
 * lookup plus fault-handler registration. */
1082 iep_service.iommu_dev = NULL;
1083 sub_np = of_parse_phandle(np, "iommus", 0);
1085 sub_dev = of_find_device_by_node(sub_np);
1086 iep_service.iommu_dev = &sub_dev->dev;
1089 if (!iep_service.iommu_dev) {
1090 mmu_dev = rockchip_get_sysmmu_device_by_compatible(
1091 IEP_IOMMU_COMPATIBLE_NAME);
1094 platform_set_sysmmu(mmu_dev, &pdev->dev);
1097 rockchip_iovmm_set_fault_handler(&pdev->dev,
1098 iep_sysmmu_fault_handler);
1100 iep_service.iommu_dev = mmu_dev;
1102 of_property_read_u32(np, "allocator", (u32 *)&iep_service.alloc_type);
1104 iep_service.iommu_info = iep_iommu_info_create(data->dev,
1105 iep_service.iommu_dev,
1106 iep_service.alloc_type);
1109 IEP_INFO("IEP Driver loaded succesfully\n");
/* Error unwind (labels elided): undo IRQ, mapping, wake lock, clocks. */
1114 free_irq(data->irq0, pdev);
1117 if (data->iep_base) {
1118 devm_ioremap_release(&pdev->dev, res);
1120 devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
1123 wake_lock_destroy(&data->wake_lock);
1124 #ifdef IEP_CLK_ENABLE
/* Platform-driver remove: destroy the IOMMU info, release the wake lock,
 * deregister the misc device, free the IRQ and register window, put the
 * clocks, and disable runtime PM — the reverse of iep_drv_probe(). */
1130 static int iep_drv_remove(struct platform_device *pdev)
1132 struct iep_drvdata *data = platform_get_drvdata(pdev);
1133 struct resource *res;
1135 iep_iommu_info_destroy(iep_service.iommu_info);
1136 iep_service.iommu_info = NULL;
1138 wake_lock_destroy(&data->wake_lock);
1140 misc_deregister(&(data->miscdev));
1141 free_irq(data->irq0, &data->miscdev);
1142 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1143 devm_ioremap_release(&pdev->dev, res);
1144 devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
1146 #ifdef IEP_CLK_ENABLE
1148 devm_clk_put(&pdev->dev, data->aclk_iep);
1151 devm_clk_put(&pdev->dev, data->hclk_iep);
1154 devm_clk_put(&pdev->dev, data->pd_iep);
1156 pm_runtime_disable(data->dev);
1162 #if defined(CONFIG_OF)
/* Device-tree match table: binds this driver to "rockchip,iep" nodes. */
1163 static const struct of_device_id iep_dt_ids[] = {
1164 { .compatible = "rockchip,iep", },
/* Platform-driver descriptor (driver name field elided). */
1169 static struct platform_driver iep_driver = {
1170 .probe = iep_drv_probe,
1171 .remove = iep_drv_remove,
1173 .owner = THIS_MODULE,
1175 #if defined(CONFIG_OF)
1176 .of_match_table = of_match_ptr(iep_dt_ids),
1181 #ifdef CONFIG_PROC_FS
1182 #include <linux/proc_fs.h>
1183 #include <linux/seq_file.h>
/* /proc/iep seq_file renderer: prints the IEP status word and a hex dump of
 * the first 0x40 register words, four per row — the seq_file twin of
 * iep_dump(). Reads the hardware without taking iep_service.mutex (the
 * lock/unlock calls are deliberately commented out). */
1185 static int proc_iep_show(struct seq_file *s, void *v)
1187 struct iep_status sts;
1188 //mutex_lock(&iep_service.mutex);
1190 seq_printf(s, "\nIEP Modules Status:\n");
1191 sts = iep_get_status(iep_drvdata1->iep_base);
1192 seq_printf(s, "scl_sts: %u, dil_sts %u, wyuv_sts %u, "
1193 "ryuv_sts %u, wrgb_sts %u, rrgb_sts %u, voi_sts %u\n",
1194 sts.scl_sts, sts.dil_sts, sts.wyuv_sts, sts.ryuv_sts,
1195 sts.wrgb_sts, sts.rrgb_sts, sts.voi_sts); {
1196 int *reg = (int *)iep_drvdata1->iep_base;
1199 /* could not read validate data from address after base+0x40 */
1200 for (i = 0; i < 0x40; i++) {
1201 seq_printf(s, "%08x ", reg[i]);
1203 if ((i + 1) % 4 == 0)
1204 seq_printf(s, "\n");
1207 seq_printf(s, "\n");
1210 //mutex_unlock(&iep_service.mutex);
/* single_open() adapter for /proc/iep. */
1215 static int proc_iep_open(struct inode *inode, struct file *file)
1217 return single_open(file, proc_iep_show, NULL);
/* /proc/iep file operations (.read field elided — presumably seq_read). */
1220 static const struct file_operations proc_iep_fops = {
1221 .open = proc_iep_open,
1223 .llseek = seq_lseek,
1224 .release = single_release,
/* Create and remove the read-only /proc/iep entry. */
1227 static int __init iep_proc_init(void)
1229 proc_create("iep", 0, NULL, &proc_iep_fops);
1233 static void __exit iep_proc_release(void)
1235 remove_proc_entry("iep", NULL);
1239 #ifdef IEP_TEST_CASE
1240 void iep_test_case0(void);
/* Module init: register the platform driver, set up /proc/iep when procfs
 * is available, and optionally run the self-test (calls on elided lines). */
1243 static int __init iep_init(void)
1247 if ((ret = platform_driver_register(&iep_driver)) != 0) {
1248 IEP_ERR("Platform device register failed (%d).\n", ret);
1252 #ifdef CONFIG_PROC_FS
1256 IEP_INFO("Module initialized.\n");
1258 #ifdef IEP_TEST_CASE
/* Module exit: tear down /proc/iep and unregister the driver. */
1265 static void __exit iep_exit(void)
1267 IEP_ERR("%s IN\n", __func__);
1268 #ifdef CONFIG_PROC_FS
1273 platform_driver_unregister(&iep_driver);
1276 module_init(iep_init);
1277 module_exit(iep_exit);
1279 /* Module information */
1280 MODULE_AUTHOR("ljf@rock-chips.com");
1281 MODULE_DESCRIPTION("Driver for iep device");
1282 MODULE_LICENSE("GPL");
1284 #ifdef IEP_TEST_CASE
/* Built-in self-test (compiled only with IEP_TEST_CASE): deinterlaces a
 * bundled 480x480 YUV420SP frame in I2O1 mode and byte-compares the output
 * against a golden reference. Uses physical addresses from virt_to_phys,
 * so it only works without the IOMMU path. */
1286 #include "yuv420sp_480x480_interlaced.h"
1287 #include "yuv420sp_480x480_deinterlaced_i2o1.h"
1289 //unsigned char tmp_buf[480*480*3/2]
1291 void iep_test_case0(void)
1294 iep_session session;
1295 unsigned int phy_src, phy_dst, phy_tmp;
1298 unsigned char *tmp_buf;
/* 480*480*3/2 = one YUV420SP frame. NOTE(review): kmalloc result is not
 * NULL-checked in the visible lines. */
1300 tmp_buf = kmalloc(480 * 480 * 3 / 2, GFP_KERNEL);
/* Build a throwaway session the same way iep_open() does. */
1302 session.pid = current->pid;
1303 INIT_LIST_HEAD(&session.waiting);
1304 INIT_LIST_HEAD(&session.ready);
1305 INIT_LIST_HEAD(&session.running);
1306 INIT_LIST_HEAD(&session.list_session);
1307 init_waitqueue_head(&session.wait);
1308 list_add_tail(&session.list_session, &iep_service.session);
1309 atomic_set(&session.task_running, 0);
1310 atomic_set(&session.num_done, 0);
1312 memset(&msg, 0, sizeof(struct IEP_MSG));
1313 memset(tmp_buf, 0xCC, 480 * 480 * 3 / 2);
/* Flush CPU caches so the device sees the poisoned buffer contents. */
1315 dmac_flush_range(&tmp_buf[0], &tmp_buf[480 * 480 * 3 / 2]);
1316 outer_flush_range(virt_to_phys(&tmp_buf[0]), virt_to_phys(&tmp_buf[480 * 480 * 3 / 2]));
1318 phy_src = virt_to_phys(&yuv420sp_480x480_interlaced[0]);
1319 phy_tmp = virt_to_phys(&tmp_buf[0]);
1320 phy_dst = virt_to_phys(&yuv420sp_480x480_deinterlaced_i2o1[0]);
1322 dmac_flush_range(&yuv420sp_480x480_interlaced[0], &yuv420sp_480x480_interlaced[480 * 480 * 3 / 2]);
1323 outer_flush_range(virt_to_phys(&yuv420sp_480x480_interlaced[0]), virt_to_phys(&yuv420sp_480x480_interlaced[480 * 480 * 3 / 2]));
1325 IEP_INFO("*********** IEP MSG GENARATE ************\n");
/* Source: the interlaced input frame (UV plane follows 480*480 Y bytes). */
1327 msg.src.act_w = 480;
1328 msg.src.act_h = 480;
1331 msg.src.vir_w = 480;
1332 msg.src.vir_h = 480;
1333 msg.src.format = IEP_FORMAT_YCbCr_420_SP;
1334 msg.src.mem_addr = (uint32_t *)phy_src;
1335 msg.src.uv_addr = (uint32_t *)(phy_src + 480 * 480);
/* Destination: the scratch buffer to be compared with the golden frame. */
1338 msg.dst.act_w = 480;
1339 msg.dst.act_h = 480;
1342 msg.dst.vir_w = 480;
1343 msg.dst.vir_h = 480;
1344 msg.dst.format = IEP_FORMAT_YCbCr_420_SP;
1345 msg.dst.mem_addr = (uint32_t *)phy_tmp;
1346 msg.dst.uv_addr = (uint32_t *)(phy_tmp + 480 * 480);
1349 msg.dein_mode = IEP_DEINTERLACE_MODE_I2O1;
1350 msg.field_order = FIELD_ORDER_BOTTOM_FIRST;
1352 IEP_INFO("*********** IEP TEST CASE 0 ************\n");
1354 iep_config(&session, &msg);
1356 if (0 > iep_get_result_sync(&session)) {
1357 IEP_INFO("%s failed, timeout\n", __func__);
/* Invalidate caches before the CPU reads back what the device wrote. */
1363 dmac_flush_range(&tmp_buf[0], &tmp_buf[480 * 480 * 3 / 2]);
1364 outer_flush_range(virt_to_phys(&tmp_buf[0]), virt_to_phys(&tmp_buf[480 * 480 * 3 / 2]));
1366 IEP_INFO("*********** RESULT CHECKING ************\n");
1368 for (i = 0; i < 480 * 480 * 3 / 2; i++) {
1369 if (tmp_buf[i] != yuv420sp_480x480_deinterlaced_i2o1[i]) {
1370 IEP_INFO("diff occur position %d, 0x%02x 0x%02x\n", i, tmp_buf[i], yuv420sp_480x480_deinterlaced_i2o1[i]);
/* Loop ran to completion only if every byte matched. */
1379 if (i == 480 * 480 * 3 / 2)
1380 IEP_INFO("IEP pass the checking\n");