2 * Rockchip RK3288 VPU codec driver
4 * Copyright (C) 2014 Google, Inc.
5 * Tomasz Figa <tfiga@chromium.org>
7 * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
9 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
11 * This software is licensed under the terms of the GNU General Public
12 * License version 2, as published by the Free Software Foundation, and
13 * may be copied, distributed, and modified under those terms.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
21 #include "rk3288_vpu_common.h"
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/sched.h>
26 #include <linux/slab.h>
27 #include <linux/videodev2.h>
28 #include <media/v4l2-event.h>
29 #include <linux/workqueue.h>
31 #include <media/videobuf2-core.h>
32 #include <media/videobuf2-dma-contig.h>
34 #include "rk3288_vpu_dec.h"
35 #include "rk3288_vpu_enc.h"
36 #include "rk3288_vpu_hw.h"
/*
 * "debug" module parameter: higher values produce more verbose log output.
 * Readable by everyone, writable by root via sysfs (S_IRUGO | S_IWUSR).
 * NOTE(review): the variable itself is declared outside this excerpt.
 */
39 module_param(debug, int, S_IRUGO | S_IWUSR);
40 MODULE_PARM_DESC(debug,
41 "Debug level - higher value produces more verbose messages");
44 * DMA coherent helpers.
/*
 * Allocate a DMA-coherent auxiliary buffer of @size bytes for @vpu.
 * On success buf->cpu holds the kernel virtual address and buf->dma the
 * device address. Error handling / size bookkeeping lines are outside
 * this excerpt.
 */
47 int rk3288_vpu_aux_buf_alloc(struct rk3288_vpu_dev *vpu,
48 struct rk3288_vpu_aux_buf *buf, size_t size)
50 buf->cpu = dma_alloc_coherent(vpu->dev, size, &buf->dma, GFP_KERNEL);
/*
 * Release a buffer previously obtained with rk3288_vpu_aux_buf_alloc().
 * Relies on buf->size having been recorded at allocation time
 * (the assignment is outside this excerpt).
 */
58 void rk3288_vpu_aux_buf_free(struct rk3288_vpu_dev *vpu,
59 struct rk3288_vpu_aux_buf *buf)
61 dma_free_coherent(vpu->dev, buf->size, buf->cpu, buf->dma);
/* Invoke the codec-specific prepare_run hook, if the ops table provides one. */
72 static void rk3288_vpu_prepare_run(struct rk3288_vpu_ctx *ctx)
74 if (ctx->run_ops->prepare_run)
75 ctx->run_ops->prepare_run(ctx);
/*
 * Take the first source and destination buffers off @ctx's queues for
 * the upcoming hardware run. Caller must hold dev->irqlock (hence the
 * "__...locked" naming convention).
 */
78 static void __rk3288_vpu_dequeue_run_locked(struct rk3288_vpu_ctx *ctx)
80 struct rk3288_vpu_buf *src, *dst;
83 * Since ctx was dequeued from ready_ctxs list, we know that it has
84 * at least one buffer in each queue.
86 src = list_first_entry(&ctx->src_queue, struct rk3288_vpu_buf, list);
87 dst = list_first_entry(&ctx->dst_queue, struct rk3288_vpu_buf, list);
/*
 * Hardware workaround (WAR): when the previous run was a decode and the
 * picked context @ctx is an encoder, return the preallocated dummy
 * encode context instead, so a dummy encode reinitializes internal
 * hardware state before the real encode (see comment in try_run).
 * The fall-through "return ctx" path is outside this excerpt.
 */
96 static struct rk3288_vpu_ctx *
97 rk3288_vpu_encode_after_decode_war(struct rk3288_vpu_ctx *ctx)
99 struct rk3288_vpu_dev *dev = ctx->dev;
101 if (dev->was_decoding && rk3288_vpu_ctx_is_encoder(ctx))
102 return dev->dummy_encode_ctx;
/*
 * Try to schedule the next ready context on the hardware.
 * Under dev->irqlock: bail out if no context is ready or the device is
 * suspended; atomically claim the hardware via the VPU_RUNNING bit (if
 * already set, the completion path will reschedule); pick the head of
 * ready_ctxs, possibly substituting the dummy encode context (see the
 * WAR comment below), then dequeue buffers and record decode/encode
 * state before dropping the lock and preparing the run.
 */
107 static void rk3288_vpu_try_run(struct rk3288_vpu_dev *dev)
109 struct rk3288_vpu_ctx *ctx = NULL;
114 spin_lock_irqsave(&dev->irqlock, flags);
116 if (list_empty(&dev->ready_ctxs) ||
117 test_bit(VPU_SUSPENDED, &dev->state))
121 if (test_and_set_bit(VPU_RUNNING, &dev->state))
123 * The hardware is already running. We will pick another
124 * run after we get the notification in rk3288_vpu_run_done().
128 ctx = list_entry(dev->ready_ctxs.next, struct rk3288_vpu_ctx, list);
131 * WAR for corrupted hardware state when encoding directly after
132 * certain decoding runs.
134 * If previous context was decoding and currently picked one is
135 * encoding then we need to execute a dummy encode with proper
136 * settings to reinitialize certain internal hardware state.
138 ctx = rk3288_vpu_encode_after_decode_war(ctx);
/* The dummy encode context stays off the ready list and owns no
 * user buffers, so it is not dequeued. */
140 if (!rk3288_vpu_ctx_is_dummy_encode(ctx)) {
141 list_del_init(&ctx->list);
142 __rk3288_vpu_dequeue_run_locked(ctx);
145 dev->current_ctx = ctx;
146 dev->was_decoding = !rk3288_vpu_ctx_is_encoder(ctx);
149 spin_unlock_irqrestore(&dev->irqlock, flags);
152 rk3288_vpu_prepare_run(ctx);
/*
 * Add @ctx to dev->ready_ctxs when it has at least one buffer on each
 * of its queues and is not queued already. Caller holds dev->irqlock.
 */
159 static void __rk3288_vpu_try_context_locked(struct rk3288_vpu_dev *dev,
160 struct rk3288_vpu_ctx *ctx)
162 if (!list_empty(&ctx->list))
163 /* Context already queued. */
166 if (!list_empty(&ctx->dst_queue) && !list_empty(&ctx->src_queue))
167 list_add_tail(&ctx->list, &dev->ready_ctxs);
/*
 * Hardware-run completion path. Calls the codec-specific run_done hook,
 * then (unless this was the dummy encode context) completes the current
 * src/dst vb2 buffers with @result, propagating the timestamp from the
 * source to the destination buffer. Afterwards it clears current_ctx,
 * wakes waiters (used by suspend), requeues @ctx if it is still ready,
 * releases the VPU_RUNNING bit and tries to schedule another run.
 */
170 void rk3288_vpu_run_done(struct rk3288_vpu_ctx *ctx,
171 enum vb2_buffer_state result)
173 struct rk3288_vpu_dev *dev = ctx->dev;
178 if (ctx->run_ops->run_done)
179 ctx->run_ops->run_done(ctx, result);
181 if (!rk3288_vpu_ctx_is_dummy_encode(ctx)) {
182 struct vb2_buffer *src = &ctx->run.src->b;
183 struct vb2_buffer *dst = &ctx->run.dst->b;
185 dst->v4l2_buf.timestamp = src->v4l2_buf.timestamp;
186 vb2_buffer_done(&ctx->run.src->b, result);
187 vb2_buffer_done(&ctx->run.dst->b, result);
190 dev->current_ctx = NULL;
191 wake_up_all(&dev->run_wq);
193 spin_lock_irqsave(&dev->irqlock, flags);
195 __rk3288_vpu_try_context_locked(dev, ctx);
196 clear_bit(VPU_RUNNING, &dev->state);
198 spin_unlock_irqrestore(&dev->irqlock, flags);
200 /* Try scheduling another run to see if we have anything left to do. */
201 rk3288_vpu_try_run(dev);
/*
 * Public wrapper: queue @ctx on the ready list (taking dev->irqlock),
 * then kick the scheduler.
 */
206 void rk3288_vpu_try_context(struct rk3288_vpu_dev *dev,
207 struct rk3288_vpu_ctx *ctx)
213 spin_lock_irqsave(&dev->irqlock, flags);
215 __rk3288_vpu_try_context_locked(dev, ctx);
217 spin_unlock_irqrestore(&dev->irqlock, flags);
219 rk3288_vpu_try_run(dev);
225 * Control registration.
/* True for driver-private control IDs within the MPEG control class. */
228 #define IS_VPU_PRIV(x) ((V4L2_CTRL_ID2CLASS(x) == V4L2_CTRL_CLASS_MPEG) && \
229 V4L2_CTRL_DRIVER_PRIV(x))
/*
 * Register the codec's V4L2 controls on ctx->ctrl_handler.
 * Driver-private / custom controls go through v4l2_ctrl_new_custom()
 * with a v4l2_ctrl_config built from @controls[i]; standard controls use
 * v4l2_ctrl_new_std()/v4l2_ctrl_new_std_menu(). @get_menu supplies menu
 * strings for custom menu controls. Returns the handler error code on
 * failure. (Some lines of this function fall outside this excerpt.)
 */
231 int rk3288_vpu_ctrls_setup(struct rk3288_vpu_ctx *ctx,
232 const struct v4l2_ctrl_ops *ctrl_ops,
233 struct rk3288_vpu_control *controls,
235 const char *const *(*get_menu)(u32))
237 struct v4l2_ctrl_config cfg;
240 if (num_ctrls > ARRAY_SIZE(ctx->ctrls)) {
241 vpu_err("context control array not large enough\n");
245 v4l2_ctrl_handler_init(&ctx->ctrl_handler, num_ctrls);
246 if (ctx->ctrl_handler.error) {
247 vpu_err("v4l2_ctrl_handler_init failed\n");
248 return ctx->ctrl_handler.error;
251 for (i = 0; i < num_ctrls; i++) {
252 if (IS_VPU_PRIV(controls[i].id)
253 || controls[i].id >= V4L2_CID_CUSTOM_BASE
254 || controls[i].type == V4L2_CTRL_TYPE_PRIVATE) {
255 memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
258 cfg.id = controls[i].id;
259 cfg.min = controls[i].minimum;
260 cfg.max = controls[i].maximum;
261 cfg.max_stores = controls[i].max_stores;
262 cfg.def = controls[i].default_value;
263 cfg.name = controls[i].name;
264 cfg.type = controls[i].type;
265 cfg.elem_size = controls[i].elem_size;
266 memcpy(cfg.dims, controls[i].dims, sizeof(cfg.dims));
268 if (cfg.type == V4L2_CTRL_TYPE_MENU) {
/* FIX: was a self-assignment (cfg.menu_skip_mask = cfg.menu_skip_mask),
 * which left the mask zeroed by the memset above; copy the skip mask
 * from the control descriptor like every other field. */
269 cfg.menu_skip_mask = controls[i].menu_skip_mask;
270 cfg.qmenu = get_menu(cfg.id);
272 cfg.step = controls[i].step;
275 ctx->ctrls[i] = v4l2_ctrl_new_custom(
276 &ctx->ctrl_handler, &cfg, NULL);
278 if (controls[i].type == V4L2_CTRL_TYPE_MENU) {
280 v4l2_ctrl_new_std_menu
290 v4l2_ctrl_new_std(&ctx->ctrl_handler,
301 if (ctx->ctrl_handler.error) {
302 vpu_err("Adding control (%d) failed\n", i);
303 return ctx->ctrl_handler.error;
/* Flags are only applied when the control was actually created. */
306 if (controls[i].is_volatile && ctx->ctrls[i])
307 ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE;
308 if (controls[i].is_read_only && ctx->ctrls[i])
309 ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_READ_ONLY;
310 if (controls[i].can_store && ctx->ctrls[i])
311 ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_CAN_STORE;
314 v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
315 ctx->num_ctrls = num_ctrls;
/* Free the control handler and clear the cached per-context control pointers. */
319 void rk3288_vpu_ctrls_delete(struct rk3288_vpu_ctx *ctx)
323 v4l2_ctrl_handler_free(&ctx->ctrl_handler);
324 for (i = 0; i < ctx->num_ctrls; i++)
325 ctx->ctrls[i] = NULL;
329 * V4L2 file operations.
/*
 * V4L2 open(): allocate and initialize a per-file context.
 * Sets up the v4l2_fh, the empty buffer lists, the codec-specific state
 * (encoder or decoder depending on which video node was opened) and two
 * vb2 queues (CAPTURE and OUTPUT, both MPLANE, dma-contig backed).
 * Error unwinding labels below release resources in reverse order.
 */
332 static int rk3288_vpu_open(struct file *filp)
334 struct video_device *vdev = video_devdata(filp);
335 struct rk3288_vpu_dev *dev = video_drvdata(filp);
336 struct rk3288_vpu_ctx *ctx = NULL;
341 * We do not need any extra locking here, because we operate only
342 * on local data here, except reading few fields from dev, which
343 * do not change through device's lifetime (which is guaranteed by
344 * reference on module from open()) and V4L2 internal objects (such
345 * as vdev and ctx->fh), which have proper locking done in respective
346 * helper functions used here.
351 /* Allocate memory for context */
352 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
358 v4l2_fh_init(&ctx->fh, video_devdata(filp));
359 filp->private_data = &ctx->fh;
360 v4l2_fh_add(&ctx->fh);
362 INIT_LIST_HEAD(&ctx->src_queue);
363 INIT_LIST_HEAD(&ctx->dst_queue);
364 INIT_LIST_HEAD(&ctx->list);
366 if (vdev == dev->vfd_enc) {
367 /* only for encoder */
368 ret = rk3288_vpu_enc_init(ctx);
370 vpu_err("Failed to initialize encoder context\n");
373 } else if (vdev == dev->vfd_dec) {
374 /* only for decoder */
375 ret = rk3288_vpu_dec_init(ctx);
377 vpu_err("Failed to initialize decoder context\n");
384 ctx->fh.ctrl_handler = &ctx->ctrl_handler;
386 /* Init videobuf2 queue for CAPTURE */
388 q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
389 q->drv_priv = &ctx->fh;
390 q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
391 q->lock = &dev->vpu_mutex;
392 q->buf_struct_size = sizeof(struct rk3288_vpu_buf);
394 if (vdev == dev->vfd_enc) {
395 q->ops = get_enc_queue_ops();
396 } else if (vdev == dev->vfd_dec) {
397 q->ops = get_dec_queue_ops();
/* Decoder capture buffers are both written and read by the hardware. */
398 q->use_dma_bidirectional = 1;
401 q->mem_ops = &vb2_dma_contig_memops;
402 q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
404 ret = vb2_queue_init(q);
406 vpu_err("Failed to initialize videobuf2 queue(capture)\n");
407 goto err_enc_dec_exit;
410 /* Init videobuf2 queue for OUTPUT */
412 q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
413 q->drv_priv = &ctx->fh;
414 q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
415 q->lock = &dev->vpu_mutex;
416 q->buf_struct_size = sizeof(struct rk3288_vpu_buf);
418 if (vdev == dev->vfd_enc)
419 q->ops = get_enc_queue_ops();
420 else if (vdev == dev->vfd_dec)
421 q->ops = get_dec_queue_ops();
423 q->mem_ops = &vb2_dma_contig_memops;
424 q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
426 ret = vb2_queue_init(q);
428 vpu_err("Failed to initialize videobuf2 queue(output)\n");
429 goto err_vq_dst_release;
/* Error unwinding: release in reverse order of initialization. */
437 vb2_queue_release(&ctx->vq_dst);
439 if (vdev == dev->vfd_enc)
440 rk3288_vpu_enc_exit(ctx);
441 else if (vdev == dev->vfd_dec)
442 rk3288_vpu_dec_exit(ctx);
444 v4l2_fh_del(&ctx->fh);
445 v4l2_fh_exit(&ctx->fh);
/*
 * V4L2 release(): tear down the per-file context created in open().
 * Releasing both vb2 queues stops streaming first, then the v4l2_fh and
 * the codec-specific state are destroyed.
 */
453 static int rk3288_vpu_release(struct file *filp)
455 struct rk3288_vpu_ctx *ctx = fh_to_ctx(filp->private_data);
456 struct video_device *vdev = video_devdata(filp);
457 struct rk3288_vpu_dev *dev = ctx->dev;
460 * No need for extra locking because this was the last reference
467 * vb2_queue_release() ensures that streaming is stopped, which
468 * in turn means that there are no frames still being processed
471 vb2_queue_release(&ctx->vq_src);
472 vb2_queue_release(&ctx->vq_dst);
474 v4l2_fh_del(&ctx->fh);
475 v4l2_fh_exit(&ctx->fh);
477 if (vdev == dev->vfd_enc)
478 rk3288_vpu_enc_exit(ctx);
479 else if (vdev == dev->vfd_dec)
480 rk3288_vpu_dec_exit(ctx);
/*
 * V4L2 poll(): report readiness of both queues plus pending events.
 * A done source buffer yields POLLOUT|POLLWRNORM, a done destination
 * buffer yields POLLIN|POLLRDNORM. Both done lists are inspected under
 * their respective done_lock spinlocks.
 */
489 static unsigned int rk3288_vpu_poll(struct file *filp,
490 struct poll_table_struct *wait)
492 struct rk3288_vpu_ctx *ctx = fh_to_ctx(filp->private_data);
493 struct vb2_queue *src_q, *dst_q;
494 struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
500 src_q = &ctx->vq_src;
501 dst_q = &ctx->vq_dst;
504 * There has to be at least one buffer queued on each queued_list, which
505 * means either in driver already or waiting for driver to claim it
506 * and start processing.
508 if ((!vb2_is_streaming(src_q) || list_empty(&src_q->queued_list)) &&
509 (!vb2_is_streaming(dst_q) || list_empty(&dst_q->queued_list))) {
510 vpu_debug(0, "src q streaming %d, dst q streaming %d, src list empty(%d), dst list empty(%d)\n",
511 src_q->streaming, dst_q->streaming,
512 list_empty(&src_q->queued_list),
513 list_empty(&dst_q->queued_list));
/* Register the wait queues we may need to be woken from. */
517 poll_wait(filp, &ctx->fh.wait, wait);
518 poll_wait(filp, &src_q->done_wq, wait);
519 poll_wait(filp, &dst_q->done_wq, wait);
521 if (v4l2_event_pending(&ctx->fh))
524 spin_lock_irqsave(&src_q->done_lock, flags);
526 if (!list_empty(&src_q->done_list))
527 src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
530 if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE ||
531 src_vb->state == VB2_BUF_STATE_ERROR))
532 rc |= POLLOUT | POLLWRNORM;
534 spin_unlock_irqrestore(&src_q->done_lock, flags);
536 spin_lock_irqsave(&dst_q->done_lock, flags);
538 if (!list_empty(&dst_q->done_list))
539 dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
542 if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE ||
543 dst_vb->state == VB2_BUF_STATE_ERROR))
544 rc |= POLLIN | POLLRDNORM;
546 spin_unlock_irqrestore(&dst_q->done_lock, flags);
/*
 * V4L2 mmap(): multiplex the two vb2 queues on one mapping offset space.
 * Offsets below DST_QUEUE_OFF_BASE map the source (OUTPUT) queue;
 * offsets at or above it are rebased and map the capture queue.
 */
551 static int rk3288_vpu_mmap(struct file *filp, struct vm_area_struct *vma)
553 struct rk3288_vpu_ctx *ctx = fh_to_ctx(filp->private_data);
554 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
559 if (offset < DST_QUEUE_OFF_BASE) {
560 vpu_debug(4, "mmaping source\n");
562 ret = vb2_mmap(&ctx->vq_src, vma);
563 } else { /* capture */
564 vpu_debug(4, "mmaping destination\n");
566 vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
567 ret = vb2_mmap(&ctx->vq_dst, vma);
/* File operations shared by the encoder and decoder video nodes. */
575 static const struct v4l2_file_operations rk3288_vpu_fops = {
576 .owner = THIS_MODULE,
577 .open = rk3288_vpu_open,
578 .release = rk3288_vpu_release,
579 .poll = rk3288_vpu_poll,
580 .unlocked_ioctl = video_ioctl2,
581 .mmap = rk3288_vpu_mmap,
/*
 * Platform probe: bring up the device end-to-end.
 * Order: devm-allocated device struct and locks -> hardware probe ->
 * two vb2-dma-contig allocator contexts (one without kernel mapping,
 * one without huge pages) -> v4l2_device registration -> dummy encode
 * context (for the encode-after-decode WAR) -> encoder video node ->
 * decoder video node. Error labels unwind in reverse order.
 */
588 static int rk3288_vpu_probe(struct platform_device *pdev)
590 struct rk3288_vpu_dev *vpu = NULL;
591 DEFINE_DMA_ATTRS(attrs_novm);
592 DEFINE_DMA_ATTRS(attrs_nohugepage);
593 struct video_device *vfd;
598 vpu = devm_kzalloc(&pdev->dev, sizeof(*vpu), GFP_KERNEL);
602 vpu->dev = &pdev->dev;
604 mutex_init(&vpu->vpu_mutex);
605 spin_lock_init(&vpu->irqlock);
606 INIT_LIST_HEAD(&vpu->ready_ctxs);
607 init_waitqueue_head(&vpu->run_wq);
609 ret = rk3288_vpu_hw_probe(vpu);
611 dev_err(&pdev->dev, "vcodec_hw_probe failed\n");
616 * We'll do mostly sequential access, so sacrifice TLB efficiency for
619 dma_set_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, &attrs_novm);
621 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs_novm);
622 vpu->alloc_ctx = vb2_dma_contig_init_ctx_attrs(&pdev->dev,
624 if (IS_ERR(vpu->alloc_ctx)) {
625 ret = PTR_ERR(vpu->alloc_ctx);
629 dma_set_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, &attrs_nohugepage);
630 vpu->alloc_ctx_vm = vb2_dma_contig_init_ctx_attrs(&pdev->dev,
632 if (IS_ERR(vpu->alloc_ctx_vm)) {
633 ret = PTR_ERR(vpu->alloc_ctx_vm);
634 goto err_dma_contig_vm;
637 ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev);
639 dev_err(&pdev->dev, "Failed to register v4l2 device\n");
640 goto err_v4l2_dev_reg;
643 platform_set_drvdata(pdev, vpu);
645 ret = rk3288_vpu_enc_init_dummy_ctx(vpu);
647 dev_err(&pdev->dev, "Failed to create dummy encode context\n");
/* Encoder video node. */
652 vfd = video_device_alloc();
654 v4l2_err(&vpu->v4l2_dev, "Failed to allocate video device\n");
659 vfd->fops = &rk3288_vpu_fops;
660 vfd->ioctl_ops = get_enc_v4l2_ioctl_ops();
661 vfd->release = video_device_release;
662 vfd->lock = &vpu->vpu_mutex;
663 vfd->v4l2_dev = &vpu->v4l2_dev;
664 vfd->vfl_dir = VFL_DIR_M2M;
665 snprintf(vfd->name, sizeof(vfd->name), "%s", RK3288_VPU_ENC_NAME);
668 video_set_drvdata(vfd, vpu);
670 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
672 v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
673 video_device_release(vfd);
677 v4l2_info(&vpu->v4l2_dev,
678 "Rockchip RK3288 VPU encoder registered as /vpu/video%d\n",
/* Decoder video node. */
682 vfd = video_device_alloc();
684 v4l2_err(&vpu->v4l2_dev, "Failed to allocate video device\n");
689 vfd->fops = &rk3288_vpu_fops;
690 vfd->ioctl_ops = get_dec_v4l2_ioctl_ops();
691 vfd->release = video_device_release;
692 vfd->lock = &vpu->vpu_mutex;
693 vfd->v4l2_dev = &vpu->v4l2_dev;
694 vfd->vfl_dir = VFL_DIR_M2M;
695 snprintf(vfd->name, sizeof(vfd->name), "%s", RK3288_VPU_DEC_NAME);
698 video_set_drvdata(vfd, vpu);
700 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
702 v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
703 video_device_release(vfd);
707 v4l2_info(&vpu->v4l2_dev,
708 "Rockchip RK3288 VPU decoder registered as /vpu/video%d\n",
/* Error unwinding: reverse order of the setup sequence above. */
716 video_device_release(vpu->vfd_dec);
718 video_unregister_device(vpu->vfd_enc);
720 video_device_release(vpu->vfd_enc);
722 rk3288_vpu_enc_free_dummy_ctx(vpu);
724 v4l2_device_unregister(&vpu->v4l2_dev);
726 vb2_dma_contig_cleanup_ctx(vpu->alloc_ctx_vm);
728 vb2_dma_contig_cleanup_ctx(vpu->alloc_ctx);
730 rk3288_vpu_hw_remove(vpu);
732 pr_debug("%s-- with error\n", __func__);
/*
 * Platform remove: undo probe in reverse order. All contexts are
 * already gone by the time this runs (see comment below), so no
 * per-context cleanup is needed here.
 */
738 static int rk3288_vpu_remove(struct platform_device *pdev)
740 struct rk3288_vpu_dev *vpu = platform_get_drvdata(pdev);
744 v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);
747 * We are safe here assuming that .remove() got called as
748 * a result of module removal, which guarantees that all
749 * contexts have been released.
752 video_unregister_device(vpu->vfd_dec);
753 video_unregister_device(vpu->vfd_enc);
754 rk3288_vpu_enc_free_dummy_ctx(vpu);
755 v4l2_device_unregister(&vpu->v4l2_dev);
756 vb2_dma_contig_cleanup_ctx(vpu->alloc_ctx_vm);
757 vb2_dma_contig_cleanup_ctx(vpu->alloc_ctx);
758 rk3288_vpu_hw_remove(vpu);
/* Platform-bus device ID table (for non-DT matching). */
765 static struct platform_device_id vpu_driver_ids[] = {
766 { .name = "rk3288-vpu", },
770 MODULE_DEVICE_TABLE(platform, vpu_driver_ids);
/* Device-tree compatible strings this driver binds to. */
773 static const struct of_device_id of_rk3288_vpu_match[] = {
774 { .compatible = "rockchip,rk3288-vpu", },
777 MODULE_DEVICE_TABLE(of, of_rk3288_vpu_match);
780 #ifdef CONFIG_PM_SLEEP
/*
 * System suspend: block new runs by setting VPU_SUSPENDED (checked in
 * rk3288_vpu_try_run()), then wait until the in-flight run, if any,
 * completes (run_done() clears current_ctx and wakes run_wq).
 */
781 static int rk3288_vpu_suspend(struct device *dev)
783 struct rk3288_vpu_dev *vpu = dev_get_drvdata(dev);
785 set_bit(VPU_SUSPENDED, &vpu->state);
786 wait_event(vpu->run_wq, vpu->current_ctx == NULL);
/*
 * System resume: allow scheduling again and kick the scheduler in case
 * contexts became ready while suspended.
 */
791 static int rk3288_vpu_resume(struct device *dev)
793 struct rk3288_vpu_dev *vpu = dev_get_drvdata(dev);
795 clear_bit(VPU_SUSPENDED, &vpu->state);
796 rk3288_vpu_try_run(vpu);
/* System sleep PM callbacks (no runtime PM entries here). */
802 static const struct dev_pm_ops rk3288_vpu_pm_ops = {
803 SET_SYSTEM_SLEEP_PM_OPS(rk3288_vpu_suspend, rk3288_vpu_resume)
/* Platform driver glue; matches by platform ID table or device tree. */
806 static struct platform_driver rk3288_vpu_driver = {
807 .probe = rk3288_vpu_probe,
808 .remove = rk3288_vpu_remove,
809 .id_table = vpu_driver_ids,
811 .name = RK3288_VPU_NAME,
812 .owner = THIS_MODULE,
813 .of_match_table = of_match_ptr(of_rk3288_vpu_match),
814 .pm = &rk3288_vpu_pm_ops,
817 module_platform_driver(rk3288_vpu_driver);
819 MODULE_LICENSE("GPL v2");
820 MODULE_AUTHOR("Alpha Lin <Alpha.Lin@Rock-Chips.com>");
821 MODULE_AUTHOR("Tomasz Figa <tfiga@chromium.org>");
822 MODULE_DESCRIPTION("Rockchip RK3288 VPU codec driver");