2 * Rockchip VPU codec driver
4 * Copyright (C) 2014 Google, Inc.
5 * Tomasz Figa <tfiga@chromium.org>
7 * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
9 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
11 * This software is licensed under the terms of the GNU General Public
12 * License version 2, as published by the Free Software Foundation, and
13 * may be copied, distributed, and modified under those terms.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
21 #include "rockchip_vpu_common.h"
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/sched.h>
26 #include <linux/slab.h>
27 #include <linux/videodev2.h>
28 #include <media/v4l2-event.h>
29 #include <linux/workqueue.h>
31 #include <media/videobuf2-core.h>
32 #include <media/videobuf2-dma-contig.h>
34 #include "rockchip_vpu_dec.h"
35 #include "rockchip_vpu_enc.h"
36 #include "rockchip_vpu_hw.h"
/*
 * Runtime-adjustable debug verbosity (0 = quiet), writable by root via
 * sysfs. NOTE(review): the 'debug' variable itself is not visible in
 * this chunk -- presumably declared in rockchip_vpu_common.h; confirm.
 */
39 module_param(debug, int, S_IRUGO | S_IWUSR);
40 MODULE_PARM_DESC(debug,
41 "Debug level - higher value produces more verbose messages");
44 * DMA coherent helpers.
/*
 * Allocate a DMA-coherent auxiliary buffer of @size bytes for @vpu,
 * storing the kernel mapping in buf->cpu and the bus address in buf->dma.
 * NOTE(review): the allocation-failure check, the buf->size assignment
 * and the return statement fall on lines elided from this chunk --
 * confirm they exist in the full file.
 */
47 int rockchip_vpu_aux_buf_alloc(struct rockchip_vpu_dev *vpu,
48 struct rockchip_vpu_aux_buf *buf, size_t size)
50 buf->cpu = dma_alloc_coherent(vpu->dev, size, &buf->dma, GFP_KERNEL);
/*
 * Free an auxiliary buffer previously obtained with
 * rockchip_vpu_aux_buf_alloc(). Relies on buf->size still holding the
 * size used at allocation time.
 */
58 void rockchip_vpu_aux_buf_free(struct rockchip_vpu_dev *vpu,
59 struct rockchip_vpu_aux_buf *buf)
61 dma_free_coherent(vpu->dev, buf->size, buf->cpu, buf->dma);
/* Invoke the codec-specific prepare_run hook, if the context provides one. */
72 static void rockchip_vpu_prepare_run(struct rockchip_vpu_ctx *ctx)
74 if (ctx->run_ops->prepare_run)
75 ctx->run_ops->prepare_run(ctx);
/*
 * Pick the first source/destination buffer pair off the context queues
 * for the upcoming hardware run. Caller must hold dev->irqlock.
 * NOTE(review): the list_del calls and the ctx->run.src/dst assignments
 * that presumably follow are on lines elided from this chunk -- confirm.
 */
78 static void __rockchip_vpu_dequeue_run_locked(struct rockchip_vpu_ctx *ctx)
80 struct rockchip_vpu_buf *src, *dst;
83 * Since ctx was dequeued from ready_ctxs list, we know that it has
84 * at least one buffer in each queue.
86 src = list_first_entry(&ctx->src_queue, struct rockchip_vpu_buf, list);
87 dst = list_first_entry(&ctx->dst_queue, struct rockchip_vpu_buf, list);
/*
 * Hardware workaround: when the previous run was a decode and the newly
 * picked context is an encoder, substitute the device's dummy encode
 * context so internal hardware state is reinitialized before the real
 * encode. The fall-through "return ctx" for the no-WAR case is on a
 * line elided from this chunk.
 */
96 static struct rockchip_vpu_ctx *
97 rockchip_vpu_encode_after_decode_war(struct rockchip_vpu_ctx *ctx)
99 struct rockchip_vpu_dev *dev = ctx->dev;
101 if (dev->dummy_encode_ctx &&
102 dev->was_decoding && rockchip_vpu_ctx_is_encoder(ctx))
103 return dev->dummy_encode_ctx;
/*
 * Pick the next ready context and kick the hardware on it. Bails out
 * early (under dev->irqlock) when no context is ready, the device is
 * suspended, or a run is already in flight (VPU_RUNNING already set).
 */
108 static void rockchip_vpu_try_run(struct rockchip_vpu_dev *dev)
110 struct rockchip_vpu_ctx *ctx = NULL;
115 spin_lock_irqsave(&dev->irqlock, flags);
117 if (list_empty(&dev->ready_ctxs) ||
118 test_bit(VPU_SUSPENDED, &dev->state))
122 if (test_and_set_bit(VPU_RUNNING, &dev->state))
124 * The hardware is already running. We will pick another
125 * run after we get the notification in rockchip_vpu_run_done().
129 ctx = list_entry(dev->ready_ctxs.next, struct rockchip_vpu_ctx, list);
132 * WAR for corrupted hardware state when encoding directly after
133 * certain decoding runs.
135 * If previous context was decoding and currently picked one is
136 * encoding then we need to execute a dummy encode with proper
137 * settings to reinitialize certain internal hardware state.
139 ctx = rockchip_vpu_encode_after_decode_war(ctx);
/* The dummy encode context owns no real vb2 buffers, so skip dequeue. */
141 if (!rockchip_vpu_ctx_is_dummy_encode(ctx)) {
142 list_del_init(&ctx->list);
143 __rockchip_vpu_dequeue_run_locked(ctx);
146 dev->current_ctx = ctx;
147 dev->was_decoding = !rockchip_vpu_ctx_is_encoder(ctx);
150 spin_unlock_irqrestore(&dev->irqlock, flags);
/* NOTE(review): ctx is NULL on the early bail-out paths above; the
 * guard protecting these two calls is presumably on the lines elided
 * from this chunk -- confirm in the full file. */
153 rockchip_vpu_prepare_run(ctx);
154 rockchip_vpu_run(ctx);
/*
 * Queue @ctx on dev->ready_ctxs if it is not already queued and has at
 * least one buffer on each of its source and destination queues.
 * Caller must hold dev->irqlock.
 */
160 static void __rockchip_vpu_try_context_locked(struct rockchip_vpu_dev *dev,
161 struct rockchip_vpu_ctx *ctx)
163 if (!list_empty(&ctx->list))
164 /* Context already queued. */
167 if (!list_empty(&ctx->dst_queue) && !list_empty(&ctx->src_queue))
168 list_add_tail(&ctx->list, &dev->ready_ctxs);
/*
 * Completion path for a hardware run: invoke the codec run_done hook,
 * hand the processed vb2 buffer pair back to userspace with @result
 * (real contexts only -- dummy encode runs carry no buffers), clear the
 * running state and try to schedule the next run.
 */
171 void rockchip_vpu_run_done(struct rockchip_vpu_ctx *ctx,
172 enum vb2_buffer_state result)
174 struct rockchip_vpu_dev *dev = ctx->dev;
179 if (ctx->run_ops->run_done)
180 ctx->run_ops->run_done(ctx, result);
182 if (!rockchip_vpu_ctx_is_dummy_encode(ctx)) {
183 struct vb2_v4l2_buffer *src =
184 to_vb2_v4l2_buffer(&ctx->run.src->vb.vb2_buf);
185 struct vb2_v4l2_buffer *dst =
186 to_vb2_v4l2_buffer(&ctx->run.dst->vb.vb2_buf);
/* Copy the source timestamp, matching V4L2_BUF_FLAG_TIMESTAMP_COPY
 * set on both queues in rockchip_vpu_open(). */
188 dst->timestamp = src->timestamp;
189 vb2_buffer_done(&ctx->run.src->vb.vb2_buf, result);
190 vb2_buffer_done(&ctx->run.dst->vb.vb2_buf, result);
/* NOTE(review): current_ctx is cleared here without dev->irqlock,
 * while rockchip_vpu_try_run() sets it under the lock -- verify this
 * ordering is intentional (suspend waits on current_ctx == NULL). */
193 dev->current_ctx = NULL;
194 wake_up_all(&dev->run_wq);
196 spin_lock_irqsave(&dev->irqlock, flags);
/* Re-queue this context in case more buffers are already waiting. */
198 __rockchip_vpu_try_context_locked(dev, ctx);
199 clear_bit(VPU_RUNNING, &dev->state);
201 spin_unlock_irqrestore(&dev->irqlock, flags);
203 /* Try scheduling another run to see if we have anything left to do. */
204 rockchip_vpu_try_run(dev);
/*
 * Public wrapper: mark @ctx ready (if it has buffers on both queues),
 * then attempt to start a hardware run.
 */
209 void rockchip_vpu_try_context(struct rockchip_vpu_dev *dev,
210 struct rockchip_vpu_ctx *ctx)
216 spin_lock_irqsave(&dev->irqlock, flags);
218 __rockchip_vpu_try_context_locked(dev, ctx);
220 spin_unlock_irqrestore(&dev->irqlock, flags);
222 rockchip_vpu_try_run(dev);
228 * bit stream assembler
/*
 * Latch the overflow flag when fewer than 5 spare bytes remain in the
 * assembler buffer -- presumably headroom for the worst case a single
 * stream_put_bits() call can emit (confirm against the elided flush
 * loop). The success return path is on lines elided from this chunk.
 */
231 static int stream_buffer_status(struct stream_s *stream)
233 if (stream->byte_cnt + 5 > stream->size) {
234 stream->overflow = 1;
/*
 * Append the low @number bits of @value to the bit stream, MSB first,
 * flushing completed bytes into the output buffer. @name is used only
 * for debug logging. Does nothing once the buffer has overflowed.
 */
241 void stream_put_bits(struct stream_s *buffer, s32 value, s32 number,
245 u32 byte_buffer = buffer->byte_buffer;
246 u8 *stream = buffer->stream;
248 if (stream_buffer_status(buffer) != 0)
251 vpu_debug(0, "assemble %s value %x, bits %d\n", name, value, number);
/* NOTE(review): '1 << number' is evaluated before the range check on
 * the next line; a shift by >= 31 on a signed int is UB, so the two
 * BUG_ONs are arguably in the wrong order. */
253 BUG_ON(value >= (1 << number));
254 BUG_ON(number >= 25);
256 bits = number + buffer->buffered_bits;
257 value <<= (32 - bits);
258 byte_buffer = byte_buffer | value;
/* Byte-flush loop: emit the top byte of the 32-bit accumulator while
 * at least 8 bits are buffered (loop header elided from this chunk). */
261 *stream = (u8)(byte_buffer >> 24);
269 buffer->byte_buffer = byte_buffer;
270 buffer->buffered_bits = (u8)bits;
271 buffer->stream = stream;
/* Rewind the stream assembler to an empty state over its backing buffer. */
276 void stream_buffer_reset(struct stream_s *buffer)
278 buffer->stream = buffer->buffer;
279 buffer->byte_cnt = 0;
280 buffer->overflow = 0;
281 buffer->byte_buffer = 0;
282 buffer->buffered_bits = 0;
/*
 * Initialize a stream assembler over @stream/@size. When @stream is
 * NULL a zeroed backing buffer of @size bytes is allocated internally.
 * NOTE(review): who frees the kzalloc'd buffer is not visible in this
 * chunk -- confirm the ownership/free path. Error return values are on
 * lines elided from this chunk.
 */
285 int stream_buffer_init(struct stream_s *buffer, u8 *stream, s32 size)
287 if (stream == NULL) {
288 buffer->stream = kzalloc(size, GFP_KERNEL);
291 if (buffer->stream == NULL) {
292 vpu_err("allocate stream buffer failed\n");
296 buffer->buffer = buffer->stream;
299 stream_buffer_reset(buffer);
301 if (stream_buffer_status(buffer) != 0)
308 * Control registration.
/* True for driver-private control IDs living in the MPEG control class. */
311 #define IS_VPU_PRIV(x) ((V4L2_CTRL_ID2WHICH(x) == V4L2_CTRL_CLASS_MPEG) && \
312 V4L2_CTRL_DRIVER_PRIV(x))
/*
 * Register the controls described by @controls on the context's control
 * handler. Driver-private/custom controls are built into a
 * v4l2_ctrl_config and registered via v4l2_ctrl_new_custom(); standard
 * menu and non-menu controls go through the v4l2_ctrl_new_std*()
 * helpers (their argument lists are on lines elided from this chunk).
 * @get_menu maps a control ID to its menu strings. Returns the handler
 * error code on failure; the success return is elided from this chunk.
 */
314 int rockchip_vpu_ctrls_setup(struct rockchip_vpu_ctx *ctx,
315 const struct v4l2_ctrl_ops *ctrl_ops,
316 struct rockchip_vpu_control *controls,
318 const char *const *(*get_menu)(u32))
320 struct v4l2_ctrl_config cfg;
323 if (num_ctrls > ARRAY_SIZE(ctx->ctrls)) {
324 vpu_err("context control array not large enough\n");
328 v4l2_ctrl_handler_init(&ctx->ctrl_handler, num_ctrls);
329 if (ctx->ctrl_handler.error) {
330 vpu_err("v4l2_ctrl_handler_init failed\n");
331 return ctx->ctrl_handler.error;
334 for (i = 0; i < num_ctrls; i++) {
335 if (IS_VPU_PRIV(controls[i].id)
336 || controls[i].id >= V4L2_CID_CUSTOM_BASE
337 || controls[i].type == V4L2_CTRL_TYPE_PRIVATE) {
338 memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
341 cfg.id = controls[i].id;
342 cfg.min = controls[i].minimum;
343 cfg.max = controls[i].maximum;
344 cfg.max_stores = controls[i].max_stores;
345 cfg.def = controls[i].default_value;
346 cfg.name = controls[i].name;
347 cfg.type = controls[i].type;
348 cfg.elem_size = controls[i].elem_size;
349 memcpy(cfg.dims, controls[i].dims, sizeof(cfg.dims));
351 if (cfg.type == V4L2_CTRL_TYPE_MENU) {
/* BUG(review): self-assignment is a no-op -- this almost certainly
 * should read cfg.menu_skip_mask = controls[i].menu_skip_mask; as it
 * stands the skip mask stays 0 from the memset above. */
352 cfg.menu_skip_mask = cfg.menu_skip_mask;
353 cfg.qmenu = get_menu(cfg.id);
355 cfg.step = controls[i].step;
358 ctx->ctrls[i] = v4l2_ctrl_new_custom(
359 &ctx->ctrl_handler, &cfg, NULL);
361 if (controls[i].type == V4L2_CTRL_TYPE_MENU) {
363 v4l2_ctrl_new_std_menu
373 v4l2_ctrl_new_std(&ctx->ctrl_handler,
384 if (ctx->ctrl_handler.error) {
385 vpu_err("Adding control (%d) failed\n", i);
386 return ctx->ctrl_handler.error;
/* Propagate per-control flags from the driver's control table. */
389 if (controls[i].is_volatile && ctx->ctrls[i])
390 ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE;
391 if (controls[i].is_read_only && ctx->ctrls[i])
392 ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_READ_ONLY;
393 if (controls[i].can_store && ctx->ctrls[i])
394 ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_CAN_STORE;
397 v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
398 ctx->num_ctrls = num_ctrls;
/*
 * Free the context's control handler. The handler owns the control
 * objects, so the cached ctx->ctrls[] pointers are dangling afterwards
 * and are cleared to NULL.
 */
402 void rockchip_vpu_ctrls_delete(struct rockchip_vpu_ctx *ctx)
406 v4l2_ctrl_handler_free(&ctx->ctrl_handler);
407 for (i = 0; i < ctx->num_ctrls; i++)
408 ctx->ctrls[i] = NULL;
412 * V4L2 file operations.
/*
 * open() for both the encoder and decoder video nodes: allocates a
 * per-file context, initializes the v4l2 file handle, the codec state
 * (enc or dec depending on which node was opened) and the two vb2
 * queues (CAPTURE and OUTPUT). The error-label lines and the final
 * success return are elided from this chunk; the unwind sequence at the
 * bottom is the tail of those error paths.
 */
415 static int rockchip_vpu_open(struct file *filp)
417 struct video_device *vdev = video_devdata(filp);
418 struct rockchip_vpu_dev *dev = video_drvdata(filp);
419 struct rockchip_vpu_ctx *ctx = NULL;
424 * We do not need any extra locking here, because we operate only
425 * on local data here, except reading few fields from dev, which
426 * do not change through device's lifetime (which is guaranteed by
427 * reference on module from open()) and V4L2 internal objects (such
428 * as vdev and ctx->fh), which have proper locking done in respective
429 * helper functions used here.
434 /* Allocate memory for context */
435 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
441 v4l2_fh_init(&ctx->fh, video_devdata(filp));
442 filp->private_data = &ctx->fh;
443 v4l2_fh_add(&ctx->fh);
445 INIT_LIST_HEAD(&ctx->src_queue);
446 INIT_LIST_HEAD(&ctx->dst_queue);
447 INIT_LIST_HEAD(&ctx->list);
449 if (vdev == dev->vfd_enc) {
450 /* only for encoder */
451 ret = rockchip_vpu_enc_init(ctx);
453 vpu_err("Failed to initialize encoder context\n");
456 } else if (vdev == dev->vfd_dec) {
457 /* only for decoder */
458 ret = rockchip_vpu_dec_init(ctx);
460 vpu_err("Failed to initialize decoder context\n");
467 ctx->fh.ctrl_handler = &ctx->ctrl_handler;
469 /* Init videobuf2 queue for CAPTURE */
471 q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
472 q->drv_priv = &ctx->fh;
473 q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
474 q->lock = &dev->vpu_mutex;
475 q->buf_struct_size = sizeof(struct rockchip_vpu_buf);
477 if (vdev == dev->vfd_enc) {
478 q->ops = get_enc_queue_ops();
479 } else if (vdev == dev->vfd_dec) {
480 q->ops = get_dec_queue_ops();
/* Decoder capture buffers are both read and written by the hardware
 * (e.g. reference frames), hence bidirectional DMA mapping. */
481 q->use_dma_bidirectional = 1;
484 q->mem_ops = &vb2_dma_contig_memops;
485 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
487 ret = vb2_queue_init(q);
489 vpu_err("Failed to initialize videobuf2 queue(capture)\n");
490 goto err_enc_dec_exit;
493 /* Init videobuf2 queue for OUTPUT */
495 q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
496 q->drv_priv = &ctx->fh;
497 q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
498 q->lock = &dev->vpu_mutex;
499 q->buf_struct_size = sizeof(struct rockchip_vpu_buf);
501 if (vdev == dev->vfd_enc)
502 q->ops = get_enc_queue_ops();
503 else if (vdev == dev->vfd_dec)
504 q->ops = get_dec_queue_ops();
506 q->mem_ops = &vb2_dma_contig_memops;
507 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
509 ret = vb2_queue_init(q);
511 vpu_err("Failed to initialize videobuf2 queue(output)\n");
512 goto err_vq_dst_release;
/* Error unwind: release in reverse order of initialization. */
520 vb2_queue_release(&ctx->vq_dst);
522 if (vdev == dev->vfd_enc)
523 rockchip_vpu_enc_exit(ctx);
524 else if (vdev == dev->vfd_dec)
525 rockchip_vpu_dec_exit(ctx);
527 v4l2_fh_del(&ctx->fh);
528 v4l2_fh_exit(&ctx->fh);
/*
 * release() for both video nodes: tears down the context created in
 * rockchip_vpu_open() in reverse order -- vb2 queues first (which stops
 * streaming and returns all buffers), then the file handle, then the
 * codec-specific state. The kfree(ctx) and return are on lines elided
 * from this chunk.
 */
538 static int rockchip_vpu_release(struct file *filp)
539 struct rockchip_vpu_ctx *ctx = fh_to_ctx(filp->private_data);
540 struct video_device *vdev = video_devdata(filp);
541 struct rockchip_vpu_dev *dev = ctx->dev;
543 * No need for extra locking because this was the last reference
550 * vb2_queue_release() ensures that streaming is stopped, which
551 * in turn means that there are no frames still being processed
554 vb2_queue_release(&ctx->vq_src);
555 vb2_queue_release(&ctx->vq_dst);
557 v4l2_fh_del(&ctx->fh);
558 v4l2_fh_exit(&ctx->fh);
560 if (vdev == dev->vfd_enc)
561 rockchip_vpu_enc_exit(ctx);
562 else if (vdev == dev->vfd_dec)
563 rockchip_vpu_dec_exit(ctx);
/*
 * poll() handler: reports POLLOUT/POLLWRNORM when a finished source
 * (OUTPUT) buffer can be dequeued, POLLIN/POLLRDNORM for a finished
 * destination (CAPTURE) buffer, plus pending v4l2 events. If neither
 * queue is streaming with queued buffers, an error mask is returned
 * (the actual POLLERR return is on lines elided from this chunk).
 */
572 static unsigned int rockchip_vpu_poll(struct file *filp,
573 struct poll_table_struct *wait)
575 struct rockchip_vpu_ctx *ctx = fh_to_ctx(filp->private_data);
576 struct vb2_queue *src_q, *dst_q;
577 struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
583 src_q = &ctx->vq_src;
584 dst_q = &ctx->vq_dst;
587 * There has to be at least one buffer queued on each queued_list, which
588 * means either in driver already or waiting for driver to claim it
589 * and start processing.
591 if ((!vb2_is_streaming(src_q) || list_empty(&src_q->queued_list)) &&
592 (!vb2_is_streaming(dst_q) || list_empty(&dst_q->queued_list))) {
593 vpu_debug(0, "src q streaming %d, dst q streaming %d, src list empty(%d), dst list empty(%d)\n",
594 src_q->streaming, dst_q->streaming,
595 list_empty(&src_q->queued_list),
596 list_empty(&dst_q->queued_list));
/* Register on all three wait queues before checking state, so no
 * wakeup can be missed between the checks and the sleep. */
600 poll_wait(filp, &ctx->fh.wait, wait);
601 poll_wait(filp, &src_q->done_wq, wait);
602 poll_wait(filp, &dst_q->done_wq, wait);
604 if (v4l2_event_pending(&ctx->fh))
607 spin_lock_irqsave(&src_q->done_lock, flags);
609 if (!list_empty(&src_q->done_list))
610 src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
613 if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE ||
614 src_vb->state == VB2_BUF_STATE_ERROR))
615 rc |= POLLOUT | POLLWRNORM;
617 spin_unlock_irqrestore(&src_q->done_lock, flags);
619 spin_lock_irqsave(&dst_q->done_lock, flags);
621 if (!list_empty(&dst_q->done_list))
622 dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
625 if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE ||
626 dst_vb->state == VB2_BUF_STATE_ERROR))
627 rc |= POLLIN | POLLRDNORM;
629 spin_unlock_irqrestore(&dst_q->done_lock, flags);
/*
 * mmap() dispatcher for the two vb2 queues: offsets below
 * DST_QUEUE_OFF_BASE map OUTPUT (source) buffers; larger offsets are
 * rebased by DST_QUEUE_OFF_BASE and mapped from the CAPTURE
 * (destination) queue.
 */
634 static int rockchip_vpu_mmap(struct file *filp, struct vm_area_struct *vma)
636 struct rockchip_vpu_ctx *ctx = fh_to_ctx(filp->private_data);
637 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
642 if (offset < DST_QUEUE_OFF_BASE) {
643 vpu_debug(4, "mmaping source\n");
645 ret = vb2_mmap(&ctx->vq_src, vma);
646 } else { /* capture */
647 vpu_debug(4, "mmaping destination\n");
/* Strip the capture-queue offset bias before handing off to vb2. */
649 vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
650 ret = vb2_mmap(&ctx->vq_dst, vma);
/* V4L2 file operations shared by the encoder and decoder video nodes. */
658 static const struct v4l2_file_operations rockchip_vpu_fops = {
659 .owner = THIS_MODULE,
660 .open = rockchip_vpu_open,
661 .release = rockchip_vpu_release,
662 .poll = rockchip_vpu_poll,
663 .unlocked_ioctl = video_ioctl2,
664 .mmap = rockchip_vpu_mmap,
671 static void* rockchip_get_drv_data(struct platform_device *pdev);
/*
 * Platform probe: sets up device state and hardware, creates two
 * vb2-dma-contig allocation contexts (one without a kernel mapping for
 * large codec buffers, one with a mapping but without huge pages),
 * registers the v4l2 device, the RK3288 dummy-encode workaround
 * context, and then the encoder and decoder video nodes as their codec
 * support is advertised by the variant. The error labels between the
 * unwind statements at the bottom are on lines elided from this chunk.
 */
673 static int rockchip_vpu_probe(struct platform_device *pdev)
675 struct rockchip_vpu_dev *vpu = NULL;
676 DEFINE_DMA_ATTRS(attrs_novm);
677 DEFINE_DMA_ATTRS(attrs_nohugepage);
678 struct video_device *vfd;
683 vpu = devm_kzalloc(&pdev->dev, sizeof(*vpu), GFP_KERNEL);
687 vpu->dev = &pdev->dev;
689 mutex_init(&vpu->vpu_mutex);
690 spin_lock_init(&vpu->irqlock);
691 INIT_LIST_HEAD(&vpu->ready_ctxs);
692 init_waitqueue_head(&vpu->run_wq);
694 vpu->variant = rockchip_get_drv_data(pdev);
696 ret = rockchip_vpu_hw_probe(vpu);
698 dev_err(&pdev->dev, "rockchip_vpu_hw_probe failed\n");
703 * We'll do mostly sequential access, so sacrifice TLB efficiency for
706 dma_set_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, &attrs_novm);
708 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs_novm);
709 vpu->alloc_ctx = vb2_dma_contig_init_ctx_attrs(&pdev->dev,
711 if (IS_ERR(vpu->alloc_ctx)) {
712 ret = PTR_ERR(vpu->alloc_ctx);
716 dma_set_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, &attrs_nohugepage);
717 vpu->alloc_ctx_vm = vb2_dma_contig_init_ctx_attrs(&pdev->dev,
719 if (IS_ERR(vpu->alloc_ctx_vm)) {
720 ret = PTR_ERR(vpu->alloc_ctx_vm);
721 goto err_dma_contig_vm;
724 ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev);
726 dev_err(&pdev->dev, "Failed to register v4l2 device\n");
727 goto err_v4l2_dev_reg;
730 platform_set_drvdata(pdev, vpu);
732 /* workaround for rk3288 codecs */
733 if (vpu->variant->codecs == RK3288_CODECS) {
734 ret = rockchip_vpu_enc_init_dummy_ctx(vpu);
737 "Failed to create dummy encode context\n");
/* Encoder video node -- only when the variant advertises encoders. */
743 if (!(vpu->variant->codecs & ROCKCHIP_VPU_ENCODERS))
746 vfd = video_device_alloc();
748 v4l2_err(&vpu->v4l2_dev, "Failed to allocate video device\n");
753 vfd->fops = &rockchip_vpu_fops;
754 vfd->ioctl_ops = get_enc_v4l2_ioctl_ops();
755 vfd->release = video_device_release;
756 vfd->lock = &vpu->vpu_mutex;
757 vfd->v4l2_dev = &vpu->v4l2_dev;
758 vfd->vfl_dir = VFL_DIR_M2M;
759 snprintf(vfd->name, sizeof(vfd->name), "%s", ROCKCHIP_VPU_ENC_NAME);
762 video_set_drvdata(vfd, vpu);
764 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
766 v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
767 video_device_release(vfd);
771 v4l2_info(&vpu->v4l2_dev,
772 "Rockchip VPU encoder registered as /vpu/video%d\n",
/* Decoder video node -- only when the variant advertises decoders. */
777 if (!(vpu->variant->codecs & ROCKCHIP_VPU_DECODERS))
780 vfd = video_device_alloc();
782 v4l2_err(&vpu->v4l2_dev, "Failed to allocate video device\n");
787 vfd->fops = &rockchip_vpu_fops;
788 vfd->ioctl_ops = get_dec_v4l2_ioctl_ops();
789 vfd->release = video_device_release;
790 vfd->lock = &vpu->vpu_mutex;
791 vfd->v4l2_dev = &vpu->v4l2_dev;
792 vfd->vfl_dir = VFL_DIR_M2M;
793 snprintf(vfd->name, sizeof(vfd->name), "%s", ROCKCHIP_VPU_DEC_NAME);
796 video_set_drvdata(vfd, vpu);
798 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
800 v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
801 video_device_release(vfd);
805 v4l2_info(&vpu->v4l2_dev,
806 "Rockchip VPU decoder registered as /vpu/video%d\n",
/* Error unwind: release everything in reverse order of acquisition. */
815 video_device_release(vpu->vfd_dec);
817 video_unregister_device(vpu->vfd_enc);
819 video_device_release(vpu->vfd_enc);
821 rockchip_vpu_enc_free_dummy_ctx(vpu);
823 v4l2_device_unregister(&vpu->v4l2_dev);
825 vb2_dma_contig_cleanup_ctx(vpu->alloc_ctx_vm);
827 vb2_dma_contig_cleanup_ctx(vpu->alloc_ctx);
829 rockchip_vpu_hw_remove(vpu);
831 pr_debug("%s-- with error\n", __func__);
/*
 * Platform remove: undoes rockchip_vpu_probe() in reverse order.
 * Safe to run unconditionally because module removal guarantees all
 * open contexts were already released.
 */
837 static int rockchip_vpu_remove(struct platform_device *pdev)
839 struct rockchip_vpu_dev *vpu = platform_get_drvdata(pdev);
843 v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);
846 * We are safe here assuming that .remove() got called as
847 * a result of module removal, which guarantees that all
848 * contexts have been released.
851 video_unregister_device(vpu->vfd_dec);
852 video_unregister_device(vpu->vfd_enc);
853 rockchip_vpu_enc_free_dummy_ctx(vpu);
854 v4l2_device_unregister(&vpu->v4l2_dev);
855 vb2_dma_contig_cleanup_ctx(vpu->alloc_ctx_vm);
856 vb2_dma_contig_cleanup_ctx(vpu->alloc_ctx);
857 rockchip_vpu_hw_remove(vpu);
864 /* Supported VPU variants. */
/* RK3288 variant description; additional fields are elided from this chunk. */
865 static const struct rockchip_vpu_variant rk3288_vpu_variant = {
866 .name = "Rk3288 vpu",
867 .codecs = RK3288_CODECS,
871 .dec_reg_num = 60 + 41,
/* Legacy (non-DT) platform-bus match table. */
874 static struct platform_device_id vpu_driver_ids[] = {
876 .name = "rk3288-vpu",
877 .driver_data = (unsigned long)&rk3288_vpu_variant,
882 MODULE_DEVICE_TABLE(platform, vpu_driver_ids);
/* Device-tree match table; .data points at the per-SoC variant. */
885 static const struct of_device_id of_rockchip_vpu_match[] = {
886 { .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
889 MODULE_DEVICE_TABLE(of, of_rockchip_vpu_match);
/*
 * Resolve the variant data for this device: from the OF match table
 * when probed via device tree, otherwise from the platform-bus id
 * table. NOTE(review): a NULL 'match' is dereferenced unchecked if the
 * DT node has no entry -- the guard, if any, is on elided lines.
 */
892 static void* rockchip_get_drv_data(struct platform_device *pdev)
895 if (pdev->dev.of_node) {
896 const struct of_device_id *match;
897 match = of_match_node(of_rockchip_vpu_match,
900 return (void *)match->data;
904 return (void *)platform_get_device_id(pdev)->driver_data;
907 #ifdef CONFIG_PM_SLEEP
/*
 * System suspend: block new runs by setting VPU_SUSPENDED, then wait
 * for any in-flight run to finish (run_done() clears current_ctx and
 * wakes run_wq). The return statement is on a line elided from this chunk.
 */
908 static int rockchip_vpu_suspend(struct device *dev)
910 struct rockchip_vpu_dev *vpu = dev_get_drvdata(dev);
912 set_bit(VPU_SUSPENDED, &vpu->state);
913 wait_event(vpu->run_wq, vpu->current_ctx == NULL);
/*
 * System resume: lift the suspend gate and restart scheduling in case
 * contexts became ready while suspended.
 */
918 static int rockchip_vpu_resume(struct device *dev)
920 struct rockchip_vpu_dev *vpu = dev_get_drvdata(dev);
922 clear_bit(VPU_SUSPENDED, &vpu->state);
923 rockchip_vpu_try_run(vpu);
/* System sleep PM hooks (suspend/resume only; no runtime PM here). */
929 static const struct dev_pm_ops rockchip_vpu_pm_ops = {
930 SET_SYSTEM_SLEEP_PM_OPS(rockchip_vpu_suspend, rockchip_vpu_resume)
/* Platform driver glue: matches via DT or the legacy id table. */
933 static struct platform_driver rockchip_vpu_driver = {
934 .probe = rockchip_vpu_probe,
935 .remove = rockchip_vpu_remove,
936 .id_table = vpu_driver_ids,
938 .name = ROCKCHIP_VPU_NAME,
939 .owner = THIS_MODULE,
941 .of_match_table = of_match_ptr(of_rockchip_vpu_match),
943 .pm = &rockchip_vpu_pm_ops,
946 module_platform_driver(rockchip_vpu_driver);
948 MODULE_LICENSE("GPL v2");
949 MODULE_AUTHOR("Alpha Lin <Alpha.Lin@Rock-Chips.com>");
950 MODULE_AUTHOR("Tomasz Figa <tfiga@chromium.org>");
951 MODULE_DESCRIPTION("Rockchip VPU codec driver");