2 * Copyright (C) 2013 Google, Inc.
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
15 #include <linux/kthread.h>
16 #include <linux/mutex.h>
17 #include <linux/slab.h>
21 #include <video/adf.h>
22 #include <video/adf_client.h>
23 #include <video/adf_format.h>
/*
 * vsync_active - whether vsync events are delivered in a given DPMS state.
 * ON and STANDBY keep vsync running; other states (SUSPEND/OFF) do not.
 */
27 static inline bool vsync_active(u8 state)
29 return state == DRM_MODE_DPMS_ON || state == DRM_MODE_DPMS_STANDBY;
33 * adf_interface_blank - set interface's DPMS state
35 * @intf: the interface
36 * @state: one of %DRM_MODE_DPMS_*
38 * Returns 0 on success or -errno on failure.
/* NOTE(review): braces, several local declarations, and the error labels of
 * this function are missing from this listing (source line numbers jump) --
 * verify against upstream adf_client.c before relying on it. */
40 int adf_interface_blank(struct adf_interface *intf, u8 state)
42 struct adf_device *dev = adf_interface_parent(intf);
47 struct adf_event_refcount *vsync_refcount;
/* Blanking is only possible if the driver implements the blank() op. */
49 if (!intf->ops || !intf->ops->blank)
/* client_lock serializes DPMS changes against posts and other client calls. */
52 mutex_lock(&dev->client_lock);
/* Before leaving the fully-on state, drain any posts still queued so they
 * do not land on a blanked display. */
53 if (state != DRM_MODE_DPMS_ON)
54 flush_kthread_worker(&dev->post_worker);
/* event_lock protects the vsync event refcount and the set_event() calls. */
55 mutex_lock(&intf->base.event_lock);
57 vsync_refcount = adf_obj_find_event_refcount(&intf->base,
59 if (!vsync_refcount) {
64 prev_state = intf->dpms_state;
/* Requesting the current state is a no-op. */
65 if (prev_state == state) {
/* Vsync must be torn down when leaving a vsync-active state while clients
 * still hold event references, and re-armed on the opposite transition. */
70 disable_vsync = vsync_active(prev_state) &&
71 !vsync_active(state) &&
72 vsync_refcount->refcount;
73 enable_vsync = !vsync_active(prev_state) &&
74 vsync_active(state) &&
75 vsync_refcount->refcount;
78 intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
81 ret = intf->ops->blank(intf, state);
/* NOTE(review): the two set_event() calls below are presumably the
 * success-path enable and the failure-path rollback -- the surrounding
 * branches are not visible here; confirm against upstream. */
84 intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
90 intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
93 intf->dpms_state = state;
95 mutex_unlock(&intf->base.event_lock);
96 mutex_unlock(&dev->client_lock);
99 EXPORT_SYMBOL(adf_interface_blank);
102 * adf_interface_dpms_state - get interface's current DPMS state
104 * @intf: the interface
106 * Returns one of %DRM_MODE_DPMS_*.
108 u8 adf_interface_dpms_state(struct adf_interface *intf)
110 struct adf_device *dev = adf_interface_parent(intf);
/* Read dpms_state under client_lock so the value is consistent with a
 * concurrent adf_interface_blank(). */
113 mutex_lock(&dev->client_lock);
114 dpms_state = intf->dpms_state;
115 mutex_unlock(&dev->client_lock);
119 EXPORT_SYMBOL(adf_interface_dpms_state);
122 * adf_interface_current_mode - get interface's current display mode
124 * @intf: the interface
125 * @mode: returns the current mode
127 void adf_interface_current_mode(struct adf_interface *intf,
128 struct drm_mode_modeinfo *mode)
130 struct adf_device *dev = adf_interface_parent(intf);
/* Copy under client_lock so the mode is not torn by a concurrent modeset. */
132 mutex_lock(&dev->client_lock);
133 memcpy(mode, &intf->current_mode, sizeof(*mode));
134 mutex_unlock(&dev->client_lock);
136 EXPORT_SYMBOL(adf_interface_current_mode);
139 * adf_interface_modelist - get interface's modelist
141 * @intf: the interface
142 * @modelist: storage for the modelist (optional)
143 * @n_modes: length of @modelist
145 * If @modelist is not NULL, adf_interface_modelist() will copy up to @n_modes
146 * modelist entries into @modelist.
148 * Returns the length of the modelist.
150 size_t adf_interface_modelist(struct adf_interface *intf,
151 struct drm_mode_modeinfo *modelist, size_t n_modes)
/* The modelist can be replaced from the hotplug path, so read it under the
 * hotplug_modelist_lock with interrupts saved. */
156 read_lock_irqsave(&intf->hotplug_modelist_lock, flags);
/* Copy at most n_modes entries; the full modelist length is still returned
 * so the caller can size a second call. */
158 memcpy(modelist, intf->modelist, sizeof(modelist[0]) *
159 min(n_modes, intf->n_modes));
160 retval = intf->n_modes;
161 read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
165 EXPORT_SYMBOL(adf_interface_modelist);
168 * adf_interface_set_mode - set interface's display mode
170 * @intf: the interface
171 * @mode: the new mode
173 * Returns 0 on success or -errno on failure.
175 int adf_interface_set_mode(struct adf_interface *intf,
176 struct drm_mode_modeinfo *mode)
178 struct adf_device *dev = adf_interface_parent(intf);
/* Modesetting is optional for drivers. */
181 if (!intf->ops || !intf->ops->modeset)
184 mutex_lock(&dev->client_lock);
/* Drain pending posts so no flip lands mid-modeset. */
185 flush_kthread_worker(&dev->post_worker);
187 ret = intf->ops->modeset(intf, mode);
/* Cache the new mode (presumably only on modeset success -- the guarding
 * branch is not visible in this listing). */
191 memcpy(&intf->current_mode, mode, sizeof(*mode));
193 mutex_unlock(&dev->client_lock);
196 EXPORT_SYMBOL(adf_interface_set_mode);
199 * adf_interface_get_screen_size - get size of screen connected to interface
201 * @intf: the interface
202 * @width_mm: returns the screen width in mm
203 * @height_mm: returns the screen height in mm
205 * Returns 0 on success or -errno on failure.
207 int adf_interface_get_screen_size(struct adf_interface *intf, u16 *width_mm,
210 struct adf_device *dev = adf_interface_parent(intf);
/* Screen-size reporting is optional for drivers. */
213 if (!intf->ops || !intf->ops->screen_size)
/* Serialize against other client operations while querying the driver. */
216 mutex_lock(&dev->client_lock);
217 ret = intf->ops->screen_size(intf, width_mm, height_mm);
218 mutex_unlock(&dev->client_lock);
222 EXPORT_SYMBOL(adf_interface_get_screen_size);
225 * adf_overlay_engine_supports_format - returns whether a format is in an
226 * overlay engine's supported list
228 * @eng: the overlay engine
229 * @format: format fourcc
231 bool adf_overlay_engine_supports_format(struct adf_overlay_engine *eng,
/* Linear scan of the driver's advertised supported-format table. */
235 for (i = 0; i < eng->ops->n_supported_formats; i++)
236 if (format == eng->ops->supported_formats[i])
241 EXPORT_SYMBOL(adf_overlay_engine_supports_format);
/*
 * adf_buffer_validate - sanity-check a client-supplied buffer before posting.
 * Verifies format support, plane count, dimensions, pitches, and that each
 * dma-buf is large enough for the plane it backs.
 */
243 static int adf_buffer_validate(struct adf_buffer *buf)
245 struct adf_overlay_engine *eng = buf->overlay_engine;
246 struct device *dev = &eng->base.dev;
247 u8 hsub, vsub, num_planes, i;
/* The target overlay engine must advertise this fourcc. */
249 if (!adf_overlay_engine_supports_format(eng, buf->format)) {
250 char format_str[ADF_FORMAT_STR_SIZE];
251 adf_format_str(buf->format, format_str);
252 dev_err(dev, "unsupported format %s\n", format_str);
/* Driver-private (non-standard) formats are validated by the driver. */
256 if (!adf_format_is_standard(buf->format)) {
257 struct adf_device *parent = adf_overlay_engine_parent(eng);
258 return parent->ops->validate_custom_format(parent, buf);
/* Standard formats: derive geometry constraints from the fourcc itself. */
261 hsub = adf_format_horz_chroma_subsampling(buf->format);
262 vsub = adf_format_vert_chroma_subsampling(buf->format);
263 num_planes = adf_format_num_planes(buf->format);
265 if (num_planes != buf->n_planes) {
266 char format_str[ADF_FORMAT_STR_SIZE];
267 adf_format_str(buf->format, format_str);
268 dev_err(dev, "%u planes expected for format %s but %u planes provided\n",
269 num_planes, format_str, buf->n_planes);
/* Dimensions must be non-zero and a multiple of the chroma subsampling. */
273 if (buf->w == 0 || buf->w % hsub) {
274 dev_err(dev, "bad buffer width %u\n", buf->w);
278 if (buf->h == 0 || buf->h % vsub) {
279 dev_err(dev, "bad buffer height %u\n", buf->h);
283 for (i = 0; i < num_planes; i++) {
/* Chroma planes (i != 0) are subsampled relative to the luma plane. */
284 u32 width = buf->w / (i != 0 ? hsub : 1);
285 u32 height = buf->h / (i != 0 ? vsub : 1);
286 u8 cpp = adf_format_plane_cpp(buf->format, i);
/* Pitch must cover at least one row; u64 math avoids 32-bit overflow. */
288 if (buf->pitch[i] < (u64) width * cpp) {
289 dev_err(dev, "plane %u pitch is shorter than buffer width (pitch = %u, width = %u, bpp = %u)\n",
290 i, buf->pitch[i], width, cpp * 8);
/* The plane (offset plus height rows of pitch) must fit in its dma-buf. */
294 if ((u64) height * buf->pitch[i] + buf->offset[i] >
295 buf->dma_bufs[i]->size) {
296 dev_err(dev, "plane %u buffer too small (height = %u, pitch = %u, offset = %u, size = %zu)\n",
297 i, height, buf->pitch[i],
298 buf->offset[i], buf->dma_bufs[i]->size);
/*
 * adf_buffer_map - attach and DMA-map each plane's dma-buf for this device.
 * On any failure the partially-filled mapping is torn down via
 * adf_buffer_mapping_cleanup().
 */
305 static int adf_buffer_map(struct adf_device *dev, struct adf_buffer *buf,
306 struct adf_buffer_mapping *mapping)
311 for (i = 0; i < buf->n_planes; i++) {
312 struct dma_buf_attachment *attachment;
313 struct sg_table *sg_table;
315 attachment = dma_buf_attach(buf->dma_bufs[i], dev->dev);
316 if (IS_ERR(attachment)) {
317 ret = PTR_ERR(attachment);
318 dev_err(&dev->base.dev, "attaching plane %u failed: %d\n",
322 mapping->attachments[i] = attachment;
324 sg_table = dma_buf_map_attachment(attachment, DMA_TO_DEVICE);
325 if (IS_ERR(sg_table)) {
326 ret = PTR_ERR(sg_table);
327 dev_err(&dev->base.dev, "mapping plane %u failed: %d",
/* Some exporters return NULL rather than an ERR_PTR on failure. */
330 } else if (!sg_table) {
332 dev_err(&dev->base.dev, "mapping plane %u failed\n", i);
335 mapping->sg_tables[i] = sg_table;
/* Error path: undo any attachments/mappings made so far. */
340 adf_buffer_mapping_cleanup(mapping, buf);
/*
 * adf_sw_complete_fence - build a sw_sync completion fence for a post.
 * Lazily creates the device's sw_sync timeline on first use; the returned
 * fence is built from a sync point at dev->timeline_max.
 */
345 static struct sync_fence *adf_sw_complete_fence(struct adf_device *dev)
348 struct sync_fence *complete_fence;
350 if (!dev->timeline) {
351 dev->timeline = sw_sync_timeline_create(dev->base.name);
353 return ERR_PTR(-ENOMEM);
354 dev->timeline_max = 1;
358 pt = sw_sync_pt_create(dev->timeline, dev->timeline_max);
361 complete_fence = sync_fence_create(dev->base.name, pt);
363 goto err_fence_create;
365 return complete_fence;
/* NOTE(review): this -ENOSYS return looks like part of an error label at
 * the end of the function -- the label lines are not visible here. */
371 return ERR_PTR(-ENOSYS);
375 * adf_device_post - flip to a new set of buffers
377 * @dev: device targeted by the flip
378 * @intfs: interfaces targeted by the flip
379 * @n_intfs: number of targeted interfaces
380 * @bufs: description of buffers displayed
381 * @n_bufs: number of buffers displayed
382 * @custom_data: driver-private data
383 * @custom_data_size: size of driver-private data
385 * adf_device_post() will copy @intfs, @bufs, and @custom_data, so they may
386 * point to variables on the stack. adf_device_post() also takes its own
387 * reference on each of the dma-bufs in @bufs. The adf_device_post_nocopy()
388 * variant transfers ownership of these resources to ADF instead.
390 * On success, returns a sync fence which signals when the buffers are removed
391 * from the screen. On failure, returns ERR_PTR(-errno).
393 struct sync_fence *adf_device_post(struct adf_device *dev,
394 struct adf_interface **intfs, size_t n_intfs,
395 struct adf_buffer *bufs, size_t n_bufs, void *custom_data,
396 size_t custom_data_size)
398 struct adf_interface **intfs_copy = NULL;
399 struct adf_buffer *bufs_copy = NULL;
400 void *custom_data_copy = NULL;
401 struct sync_fence *ret;
/* Copy every caller-owned array so the originals may live on the stack. */
404 intfs_copy = kzalloc(sizeof(intfs_copy[0]) * n_intfs, GFP_KERNEL);
406 return ERR_PTR(-ENOMEM);
408 bufs_copy = kzalloc(sizeof(bufs_copy[0]) * n_bufs, GFP_KERNEL);
410 ret = ERR_PTR(-ENOMEM);
414 custom_data_copy = kzalloc(custom_data_size, GFP_KERNEL);
415 if (!custom_data_copy) {
416 ret = ERR_PTR(-ENOMEM);
/* Take a reference on each dma-buf; the nocopy variant consumes them. */
420 for (i = 0; i < n_bufs; i++) {
422 for (j = 0; j < bufs[i].n_planes; j++)
423 get_dma_buf(bufs[i].dma_bufs[j]);
426 memcpy(intfs_copy, intfs, sizeof(intfs_copy[0]) * n_intfs);
427 memcpy(bufs_copy, bufs, sizeof(bufs_copy[0]) * n_bufs);
428 memcpy(custom_data_copy, custom_data, custom_data_size);
/* On success, ownership of the copies transfers to the nocopy variant. */
430 ret = adf_device_post_nocopy(dev, intfs_copy, n_intfs, bufs_copy,
431 n_bufs, custom_data_copy, custom_data_size);
/* Error path: drop the dma-buf references and free the copies. */
438 for (i = 0; i < n_bufs; i++) {
440 for (j = 0; j < bufs[i].n_planes; j++)
441 dma_buf_put(bufs[i].dma_bufs[j]);
444 kfree(custom_data_copy);
449 EXPORT_SYMBOL(adf_device_post);
452 * adf_device_post_nocopy - flip to a new set of buffers
454 * adf_device_post_nocopy() has the same behavior as adf_device_post(),
455 * except ADF does not copy @intfs, @bufs, or @custom_data, and it does
456 * not take an extra reference on the dma-bufs in @bufs.
458 * @intfs, @bufs, and @custom_data must point to buffers allocated by
459 * kmalloc(). On success, ADF takes ownership of these buffers and the dma-bufs
460 * in @bufs, and will kfree()/dma_buf_put() them when they are no longer needed.
461 * On failure, adf_device_post_nocopy() does NOT take ownership of these
462 * buffers or the dma-bufs, and the caller must clean them up.
464 * adf_device_post_nocopy() is mainly intended for implementing ADF's ioctls.
465 * Clients may find the nocopy variant useful in limited cases, but most should
466 * call adf_device_post() instead.
468 struct sync_fence *adf_device_post_nocopy(struct adf_device *dev,
469 struct adf_interface **intfs, size_t n_intfs,
470 struct adf_buffer *bufs, size_t n_bufs,
471 void *custom_data, size_t custom_data_size)
473 struct adf_pending_post *cfg;
474 struct adf_buffer_mapping *mappings;
475 struct sync_fence *ret;
479 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
481 return ERR_PTR(-ENOMEM);
483 mappings = kzalloc(sizeof(mappings[0]) * n_bufs, GFP_KERNEL);
485 ret = ERR_PTR(-ENOMEM);
489 mutex_lock(&dev->client_lock);
/* Validate and DMA-map every buffer before showing the config to the driver. */
491 for (i = 0; i < n_bufs; i++) {
492 err = adf_buffer_validate(&bufs[i]);
498 err = adf_buffer_map(dev, &bufs[i], &mappings[i]);
/* Assemble the pending post and let the driver validate the whole config. */
505 INIT_LIST_HEAD(&cfg->head);
506 cfg->config.n_bufs = n_bufs;
507 cfg->config.bufs = bufs;
508 cfg->config.mappings = mappings;
509 cfg->config.custom_data = custom_data;
510 cfg->config.custom_data_size = custom_data_size;
512 err = dev->ops->validate(dev, &cfg->config, &cfg->state);
518 mutex_lock(&dev->post_lock);
/* Drivers may supply their own completion fence; otherwise fall back to a
 * sw_sync fence managed by ADF. */
520 if (dev->ops->complete_fence)
521 ret = dev->ops->complete_fence(dev, &cfg->config,
524 ret = adf_sw_complete_fence(dev);
/* Queue the post for the worker thread to flip asynchronously. */
529 list_add_tail(&cfg->head, &dev->post_list);
530 queue_kthread_work(&dev->post_worker, &dev->post_work);
531 mutex_unlock(&dev->post_lock);
532 mutex_unlock(&dev->client_lock);
/* Error paths below: unwind locks and any buffer mappings already made
 * (labels themselves are not visible in this listing). */
537 mutex_unlock(&dev->post_lock);
540 for (i = 0; i < n_bufs; i++)
541 adf_buffer_mapping_cleanup(&mappings[i], &bufs[i]);
543 mutex_unlock(&dev->client_lock);
550 EXPORT_SYMBOL(adf_device_post_nocopy);
/*
 * adf_attachment_list_to_array - copy up to @size attachments from list @src
 * into the caller-provided array @dst.  Caller holds dev->client_lock.
 */
552 static void adf_attachment_list_to_array(struct adf_device *dev,
553 struct list_head *src, struct adf_attachment *dst, size_t size)
555 struct adf_attachment_list *entry;
561 list_for_each_entry(entry, src, head) {
564 dst[i] = entry->attachment;
570 * adf_device_attachments - get device's list of active attachments
573 * @attachments: storage for the attachment list (optional)
574 * @n_attachments: length of @attachments
576 * If @attachments is not NULL, adf_device_attachments() will copy up to
577 * @n_attachments entries into @attachments.
579 * Returns the length of the active attachment list.
581 size_t adf_device_attachments(struct adf_device *dev,
582 struct adf_attachment *attachments, size_t n_attachments)
/* Snapshot the active-attachment list and its length under client_lock. */
586 mutex_lock(&dev->client_lock);
587 adf_attachment_list_to_array(dev, &dev->attached, attachments,
589 retval = dev->n_attached;
590 mutex_unlock(&dev->client_lock);
594 EXPORT_SYMBOL(adf_device_attachments);
597 * adf_device_attachments_allowed - get device's list of allowed attachments
600 * @attachments: storage for the attachment list (optional)
601 * @n_attachments: length of @attachments
603 * If @attachments is not NULL, adf_device_attachments_allowed() will copy up to
604 * @n_attachments entries into @attachments.
606 * Returns the length of the allowed attachment list.
608 size_t adf_device_attachments_allowed(struct adf_device *dev,
609 struct adf_attachment *attachments, size_t n_attachments)
/* Snapshot the allowed-attachment list and its length under client_lock. */
613 mutex_lock(&dev->client_lock);
614 adf_attachment_list_to_array(dev, &dev->attach_allowed, attachments,
616 retval = dev->n_attach_allowed;
617 mutex_unlock(&dev->client_lock);
621 EXPORT_SYMBOL(adf_device_attachments_allowed);
624 * adf_device_attached - return whether an overlay engine and interface are
627 * @dev: the parent device
628 * @eng: the overlay engine
629 * @intf: the interface
631 bool adf_device_attached(struct adf_device *dev, struct adf_overlay_engine *eng,
632 struct adf_interface *intf)
634 struct adf_attachment_list *attachment;
/* Membership test on the active-attachment list, under client_lock. */
636 mutex_lock(&dev->client_lock);
637 attachment = adf_attachment_find(&dev->attached, eng, intf);
638 mutex_unlock(&dev->client_lock);
640 return attachment != NULL;
642 EXPORT_SYMBOL(adf_device_attached);
645 * adf_device_attach_allowed - return whether the ADF device supports attaching
646 * an overlay engine and interface
648 * @dev: the parent device
649 * @eng: the overlay engine
650 * @intf: the interface
652 bool adf_device_attach_allowed(struct adf_device *dev,
653 struct adf_overlay_engine *eng, struct adf_interface *intf)
655 struct adf_attachment_list *attachment;
/* Membership test on the allowed-attachment list, under client_lock. */
657 mutex_lock(&dev->client_lock);
658 attachment = adf_attachment_find(&dev->attach_allowed, eng, intf);
659 mutex_unlock(&dev->client_lock);
661 return attachment != NULL;
663 EXPORT_SYMBOL(adf_device_attach_allowed);
665 * adf_device_attach - attach an overlay engine to an interface
667 * @dev: the parent device
668 * @eng: the overlay engine
669 * @intf: the interface
671 * Returns 0 on success, -%EINVAL if attaching @intf and @eng is not allowed,
672 * -%EALREADY if @intf and @eng are already attached, or -errno on any other
675 int adf_device_attach(struct adf_device *dev, struct adf_overlay_engine *eng,
676 struct adf_interface *intf)
679 struct adf_attachment_list *attachment = NULL;
/* Validate the pairing (helper semantics not visible here) before locking. */
681 ret = adf_attachment_validate(dev, eng, intf);
685 mutex_lock(&dev->client_lock);
/* Enforce the device-wide attachment cap. */
687 if (dev->n_attached == ADF_MAX_ATTACHMENTS) {
/* The pairing must be on the allowed list and not already active. */
692 if (!adf_attachment_find(&dev->attach_allowed, eng, intf)) {
697 if (adf_attachment_find(&dev->attached, eng, intf)) {
/* Ask the driver to perform the attach, then record the bookkeeping entry. */
702 ret = adf_device_attach_op(dev, eng, intf);
706 attachment = kzalloc(sizeof(*attachment), GFP_KERNEL);
712 attachment->attachment.interface = intf;
713 attachment->attachment.overlay_engine = eng;
714 list_add_tail(&attachment->head, &dev->attached);
718 mutex_unlock(&dev->client_lock);
724 EXPORT_SYMBOL(adf_device_attach);
727 * adf_device_detach - detach an overlay engine from an interface
729 * @dev: the parent device
730 * @eng: the overlay engine
731 * @intf: the interface
733 * Returns 0 on success, -%EINVAL if @intf and @eng are not attached,
734 * or -errno on any other failure.
736 int adf_device_detach(struct adf_device *dev, struct adf_overlay_engine *eng,
737 struct adf_interface *intf)
740 struct adf_attachment_list *attachment;
742 ret = adf_attachment_validate(dev, eng, intf);
746 mutex_lock(&dev->client_lock);
/* Only a currently-attached pairing can be detached. */
748 attachment = adf_attachment_find(&dev->attached, eng, intf);
/* Driver-side detach first, then drop the bookkeeping entry. */
754 ret = adf_device_detach_op(dev, eng, intf);
758 adf_attachment_free(attachment);
761 mutex_unlock(&dev->client_lock);
764 EXPORT_SYMBOL(adf_device_detach);
767 * adf_interface_simple_buffer_alloc - allocate a simple buffer
769 * @intf: target interface
770 * @w: width in pixels
771 * @h: height in pixels
772 * @format: format fourcc
773 * @dma_buf: returns the allocated buffer
774 * @offset: returns the byte offset of the allocated buffer's first pixel
775 * @pitch: returns the allocated buffer's pitch
777 * See &struct adf_simple_buffer_alloc for a description of simple buffers and
780 * Returns 0 on success or -errno on failure.
782 int adf_interface_simple_buffer_alloc(struct adf_interface *intf, u16 w, u16 h,
783 u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch)
/* Simple-buffer allocation is optional for drivers. */
785 if (!intf->ops || !intf->ops->alloc_simple_buffer)
/* Simple buffers are restricted to standard RGB fourccs. */
788 if (!adf_format_is_rgb(format))
791 return intf->ops->alloc_simple_buffer(intf, w, h, format, dma_buf,
794 EXPORT_SYMBOL(adf_interface_simple_buffer_alloc);
797 * adf_interface_simple_post - flip to a single buffer
799 * @intf: interface targeted by the flip
800 * @buf: buffer to display
802 * adf_interface_simple_post() can be used generically for simple display
803 * configurations, since the client does not need to provide any driver-private
804 * configuration data.
806 * adf_interface_simple_post() has the same copying semantics as
809 * On success, returns a sync fence which signals when the buffer is removed
810 * from the screen. On failure, returns ERR_PTR(-errno).
812 struct sync_fence *adf_interface_simple_post(struct adf_interface *intf,
813 struct adf_buffer *buf)
815 size_t custom_data_size = 0;
816 void *custom_data = NULL;
817 struct sync_fence *ret;
/* If the driver needs private data for a simple post, allocate a
 * max-size scratch buffer and let the driver describe the post into it. */
819 if (intf->ops && intf->ops->describe_simple_post) {
822 custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL);
824 ret = ERR_PTR(-ENOMEM);
828 err = intf->ops->describe_simple_post(intf, buf, custom_data,
/* Delegate to the copying post path with a single interface and buffer. */
836 ret = adf_device_post(adf_interface_parent(intf), &intf, 1, buf, 1,
837 custom_data, custom_data_size);
842 EXPORT_SYMBOL(adf_interface_simple_post);