/*
 * Copyright (C) 2013 Google, Inc.
 * adf_modeinfo_{set_name,set_vrefresh} modified from
 * drivers/gpu/drm/drm_modes.c
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
17 #include <linux/device.h>
18 #include <linux/idr.h>
19 #include <linux/highmem.h>
20 #include <linux/memblock.h>
21 #include <linux/module.h>
22 #include <linux/platform_device.h>
23 #include <linux/slab.h>
25 #include <video/adf_format.h>
32 #include "adf_sysfs.h"
34 #define CREATE_TRACE_POINTS
35 #include "adf_trace.h"
/* Fence-wait timeouts used by adf_fence_wait(): a short wait is tried
 * first, then a longer one, so a stuck pipeline dumps debug info twice. */
#define ADF_SHORT_FENCE_TIMEOUT (1 * MSEC_PER_SEC)
#define ADF_LONG_FENCE_TIMEOUT (10 * MSEC_PER_SEC)

/* Global id -> struct adf_device map; ids are handed out by adf_obj_init(). */
static DEFINE_IDR(adf_devices);
42 static void adf_fence_wait(struct adf_device *dev, struct sync_fence *fence)
44 /* sync_fence_wait() dumps debug information on timeout. Experience
45 has shown that if the pipeline gets stuck, a short timeout followed
46 by a longer one provides useful information for debugging. */
47 int err = sync_fence_wait(fence, ADF_SHORT_FENCE_TIMEOUT);
52 err = sync_fence_wait(fence, ADF_LONG_FENCE_TIMEOUT);
55 dev_warn(&dev->base.dev, "error waiting on fence: %d\n", err);
58 void adf_buffer_cleanup(struct adf_buffer *buf)
61 for (i = 0; i < ARRAY_SIZE(buf->dma_bufs); i++)
63 dma_buf_put(buf->dma_bufs[i]);
65 if (buf->acquire_fence)
66 sync_fence_put(buf->acquire_fence);
69 void adf_buffer_mapping_cleanup(struct adf_buffer_mapping *mapping,
70 struct adf_buffer *buf)
72 /* calling adf_buffer_mapping_cleanup() is safe even if mapping is
73 uninitialized or partially-initialized, as long as it was
74 zeroed on allocation */
76 for (i = 0; i < ARRAY_SIZE(mapping->sg_tables); i++) {
77 if (mapping->sg_tables[i])
78 dma_buf_unmap_attachment(mapping->attachments[i],
79 mapping->sg_tables[i], DMA_TO_DEVICE);
80 if (mapping->attachments[i])
81 dma_buf_detach(buf->dma_bufs[i],
82 mapping->attachments[i]);
86 void adf_post_cleanup(struct adf_device *dev, struct adf_pending_post *post)
91 dev->ops->state_free(dev, post->state);
93 for (i = 0; i < post->config.n_bufs; i++) {
94 adf_buffer_mapping_cleanup(&post->config.mappings[i],
95 &post->config.bufs[i]);
96 adf_buffer_cleanup(&post->config.bufs[i]);
99 kfree(post->config.custom_data);
100 kfree(post->config.mappings);
101 kfree(post->config.bufs);
/*
 * adf_sw_advance_timeline - signal retirement via the device's sw_sync
 * timeline.  Only reachable when the driver implements neither
 * complete_fence nor advance_timeline (enforced in adf_device_init()),
 * which requires CONFIG_SW_SYNC.
 */
static void adf_sw_advance_timeline(struct adf_device *dev)
{
#ifdef CONFIG_SW_SYNC
	sw_sync_timeline_inc(dev->timeline, 1);
#else
	BUG();
#endif
}
114 static void adf_post_work_func(struct kthread_work *work)
116 struct adf_device *dev =
117 container_of(work, struct adf_device, post_work);
118 struct adf_pending_post *post, *next;
119 struct list_head saved_list;
121 mutex_lock(&dev->post_lock);
122 memcpy(&saved_list, &dev->post_list, sizeof(saved_list));
123 list_replace_init(&dev->post_list, &saved_list);
124 mutex_unlock(&dev->post_lock);
126 list_for_each_entry_safe(post, next, &saved_list, head) {
129 for (i = 0; i < post->config.n_bufs; i++) {
130 struct sync_fence *fence =
131 post->config.bufs[i].acquire_fence;
133 adf_fence_wait(dev, fence);
136 dev->ops->post(dev, &post->config, post->state);
138 if (dev->ops->advance_timeline)
139 dev->ops->advance_timeline(dev, &post->config,
142 adf_sw_advance_timeline(dev);
144 list_del(&post->head);
146 adf_post_cleanup(dev, dev->onscreen);
147 dev->onscreen = post;
151 void adf_attachment_free(struct adf_attachment_list *attachment)
153 list_del(&attachment->head);
157 struct adf_event_refcount *adf_obj_find_event_refcount(struct adf_obj *obj,
158 enum adf_event_type type)
160 struct rb_root *root = &obj->event_refcount;
161 struct rb_node **new = &(root->rb_node);
162 struct rb_node *parent = NULL;
163 struct adf_event_refcount *refcount;
166 refcount = container_of(*new, struct adf_event_refcount, node);
169 if (refcount->type > type)
170 new = &(*new)->rb_left;
171 else if (refcount->type < type)
172 new = &(*new)->rb_right;
177 refcount = kzalloc(sizeof(*refcount), GFP_KERNEL);
180 refcount->type = type;
182 rb_link_node(&refcount->node, parent, new);
183 rb_insert_color(&refcount->node, root);
188 * adf_event_get - increase the refcount for an event
190 * @obj: the object that produces the event
191 * @type: the event type
193 * ADF will call the object's set_event() op if needed. ops are allowed
194 * to sleep, so adf_event_get() must NOT be called from an atomic context.
196 * Returns 0 if successful, or -%EINVAL if the object does not support the
197 * requested event type.
199 int adf_event_get(struct adf_obj *obj, enum adf_event_type type)
201 struct adf_event_refcount *refcount;
205 ret = adf_obj_check_supports_event(obj, type);
209 mutex_lock(&obj->event_lock);
211 refcount = adf_obj_find_event_refcount(obj, type);
217 old_refcount = refcount->refcount++;
219 if (old_refcount == 0) {
220 obj->ops->set_event(obj, type, true);
221 trace_adf_event_enable(obj, type);
225 mutex_unlock(&obj->event_lock);
228 EXPORT_SYMBOL(adf_event_get);
231 * adf_event_put - decrease the refcount for an event
233 * @obj: the object that produces the event
234 * @type: the event type
236 * ADF will call the object's set_event() op if needed. ops are allowed
237 * to sleep, so adf_event_put() must NOT be called from an atomic context.
239 * Returns 0 if successful, -%EINVAL if the object does not support the
240 * requested event type, or -%EALREADY if the refcount is already 0.
242 int adf_event_put(struct adf_obj *obj, enum adf_event_type type)
244 struct adf_event_refcount *refcount;
248 ret = adf_obj_check_supports_event(obj, type);
253 mutex_lock(&obj->event_lock);
255 refcount = adf_obj_find_event_refcount(obj, type);
261 old_refcount = refcount->refcount--;
263 if (WARN_ON(old_refcount == 0)) {
264 refcount->refcount++;
266 } else if (old_refcount == 1) {
267 obj->ops->set_event(obj, type, false);
268 trace_adf_event_disable(obj, type);
272 mutex_unlock(&obj->event_lock);
275 EXPORT_SYMBOL(adf_event_put);
278 * adf_vsync_wait - wait for a vsync event on a display interface
280 * @intf: the display interface
281 * @timeout: timeout in jiffies (0 = wait indefinitely)
283 * adf_vsync_wait() may sleep, so it must NOT be called from an atomic context.
285 * This function returns -%ERESTARTSYS if it is interrupted by a signal.
286 * If @timeout == 0 then this function returns 0 on vsync. If @timeout > 0 then
287 * this function returns the number of remaining jiffies or -%ETIMEDOUT on
290 int adf_vsync_wait(struct adf_interface *intf, long timeout)
296 read_lock_irqsave(&intf->vsync_lock, flags);
297 timestamp = intf->vsync_timestamp;
298 read_unlock_irqrestore(&intf->vsync_lock, flags);
302 ret = wait_event_interruptible_timeout(intf->vsync_wait,
303 !ktime_equal(timestamp,
304 intf->vsync_timestamp),
305 msecs_to_jiffies(timeout));
306 if (ret == 0 && ktime_equal(timestamp, intf->vsync_timestamp))
309 ret = wait_event_interruptible(intf->vsync_wait,
310 !ktime_equal(timestamp,
311 intf->vsync_timestamp));
317 EXPORT_SYMBOL(adf_vsync_wait);
319 static void adf_event_queue(struct adf_obj *obj, struct adf_event *event)
321 struct adf_file *file;
324 trace_adf_event(obj, event->type);
326 spin_lock_irqsave(&obj->file_lock, flags);
328 list_for_each_entry(file, &obj->file_list, head)
329 if (test_bit(event->type, file->event_subscriptions))
330 adf_file_queue_event(file, event);
332 spin_unlock_irqrestore(&obj->file_lock, flags);
336 * adf_event_notify - notify userspace of a driver-private event
338 * @obj: the ADF object that produced the event
341 * adf_event_notify() may be called safely from an atomic context. It will
342 * copy @event if needed, so @event may point to a variable on the stack.
344 * Drivers must NOT call adf_event_notify() for vsync and hotplug events.
345 * ADF provides adf_vsync_notify() and
346 * adf_hotplug_notify_{connected,disconnected}() for these events.
348 int adf_event_notify(struct adf_obj *obj, struct adf_event *event)
350 if (WARN_ON(event->type == ADF_EVENT_VSYNC ||
351 event->type == ADF_EVENT_HOTPLUG))
354 adf_event_queue(obj, event);
357 EXPORT_SYMBOL(adf_event_notify);
360 * adf_vsync_notify - notify ADF of a display interface's vsync event
362 * @intf: the display interface
363 * @timestamp: the time the vsync occurred
365 * adf_vsync_notify() may be called safely from an atomic context.
367 void adf_vsync_notify(struct adf_interface *intf, ktime_t timestamp)
370 struct adf_vsync_event event;
372 write_lock_irqsave(&intf->vsync_lock, flags);
373 intf->vsync_timestamp = timestamp;
374 write_unlock_irqrestore(&intf->vsync_lock, flags);
376 wake_up_interruptible_all(&intf->vsync_wait);
378 event.base.type = ADF_EVENT_VSYNC;
379 event.base.length = sizeof(event);
380 event.timestamp = ktime_to_ns(timestamp);
381 adf_event_queue(&intf->base, &event.base);
383 EXPORT_SYMBOL(adf_vsync_notify);
385 void adf_hotplug_notify(struct adf_interface *intf, bool connected,
386 struct drm_mode_modeinfo *modelist, size_t n_modes)
389 struct adf_hotplug_event event;
390 struct drm_mode_modeinfo *old_modelist;
392 write_lock_irqsave(&intf->hotplug_modelist_lock, flags);
393 old_modelist = intf->modelist;
394 intf->hotplug_detect = connected;
395 intf->modelist = modelist;
396 intf->n_modes = n_modes;
397 write_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
401 event.base.length = sizeof(event);
402 event.base.type = ADF_EVENT_HOTPLUG;
403 event.connected = connected;
404 adf_event_queue(&intf->base, &event.base);
408 * adf_hotplug_notify_connected - notify ADF of a display interface being
409 * connected to a display
411 * @intf: the display interface
412 * @modelist: hardware modes supported by display
413 * @n_modes: length of modelist
415 * @modelist is copied as needed, so it may point to a variable on the stack.
417 * adf_hotplug_notify_connected() may NOT be called safely from an atomic
420 * Returns 0 on success or error code (<0) on error.
422 int adf_hotplug_notify_connected(struct adf_interface *intf,
423 struct drm_mode_modeinfo *modelist, size_t n_modes)
425 struct drm_mode_modeinfo *modelist_copy;
427 if (n_modes > ADF_MAX_MODES)
430 modelist_copy = kzalloc(sizeof(modelist_copy[0]) * n_modes,
434 memcpy(modelist_copy, modelist, sizeof(modelist_copy[0]) * n_modes);
436 adf_hotplug_notify(intf, true, modelist_copy, n_modes);
439 EXPORT_SYMBOL(adf_hotplug_notify_connected);
442 * adf_hotplug_notify_disconnected - notify ADF of a display interface being
443 * disconnected from a display
445 * @intf: the display interface
447 * adf_hotplug_notify_disconnected() may be called safely from an atomic
450 void adf_hotplug_notify_disconnected(struct adf_interface *intf)
452 adf_hotplug_notify(intf, false, NULL, 0);
454 EXPORT_SYMBOL(adf_hotplug_notify_disconnected);
456 static int adf_obj_init(struct adf_obj *obj, enum adf_obj_type type,
457 struct idr *idr, struct adf_device *parent,
458 const struct adf_obj_ops *ops, const char *fmt, va_list args)
462 if (ops && ops->supports_event && !ops->set_event) {
463 pr_err("%s: %s implements supports_event but not set_event\n",
464 __func__, adf_obj_type_str(type));
468 ret = idr_alloc(idr, obj, 0, 0, GFP_KERNEL);
470 pr_err("%s: allocating object id failed: %d\n", __func__, ret);
475 vscnprintf(obj->name, sizeof(obj->name), fmt, args);
479 obj->parent = parent;
480 mutex_init(&obj->event_lock);
481 obj->event_refcount = RB_ROOT;
482 spin_lock_init(&obj->file_lock);
483 INIT_LIST_HEAD(&obj->file_list);
487 static void adf_obj_destroy(struct adf_obj *obj, struct idr *idr)
489 struct rb_node *node = rb_first(&obj->event_refcount);
492 struct adf_event_refcount *refcount =
493 container_of(node, struct adf_event_refcount,
496 node = rb_first(&obj->event_refcount);
499 mutex_destroy(&obj->event_lock);
500 idr_remove(idr, obj->id);
504 * adf_device_init - initialize ADF-internal data for a display device
505 * and create sysfs entries
507 * @dev: the display device
508 * @parent: the device's parent device
509 * @ops: the device's associated ops
510 * @fmt: formatting string for the display device's name
512 * @fmt specifies the device's sysfs filename and the name returned to
513 * userspace through the %ADF_GET_DEVICE_DATA ioctl.
515 * Returns 0 on success or error code (<0) on failure.
517 int adf_device_init(struct adf_device *dev, struct device *parent,
518 const struct adf_device_ops *ops, const char *fmt, ...)
523 if (!ops->validate || !ops->post) {
524 pr_err("%s: device must implement validate and post\n",
529 if (!ops->complete_fence && !ops->advance_timeline) {
530 if (!IS_ENABLED(CONFIG_SW_SYNC)) {
531 pr_err("%s: device requires sw_sync but it is not enabled in the kernel\n",
535 } else if (!(ops->complete_fence && ops->advance_timeline)) {
536 pr_err("%s: device must implement both complete_fence and advance_timeline, or implement neither\n",
541 memset(dev, 0, sizeof(*dev));
544 ret = adf_obj_init(&dev->base, ADF_OBJ_DEVICE, &adf_devices, dev,
545 &ops->base, fmt, args);
552 idr_init(&dev->overlay_engines);
553 idr_init(&dev->interfaces);
554 mutex_init(&dev->client_lock);
555 INIT_LIST_HEAD(&dev->post_list);
556 mutex_init(&dev->post_lock);
557 init_kthread_worker(&dev->post_worker);
558 INIT_LIST_HEAD(&dev->attached);
559 INIT_LIST_HEAD(&dev->attach_allowed);
561 dev->post_thread = kthread_run(kthread_worker_fn,
562 &dev->post_worker, dev->base.name);
563 if (IS_ERR(dev->post_thread)) {
564 ret = PTR_ERR(dev->post_thread);
565 dev->post_thread = NULL;
567 pr_err("%s: failed to run config posting thread: %d\n",
571 init_kthread_work(&dev->post_work, adf_post_work_func);
573 ret = adf_device_sysfs_init(dev);
580 adf_device_destroy(dev);
583 EXPORT_SYMBOL(adf_device_init);
586 * adf_device_destroy - clean up ADF-internal data for a display device
588 * @dev: the display device
590 void adf_device_destroy(struct adf_device *dev)
592 struct adf_attachment_list *entry, *next;
594 idr_destroy(&dev->interfaces);
595 idr_destroy(&dev->overlay_engines);
597 if (dev->post_thread) {
598 flush_kthread_worker(&dev->post_worker);
599 kthread_stop(dev->post_thread);
603 adf_post_cleanup(dev, dev->onscreen);
604 adf_device_sysfs_destroy(dev);
605 list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
606 adf_attachment_free(entry);
608 list_for_each_entry_safe(entry, next, &dev->attached, head) {
609 adf_attachment_free(entry);
611 mutex_destroy(&dev->post_lock);
612 mutex_destroy(&dev->client_lock);
613 adf_obj_destroy(&dev->base, &adf_devices);
615 EXPORT_SYMBOL(adf_device_destroy);
618 * adf_interface_init - initialize ADF-internal data for a display interface
619 * and create sysfs entries
621 * @intf: the display interface
622 * @dev: the interface's "parent" display device
623 * @type: interface type (see enum @adf_interface_type)
624 * @idx: which interface of type @type;
625 * e.g. interface DSI.1 -> @type=%ADF_INTF_TYPE_DSI, @idx=1
626 * @flags: informational flags (bitmask of %ADF_INTF_FLAG_* values)
627 * @ops: the interface's associated ops
628 * @fmt: formatting string for the display interface's name
630 * @dev must have previously been initialized with adf_device_init().
632 * @fmt affects the name returned to userspace through the
633 * %ADF_GET_INTERFACE_DATA ioctl. It does not affect the sysfs filename,
634 * which is derived from @dev's name.
636 * Returns 0 on success or error code (<0) on failure.
638 int adf_interface_init(struct adf_interface *intf, struct adf_device *dev,
639 enum adf_interface_type type, u32 idx, u32 flags,
640 const struct adf_interface_ops *ops, const char *fmt, ...)
644 const u32 allowed_flags = ADF_INTF_FLAG_PRIMARY |
645 ADF_INTF_FLAG_EXTERNAL;
647 if (dev->n_interfaces == ADF_MAX_INTERFACES) {
648 pr_err("%s: parent device %s has too many interfaces\n",
649 __func__, dev->base.name);
653 if (type >= ADF_INTF_MEMORY && type <= ADF_INTF_TYPE_DEVICE_CUSTOM) {
654 pr_err("%s: invalid interface type %u\n", __func__, type);
658 if (flags & ~allowed_flags) {
659 pr_err("%s: invalid interface flags 0x%X\n", __func__,
660 flags & ~allowed_flags);
664 memset(intf, 0, sizeof(*intf));
667 ret = adf_obj_init(&intf->base, ADF_OBJ_INTERFACE, &dev->interfaces,
668 dev, ops ? &ops->base : NULL, fmt, args);
677 intf->dpms_state = DRM_MODE_DPMS_OFF;
678 init_waitqueue_head(&intf->vsync_wait);
679 rwlock_init(&intf->vsync_lock);
680 rwlock_init(&intf->hotplug_modelist_lock);
682 ret = adf_interface_sysfs_init(intf);
690 adf_obj_destroy(&intf->base, &dev->interfaces);
693 EXPORT_SYMBOL(adf_interface_init);
696 * adf_interface_destroy - clean up ADF-internal data for a display interface
698 * @intf: the display interface
700 void adf_interface_destroy(struct adf_interface *intf)
702 struct adf_device *dev = adf_interface_parent(intf);
703 struct adf_attachment_list *entry, *next;
705 mutex_lock(&dev->client_lock);
706 list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
707 if (entry->attachment.interface == intf) {
708 adf_attachment_free(entry);
709 dev->n_attach_allowed--;
712 list_for_each_entry_safe(entry, next, &dev->attached, head) {
713 if (entry->attachment.interface == intf) {
714 adf_device_detach_op(dev,
715 entry->attachment.overlay_engine, intf);
716 adf_attachment_free(entry);
720 kfree(intf->modelist);
721 adf_interface_sysfs_destroy(intf);
722 adf_obj_destroy(&intf->base, &dev->interfaces);
724 mutex_unlock(&dev->client_lock);
726 EXPORT_SYMBOL(adf_interface_destroy);
728 static bool adf_overlay_engine_has_custom_formats(
729 const struct adf_overlay_engine_ops *ops)
732 for (i = 0; i < ops->n_supported_formats; i++)
733 if (!adf_format_is_standard(ops->supported_formats[i]))
739 * adf_overlay_engine_init - initialize ADF-internal data for an
740 * overlay engine and create sysfs entries
742 * @eng: the overlay engine
743 * @dev: the overlay engine's "parent" display device
744 * @ops: the overlay engine's associated ops
745 * @fmt: formatting string for the overlay engine's name
747 * @dev must have previously been initialized with adf_device_init().
749 * @fmt affects the name returned to userspace through the
750 * %ADF_GET_OVERLAY_ENGINE_DATA ioctl. It does not affect the sysfs filename,
751 * which is derived from @dev's name.
753 * Returns 0 on success or error code (<0) on failure.
755 int adf_overlay_engine_init(struct adf_overlay_engine *eng,
756 struct adf_device *dev,
757 const struct adf_overlay_engine_ops *ops, const char *fmt, ...)
762 if (!ops->supported_formats) {
763 pr_err("%s: overlay engine must support at least one format\n",
768 if (ops->n_supported_formats > ADF_MAX_SUPPORTED_FORMATS) {
769 pr_err("%s: overlay engine supports too many formats\n",
774 if (adf_overlay_engine_has_custom_formats(ops) &&
775 !dev->ops->validate_custom_format) {
776 pr_err("%s: overlay engine has custom formats but parent device %s does not implement validate_custom_format\n",
777 __func__, dev->base.name);
781 memset(eng, 0, sizeof(*eng));
784 ret = adf_obj_init(&eng->base, ADF_OBJ_OVERLAY_ENGINE,
785 &dev->overlay_engines, dev, &ops->base, fmt, args);
792 ret = adf_overlay_engine_sysfs_init(eng);
799 adf_obj_destroy(&eng->base, &dev->overlay_engines);
802 EXPORT_SYMBOL(adf_overlay_engine_init);
805 * adf_interface_destroy - clean up ADF-internal data for an overlay engine
807 * @eng: the overlay engine
809 void adf_overlay_engine_destroy(struct adf_overlay_engine *eng)
811 struct adf_device *dev = adf_overlay_engine_parent(eng);
812 struct adf_attachment_list *entry, *next;
814 mutex_lock(&dev->client_lock);
815 list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
816 if (entry->attachment.overlay_engine == eng) {
817 adf_attachment_free(entry);
818 dev->n_attach_allowed--;
821 list_for_each_entry_safe(entry, next, &dev->attached, head) {
822 if (entry->attachment.overlay_engine == eng) {
823 adf_device_detach_op(dev, eng,
824 entry->attachment.interface);
825 adf_attachment_free(entry);
829 adf_overlay_engine_sysfs_destroy(eng);
830 adf_obj_destroy(&eng->base, &dev->overlay_engines);
831 mutex_unlock(&dev->client_lock);
833 EXPORT_SYMBOL(adf_overlay_engine_destroy);
835 struct adf_attachment_list *adf_attachment_find(struct list_head *list,
836 struct adf_overlay_engine *eng, struct adf_interface *intf)
838 struct adf_attachment_list *entry;
839 list_for_each_entry(entry, list, head) {
840 if (entry->attachment.interface == intf &&
841 entry->attachment.overlay_engine == eng)
847 int adf_attachment_validate(struct adf_device *dev,
848 struct adf_overlay_engine *eng, struct adf_interface *intf)
850 struct adf_device *intf_dev = adf_interface_parent(intf);
851 struct adf_device *eng_dev = adf_overlay_engine_parent(eng);
853 if (intf_dev != dev) {
854 dev_err(&dev->base.dev, "can't attach interface %s belonging to device %s\n",
855 intf->base.name, intf_dev->base.name);
859 if (eng_dev != dev) {
860 dev_err(&dev->base.dev, "can't attach overlay engine %s belonging to device %s\n",
861 eng->base.name, eng_dev->base.name);
869 * adf_attachment_allow - add a new entry to the list of allowed
872 * @dev: the parent device
873 * @eng: the overlay engine
874 * @intf: the interface
876 * adf_attachment_allow() indicates that the underlying display hardware allows
877 * @intf to scan out @eng's output. It is intended to be called at
878 * driver initialization for each supported overlay engine + interface pair.
880 * Returns 0 on success, -%EALREADY if the entry already exists, or -errno on
883 int adf_attachment_allow(struct adf_device *dev,
884 struct adf_overlay_engine *eng, struct adf_interface *intf)
887 struct adf_attachment_list *entry = NULL;
889 ret = adf_attachment_validate(dev, eng, intf);
893 mutex_lock(&dev->client_lock);
895 if (dev->n_attach_allowed == ADF_MAX_ATTACHMENTS) {
900 if (adf_attachment_find(&dev->attach_allowed, eng, intf)) {
905 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
911 entry->attachment.interface = intf;
912 entry->attachment.overlay_engine = eng;
913 list_add_tail(&entry->head, &dev->attach_allowed);
914 dev->n_attach_allowed++;
917 mutex_unlock(&dev->client_lock);
925 * adf_obj_type_str - string representation of an adf_obj_type
927 * @type: the object type
929 const char *adf_obj_type_str(enum adf_obj_type type)
932 case ADF_OBJ_OVERLAY_ENGINE:
933 return "overlay engine";
935 case ADF_OBJ_INTERFACE:
945 EXPORT_SYMBOL(adf_obj_type_str);
948 * adf_interface_type_str - string representation of an adf_interface's type
950 * @intf: the interface
952 const char *adf_interface_type_str(struct adf_interface *intf)
954 switch (intf->type) {
973 case ADF_INTF_MEMORY:
977 if (intf->type >= ADF_INTF_TYPE_DEVICE_CUSTOM) {
978 if (intf->ops && intf->ops->type_str)
979 return intf->ops->type_str(intf);
985 EXPORT_SYMBOL(adf_interface_type_str);
988 * adf_event_type_str - string representation of an adf_event_type
990 * @obj: ADF object that produced the event
993 const char *adf_event_type_str(struct adf_obj *obj, enum adf_event_type type)
996 case ADF_EVENT_VSYNC:
999 case ADF_EVENT_HOTPLUG:
1003 if (type >= ADF_EVENT_DEVICE_CUSTOM) {
1004 if (obj->ops && obj->ops->event_type_str)
1005 return obj->ops->event_type_str(obj, type);
1011 EXPORT_SYMBOL(adf_event_type_str);
1014 * adf_format_str - string representation of an ADF/DRM fourcc format
1016 * @format: format fourcc
1017 * @buf: target buffer for the format's string representation
1019 void adf_format_str(u32 format, char buf[ADF_FORMAT_STR_SIZE])
1021 buf[0] = format & 0xFF;
1022 buf[1] = (format >> 8) & 0xFF;
1023 buf[2] = (format >> 16) & 0xFF;
1024 buf[3] = (format >> 24) & 0xFF;
1027 EXPORT_SYMBOL(adf_format_str);
1030 * adf_format_validate_yuv - validate the number and size of planes in buffers
1031 * with a custom YUV format.
1033 * @dev: ADF device performing the validation
1034 * @buf: buffer to validate
1035 * @num_planes: expected number of planes
1036 * @hsub: expected horizontal chroma subsampling factor, in pixels
1037 * @vsub: expected vertical chroma subsampling factor, in pixels
1038 * @cpp: expected bytes per pixel for each plane (length @num_planes)
1040 * adf_format_validate_yuv() is intended to be called as a helper from @dev's
1041 * validate_custom_format() op.
1043 * Returns 0 if @buf has the expected number of planes and each plane
1044 * has sufficient size, or -EINVAL otherwise.
1046 int adf_format_validate_yuv(struct adf_device *dev, struct adf_buffer *buf,
1047 u8 num_planes, u8 hsub, u8 vsub, u8 cpp[])
1051 if (num_planes != buf->n_planes) {
1052 char format_str[ADF_FORMAT_STR_SIZE];
1053 adf_format_str(buf->format, format_str);
1054 dev_err(&dev->base.dev, "%u planes expected for format %s but %u planes provided\n",
1055 num_planes, format_str, buf->n_planes);
1059 if (buf->w == 0 || buf->w % hsub) {
1060 dev_err(&dev->base.dev, "bad buffer width %u\n", buf->w);
1064 if (buf->h == 0 || buf->h % vsub) {
1065 dev_err(&dev->base.dev, "bad buffer height %u\n", buf->h);
1069 for (i = 0; i < num_planes; i++) {
1070 u32 width = buf->w / (i != 0 ? hsub : 1);
1071 u32 height = buf->h / (i != 0 ? vsub : 1);
1072 u8 cpp = adf_format_plane_cpp(buf->format, i);
1074 if (buf->pitch[i] < (u64) width * cpp) {
1075 dev_err(&dev->base.dev, "plane %u pitch is shorter than buffer width (pitch = %u, width = %u, bpp = %u)\n",
1076 i, buf->pitch[i], width, cpp * 8);
1080 if ((u64) height * buf->pitch[i] + buf->offset[i] >
1081 buf->dma_bufs[i]->size) {
1082 dev_err(&dev->base.dev, "plane %u buffer too small (height = %u, pitch = %u, offset = %u, size = %zu)\n",
1083 i, height, buf->pitch[i],
1084 buf->offset[i], buf->dma_bufs[i]->size);
1091 EXPORT_SYMBOL(adf_format_validate_yuv);
1094 * adf_modeinfo_set_name - sets the name of a mode from its display resolution
1098 * adf_modeinfo_set_name() fills in @mode->name in the format
1099 * "[hdisplay]x[vdisplay](i)". It is intended to help drivers create
1100 * ADF/DRM-style modelists from other mode formats.
1102 void adf_modeinfo_set_name(struct drm_mode_modeinfo *mode)
1104 bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
1106 snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
1107 mode->hdisplay, mode->vdisplay,
1108 interlaced ? "i" : "");
1110 EXPORT_SYMBOL(adf_modeinfo_set_name);
1113 * adf_modeinfo_set_vrefresh - sets the vrefresh of a mode from its other
1118 * adf_modeinfo_set_vrefresh() calculates @mode->vrefresh from
1119 * @mode->{h,v}display and @mode->flags. It is intended to help drivers
1120 * create ADF/DRM-style modelists from other mode formats.
1122 void adf_modeinfo_set_vrefresh(struct drm_mode_modeinfo *mode)
1125 unsigned int calc_val;
1127 if (mode->vrefresh > 0)
1130 if (mode->htotal <= 0 || mode->vtotal <= 0)
1133 /* work out vrefresh the value will be x1000 */
1134 calc_val = (mode->clock * 1000);
1135 calc_val /= mode->htotal;
1136 refresh = (calc_val + mode->vtotal / 2) / mode->vtotal;
1138 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1140 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1142 if (mode->vscan > 1)
1143 refresh /= mode->vscan;
1145 mode->vrefresh = refresh;
1147 EXPORT_SYMBOL(adf_modeinfo_set_vrefresh);
1149 static int __init adf_init(void)
1153 err = adf_sysfs_init();
1160 static void __exit adf_exit(void)
1162 adf_sysfs_destroy();
1165 module_init(adf_init);
1166 module_exit(adf_exit);