1 /*************************************************************************/ /*!
3 @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
4 @License Dual MIT/GPLv2
6 The contents of this file are subject to the MIT license as set out below.
8 Permission is hereby granted, free of charge, to any person obtaining a copy
9 of this software and associated documentation files (the "Software"), to deal
10 in the Software without restriction, including without limitation the rights
11 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 copies of the Software, and to permit persons to whom the Software is
13 furnished to do so, subject to the following conditions:
15 The above copyright notice and this permission notice shall be included in
16 all copies or substantial portions of the Software.
18 Alternatively, the contents of this file may be used under the terms of
19 the GNU General Public License Version 2 ("GPL") in which case the provisions
20 of GPL are applicable instead of those above.
22 If you wish to allow use of your version of this file only under the terms of
23 GPL, and not to allow others to use your version of this file under the terms
24 of the MIT license, indicate your decision by deleting the provisions above
25 and replace them with the notice and other provisions required by GPL as set
26 out in the file called "GPL-COPYING" included in this distribution. If you do
27 not delete the provisions above, a recipient may use your version of this file
28 under the terms of either the MIT license or GPL.
30 This License is also included in this distribution in the file called
33 EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
34 PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
35 BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
36 PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
37 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
38 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
39 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 */ /**************************************************************************/
43 #include <linux/version.h>
44 #include <linux/console.h>
45 #include <linux/dma-buf.h>
46 #include <linux/uaccess.h>
47 #include <linux/module.h>
50 #include <drm/drm_fourcc.h>
52 #include <video/adf.h>
53 #include <video/adf_fbdev.h>
54 #include <video/adf_client.h>
56 #include <adf/adf_ext.h>
58 /* for sync_fence_put */
59 #include PVR_ANDROID_SYNC_HEADER
61 #include "adf_common.h"
64 #error adf_fbdev needs Linux framebuffer support. Enable it in your kernel.
67 MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
68 MODULE_LICENSE("Dual MIT/GPL");
70 /* NOTE: This is just an example of how to use adf. You should NOT use this
71 * module in a production environment. It is meaningless to layer adf
72 * on top of fbdev, as adf is more flexible than fbdev and adf itself
73 * provides fbdev emulation. Do not use this implementation generally!
76 #define DRVNAME "adf_fbdev"
78 #define FALLBACK_REFRESH_RATE 60
79 #define FALLBACK_DPI 160
81 #if defined(ADF_FBDEV_NUM_PREFERRED_BUFFERS)
82 #define NUM_PREFERRED_BUFFERS ADF_FBDEV_NUM_PREFERRED_BUFFERS
84 #define NUM_PREFERRED_BUFFERS 3
/* Per-buffer private data attached to each dmabuf exported from the fbdev
 * carveout. NOTE(review): the declaration is incomplete in this view; fields
 * referenced elsewhere (offset, length, vaddr, paddr, id, alloc_mask) are not
 * visible here.
 */
87 struct adf_fbdev_dmabuf {
/* Single-entry scatter table (fbdev memory is physically contiguous;
 * see adf_fbdev_alloc_buffer). */
88 struct sg_table sg_table;
94 /* Used for cleanup of dmabuf private data */
/* Points at the owning interface's alloc_lock; taken by
 * adf_fbdev_free_buffer when clearing this buffer's alloc_mask bit. */
95 spinlock_t *alloc_lock;
/* ADF device wrapper: embeds the adf_device base object (so the adf_device
 * pointer can be cast back to this struct, as adf_fbdev_post does) plus the
 * underlying fbdev fb_info. NOTE(review): declaration incomplete in this
 * view — a refcount member is used by adf_fbdev_open2/release2.
 */
100 struct adf_fbdev_device {
101 struct adf_device base;
102 struct fb_info *fb_info;
/* ADF interface wrapper: embeds the adf_interface base object plus the mode
 * inherited from the fbdev, reported physical dimensions, and the simple
 * buffer allocator state. NOTE(review): declaration incomplete in this view —
 * an alloc_mask bitfield member is used by the allocator below.
 */
106 struct adf_fbdev_interface {
107 struct adf_interface base;
/* Mode advertised to ADF; built from fb_info in init_adf_fbdev. */
108 struct drm_mode_modeinfo fb_mode;
/* Physical screen size reported via ->screen_size. */
109 u16 width_mm, height_mm;
110 struct fb_info *fb_info;
/* Protects alloc_mask in adf_fbdev_alloc_buffer/free_buffer. */
111 spinlock_t alloc_lock;
115 /* SIMPLE BUFFER MANAGER *****************************************************/
117 /* Handle alloc/free from the fbdev carveout (fix.smem_start -> fix.smem_size)
118 * region. This simple allocator sets a bit in the alloc_mask when a buffer is
119 * owned by dmabuf. When the dmabuf ->release() is called, the alloc_mask bit
120 * is cleared and the adf_fbdev_dmabuf object is freed.
122 * Since dmabuf relies on sg_table/scatterlists, and hence struct page*, this
123 * code may have problems if your framebuffer uses memory that is not in the
124 * kernel's page tables.
/* Allocate one fixed-size buffer slot from the fbdev carveout.
 *
 * Finds a free bit in interface->alloc_mask (up to NUM_PREFERRED_BUFFERS
 * slots, each line_length * yres bytes), fills in an adf_fbdev_dmabuf
 * describing that slice of fix.smem_start, and builds a one-entry sg_table
 * for it. Returns ERR_PTR(-ENOMEM) when no slot or memory is available.
 *
 * NOTE(review): several lines (closing braces, error-unwind paths such as
 * kfree/sg_free_table on failure) are not visible in this view.
 */
127 static struct adf_fbdev_dmabuf *
128 adf_fbdev_alloc_buffer(struct adf_fbdev_interface *interface)
130 struct adf_fbdev_dmabuf *fbdev_dmabuf;
131 struct scatterlist *sg;
/* Claim a free slot under the allocator lock. */
136 spin_lock(&interface->alloc_lock);
138 for (id = 0; id < NUM_PREFERRED_BUFFERS; id++) {
139 if (!(interface->alloc_mask & (1UL << id))) {
140 interface->alloc_mask |= (1UL << id);
145 spin_unlock(&interface->alloc_lock);
/* All slots taken. */
147 if (id == NUM_PREFERRED_BUFFERS)
148 return ERR_PTR(-ENOMEM);
/* One buffer = one full screen at the current mode. */
150 unitary_size = interface->fb_info->fix.line_length *
151 interface->fb_info->var.yres;
153 /* PAGE_SIZE alignment has been checked already, do NOT allow it
154 * through here. We are about to allocate an sg_list.
156 BUG_ON((unitary_size % PAGE_SIZE) != 0);
158 fbdev_dmabuf = kmalloc(sizeof(struct adf_fbdev_dmabuf), GFP_KERNEL);
160 return ERR_PTR(-ENOMEM);
162 /* We only need one scatterlist entry per buffer because fbdev memory
163 * is always physically contiguous.
165 err = sg_alloc_table(&fbdev_dmabuf->sg_table, 1, GFP_KERNEL);
171 /* Increment the reference count of this module as long as the
172 * adf_fbdev_dmabuf object exists. This prevents this module from
173 * being unloaded if the buffer is passed around by dmabuf.
175 if (!try_module_get(THIS_MODULE)) {
176 pr_err("try_module_get(THIS_MODULE) failed");
178 return ERR_PTR(-EFAULT);
/* Describe slot 'id' as a slice of the carveout. */
181 fbdev_dmabuf->offset = id * unitary_size;
182 fbdev_dmabuf->length = unitary_size;
183 fbdev_dmabuf->vaddr = interface->fb_info->screen_base +
184 fbdev_dmabuf->offset;
185 fbdev_dmabuf->paddr = interface->fb_info->fix.smem_start +
186 fbdev_dmabuf->offset;
188 sg_set_page(fbdev_dmabuf->sg_table.sgl,
189 pfn_to_page(PFN_DOWN(fbdev_dmabuf->paddr)),
190 fbdev_dmabuf->length, 0);
192 /* Shadow what ion is doing currently to ensure sg_dma_address() is
193 * valid. This is not strictly correct as the dma address should
194 * only be valid after mapping (ownership changed), and we haven't
195 * mapped the scatter list yet.
197 for_each_sg(fbdev_dmabuf->sg_table.sgl, sg,
198 fbdev_dmabuf->sg_table.nents, i) {
199 sg_dma_address(sg) = sg_phys(sg);
/* Record what free_buffer needs to release the slot later. */
202 fbdev_dmabuf->alloc_mask = &interface->alloc_mask;
203 fbdev_dmabuf->alloc_lock = &interface->alloc_lock;
204 fbdev_dmabuf->id = id;
/* Release a buffer slot allocated by adf_fbdev_alloc_buffer: clear its bit
 * in the owning interface's alloc_mask (under the shared lock), free the
 * sg_table, and drop the module reference taken at allocation time.
 * NOTE(review): the kfree(fbdev_dmabuf) expected between these lines is not
 * visible in this view.
 */
209 static void adf_fbdev_free_buffer(struct adf_fbdev_dmabuf *fbdev_dmabuf)
213 spin_lock_irqsave(fbdev_dmabuf->alloc_lock, flags);
214 (*fbdev_dmabuf->alloc_mask) &= ~(1UL << fbdev_dmabuf->id);
215 spin_unlock_irqrestore(fbdev_dmabuf->alloc_lock, flags);
217 sg_free_table(&fbdev_dmabuf->sg_table);
/* Balance the try_module_get() in adf_fbdev_alloc_buffer. */
220 module_put(THIS_MODULE);
223 /* DMA BUF LAYER *************************************************************/
/* dma_buf ->map_dma_buf handler: hand back the pre-built single-entry
 * sg_table. No per-attachment mapping is performed (sg_dma_address was
 * pre-populated in adf_fbdev_alloc_buffer).
 */
225 static struct sg_table *
226 adf_fbdev_d_map_dma_buf(struct dma_buf_attachment *attachment,
227 enum dma_data_direction direction)
229 struct adf_fbdev_dmabuf *fbdev_dmabuf = attachment->dmabuf->priv;
231 return &fbdev_dmabuf->sg_table;
/* dma_buf ->unmap_dma_buf handler: intentionally empty — nothing was mapped
 * in ->map_dma_buf, so there is nothing to undo.
 */
234 static void adf_fbdev_d_unmap_dma_buf(struct dma_buf_attachment *attachment,
235 struct sg_table *table,
236 enum dma_data_direction direction)
241 static int adf_fbdev_d_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
243 struct adf_fbdev_dmabuf *fbdev_dmabuf = dmabuf->priv;
245 return remap_pfn_range(vma, vma->vm_start,
246 PFN_DOWN(fbdev_dmabuf->paddr),
247 vma->vm_end - vma->vm_start,
/* dma_buf ->release handler: last reference to the dmabuf dropped — return
 * the slot to the allocator (clears alloc_mask bit, drops module ref).
 */
251 static void adf_fbdev_d_release(struct dma_buf *dmabuf)
253 adf_fbdev_free_buffer(dmabuf->priv);
257 adf_fbdev_d_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
258 enum dma_data_direction dir)
260 struct adf_fbdev_dmabuf *fbdev_dmabuf = dmabuf->priv;
262 if (start + len > fbdev_dmabuf->length)
/* dma_buf ->end_cpu_access handler: intentionally a no-op, per the comment
 * below. NOTE(review): coherence of fbdev carveout memory is asserted here
 * but not verifiable from this file — confirm against the platform's fbdev
 * driver.
 */
267 static void adf_fbdev_d_end_cpu_access(struct dma_buf *dmabuf, size_t start,
268 size_t len, enum dma_data_direction dir)
270 /* Framebuffer memory is cache coherent. No-op. */
/* dma_buf ->kmap/->kmap_atomic handler: return the kernel virtual address
 * of the requested page within the buffer (screen_base is permanently
 * mapped, so no actual kmap is needed). Returns ERR_PTR(-EINVAL) for
 * out-of-range page offsets.
 */
274 adf_fbdev_d_kmap(struct dma_buf *dmabuf, unsigned long page_offset)
276 struct adf_fbdev_dmabuf *fbdev_dmabuf = dmabuf->priv;
279 if (page_offset * PAGE_SIZE >= fbdev_dmabuf->length)
280 return ERR_PTR(-EINVAL);
281 vaddr = fbdev_dmabuf->vaddr + page_offset * PAGE_SIZE;
/* dma_buf ->kunmap/->kunmap_atomic handler — nothing to undo since
 * ->kmap did not create a mapping. NOTE(review): signature truncated in
 * this view.
 */
286 adf_fbdev_d_kunmap(struct dma_buf *dmabuf, unsigned long page_offset,
/* dma_buf ->vmap handler: the buffer is already kernel-mapped via
 * screen_base, so simply return its cached virtual address.
 */
292 static void *adf_fbdev_d_vmap(struct dma_buf *dmabuf)
294 struct adf_fbdev_dmabuf *fbdev_dmabuf = dmabuf->priv;
296 return fbdev_dmabuf->vaddr;
/* dma_buf ->vunmap handler — no-op counterpart of ->vmap. */
299 static void adf_fbdev_d_vunmap(struct dma_buf *dmabuf, void *vaddr)
/* dma_buf callbacks for buffers exported from the fbdev carveout. This ops
 * table also serves as an identity tag: adf_fbdev_validate checks incoming
 * dmabufs against it to ensure they came from this module.
 */
304 static const struct dma_buf_ops adf_fbdev_dma_buf_ops = {
305 .map_dma_buf = adf_fbdev_d_map_dma_buf,
306 .unmap_dma_buf = adf_fbdev_d_unmap_dma_buf,
307 .mmap = adf_fbdev_d_mmap,
308 .release = adf_fbdev_d_release,
309 .begin_cpu_access = adf_fbdev_d_begin_cpu_access,
310 .end_cpu_access = adf_fbdev_d_end_cpu_access,
/* kmap and kmap_atomic share one implementation (no real mapping made). */
311 .kmap_atomic = adf_fbdev_d_kmap,
312 .kunmap_atomic = adf_fbdev_d_kunmap,
313 .kmap = adf_fbdev_d_kmap,
314 .kunmap = adf_fbdev_d_kunmap,
315 .vmap = adf_fbdev_d_vmap,
316 .vunmap = adf_fbdev_d_vunmap,
319 /* ADF LAYER *****************************************************************/
/* The single DRM fourcc this device supports; chosen from the fbdev pixel
 * format in init_adf_fbdev and advertised via adf_fbdev_overlay_engine_ops.
 */
321 static u32 adf_fbdev_supported_format;
/* ADF ->validate callback: run the generic IMG validation, then — for
 * non-empty posts — additionally require that the first buffer's dmabuf was
 * exported by this module (identified by its ops table), since only carveout
 * slots can be panned to.
 */
323 static int adf_fbdev_validate(struct adf_device *dev, struct adf_post *cfg,
/* Empty ("null") flips and generic-validation failures are returned as-is. */
326 int err = adf_img_validate_simple(dev, cfg, driver_state);
328 if (cfg->n_bufs == 0 || err != 0)
331 /* Everything checked out in the generic validation, but we
332 * additionally want to check that the dmabuf came from the
333 * adf_fbdev module, which the generic code can't check.
335 if (cfg->bufs[0].dma_bufs[0]->ops != &adf_fbdev_dma_buf_ops)
/* ADF ->post callback: display a validated buffer by panning the fbdev to
 * the buffer slot's yoffset (slot id * yres). Restores yres_virtual first if
 * a blank reset it, then calls fb_pan_display. Errors are logged only; ADF
 * post callbacks cannot fail.
 */
341 static void adf_fbdev_post(struct adf_device *dev, struct adf_post *cfg,
/* Safe downcast: adf_device is the first member of adf_fbdev_device. */
344 struct adf_fbdev_device *device = (struct adf_fbdev_device *)dev;
345 struct fb_var_screeninfo new_var = device->fb_info->var;
346 struct adf_fbdev_dmabuf *fbdev_dmabuf;
347 struct adf_buffer *buffer;
350 /* "Null" flip handling */
351 if (cfg->n_bufs == 0)
354 if (!lock_fb_info(device->fb_info)) {
355 pr_err("Failed to lock fb_info structure.\n");
/* Pan target: each slot is one full screen tall. */
361 buffer = &cfg->bufs[0];
362 fbdev_dmabuf = buffer->dma_bufs[0]->priv;
363 new_var.yoffset = new_var.yres * fbdev_dmabuf->id;
365 /* If we're supposed to be able to flip, but the yres_virtual has been
366 * changed to an unsupported (smaller) value, we need to change it back
367 * (this is a workaround for some Linux fbdev drivers that seem to lose
368 * any modifications to yres_virtual after a blank.)
370 if (new_var.yres_virtual < new_var.yres * NUM_PREFERRED_BUFFERS) {
371 new_var.activate = FB_ACTIVATE_NOW;
372 new_var.yres_virtual = new_var.yres * NUM_PREFERRED_BUFFERS;
374 err = fb_set_var(device->fb_info, &new_var);
376 pr_err("fb_set_var failed (err=%d)\n", err);
378 err = fb_pan_display(device->fb_info, &new_var);
380 pr_err("fb_pan_display failed (err=%d)\n", err);
385 unlock_fb_info(device->fb_info);
/* ADF ->open callback: count device-node opens so ->release can detect the
 * last close and queue a "null" flip (see adf_fbdev_release2).
 */
389 adf_fbdev_open2(struct adf_obj *obj, struct inode *inode, struct file *file)
391 struct adf_fbdev_device *dev =
392 (struct adf_fbdev_device *)obj->parent;
393 atomic_inc(&dev->refcount);
/* ADF ->release callback: on the last close, post an empty configuration so
 * ADF un-pins the buffers still held by the display engine (workaround
 * described below), then drop the returned release fence.
 */
398 adf_fbdev_release2(struct adf_obj *obj, struct inode *inode, struct file *file)
400 struct adf_fbdev_device *dev =
401 (struct adf_fbdev_device *)obj->parent;
402 struct sync_fence *release_fence;
/* Not the last close: nothing to do. */
404 if (atomic_dec_return(&dev->refcount))
407 /* This special "null" flip works around a problem with ADF
408 * which leaves buffers pinned by the display engine even
409 * after all ADF clients have closed.
411 * The "null" flip is pipelined like any other. The user won't
412 * be able to unload this module until it has been posted.
414 release_fence = adf_device_post(&dev->base, NULL, 0, NULL, 0, NULL, 0);
415 if (IS_ERR_OR_NULL(release_fence)) {
416 pr_err("Failed to queue null flip command (err=%d).\n",
417 (int)PTR_ERR(release_fence));
/* We don't need to wait on the fence; just release our reference. */
421 sync_fence_put(release_fence);
/* Device-level ADF callbacks: open/release for refcounting and the final
 * un-pinning null flip, the generic IMG ioctl handler, and the
 * validate/post pair that implements flipping via fbdev panning.
 */
424 static const struct adf_device_ops adf_fbdev_device_ops = {
425 .owner = THIS_MODULE,
427 .open = adf_fbdev_open2,
428 .release = adf_fbdev_release2,
429 .ioctl = adf_img_ioctl,
431 .validate = adf_fbdev_validate,
432 .post = adf_fbdev_post,
/* ADF ->supports_event callback. NOTE(review): the switch body is truncated
 * in this view; only the VSYNC and HOTPLUG case labels are visible, so the
 * per-event results cannot be stated from here.
 */
436 adf_fbdev_supports_event(struct adf_obj *obj, enum adf_event_type type)
439 case ADF_EVENT_VSYNC:
440 case ADF_EVENT_HOTPLUG:
/* ADF ->set_event callback (enable/disable event delivery). NOTE(review):
 * the switch body is truncated in this view; only the VSYNC and HOTPLUG
 * case labels are visible.
 */
448 adf_fbdev_set_event(struct adf_obj *obj, enum adf_event_type type,
452 case ADF_EVENT_VSYNC:
453 case ADF_EVENT_HOTPLUG:
/* ADF ->blank callback: forward the blank state to the underlying fbdev
 * driver's fb_blank hook, if it provides one.
 */
460 static int adf_fbdev_blank2(struct adf_interface *intf, u8 state)
/* Safe downcast: adf_interface is the first member of adf_fbdev_interface. */
462 struct adf_fbdev_interface *interface =
463 (struct adf_fbdev_interface *)intf;
464 struct fb_info *fb_info = interface->fb_info;
466 if (!fb_info->fbops->fb_blank)
469 return fb_info->fbops->fb_blank(state, fb_info);
/* ADF ->alloc_simple_buffer callback: allocate a carveout slot and export it
 * as a dmabuf. The request must exactly match the current mode (w, h) and
 * the single supported format — this allocator only deals in full-screen
 * slots. On success fills *dma_buf and *pitch (and presumably *offset, not
 * visible in this view).
 *
 * The three dma_buf_export() variants below track the API change across
 * kernel versions (export_info struct from 4.1; extra resv parameter from
 * 3.17 / MT8173 backport; 4-argument form before that).
 */
473 adf_fbdev_alloc_simple_buffer(struct adf_interface *intf, u16 w, u16 h,
474 u32 format, struct dma_buf **dma_buf,
475 u32 *offset, u32 *pitch)
477 struct adf_fbdev_interface *interface =
478 (struct adf_fbdev_interface *)intf;
479 struct fb_var_screeninfo *var = &interface->fb_info->var;
480 struct adf_fbdev_dmabuf *fbdev_dmabuf;
482 if (w != var->xres) {
483 pr_err("Simple alloc request w=%u does not match w=%u.\n",
488 if (h != var->yres) {
489 pr_err("Simple alloc request h=%u does not match h=%u.\n",
494 if (format != adf_fbdev_supported_format) {
495 pr_err("Simple alloc request f=0x%x does not match f=0x%x.\n",
496 format, adf_fbdev_supported_format);
500 fbdev_dmabuf = adf_fbdev_alloc_buffer(interface);
501 if (IS_ERR_OR_NULL(fbdev_dmabuf))
502 return PTR_ERR(fbdev_dmabuf);
504 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
506 DEFINE_DMA_BUF_EXPORT_INFO(export_info);
508 export_info.ops = &adf_fbdev_dma_buf_ops;
509 export_info.size = fbdev_dmabuf->length;
510 export_info.flags = O_RDWR;
511 export_info.priv = fbdev_dmabuf;
513 *dma_buf = dma_buf_export(&export_info);
515 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) || \
516 defined(CONFIG_ARCH_MT8173)
517 *dma_buf = dma_buf_export(fbdev_dmabuf, &adf_fbdev_dma_buf_ops,
518 fbdev_dmabuf->length, O_RDWR, NULL);
520 *dma_buf = dma_buf_export(fbdev_dmabuf, &adf_fbdev_dma_buf_ops,
521 fbdev_dmabuf->length, O_RDWR);
/* Export failed: return the slot to the allocator before bailing. */
523 if (IS_ERR(*dma_buf)) {
524 adf_fbdev_free_buffer(fbdev_dmabuf);
525 return PTR_ERR(*dma_buf);
528 *pitch = interface->fb_info->fix.line_length;
/* ADF ->screen_size callback: report the physical dimensions captured at
 * init time (from fb_info->var or the FALLBACK_DPI estimate).
 */
534 adf_fbdev_screen_size(struct adf_interface *intf, u16 *width_mm,
537 struct adf_fbdev_interface *interface =
538 (struct adf_fbdev_interface *)intf;
539 *width_mm = interface->width_mm;
540 *height_mm = interface->height_mm;
/* ADF ->modeset callback: only the single inherited fbdev mode is
 * acceptable; the comparison is by pointer identity with the stored
 * fb_mode, so any other mode request is rejected with -EINVAL.
 */
544 static int adf_fbdev_modeset(struct adf_interface *intf,
545 struct drm_mode_modeinfo *mode)
547 struct adf_fbdev_interface *interface =
548 (struct adf_fbdev_interface *)intf;
549 return mode == &interface->fb_mode ? 0 : -EINVAL;
/* Interface-level ADF callbacks: event plumbing, blanking, simple-buffer
 * allocation from the carveout, screen size reporting, and the (single-mode)
 * modeset handler.
 */
552 static const struct adf_interface_ops adf_fbdev_interface_ops = {
554 .supports_event = adf_fbdev_supports_event,
555 .set_event = adf_fbdev_set_event,
557 .blank = adf_fbdev_blank2,
558 .alloc_simple_buffer = adf_fbdev_alloc_simple_buffer,
559 .screen_size = adf_fbdev_screen_size,
560 .modeset = adf_fbdev_modeset,
/* Overlay engine capabilities: exactly one supported format, selected at
 * init from the fbdev pixel layout (see adf_fbdev_supported_format).
 */
563 struct adf_overlay_engine_ops adf_fbdev_overlay_engine_ops = {
564 .supported_formats = &adf_fbdev_supported_format,
565 .n_supported_formats = 1,
568 /* If we can flip, we need to make sure we have the memory to do so.
570 * We'll assume that the fbdev device provides extra space in
571 * yres_virtual for panning; xres_virtual is theoretically supported,
572 * but it involves more work.
574 * If the fbdev device doesn't have yres_virtual > yres, we'll try
575 * requesting it before bailing. Userspace applications commonly do
576 * this with an FBIOPUT_VSCREENINFO ioctl().
578 * Another problem is with a limitation in PowerVR services -- it
579 * needs framebuffers to be page aligned (this is a SW limitation,
580 * the HW can support non-page-aligned buffers). So we have to
581 * check that stride * height for a single buffer is page aligned.
/* Decide whether the fbdev device can support NUM_PREFERRED_BUFFERS-way
 * flipping: panning must be supported, a single buffer must be page-aligned,
 * and yres_virtual must cover all buffers (requested via fb_set_var if not
 * already the case). Returns false with a diagnostic on any failure.
 * NOTE(review): the early return-false/return-true lines are truncated in
 * this view.
 */
583 static bool adf_fbdev_flip_possible(struct fb_info *fb_info)
585 struct fb_var_screeninfo var = fb_info->var;
/* No ypan/ywrap support at all -> cannot flip. */
588 if (!fb_info->fix.xpanstep && !fb_info->fix.ypanstep &&
589 !fb_info->fix.ywrapstep) {
590 pr_err("The fbdev device detected does not support ypan/ywrap.\n");
/* PVR services requires page-aligned buffers (see comment above). */
594 if ((fb_info->fix.line_length * var.yres) % PAGE_SIZE != 0) {
595 pr_err("Line length (in bytes) x yres is not a multiple of page size.\n");
599 /* We might already have enough space */
600 if (var.yres * NUM_PREFERRED_BUFFERS <= var.yres_virtual)
603 pr_err("No buffer space for flipping; asking for more.\n");
605 var.activate = FB_ACTIVATE_NOW;
606 var.yres_virtual = var.yres * NUM_PREFERRED_BUFFERS;
608 err = fb_set_var(fb_info, &var);
610 pr_err("fb_set_var failed (err=%d).\n", err);
/* The driver may silently clamp yres_virtual; verify it stuck. */
614 if (var.yres * NUM_PREFERRED_BUFFERS > var.yres_virtual) {
615 pr_err("Failed to obtain additional buffer space.\n");
619 /* Some fbdev drivers allow the yres_virtual modification through,
620 * but don't actually update the fix. We need the fix to be updated
621 * and more memory allocated, so we can actually take advantage of
622 * the increased yres_virtual.
624 if (fb_info->fix.smem_len < fb_info->fix.line_length *
626 pr_err("'fix' not re-allocated with sufficient buffer space.\n");
627 pr_err("Check NUM_PREFERRED_BUFFERS (%u) is as intended.\n",
628 NUM_PREFERRED_BUFFERS);
635 /* Could use devres here? */
/* Single static instance of the whole device stack (device, interface,
 * overlay engine) — this example driver supports exactly one fbdev device
 * (registered_fb[0]). NOTE(review): the struct's closing brace/variable name
 * is truncated in this view; it is referenced elsewhere as dev_data.
 */
637 struct adf_fbdev_device device;
638 struct adf_fbdev_interface interface;
639 struct adf_overlay_engine engine;
/* Module init: adopt registered_fb[0] as the display.
 *
 * Steps: sanity-check the fbdev (non-zero smem/line_length, packed-pixel
 * truecolor, recognized 32bpp XRGB/ARGB or 16bpp RGB565 layout), pin the
 * fbdev driver module and open it, verify flipping is possible, then build
 * the ADF object stack (device -> interface -> overlay engine -> attachment),
 * inherit/fake up the mode, and report what was found. Error paths unwind in
 * reverse via the goto labels at the bottom.
 * NOTE(review): many lines (returns, else branches, some goto targets) are
 * truncated in this view.
 */
642 static int __init init_adf_fbdev(void)
644 struct drm_mode_modeinfo *mode = &dev_data.interface.fb_mode;
645 char format_str[ADF_FORMAT_STR_SIZE];
646 struct fb_info *fb_info;
/* This example driver only ever looks at the first registered fbdev. */
649 fb_info = registered_fb[0];
651 pr_err("No Linux framebuffer (fbdev) device is registered!\n");
652 pr_err("Check you have a framebuffer driver compiled into your kernel\n");
653 pr_err("and that it is enabled on the cmdline.\n");
657 if (!lock_fb_info(fb_info))
662 /* Filter out broken FB devices */
663 if (!fb_info->fix.smem_len || !fb_info->fix.line_length) {
664 pr_err("The fbdev device detected had a zero smem_len or line_length,\n");
665 pr_err("which suggests it is a broken driver.\n");
669 if (fb_info->fix.type != FB_TYPE_PACKED_PIXELS ||
670 fb_info->fix.visual != FB_VISUAL_TRUECOLOR) {
671 pr_err("The fbdev device detected is not truecolor with packed pixels.\n");
/* 32bpp must be 8:8:8:8 with B at bit 0 (XRGB/ARGB little-endian). */
675 if (fb_info->var.bits_per_pixel == 32) {
676 if (fb_info->var.red.length != 8 ||
677 fb_info->var.green.length != 8 ||
678 fb_info->var.blue.length != 8 ||
679 fb_info->var.red.offset != 16 ||
680 fb_info->var.green.offset != 8 ||
681 fb_info->var.blue.offset != 0) {
682 pr_err("The fbdev device detected uses an unrecognized 32bit pixel format (%u/%u/%u, %u/%u/%u)\n",
683 fb_info->var.red.length,
684 fb_info->var.green.length,
685 fb_info->var.blue.length,
686 fb_info->var.red.offset,
687 fb_info->var.green.offset,
688 fb_info->var.blue.offset);
/* Build-time override: ignore the alpha channel if requested. */
691 #if defined(ADF_FBDEV_FORCE_XRGB8888)
692 adf_fbdev_supported_format = DRM_FORMAT_BGRX8888;
694 adf_fbdev_supported_format = DRM_FORMAT_BGRA8888;
/* 16bpp must be 5:6:5 RGB. */
696 } else if (fb_info->var.bits_per_pixel == 16) {
697 if (fb_info->var.red.length != 5 ||
698 fb_info->var.green.length != 6 ||
699 fb_info->var.blue.length != 5 ||
700 fb_info->var.red.offset != 11 ||
701 fb_info->var.green.offset != 5 ||
702 fb_info->var.blue.offset != 0) {
703 pr_err("The fbdev device detected uses an unrecognized 16bit pixel format (%u/%u/%u, %u/%u/%u)\n",
704 fb_info->var.red.length,
705 fb_info->var.green.length,
706 fb_info->var.blue.length,
707 fb_info->var.red.offset,
708 fb_info->var.green.offset,
709 fb_info->var.blue.offset);
712 adf_fbdev_supported_format = DRM_FORMAT_BGR565;
714 pr_err("The fbdev device detected uses an unsupported bpp (%u).\n",
715 fb_info->var.bits_per_pixel);
/* Pin the underlying fbdev driver for as long as we use it. */
719 if (!try_module_get(fb_info->fbops->owner)) {
720 pr_err("try_module_get() failed");
724 if (fb_info->fbops->fb_open &&
725 fb_info->fbops->fb_open(fb_info, 0) != 0) {
726 pr_err("fb_open() failed");
730 if (!adf_fbdev_flip_possible(fb_info)) {
731 pr_err("Flipping must be supported for ADF. Aborting.\n");
/* Build the ADF object stack; each failure unwinds what came before. */
735 err = adf_device_init(&dev_data.device.base, fb_info->dev,
736 &adf_fbdev_device_ops, "fbdev");
738 pr_err("adf_device_init failed (%d)", err);
742 dev_data.device.fb_info = fb_info;
744 err = adf_interface_init(&dev_data.interface.base,
745 &dev_data.device.base,
746 ADF_INTF_DVI, 0, ADF_INTF_FLAG_PRIMARY,
747 &adf_fbdev_interface_ops, "fbdev_interface");
749 pr_err("adf_interface_init failed (%d)", err);
750 goto err_device_destroy;
753 spin_lock_init(&dev_data.interface.alloc_lock);
754 dev_data.interface.fb_info = fb_info;
756 /* If the fbdev mode looks viable, try to inherit from it */
758 adf_modeinfo_from_fb_videomode(fb_info->mode, mode);
760 /* Framebuffer drivers aren't always very good at filling out their
761 * mode information, so fake up anything that's missing so we don't
762 * need to accommodate it in userspace.
766 mode->hdisplay = fb_info->var.xres;
768 mode->vdisplay = fb_info->var.yres;
770 mode->vrefresh = FALLBACK_REFRESH_RATE;
/* Physical size: trust var.width/height if plausible (< 1m), otherwise
 * estimate from resolution at FALLBACK_DPI (25400 um per inch). */
772 if (fb_info->var.width > 0 && fb_info->var.width < 1000) {
773 dev_data.interface.width_mm = fb_info->var.width;
775 dev_data.interface.width_mm = (fb_info->var.xres * 25400) /
776 (FALLBACK_DPI * 1000);
779 if (fb_info->var.height > 0 && fb_info->var.height < 1000) {
780 dev_data.interface.height_mm = fb_info->var.height;
782 dev_data.interface.height_mm = (fb_info->var.yres * 25400) /
783 (FALLBACK_DPI * 1000);
786 err = adf_hotplug_notify_connected(&dev_data.interface.base, mode, 1);
788 pr_err("adf_hotplug_notify_connected failed (%d)", err);
789 goto err_interface_destroy;
792 /* This doesn't really set the mode, it just updates current_mode */
793 err = adf_interface_set_mode(&dev_data.interface.base, mode);
795 pr_err("adf_interface_set_mode failed (%d)", err);
796 goto err_interface_destroy;
799 err = adf_overlay_engine_init(&dev_data.engine, &dev_data.device.base,
800 &adf_fbdev_overlay_engine_ops,
801 "fbdev_overlay_engine");
803 pr_err("adf_overlay_engine_init failed (%d)", err);
804 goto err_interface_destroy;
807 err = adf_attachment_allow(&dev_data.device.base,
809 &dev_data.interface.base);
812 pr_err("adf_attachment_allow failed (%d)", err);
813 goto err_overlay_engine_destroy;
/* Success: log a summary of the adopted device. */
816 adf_format_str(adf_fbdev_supported_format, format_str);
817 pr_info("Found usable fbdev device (%s):\n"
818 "range (physical) = 0x%lx-0x%lx\n"
819 "range (virtual) = %p-%p\n"
820 "size (bytes) = 0x%x\n"
821 "xres x yres = %ux%u\n"
822 "xres x yres (v) = %ux%u\n"
823 "physical (mm) = %ux%u\n"
824 "refresh (Hz) = %u\n"
825 "drm fourcc = %s (0x%x)\n",
827 fb_info->fix.smem_start,
828 fb_info->fix.smem_start + fb_info->fix.smem_len,
829 fb_info->screen_base,
830 fb_info->screen_base + fb_info->screen_size,
831 fb_info->fix.smem_len,
832 mode->hdisplay, mode->vdisplay,
833 fb_info->var.xres_virtual, fb_info->var.yres_virtual,
834 dev_data.interface.width_mm, dev_data.interface.height_mm,
836 format_str, adf_fbdev_supported_format);
840 unlock_fb_info(fb_info);
/* Error unwind (reverse order of construction). */
843 err_overlay_engine_destroy:
844 adf_overlay_engine_destroy(&dev_data.engine);
845 err_interface_destroy:
846 adf_interface_destroy(&dev_data.interface.base);
848 adf_device_destroy(&dev_data.device.base);
850 if (fb_info->fbops->fb_release)
851 fb_info->fbops->fb_release(fb_info, 0);
853 module_put(fb_info->fbops->owner);
/* Module exit: tear down the ADF object stack in reverse order of
 * construction, release the fbdev device, and drop the fbdev driver module
 * reference taken in init_adf_fbdev. All work happens under the fb_info
 * lock.
 */
857 static void __exit exit_adf_fbdev(void)
859 struct fb_info *fb_info = dev_data.device.fb_info;
861 if (!lock_fb_info(fb_info)) {
862 pr_err("Failed to lock fb_info.\n");
868 adf_overlay_engine_destroy(&dev_data.engine);
869 adf_interface_destroy(&dev_data.interface.base);
870 adf_device_destroy(&dev_data.device.base);
/* Balance the fb_open()/try_module_get() from init_adf_fbdev. */
872 if (fb_info->fbops->fb_release)
873 fb_info->fbops->fb_release(fb_info, 0);
875 module_put(fb_info->fbops->owner);
878 unlock_fb_info(fb_info);
881 module_init(init_adf_fbdev);
882 module_exit(exit_adf_fbdev);