* GNU General Public License for more details.
*/
-#include <linux/dma-iommu.h>
-
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_sync_helper.h>
#include <drm/rockchip_drm.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-iommu.h>
#include <linux/pm_runtime.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_graph.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/component.h>
#include <linux/fence.h>
#include <linux/console.h>
+#include <linux/iommu.h>
+
+#include <drm/rockchip_drm.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
+static bool is_support_iommu = true;
+
static LIST_HEAD(rockchip_drm_subdrv_list);
static DEFINE_MUTEX(subdrv_list_mutex);
+static struct drm_driver rockchip_drm_driver;
struct rockchip_drm_mode_set {
struct list_head head;
int ratio;
};
+#ifndef MODULE
static struct drm_crtc *find_crtc_by_node(struct drm_device *drm_dev,
struct device_node *node)
{
return NULL;
}
+/*
+ * rockchip_free_loader_memory - drop the bootloader logo buffer once the
+ * last reference is gone.
+ *
+ * Decrements logo->count; only the final caller performs the teardown:
+ * unmap the buffer from the IOMMU domain (or undo the streaming DMA
+ * mapping when no IOMMU is in use), release the scatterlist, and hand
+ * the reserved memblock region back to the page allocator.
+ */
+void rockchip_free_loader_memory(struct drm_device *drm)
+{
+ struct rockchip_drm_private *private = drm->dev_private;
+ struct rockchip_logo *logo;
+ void *start, *end;
+
+ /* Bail out unless this is the last reference to the logo. */
+ if (!private || !private->logo || --private->logo->count)
+ return;
+
+ logo = private->logo;
+ start = phys_to_virt(logo->start);
+ end = phys_to_virt(logo->start + logo->size);
+
+ if (private->domain) {
+ /* IOMMU path: unmap the IOVA range and release the drm_mm node. */
+ iommu_unmap(private->domain, logo->dma_addr,
+ logo->iommu_map_size);
+ drm_mm_remove_node(&logo->mm);
+ } else {
+ dma_unmap_sg(drm->dev, logo->sgt->sgl,
+ logo->sgt->nents, DMA_TO_DEVICE);
+ }
+ sg_free_table(logo->sgt);
+ /*
+ * drm_prime_pages_to_sg() kmallocs the sg_table itself;
+ * sg_free_table() only frees its entries, so free the struct too.
+ */
+ kfree(logo->sgt);
+ memblock_free(logo->start, logo->size);
+ free_reserved_area(start, end, -1, "drm_logo");
+ kfree(logo);
+ private->logo = NULL;
+}
+
+/*
+ * init_loader_memory - take ownership of the framebuffer the bootloader
+ * left behind (found via the "memory-region" phandle) so the boot logo
+ * stays on screen across the DRM driver takeover.
+ *
+ * NOTE(review): this is a diff hunk; several context lines (logo/node/np
+ * declarations, resource lookup filling res/start/size) are elided from
+ * this view.
+ */
static int init_loader_memory(struct drm_device *drm_dev)
{
struct rockchip_drm_private *private = drm_dev->dev_private;
unsigned long nr_pages;
struct page **pages;
struct sg_table *sgt;
-	DEFINE_DMA_ATTRS(attrs);
phys_addr_t start, size;
struct resource res;
int i, ret;
-	logo = devm_kmalloc(drm_dev->dev, sizeof(*logo), GFP_KERNEL);
-	if (!logo)
-		return -ENOMEM;
-
node = of_parse_phandle(np, "memory-region", 0);
if (!node)
return -ENOMEM;
if (!size)
return -ENOMEM;
+/* Plain kmalloc (not devm_*): the logo is freed explicitly by
+ * rockchip_free_loader_memory() once it has been shown. */
+	logo = kmalloc(sizeof(*logo), GFP_KERNEL);
+	if (!logo)
+		return -ENOMEM;
+
nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
pages = kmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
if (!pages)
-		return -ENOMEM;
+		goto err_free_pages;
i = 0;
+/* NOTE(review): no increment of i/start is visible in this hunk — the
+ * loop-advance statements are presumably in elided context; confirm
+ * against the full file, otherwise this loop never terminates. */
while (i < nr_pages) {
pages[i] = phys_to_page(start);
}
sgt = drm_prime_pages_to_sg(pages, nr_pages);
if (IS_ERR(sgt)) {
-		kfree(pages);
-		return PTR_ERR(sgt);
+		ret = PTR_ERR(sgt);
+		goto err_free_pages;
+	}
+
+	if (private->domain) {
+/* IOMMU path: carve an IOVA range out of the driver's drm_mm and map
+ * the logo pages into it read-only. */
+		memset(&logo->mm, 0, sizeof(logo->mm));
+		ret = drm_mm_insert_node_generic(&private->mm, &logo->mm,
+						 size, PAGE_SIZE,
+						 0, 0, 0);
+		if (ret < 0) {
+			DRM_ERROR("out of I/O virtual memory: %d\n", ret);
+			goto err_free_pages;
+		}
+
+		logo->dma_addr = logo->mm.start;
+
+		logo->iommu_map_size = iommu_map_sg(private->domain,
+						    logo->dma_addr, sgt->sgl,
+						    sgt->nents, IOMMU_READ);
+/* iommu_map_sg() returns the number of bytes mapped; a short map is a
+ * failure. */
+		if (logo->iommu_map_size < size) {
+			DRM_ERROR("failed to map buffer");
+			ret = -ENOMEM;
+			goto err_remove_node;
+		}
+	} else {
+		dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+		logo->dma_addr = sg_dma_address(sgt->sgl);
}
-	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
-	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
-	dma_map_sg_attrs(drm_dev->dev, sgt->sgl, sgt->nents,
-			 DMA_TO_DEVICE, &attrs);
-	logo->dma_addr = sg_dma_address(sgt->sgl);
logo->sgt = sgt;
logo->start = res.start;
logo->size = size;
+/* Start with one reference; dropped by rockchip_free_loader_memory(). */
-	logo->count = 0;
+	logo->count = 1;
private->logo = logo;
+/* NOTE(review): pages[] is not freed on this success path, and sgt is
+ * not released on the error paths below — possible leaks; verify
+ * against the full file. */
return 0;
+
+err_remove_node:
+	drm_mm_remove_node(&logo->mm);
+err_free_pages:
+	kfree(pages);
+err_free_logo:
+	kfree(logo);
+
+	return ret;
}
static struct drm_framebuffer *
}
bpp = val;
- mode_cmd.pitches[0] = mode_cmd.width * bpp / 8;
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * bpp, 32) / 8;
switch (bpp) {
case 16:
mode_cmd.pixel_format = DRM_FORMAT_BGR888;
break;
case 32:
- mode_cmd.pixel_format = DRM_FORMAT_XBGR8888;
+ mode_cmd.pixel_format = DRM_FORMAT_XRGB8888;
break;
default:
pr_err("%s: unsupport to logo bpp %d\n", __func__, bpp);
struct drm_plane_state *primary_state;
struct drm_display_mode *mode = NULL;
const struct drm_connector_helper_funcs *funcs;
+ const struct drm_encoder_helper_funcs *encoder_funcs;
bool is_crtc_enabled = true;
int hdisplay, vdisplay;
int fb_width, fb_height;
conn_state->best_encoder = funcs->best_encoder(connector);
if (funcs->loader_protect)
funcs->loader_protect(connector, true);
+ connector->loader_protect = true;
+ encoder_funcs = conn_state->best_encoder->helper_private;
+ if (encoder_funcs->loader_protect)
+ encoder_funcs->loader_protect(conn_state->best_encoder, true);
+ conn_state->best_encoder->loader_protect = true;
num_modes = connector->funcs->fill_modes(connector, 4096, 4096);
if (!num_modes) {
dev_err(drm_dev->dev, "connector[%s] can't found any modes\n",
error:
if (funcs->loader_protect)
funcs->loader_protect(connector, false);
+ connector->loader_protect = false;
+ if (encoder_funcs->loader_protect)
+ encoder_funcs->loader_protect(conn_state->best_encoder, false);
+ conn_state->best_encoder->loader_protect = false;
return ret;
}
struct rockchip_drm_mode_set *set,
unsigned int *plane_mask)
{
- struct drm_mode_config *mode_config = &drm_dev->mode_config;
+ struct rockchip_drm_private *priv = drm_dev->dev_private;
struct drm_crtc *crtc = set->crtc;
struct drm_connector *connector = set->connector;
struct drm_display_mode *mode = set->mode;
crtc_state->active = true;
} else {
- const struct drm_crtc_helper_funcs *funcs;
const struct drm_encoder_helper_funcs *encoder_helper_funcs;
const struct drm_connector_helper_funcs *connector_helper_funcs;
struct drm_encoder *encoder;
+ int pipe = drm_crtc_index(crtc);
- funcs = crtc->helper_private;
connector_helper_funcs = connector->helper_private;
- if (!funcs || !funcs->enable ||
+ if (!priv->crtc_funcs[pipe] ||
+ !priv->crtc_funcs[pipe]->loader_protect ||
!connector_helper_funcs ||
!connector_helper_funcs->best_encoder)
return -ENXIO;
encoder = connector_helper_funcs->best_encoder(connector);
+ if (!encoder)
+ return -ENXIO;
encoder_helper_funcs = encoder->helper_private;
- if (!encoder || !encoder_helper_funcs->atomic_check)
+ if (!encoder_helper_funcs->atomic_check)
return -ENXIO;
ret = encoder_helper_funcs->atomic_check(encoder, crtc->state,
conn_state);
if (ret)
return ret;
- funcs->enable(crtc);
+ if (encoder_helper_funcs->mode_set)
+ encoder_helper_funcs->mode_set(encoder, mode, mode);
+ priv->crtc_funcs[pipe]->loader_protect(crtc, true);
}
primary_state = drm_atomic_get_plane_state(state, crtc->primary);
* some vop maybe not support ymirror, but force use it now.
*/
drm_atomic_plane_set_property(crtc->primary, primary_state,
- mode_config->rotation_property,
- BIT(DRM_REFLECT_Y));
+ priv->logo_ymirror_prop,
+ true);
return ret;
}
static void show_loader_logo(struct drm_device *drm_dev)
{
- struct drm_atomic_state *state;
+ struct drm_atomic_state *state, *old_state;
struct device_node *np = drm_dev->dev->of_node;
struct drm_mode_config *mode_config = &drm_dev->mode_config;
struct device_node *root, *route;
goto err_free_state;
}
+ old_state = drm_atomic_helper_duplicate_state(drm_dev,
+ mode_config->acquire_ctx);
+ if (IS_ERR(old_state)) {
+ dev_err(drm_dev->dev, "failed to duplicate atomic state\n");
+ ret = PTR_ERR_OR_ZERO(old_state);
+ goto err_free_state;
+ }
+
/*
* The state save initial devices status, swap the state into
* drm deivces as old state, so if new state come, can compare
if (IS_ERR(state)) {
dev_err(drm_dev->dev, "failed to duplicate atomic state\n");
ret = PTR_ERR_OR_ZERO(state);
- goto err_unlock;
+ goto err_free_old_state;
}
state->acquire_ctx = mode_config->acquire_ctx;
list_for_each_entry(set, &mode_set_list, head)
drm_atomic_clean_old_fb(drm_dev, plane_mask, ret);
list_for_each_entry_safe(set, tmp, &mode_set_list, head) {
- struct drm_crtc *crtc = set->crtc;
-
list_del(&set->head);
kfree(set);
-
- /* FIXME:
- * primary plane state rotation is not BIT(0), but we only want
- * it effect on logo display, userspace may not known to clean
- * this property, would get unexpect display, so force set
- * primary rotation to BIT(0).
- */
- if (!crtc->primary || !crtc->primary->state)
- continue;
-
- drm_atomic_plane_set_property(crtc->primary,
- crtc->primary->state,
- mode_config->rotation_property,
- BIT(0));
}
/*
*/
WARN_ON(ret == -EDEADLK);
- if (ret)
- goto err_free_state;
+ if (ret) {
+ /*
+ * restore display status if atomic commit failed.
+ */
+ drm_atomic_helper_swap_state(drm_dev, old_state);
+ goto err_free_old_state;
+ }
+
+ rockchip_free_loader_memory(drm_dev);
+ drm_atomic_state_free(old_state);
drm_modeset_unlock_all(drm_dev);
return;
+err_free_old_state:
+ drm_atomic_state_free(old_state);
err_free_state:
drm_atomic_state_free(state);
err_unlock:
dev_err(drm_dev->dev, "failed to show loader logo\n");
}
+/*
+ * Display-path clocks the bootloader left running.  They are looked up
+ * and enabled at fs_initcall time so the common clock framework does not
+ * gate them as "unused" before the DRM driver takes over the display,
+ * which would blank the boot logo.  They are released again in
+ * rockchip_clocks_loader_unprotect() below.
+ */
+static const char *const loader_protect_clocks[] __initconst = {
+	"hclk_vio",
+	"aclk_vio",
+	"aclk_vio0",
+};
+
+/* __initdata: only touched from initcalls, freed before init memory is. */
+static struct clk **loader_clocks __initdata;
+
+/* Grab an enable reference on each loader clock; absent clocks are
+ * silently skipped (not every SoC has all three). */
+static int __init rockchip_clocks_loader_protect(void)
+{
+	int nclocks = ARRAY_SIZE(loader_protect_clocks);
+	struct clk *clk;
+	int i;
+
+	loader_clocks = kcalloc(nclocks, sizeof(void *), GFP_KERNEL);
+	if (!loader_clocks)
+		return -ENOMEM;
+
+	for (i = 0; i < nclocks; i++) {
+		clk = __clk_lookup(loader_protect_clocks[i]);
+
+		if (clk) {
+			loader_clocks[i] = clk;
+			clk_prepare_enable(clk);
+		}
+	}
+
+	return 0;
+}
+fs_initcall(rockchip_clocks_loader_protect);
+
+/* Drop the protection references once boot is far enough along that the
+ * DRM driver holds its own clock references. */
+static int __init rockchip_clocks_loader_unprotect(void)
+{
+	int i;
+
+	if (!loader_clocks)
+		return -ENODEV;
+
+	for (i = 0; i < ARRAY_SIZE(loader_protect_clocks); i++) {
+		struct clk *clk = loader_clocks[i];
+
+		if (clk)
+			clk_disable_unprepare(clk);
+	}
+	kfree(loader_clocks);
+
+	return 0;
+}
+late_initcall_sync(rockchip_clocks_loader_unprotect);
+#endif
+
/*
* Attach a (component) device to the shared drm dma mapping from master drm
* device. This is used by the VOPs to map GEM buffers to a common DMA
struct device *dev)
{
struct rockchip_drm_private *private = drm_dev->dev_private;
- struct iommu_domain *domain = private->domain;
int ret;
- ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
+ if (!is_support_iommu)
+ return 0;
- dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
- ret = iommu_attach_device(domain, dev);
+ ret = iommu_attach_device(private->domain, dev);
if (ret) {
dev_err(dev, "Failed to attach iommu device\n");
return ret;
}
- if (!common_iommu_setup_dma_ops(dev, 0x10000000, SZ_2G, domain->ops)) {
- dev_err(dev, "Failed to set dma_ops\n");
- iommu_detach_device(domain, dev);
- ret = -ENODEV;
- }
-
- return ret;
+ return 0;
}
void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
struct rockchip_drm_private *private = drm_dev->dev_private;
struct iommu_domain *domain = private->domain;
+ if (!is_support_iommu)
+ return;
+
iommu_detach_device(domain, dev);
}
int pipe = drm_crtc_index(crtc);
struct rockchip_drm_private *priv = crtc->dev->dev_private;
- if (pipe > ROCKCHIP_MAX_CRTC)
+ if (pipe >= ROCKCHIP_MAX_CRTC)
return -EINVAL;
priv->crtc_funcs[pipe] = crtc_funcs;
int pipe = drm_crtc_index(crtc);
struct rockchip_drm_private *priv = crtc->dev->dev_private;
- if (pipe > ROCKCHIP_MAX_CRTC)
+ if (pipe >= ROCKCHIP_MAX_CRTC)
return;
priv->crtc_funcs[pipe] = NULL;
priv->crtc_funcs[pipe]->disable_vblank(crtc);
}
-static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
+/*
+ * IOMMU page-fault handler registered on the driver's domain (see
+ * rockchip_drm_init_iommu()).  On a fault it dumps the registers and
+ * debugfs state of every CRTC to aid debugging, then returns 0.
+ * NOTE(review): returning 0 tells report_iommu_fault() the fault was
+ * handled — confirm that is the intent rather than falling through to
+ * the default reporting.
+ */
+static int rockchip_drm_fault_handler(struct iommu_domain *iommu,
+				      struct device *dev,
+				      unsigned long iova, int flags, void *arg)
+{
+	struct drm_device *drm_dev = arg;
+	struct rockchip_drm_private *priv = drm_dev->dev_private;
+	struct drm_crtc *crtc;
+
+	drm_for_each_crtc(crtc, drm_dev) {
+		int pipe = drm_crtc_index(crtc);
+
+		if (priv->crtc_funcs[pipe] &&
+		    priv->crtc_funcs[pipe]->regs_dump)
+			priv->crtc_funcs[pipe]->regs_dump(crtc, NULL);
+
+		if (priv->crtc_funcs[pipe] &&
+		    priv->crtc_funcs[pipe]->debugfs_dump)
+			priv->crtc_funcs[pipe]->debugfs_dump(crtc, NULL);
+	}
+
+	return 0;
+}
+
+/*
+ * rockchip_drm_init_iommu - set up the shared IOMMU domain for all VOPs.
+ *
+ * No-op when IOMMU support was disabled at probe time.  Allocates a
+ * platform-bus domain, seeds a drm_mm allocator covering the domain's
+ * aperture (used to hand out IOVA ranges for GEM buffers and the boot
+ * logo), and installs the fault handler above.
+ *
+ * Returns 0 on success or when IOMMU is unsupported, -ENOMEM on domain
+ * allocation failure.
+ */
+static int rockchip_drm_init_iommu(struct drm_device *drm_dev)
+{
+	struct rockchip_drm_private *private = drm_dev->dev_private;
+	struct iommu_domain_geometry *geometry;
+	u64 start, end;
+
+	if (!is_support_iommu)
+		return 0;
+
+	private->domain = iommu_domain_alloc(&platform_bus_type);
+	if (!private->domain)
+		return -ENOMEM;
+
+	geometry = &private->domain->geometry;
+	start = geometry->aperture_start;
+	end = geometry->aperture_end;
+
+	DRM_DEBUG("IOMMU context initialized (aperture: %#llx-%#llx)\n",
+		  start, end);
+	/* IOVA allocator spans the whole aperture; guarded by mm_lock. */
+	drm_mm_init(&private->mm, start, end - start + 1);
+	mutex_init(&private->mm_lock);
+
+	iommu_set_fault_handler(private->domain, rockchip_drm_fault_handler,
+				drm_dev);
+
+	return 0;
+}
+
+/*
+ * rockchip_iommu_cleanup - tear down what rockchip_drm_init_iommu() set
+ * up: the IOVA drm_mm allocator and the IOMMU domain.  No-op when IOMMU
+ * support is disabled.
+ */
+static void rockchip_iommu_cleanup(struct drm_device *drm_dev)
+{
+	struct rockchip_drm_private *private = drm_dev->dev_private;
+
+	if (!is_support_iommu)
+		return;
+
+	drm_mm_takedown(&private->mm);
+	iommu_domain_free(private->domain);
+}
+
+#ifdef CONFIG_DEBUG_FS
+/* debugfs "mm_dump": print the IOVA allocator's node table.  Quietly
+ * does nothing when the device runs without an IOMMU domain. */
+static int rockchip_drm_mm_dump(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct drm_minor *minor = node->minor;
+	struct drm_device *drm_dev = minor->dev;
+	struct rockchip_drm_private *priv = drm_dev->dev_private;
+	int ret;
+
+	if (!priv->domain)
+		return 0;
+
+	/* mm_lock serializes against IOVA alloc/free paths. */
+	mutex_lock(&priv->mm_lock);
+
+	ret = drm_mm_dump_table(s, &priv->mm);
+
+	mutex_unlock(&priv->mm_lock);
+
+	return ret;
+}
+
+/* debugfs "summary": let each CRTC driver dump its state via the
+ * per-pipe debugfs_dump callback (if registered). */
+static int rockchip_drm_summary_show(struct seq_file *s, void *data)
{
+	struct drm_info_node *node = s->private;
+	struct drm_minor *minor = node->minor;
+	struct drm_device *drm_dev = minor->dev;
+	struct rockchip_drm_private *priv = drm_dev->dev_private;
+	struct drm_crtc *crtc;
+
+	drm_for_each_crtc(crtc, drm_dev) {
+		int pipe = drm_crtc_index(crtc);
+
+		if (priv->crtc_funcs[pipe] &&
+		    priv->crtc_funcs[pipe]->debugfs_dump)
+			priv->crtc_funcs[pipe]->debugfs_dump(crtc, s);
+	}
+
+	return 0;
+}
+
+static struct drm_info_list rockchip_debugfs_files[] = {
+	{ "summary", rockchip_drm_summary_show, 0, NULL },
+	{ "mm_dump", rockchip_drm_mm_dump, 0, NULL },
+};
+
+/* DRM .debugfs_init hook: publish the files above under the minor's
+ * debugfs directory. */
+static int rockchip_drm_debugfs_init(struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	int ret;
+
+	ret = drm_debugfs_create_files(rockchip_debugfs_files,
+				       ARRAY_SIZE(rockchip_debugfs_files),
+				       minor->debugfs_root,
+				       minor);
+	if (ret) {
+		dev_err(dev->dev, "could not install rockchip_debugfs_list\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/* DRM .debugfs_cleanup hook: remove the files registered above. */
+static void rockchip_drm_debugfs_cleanup(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(rockchip_debugfs_files,
+				 ARRAY_SIZE(rockchip_debugfs_files), minor);
+}
+#endif
+
+/*
+ * rockchip_drm_create_properties - register the driver's CABC (content
+ * adaptive backlight control) properties on the DRM device and cache
+ * them in the driver private data.
+ *
+ * Returns 0 on success, -ENOMEM if any property allocation fails.
+ */
+static int rockchip_drm_create_properties(struct drm_device *dev)
+{
+	struct drm_property *prop;
+	struct rockchip_drm_private *private = dev->dev_private;
+	const struct drm_prop_enum_list cabc_mode_enum_list[] = {
+		{ ROCKCHIP_DRM_CABC_MODE_DISABLE, "Disable" },
+		{ ROCKCHIP_DRM_CABC_MODE_NORMAL, "Normal" },
+		{ ROCKCHIP_DRM_CABC_MODE_LOWPOWER, "LowPower" },
+		{ ROCKCHIP_DRM_CABC_MODE_USERSPACE, "Userspace" },
+	};
+
+	/* drm_property_create_enum() returns NULL on allocation failure,
+	 * so it needs the same check as every property below. */
+	prop = drm_property_create_enum(dev, 0, "CABC_MODE", cabc_mode_enum_list,
+					ARRAY_SIZE(cabc_mode_enum_list));
+	if (!prop)
+		return -ENOMEM;
+	private->cabc_mode_property = prop;
+
+	prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "CABC_LUT", 0);
+	if (!prop)
+		return -ENOMEM;
+	private->cabc_lut_property = prop;
+
+	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+					 "CABC_STAGE_UP", 0, 512);
+	if (!prop)
+		return -ENOMEM;
+	private->cabc_stage_up_property = prop;
+
+	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+					 "CABC_STAGE_DOWN", 0, 255);
+	if (!prop)
+		return -ENOMEM;
+	private->cabc_stage_down_property = prop;
+
+	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+					 "CABC_GLOBAL_DN", 0, 255);
+	if (!prop)
+		return -ENOMEM;
+	private->cabc_global_dn_property = prop;
+
+	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+					 "CABC_CALC_PIXEL_NUM", 0, 1000);
+	if (!prop)
+		return -ENOMEM;
+	private->cabc_calc_pixel_num_property = prop;
+
+	return 0;
+}
+
+static int rockchip_drm_bind(struct device *dev)
+{
+ struct drm_device *drm_dev;
struct rockchip_drm_private *private;
- struct device *dev = drm_dev->dev;
- struct drm_connector *connector;
- struct iommu_group *group;
int ret;
- private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
- if (!private)
+ drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
+ if (!drm_dev)
return -ENOMEM;
+ ret = drm_dev_set_unique(drm_dev, "%s", dev_name(dev));
+ if (ret)
+ goto err_free;
+
+ dev_set_drvdata(dev, drm_dev);
+
+ private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
+ if (!private) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
mutex_init(&private->commit.lock);
INIT_WORK(&private->commit.work, rockchip_drm_atomic_work);
drm_dev->dev_private = private;
+ private->hdmi_pll.pll = devm_clk_get(dev, "hdmi-tmds-pll");
+ if (PTR_ERR(private->hdmi_pll.pll) == -ENOENT) {
+ private->hdmi_pll.pll = NULL;
+ } else if (PTR_ERR(private->hdmi_pll.pll) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_free;
+ } else if (IS_ERR(private->hdmi_pll.pll)) {
+ dev_err(dev, "failed to get hdmi-tmds-pll\n");
+ ret = PTR_ERR(private->hdmi_pll.pll);
+ goto err_free;
+ }
+
+ private->default_pll.pll = devm_clk_get(dev, "default-vop-pll");
+ if (PTR_ERR(private->default_pll.pll) == -ENOENT) {
+ private->default_pll.pll = NULL;
+ } else if (PTR_ERR(private->default_pll.pll) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_free;
+ } else if (IS_ERR(private->default_pll.pll)) {
+ dev_err(dev, "failed to get default vop pll\n");
+ ret = PTR_ERR(private->default_pll.pll);
+ goto err_free;
+ }
+
#ifdef CONFIG_DRM_DMA_SYNC
private->cpu_fence_context = fence_context_alloc(1);
atomic_set(&private->cpu_fence_seqno, 0);
drm_mode_config_init(drm_dev);
rockchip_drm_mode_config_init(drm_dev);
+ rockchip_drm_create_properties(drm_dev);
- dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
- GFP_KERNEL);
- if (!dev->dma_parms) {
- ret = -ENOMEM;
- goto err_config_cleanup;
- }
-
- private->domain = iommu_domain_alloc(&platform_bus_type);
- if (!private->domain)
- return -ENOMEM;
-
- ret = iommu_get_dma_cookie(private->domain);
- if (ret)
- goto err_free_domain;
-
- group = iommu_group_get(dev);
- if (!group) {
- group = iommu_group_alloc();
- if (IS_ERR(group)) {
- dev_err(dev, "Failed to allocate IOMMU group\n");
- goto err_put_cookie;
- }
-
- ret = iommu_group_add_device(group, dev);
- iommu_group_put(group);
- if (ret) {
- dev_err(dev, "failed to add device to IOMMU group\n");
- goto err_put_cookie;
- }
- }
- /*
- * Attach virtual iommu device, sub iommu device can share the same
- * mapping with it.
- */
- ret = rockchip_drm_dma_attach_device(drm_dev, dev);
+ ret = rockchip_drm_init_iommu(drm_dev);
if (ret)
- goto err_group_remove_device;
+ goto err_config_cleanup;
/* Try to bind all sub drivers. */
ret = component_bind_all(dev, drm_dev);
if (ret)
- goto err_detach_device;
-
- /*
- * All components are now added, we can publish the connector sysfs
- * entries to userspace. This will generate hotplug events and so
- * userspace will expect to be able to access DRM at this point.
- */
- list_for_each_entry(connector, &drm_dev->mode_config.connector_list,
- head) {
- ret = drm_connector_register(connector);
- if (ret) {
- dev_err(drm_dev->dev,
- "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n",
- connector->base.id,
- connector->name, ret);
- goto err_unbind;
- }
- }
+ goto err_iommu_cleanup;
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(drm_dev);
drm_mode_config_reset(drm_dev);
+#ifndef MODULE
show_loader_logo(drm_dev);
+#endif
ret = rockchip_drm_fbdev_init(drm_dev);
if (ret)
drm_dev->mode_config.allow_fb_modifiers = true;
+ ret = drm_dev_register(drm_dev, 0);
+ if (ret)
+ goto err_fbdev_fini;
+
return 0;
+err_fbdev_fini:
+ rockchip_drm_fbdev_fini(drm_dev);
err_vblank_cleanup:
drm_vblank_cleanup(drm_dev);
err_kms_helper_poll_fini:
drm_kms_helper_poll_fini(drm_dev);
-err_unbind:
component_unbind_all(dev, drm_dev);
-err_detach_device:
- rockchip_drm_dma_detach_device(drm_dev, dev);
-err_group_remove_device:
- iommu_group_remove_device(dev);
-err_put_cookie:
- iommu_put_dma_cookie(private->domain);
-err_free_domain:
- iommu_domain_free(private->domain);
+err_iommu_cleanup:
+ rockchip_iommu_cleanup(drm_dev);
err_config_cleanup:
drm_mode_config_cleanup(drm_dev);
drm_dev->dev_private = NULL;
+err_free:
+ drm_dev_unref(drm_dev);
return ret;
}
-static int rockchip_drm_unload(struct drm_device *drm_dev)
+static void rockchip_drm_unbind(struct device *dev)
{
- struct device *dev = drm_dev->dev;
- struct rockchip_drm_private *private = drm_dev->dev_private;
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
rockchip_drm_fbdev_fini(drm_dev);
drm_vblank_cleanup(drm_dev);
drm_kms_helper_poll_fini(drm_dev);
component_unbind_all(dev, drm_dev);
- rockchip_drm_dma_detach_device(drm_dev, dev);
- iommu_group_remove_device(dev);
- iommu_put_dma_cookie(private->domain);
- iommu_domain_free(private->domain);
+ rockchip_iommu_cleanup(drm_dev);
drm_mode_config_cleanup(drm_dev);
drm_dev->dev_private = NULL;
-
- return 0;
+ drm_dev_unregister(drm_dev);
+ drm_dev_unref(drm_dev);
+ dev_set_drvdata(dev, NULL);
}
static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
{
struct rockchip_drm_private *priv = dev->dev_private;
- drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev_helper);
+ if (!priv->logo)
+ drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev_helper);
}
static const struct drm_ioctl_desc rockchip_ioctls[] = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_PRIME | DRIVER_ATOMIC |
DRIVER_RENDER,
- .load = rockchip_drm_load,
- .unload = rockchip_drm_unload,
.preclose = rockchip_drm_preclose,
.lastclose = rockchip_drm_lastclose,
.get_vblank_counter = drm_vblank_no_hw_counter,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = rockchip_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table,
.gem_prime_vmap = rockchip_gem_prime_vmap,
.gem_prime_vunmap = rockchip_gem_prime_vunmap,
.gem_prime_mmap = rockchip_gem_mmap_buf,
+ .gem_prime_begin_cpu_access = rockchip_gem_prime_begin_cpu_access,
+ .gem_prime_end_cpu_access = rockchip_gem_prime_end_cpu_access,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = rockchip_drm_debugfs_init,
+ .debugfs_cleanup = rockchip_drm_debugfs_cleanup,
+#endif
.ioctls = rockchip_ioctls,
.num_ioctls = ARRAY_SIZE(rockchip_ioctls),
.fops = &rockchip_drm_driver_fops,
}
}
-static int rockchip_drm_bind(struct device *dev)
-{
- struct drm_device *drm;
- int ret;
-
- drm = drm_dev_alloc(&rockchip_drm_driver, dev);
- if (!drm)
- return -ENOMEM;
-
- ret = drm_dev_set_unique(drm, "%s", dev_name(dev));
- if (ret)
- goto err_free;
-
- ret = drm_dev_register(drm, 0);
- if (ret)
- goto err_free;
-
- dev_set_drvdata(dev, drm);
-
- return 0;
-
-err_free:
- drm_dev_unref(drm);
- return ret;
-}
-
-static void rockchip_drm_unbind(struct device *dev)
-{
- struct drm_device *drm = dev_get_drvdata(dev);
-
- drm_dev_unregister(drm);
- drm_dev_unref(drm);
- dev_set_drvdata(dev, NULL);
-}
-
static const struct component_master_ops rockchip_drm_ops = {
.bind = rockchip_drm_bind,
.unbind = rockchip_drm_unbind,
* works as expected.
*/
for (i = 0;; i++) {
+ struct device_node *iommu;
+
port = of_parse_phandle(np, "ports", i);
if (!port)
break;
continue;
}
+ iommu = of_parse_phandle(port->parent, "iommus", 0);
+ if (!iommu || !of_device_is_available(iommu->parent)) {
+ dev_dbg(dev, "no iommu attached for %s, using non-iommu buffers\n",
+ port->parent->full_name);
+ /*
+ * if there is a crtc not support iommu, force set all
+ * crtc use non-iommu buffer.
+ */
+ is_support_iommu = false;
+ }
+
component_match_add(dev, &match, compare_of, port->parent);
of_node_put(port);
}
of_node_put(port);
}
+ port = of_parse_phandle(np, "backlight", 0);
+ if (port && of_device_is_available(port)) {
+ component_match_add(dev, &match, compare_of, port);
+ of_node_put(port);
+ }
+
return component_master_add_with_match(dev, &rockchip_drm_ops, match);
}