Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 26 Feb 2013 00:46:44 +0000 (16:46 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 26 Feb 2013 00:46:44 +0000 (16:46 -0800)
Pull drm merge from Dave Airlie:
 "Highlights:

   - TI LCD controller KMS driver

   - TI OMAP KMS driver merged from staging

   - drop gma500 stub driver

   - the fbcon locking fixes

   - the vgacon "dirty like zebra" fix

   - open firmware videomode and hdmi common code helpers

   - major locking rework for kms object handling - pageflip/cursor
     won't block on polling anymore!

   - fbcon helper and prime helper cleanups

   - i915: all over the map, haswell power well enhancements, valleyview
     macro horrors cleaned up, killing lots of legacy GTT code

   - radeon: CS ioctl unification, deprecated UMS support, gpu reset
     rework, VM fixes

   - nouveau: reworked thermal code, external dp/tmds encoder support
     (anx9805), fences sleep instead of polling

   - exynos: all over the driver fixes."

Lovely conflict in radeon/evergreen_cs.c between commit de0babd60d8d
("drm/radeon: enforce use of radeon_get_ib_value when reading user cmd")
and the new changes that modified the evergreen_dma_cs_parse()
function.

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (508 commits)
  drm/tilcdc: only build on arm
  drm/i915: Revert hdmi HDP pin checks
  drm/tegra: Add list of framebuffers to debugfs
  drm/tegra: Fix color expansion
  drm/tegra: Split DC_CMD_STATE_CONTROL register write
  drm/tegra: Implement page-flipping support
  drm/tegra: Implement VBLANK support
  drm/tegra: Implement .mode_set_base()
  drm/tegra: Add plane support
  drm/tegra: Remove bogus tegra_framebuffer structure
  drm: Add consistency check for page-flipping
  drm/radeon: Use generic HDMI infoframe helpers
  drm/tegra: Use generic HDMI infoframe helpers
  drm: Add EDID helper documentation
  drm: Add HDMI infoframe helpers
  video: Add generic HDMI infoframe helpers
  drm: Add some missing forward declarations
  drm: Move mode tables to drm_edid.c
  drm: Remove duplicate drm_mode_cea_vic()
  gma500: Fix n, m1 and m2 clock limits for sdvo and lvds
  ...

30 files changed:
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/nouveau/core/include/core/object.h
drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/omapdrm/Kconfig
drivers/gpu/drm/omapdrm/omap_connector.c
drivers/gpu/drm/omapdrm/omap_crtc.c
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/omapdrm/omap_encoder.c
drivers/gpu/drm/omapdrm/omap_fb.c
drivers/gpu/drm/omapdrm/omap_fbdev.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/omapdrm/omap_plane.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tegra/hdmi.c
drivers/iommu/intel-iommu.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/tty/vt/vt.c
drivers/video/Kconfig
drivers/video/Makefile
drivers/video/console/fbcon.c
include/linux/console.h
kernel/printk.c

Simple merge
Simple merge
Simple merge
index 0000000000000000000000000000000000000000,b724a41314359053d598176e7ac871ba3a63704e..09f65dc3d2c85397e4147e51291c1b7510450d03
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,25 +1,25 @@@
 -      select OMAP2_DSS
+ config DRM_OMAP
+       tristate "OMAP DRM"
+       depends on DRM && !CONFIG_FB_OMAP2
+       depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
++      depends on OMAP2_DSS
+       select DRM_KMS_HELPER
+       select FB_SYS_FILLRECT
+       select FB_SYS_COPYAREA
+       select FB_SYS_IMAGEBLIT
+       select FB_SYS_FOPS
+       default n
+       help
+         DRM display driver for OMAP2/3/4 based boards.
+ config DRM_OMAP_NUM_CRTCS
+       int "Number of CRTCs"
+       range 1 10
+       default 1  if ARCH_OMAP2 || ARCH_OMAP3
+       default 2  if ARCH_OMAP4
+       depends on DRM_OMAP
+       help
+         Select the number of video overlays which can be used as framebuffers.
+         The remaining overlays are reserved for video.
index 0000000000000000000000000000000000000000,44284fd981fc0824b3bd5c6b6e0d583c03d5f72a..c451c41a7a7d2f5b2333dc2217a2adc2db061ecc
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,298 +1,296 @@@
 -      if (!omap_connector) {
 -              dev_err(dev->dev, "could not allocate connector\n");
+ /*
+  * drivers/gpu/drm/omapdrm/omap_connector.c
+  *
+  * Copyright (C) 2011 Texas Instruments
+  * Author: Rob Clark <rob@ti.com>
+  *
+  * This program is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 as published by
+  * the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  * more details.
+  *
+  * You should have received a copy of the GNU General Public License along with
+  * this program.  If not, see <http://www.gnu.org/licenses/>.
+  */
+ #include "omap_drv.h"
+ #include "drm_crtc.h"
+ #include "drm_crtc_helper.h"
+ /*
+  * connector funcs
+  */
+ #define to_omap_connector(x) container_of(x, struct omap_connector, base)
+ struct omap_connector {
+       struct drm_connector base;
+       struct omap_dss_device *dssdev;
+       struct drm_encoder *encoder;
+ };
+ void copy_timings_omap_to_drm(struct drm_display_mode *mode,
+               struct omap_video_timings *timings)
+ {
+       mode->clock = timings->pixel_clock;
+       mode->hdisplay = timings->x_res;
+       mode->hsync_start = mode->hdisplay + timings->hfp;
+       mode->hsync_end = mode->hsync_start + timings->hsw;
+       mode->htotal = mode->hsync_end + timings->hbp;
+       mode->vdisplay = timings->y_res;
+       mode->vsync_start = mode->vdisplay + timings->vfp;
+       mode->vsync_end = mode->vsync_start + timings->vsw;
+       mode->vtotal = mode->vsync_end + timings->vbp;
+       mode->flags = 0;
+       if (timings->interlace)
+               mode->flags |= DRM_MODE_FLAG_INTERLACE;
+       if (timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
+               mode->flags |= DRM_MODE_FLAG_PHSYNC;
+       else
+               mode->flags |= DRM_MODE_FLAG_NHSYNC;
+       if (timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
+               mode->flags |= DRM_MODE_FLAG_PVSYNC;
+       else
+               mode->flags |= DRM_MODE_FLAG_NVSYNC;
+ }
+ void copy_timings_drm_to_omap(struct omap_video_timings *timings,
+               struct drm_display_mode *mode)
+ {
+       timings->pixel_clock = mode->clock;
+       timings->x_res = mode->hdisplay;
+       timings->hfp = mode->hsync_start - mode->hdisplay;
+       timings->hsw = mode->hsync_end - mode->hsync_start;
+       timings->hbp = mode->htotal - mode->hsync_end;
+       timings->y_res = mode->vdisplay;
+       timings->vfp = mode->vsync_start - mode->vdisplay;
+       timings->vsw = mode->vsync_end - mode->vsync_start;
+       timings->vbp = mode->vtotal - mode->vsync_end;
+       timings->interlace = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+       if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+               timings->hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
+       else
+               timings->hsync_level = OMAPDSS_SIG_ACTIVE_LOW;
+       if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+               timings->vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
+       else
+               timings->vsync_level = OMAPDSS_SIG_ACTIVE_LOW;
+       timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
+       timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
+       timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
+ }
+ static enum drm_connector_status omap_connector_detect(
+               struct drm_connector *connector, bool force)
+ {
+       struct omap_connector *omap_connector = to_omap_connector(connector);
+       struct omap_dss_device *dssdev = omap_connector->dssdev;
+       struct omap_dss_driver *dssdrv = dssdev->driver;
+       enum drm_connector_status ret;
+       if (dssdrv->detect) {
+               if (dssdrv->detect(dssdev))
+                       ret = connector_status_connected;
+               else
+                       ret = connector_status_disconnected;
+       } else {
+               ret = connector_status_unknown;
+       }
+       VERB("%s: %d (force=%d)", omap_connector->dssdev->name, ret, force);
+       return ret;
+ }
+ static void omap_connector_destroy(struct drm_connector *connector)
+ {
+       struct omap_connector *omap_connector = to_omap_connector(connector);
+       struct omap_dss_device *dssdev = omap_connector->dssdev;
+       DBG("%s", omap_connector->dssdev->name);
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+       kfree(omap_connector);
+       omap_dss_put_device(dssdev);
+ }
+ #define MAX_EDID  512
+ static int omap_connector_get_modes(struct drm_connector *connector)
+ {
+       struct omap_connector *omap_connector = to_omap_connector(connector);
+       struct omap_dss_device *dssdev = omap_connector->dssdev;
+       struct omap_dss_driver *dssdrv = dssdev->driver;
+       struct drm_device *dev = connector->dev;
+       int n = 0;
+       DBG("%s", omap_connector->dssdev->name);
+       /* if display exposes EDID, then we parse that in the normal way to
+        * build table of supported modes.. otherwise (ie. fixed resolution
+        * LCD panels) we just return a single mode corresponding to the
+        * currently configured timings:
+        */
+       if (dssdrv->read_edid) {
+               void *edid = kzalloc(MAX_EDID, GFP_KERNEL);
+               if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
+                               drm_edid_is_valid(edid)) {
+                       drm_mode_connector_update_edid_property(
+                                       connector, edid);
+                       n = drm_add_edid_modes(connector, edid);
+               } else {
+                       drm_mode_connector_update_edid_property(
+                                       connector, NULL);
+               }
+               kfree(edid);
+       } else {
+               struct drm_display_mode *mode = drm_mode_create(dev);
+               struct omap_video_timings timings = {0};
+               dssdrv->get_timings(dssdev, &timings);
+               copy_timings_omap_to_drm(mode, &timings);
+               mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+               drm_mode_set_name(mode);
+               drm_mode_probed_add(connector, mode);
+               n = 1;
+       }
+       return n;
+ }
+ static int omap_connector_mode_valid(struct drm_connector *connector,
+                                struct drm_display_mode *mode)
+ {
+       struct omap_connector *omap_connector = to_omap_connector(connector);
+       struct omap_dss_device *dssdev = omap_connector->dssdev;
+       struct omap_dss_driver *dssdrv = dssdev->driver;
+       struct omap_video_timings timings = {0};
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *new_mode;
+       int ret = MODE_BAD;
+       copy_timings_drm_to_omap(&timings, mode);
+       mode->vrefresh = drm_mode_vrefresh(mode);
+       if (!dssdrv->check_timings(dssdev, &timings)) {
+               /* check if vrefresh is still valid */
+               new_mode = drm_mode_duplicate(dev, mode);
+               new_mode->clock = timings.pixel_clock;
+               new_mode->vrefresh = 0;
+               if (mode->vrefresh == drm_mode_vrefresh(new_mode))
+                       ret = MODE_OK;
+               drm_mode_destroy(dev, new_mode);
+       }
+       DBG("connector: mode %s: "
+                       "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+                       (ret == MODE_OK) ? "valid" : "invalid",
+                       mode->base.id, mode->name, mode->vrefresh, mode->clock,
+                       mode->hdisplay, mode->hsync_start,
+                       mode->hsync_end, mode->htotal,
+                       mode->vdisplay, mode->vsync_start,
+                       mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+       return ret;
+ }
+ struct drm_encoder *omap_connector_attached_encoder(
+               struct drm_connector *connector)
+ {
+       struct omap_connector *omap_connector = to_omap_connector(connector);
+       return omap_connector->encoder;
+ }
+ static const struct drm_connector_funcs omap_connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .detect = omap_connector_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = omap_connector_destroy,
+ };
+ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
+       .get_modes = omap_connector_get_modes,
+       .mode_valid = omap_connector_mode_valid,
+       .best_encoder = omap_connector_attached_encoder,
+ };
+ /* flush an area of the framebuffer (in case of manual update display that
+  * is not automatically flushed)
+  */
+ void omap_connector_flush(struct drm_connector *connector,
+               int x, int y, int w, int h)
+ {
+       struct omap_connector *omap_connector = to_omap_connector(connector);
+       /* TODO: enable when supported in dss */
+       VERB("%s: %d,%d, %dx%d", omap_connector->dssdev->name, x, y, w, h);
+ }
+ /* initialize connector */
+ struct drm_connector *omap_connector_init(struct drm_device *dev,
+               int connector_type, struct omap_dss_device *dssdev,
+               struct drm_encoder *encoder)
+ {
+       struct drm_connector *connector = NULL;
+       struct omap_connector *omap_connector;
+       DBG("%s", dssdev->name);
+       omap_dss_get_device(dssdev);
+       omap_connector = kzalloc(sizeof(struct omap_connector), GFP_KERNEL);
 -      }
++      if (!omap_connector)
+               goto fail;
+       omap_connector->dssdev = dssdev;
+       omap_connector->encoder = encoder;
+       connector = &omap_connector->base;
+       drm_connector_init(dev, connector, &omap_connector_funcs,
+                               connector_type);
+       drm_connector_helper_add(connector, &omap_connector_helper_funcs);
+ #if 0 /* enable when dss2 supports hotplug */
+       if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_HPD)
+               connector->polled = 0;
+       else
+ #endif
+               connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+                               DRM_CONNECTOR_POLL_DISCONNECT;
+       connector->interlace_allowed = 1;
+       connector->doublescan_allowed = 0;
+       drm_sysfs_connector_add(connector);
+       return connector;
+ fail:
+       if (connector)
+               omap_connector_destroy(connector);
+       return NULL;
+ }
index 0000000000000000000000000000000000000000,ac2258f59805968e65cde237a3134de6b92bd89b..bec66a490b8f7ded93ff3f766c562a8a76967f2e
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,657 +1,654 @@@
 -
 -      if (!omap_crtc) {
 -              dev_err(dev->dev, "could not allocate CRTC\n");
+ /*
+  * drivers/gpu/drm/omapdrm/omap_crtc.c
+  *
+  * Copyright (C) 2011 Texas Instruments
+  * Author: Rob Clark <rob@ti.com>
+  *
+  * This program is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 as published by
+  * the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  * more details.
+  *
+  * You should have received a copy of the GNU General Public License along with
+  * this program.  If not, see <http://www.gnu.org/licenses/>.
+  */
+ #include "omap_drv.h"
+ #include <drm/drm_mode.h>
+ #include "drm_crtc.h"
+ #include "drm_crtc_helper.h"
+ #define to_omap_crtc(x) container_of(x, struct omap_crtc, base)
+ struct omap_crtc {
+       struct drm_crtc base;
+       struct drm_plane *plane;
+       const char *name;
+       int pipe;
+       enum omap_channel channel;
+       struct omap_overlay_manager_info info;
+       /*
+        * Temporary: eventually this will go away, but it is needed
+        * for now to keep the output's happy.  (They only need
+        * mgr->id.)  Eventually this will be replaced w/ something
+        * more common-panel-framework-y
+        */
+       struct omap_overlay_manager mgr;
+       struct omap_video_timings timings;
+       bool enabled;
+       bool full_update;
+       struct omap_drm_apply apply;
+       struct omap_drm_irq apply_irq;
+       struct omap_drm_irq error_irq;
+       /* list of in-progress apply's: */
+       struct list_head pending_applies;
+       /* list of queued apply's: */
+       struct list_head queued_applies;
+       /* for handling queued and in-progress applies: */
+       struct work_struct apply_work;
+       /* if there is a pending flip, these will be non-null: */
+       struct drm_pending_vblank_event *event;
+       struct drm_framebuffer *old_fb;
+       /* for handling page flips without caring about what
+        * the callback is called from.  Possibly we should just
+        * make omap_gem always call the cb from the worker so
+        * we don't have to care about this..
+        *
+        * XXX maybe fold into apply_work??
+        */
+       struct work_struct page_flip_work;
+ };
+ /*
+  * Manager-ops, callbacks from output when they need to configure
+  * the upstream part of the video pipe.
+  *
+  * Most of these we can ignore until we add support for command-mode
+  * panels.. for video-mode the crtc-helpers already do an adequate
+  * job of sequencing the setup of the video pipe in the proper order
+  */
+ /* we can probably ignore these until we support command-mode panels: */
+ static void omap_crtc_start_update(struct omap_overlay_manager *mgr)
+ {
+ }
+ static int omap_crtc_enable(struct omap_overlay_manager *mgr)
+ {
+       return 0;
+ }
+ static void omap_crtc_disable(struct omap_overlay_manager *mgr)
+ {
+ }
+ static void omap_crtc_set_timings(struct omap_overlay_manager *mgr,
+               const struct omap_video_timings *timings)
+ {
+       struct omap_crtc *omap_crtc = container_of(mgr, struct omap_crtc, mgr);
+       DBG("%s", omap_crtc->name);
+       omap_crtc->timings = *timings;
+       omap_crtc->full_update = true;
+ }
+ static void omap_crtc_set_lcd_config(struct omap_overlay_manager *mgr,
+               const struct dss_lcd_mgr_config *config)
+ {
+       struct omap_crtc *omap_crtc = container_of(mgr, struct omap_crtc, mgr);
+       DBG("%s", omap_crtc->name);
+       dispc_mgr_set_lcd_config(omap_crtc->channel, config);
+ }
+ static int omap_crtc_register_framedone_handler(
+               struct omap_overlay_manager *mgr,
+               void (*handler)(void *), void *data)
+ {
+       return 0;
+ }
+ static void omap_crtc_unregister_framedone_handler(
+               struct omap_overlay_manager *mgr,
+               void (*handler)(void *), void *data)
+ {
+ }
+ static const struct dss_mgr_ops mgr_ops = {
+               .start_update = omap_crtc_start_update,
+               .enable = omap_crtc_enable,
+               .disable = omap_crtc_disable,
+               .set_timings = omap_crtc_set_timings,
+               .set_lcd_config = omap_crtc_set_lcd_config,
+               .register_framedone_handler = omap_crtc_register_framedone_handler,
+               .unregister_framedone_handler = omap_crtc_unregister_framedone_handler,
+ };
+ /*
+  * CRTC funcs:
+  */
+ static void omap_crtc_destroy(struct drm_crtc *crtc)
+ {
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       DBG("%s", omap_crtc->name);
+       WARN_ON(omap_crtc->apply_irq.registered);
+       omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+       omap_crtc->plane->funcs->destroy(omap_crtc->plane);
+       drm_crtc_cleanup(crtc);
+       kfree(omap_crtc);
+ }
+ static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
+ {
+       struct omap_drm_private *priv = crtc->dev->dev_private;
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       bool enabled = (mode == DRM_MODE_DPMS_ON);
+       int i;
+       DBG("%s: %d", omap_crtc->name, mode);
+       if (enabled != omap_crtc->enabled) {
+               omap_crtc->enabled = enabled;
+               omap_crtc->full_update = true;
+               omap_crtc_apply(crtc, &omap_crtc->apply);
+               /* also enable our private plane: */
+               WARN_ON(omap_plane_dpms(omap_crtc->plane, mode));
+               /* and any attached overlay planes: */
+               for (i = 0; i < priv->num_planes; i++) {
+                       struct drm_plane *plane = priv->planes[i];
+                       if (plane->crtc == crtc)
+                               WARN_ON(omap_plane_dpms(plane, mode));
+               }
+       }
+ }
+ static bool omap_crtc_mode_fixup(struct drm_crtc *crtc,
+               const struct drm_display_mode *mode,
+               struct drm_display_mode *adjusted_mode)
+ {
+       return true;
+ }
+ static int omap_crtc_mode_set(struct drm_crtc *crtc,
+               struct drm_display_mode *mode,
+               struct drm_display_mode *adjusted_mode,
+               int x, int y,
+               struct drm_framebuffer *old_fb)
+ {
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       mode = adjusted_mode;
+       DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+                       omap_crtc->name, mode->base.id, mode->name,
+                       mode->vrefresh, mode->clock,
+                       mode->hdisplay, mode->hsync_start,
+                       mode->hsync_end, mode->htotal,
+                       mode->vdisplay, mode->vsync_start,
+                       mode->vsync_end, mode->vtotal,
+                       mode->type, mode->flags);
+       copy_timings_drm_to_omap(&omap_crtc->timings, mode);
+       omap_crtc->full_update = true;
+       return omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
+                       0, 0, mode->hdisplay, mode->vdisplay,
+                       x << 16, y << 16,
+                       mode->hdisplay << 16, mode->vdisplay << 16,
+                       NULL, NULL);
+ }
+ static void omap_crtc_prepare(struct drm_crtc *crtc)
+ {
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       DBG("%s", omap_crtc->name);
+       omap_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ }
+ static void omap_crtc_commit(struct drm_crtc *crtc)
+ {
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       DBG("%s", omap_crtc->name);
+       omap_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ }
+ static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+               struct drm_framebuffer *old_fb)
+ {
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       struct drm_plane *plane = omap_crtc->plane;
+       struct drm_display_mode *mode = &crtc->mode;
+       return omap_plane_mode_set(plane, crtc, crtc->fb,
+                       0, 0, mode->hdisplay, mode->vdisplay,
+                       x << 16, y << 16,
+                       mode->hdisplay << 16, mode->vdisplay << 16,
+                       NULL, NULL);
+ }
+ static void omap_crtc_load_lut(struct drm_crtc *crtc)
+ {
+ }
+ static void vblank_cb(void *arg)
+ {
+       struct drm_crtc *crtc = arg;
+       struct drm_device *dev = crtc->dev;
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       unsigned long flags;
+       spin_lock_irqsave(&dev->event_lock, flags);
+       /* wakeup userspace */
+       if (omap_crtc->event)
+               drm_send_vblank_event(dev, omap_crtc->pipe, omap_crtc->event);
+       omap_crtc->event = NULL;
+       omap_crtc->old_fb = NULL;
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+ static void page_flip_worker(struct work_struct *work)
+ {
+       struct omap_crtc *omap_crtc =
+                       container_of(work, struct omap_crtc, page_flip_work);
+       struct drm_crtc *crtc = &omap_crtc->base;
+       struct drm_display_mode *mode = &crtc->mode;
+       struct drm_gem_object *bo;
+       mutex_lock(&crtc->mutex);
+       omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
+                       0, 0, mode->hdisplay, mode->vdisplay,
+                       crtc->x << 16, crtc->y << 16,
+                       mode->hdisplay << 16, mode->vdisplay << 16,
+                       vblank_cb, crtc);
+       mutex_unlock(&crtc->mutex);
+       bo = omap_framebuffer_bo(crtc->fb, 0);
+       drm_gem_object_unreference_unlocked(bo);
+ }
+ static void page_flip_cb(void *arg)
+ {
+       struct drm_crtc *crtc = arg;
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       struct omap_drm_private *priv = crtc->dev->dev_private;
+       /* avoid assumptions about what ctxt we are called from: */
+       queue_work(priv->wq, &omap_crtc->page_flip_work);
+ }
+ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
+                struct drm_framebuffer *fb,
+                struct drm_pending_vblank_event *event)
+ {
+       struct drm_device *dev = crtc->dev;
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       struct drm_gem_object *bo;
+       DBG("%d -> %d (event=%p)", crtc->fb ? crtc->fb->base.id : -1,
+                       fb->base.id, event);
+       if (omap_crtc->old_fb) {
+               dev_err(dev->dev, "already a pending flip\n");
+               return -EINVAL;
+       }
+       omap_crtc->event = event;
+       crtc->fb = fb;
+       /*
+        * Hold a reference temporarily until the crtc is updated
+        * and takes the reference to the bo.  This avoids it
+        * getting freed from under us:
+        */
+       bo = omap_framebuffer_bo(fb, 0);
+       drm_gem_object_reference(bo);
+       omap_gem_op_async(bo, OMAP_GEM_READ, page_flip_cb, crtc);
+       return 0;
+ }
+ static int omap_crtc_set_property(struct drm_crtc *crtc,
+               struct drm_property *property, uint64_t val)
+ {
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       struct omap_drm_private *priv = crtc->dev->dev_private;
+       if (property == priv->rotation_prop) {
+               crtc->invert_dimensions =
+                               !!(val & ((1LL << DRM_ROTATE_90) | (1LL << DRM_ROTATE_270)));
+       }
+       return omap_plane_set_property(omap_crtc->plane, property, val);
+ }
+ static const struct drm_crtc_funcs omap_crtc_funcs = {
+       .set_config = drm_crtc_helper_set_config,
+       .destroy = omap_crtc_destroy,
+       .page_flip = omap_crtc_page_flip_locked,
+       .set_property = omap_crtc_set_property,
+ };
+ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
+       .dpms = omap_crtc_dpms,
+       .mode_fixup = omap_crtc_mode_fixup,
+       .mode_set = omap_crtc_mode_set,
+       .prepare = omap_crtc_prepare,
+       .commit = omap_crtc_commit,
+       .mode_set_base = omap_crtc_mode_set_base,
+       .load_lut = omap_crtc_load_lut,
+ };
+ const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
+ {
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       return &omap_crtc->timings;
+ }
+ enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
+ {
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       return omap_crtc->channel;
+ }
+ static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+ {
+       struct omap_crtc *omap_crtc =
+                       container_of(irq, struct omap_crtc, error_irq);
+       struct drm_crtc *crtc = &omap_crtc->base;
+       DRM_ERROR("%s: errors: %08x\n", omap_crtc->name, irqstatus);
+       /* avoid getting in a flood, unregister the irq until next vblank */
+       omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+ }
+ static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+ {
+       struct omap_crtc *omap_crtc =
+                       container_of(irq, struct omap_crtc, apply_irq);
+       struct drm_crtc *crtc = &omap_crtc->base;
+       if (!omap_crtc->error_irq.registered)
+               omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+       if (!dispc_mgr_go_busy(omap_crtc->channel)) {
+               struct omap_drm_private *priv =
+                               crtc->dev->dev_private;
+               DBG("%s: apply done", omap_crtc->name);
+               omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
+               queue_work(priv->wq, &omap_crtc->apply_work);
+       }
+ }
+ static void apply_worker(struct work_struct *work)
+ {
+       struct omap_crtc *omap_crtc =
+                       container_of(work, struct omap_crtc, apply_work);
+       struct drm_crtc *crtc = &omap_crtc->base;
+       struct drm_device *dev = crtc->dev;
+       struct omap_drm_apply *apply, *n;
+       bool need_apply;
+       /*
+        * Synchronize everything on mode_config.mutex, to keep
+        * the callbacks and list modification all serialized
+        * with respect to modesetting ioctls from userspace.
+        */
+       mutex_lock(&crtc->mutex);
+       dispc_runtime_get();
+       /*
+        * If we are still pending a previous update, wait.. when the
+        * pending update completes, we get kicked again.
+        */
+       if (omap_crtc->apply_irq.registered)
+               goto out;
+       /* finish up previous apply's: */
+       list_for_each_entry_safe(apply, n,
+                       &omap_crtc->pending_applies, pending_node) {
+               apply->post_apply(apply);
+               list_del(&apply->pending_node);
+       }
+       need_apply = !list_empty(&omap_crtc->queued_applies);
+       /* then handle the next round of of queued apply's: */
+       list_for_each_entry_safe(apply, n,
+                       &omap_crtc->queued_applies, queued_node) {
+               apply->pre_apply(apply);
+               list_del(&apply->queued_node);
+               apply->queued = false;
+               list_add_tail(&apply->pending_node,
+                               &omap_crtc->pending_applies);
+       }
+       if (need_apply) {
+               enum omap_channel channel = omap_crtc->channel;
+               DBG("%s: GO", omap_crtc->name);
+               if (dispc_mgr_is_enabled(channel)) {
+                       omap_irq_register(dev, &omap_crtc->apply_irq);
+                       dispc_mgr_go(channel);
+               } else {
+                       struct omap_drm_private *priv = dev->dev_private;
+                       queue_work(priv->wq, &omap_crtc->apply_work);
+               }
+       }
+ out:
+       dispc_runtime_put();
+       mutex_unlock(&crtc->mutex);
+ }
+ int omap_crtc_apply(struct drm_crtc *crtc,
+               struct omap_drm_apply *apply)
+ {
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       WARN_ON(!mutex_is_locked(&crtc->mutex));
+       /* no need to queue it again if it is already queued: */
+       if (apply->queued)
+               return 0;
+       apply->queued = true;
+       list_add_tail(&apply->queued_node, &omap_crtc->queued_applies);
+       /*
+        * If there are no currently pending updates, then go ahead and
+        * kick the worker immediately, otherwise it will run again when
+        * the current update finishes.
+        */
+       if (list_empty(&omap_crtc->pending_applies)) {
+               struct omap_drm_private *priv = crtc->dev->dev_private;
+               queue_work(priv->wq, &omap_crtc->apply_work);
+       }
+       return 0;
+ }
+ /* called only from apply */
+ static void set_enabled(struct drm_crtc *crtc, bool enable)
+ {
+       struct drm_device *dev = crtc->dev;
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       enum omap_channel channel = omap_crtc->channel;
+       struct omap_irq_wait *wait = NULL;
+       if (dispc_mgr_is_enabled(channel) == enable)
+               return;
+       /* ignore sync-lost irqs during enable/disable */
+       omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+       if (dispc_mgr_get_framedone_irq(channel)) {
+               if (!enable) {
+                       wait = omap_irq_wait_init(dev,
+                                       dispc_mgr_get_framedone_irq(channel), 1);
+               }
+       } else {
+               /*
+                * When we disable digit output, we need to wait until fields
+                * are done.  Otherwise the DSS is still working, and turning
+                * off the clocks prevents DSS from going to OFF mode. And when
+                * enabling, we need to wait for the extra sync losts
+                */
+               wait = omap_irq_wait_init(dev,
+                               dispc_mgr_get_vsync_irq(channel), 2);
+       }
+       dispc_mgr_enable(channel, enable);
+       if (wait) {
+               int ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
+               if (ret) {
+                       dev_err(dev->dev, "%s: timeout waiting for %s\n",
+                                       omap_crtc->name, enable ? "enable" : "disable");
+               }
+       }
+       omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+ }
+ static void omap_crtc_pre_apply(struct omap_drm_apply *apply)
+ {
+       struct omap_crtc *omap_crtc =
+                       container_of(apply, struct omap_crtc, apply);
+       struct drm_crtc *crtc = &omap_crtc->base;
+       struct drm_encoder *encoder = NULL;
+       DBG("%s: enabled=%d, full=%d", omap_crtc->name,
+                       omap_crtc->enabled, omap_crtc->full_update);
+       if (omap_crtc->full_update) {
+               struct omap_drm_private *priv = crtc->dev->dev_private;
+               int i;
+               for (i = 0; i < priv->num_encoders; i++) {
+                       if (priv->encoders[i]->crtc == crtc) {
+                               encoder = priv->encoders[i];
+                               break;
+                       }
+               }
+       }
+       if (!omap_crtc->enabled) {
+               set_enabled(&omap_crtc->base, false);
+               if (encoder)
+                       omap_encoder_set_enabled(encoder, false);
+       } else {
+               if (encoder) {
+                       omap_encoder_set_enabled(encoder, false);
+                       omap_encoder_update(encoder, &omap_crtc->mgr,
+                                       &omap_crtc->timings);
+                       omap_encoder_set_enabled(encoder, true);
+                       omap_crtc->full_update = false;
+               }
+               dispc_mgr_setup(omap_crtc->channel, &omap_crtc->info);
+               dispc_mgr_set_timings(omap_crtc->channel,
+                               &omap_crtc->timings);
+               set_enabled(&omap_crtc->base, true);
+       }
+       omap_crtc->full_update = false;
+ }
+ static void omap_crtc_post_apply(struct omap_drm_apply *apply)
+ {
+       /* nothing needed for post-apply */
+ }
+ static const char *channel_names[] = {
+               [OMAP_DSS_CHANNEL_LCD] = "lcd",
+               [OMAP_DSS_CHANNEL_DIGIT] = "tv",
+               [OMAP_DSS_CHANNEL_LCD2] = "lcd2",
+ };
+ /* initialize crtc */
+ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
+               struct drm_plane *plane, enum omap_channel channel, int id)
+ {
+       struct drm_crtc *crtc = NULL;
+       struct omap_crtc *omap_crtc;
+       struct omap_overlay_manager_info *info;
+       DBG("%s", channel_names[channel]);
+       omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
 -      }
++      if (!omap_crtc)
+               goto fail;
+       crtc = &omap_crtc->base;
+       INIT_WORK(&omap_crtc->page_flip_work, page_flip_worker);
+       INIT_WORK(&omap_crtc->apply_work, apply_worker);
+       INIT_LIST_HEAD(&omap_crtc->pending_applies);
+       INIT_LIST_HEAD(&omap_crtc->queued_applies);
+       omap_crtc->apply.pre_apply  = omap_crtc_pre_apply;
+       omap_crtc->apply.post_apply = omap_crtc_post_apply;
+       omap_crtc->apply_irq.irqmask = pipe2vbl(id);
+       omap_crtc->apply_irq.irq = omap_crtc_apply_irq;
+       omap_crtc->error_irq.irqmask =
+                       dispc_mgr_get_sync_lost_irq(channel);
+       omap_crtc->error_irq.irq = omap_crtc_error_irq;
+       omap_irq_register(dev, &omap_crtc->error_irq);
+       omap_crtc->channel = channel;
+       omap_crtc->plane = plane;
+       omap_crtc->plane->crtc = crtc;
+       omap_crtc->name = channel_names[channel];
+       omap_crtc->pipe = id;
+       /* temporary: */
+       omap_crtc->mgr.id = channel;
+       dss_install_mgr_ops(&mgr_ops);
+       /* TODO: fix hard-coded setup.. add properties! */
+       info = &omap_crtc->info;
+       info->default_color = 0x00000000;
+       info->trans_key = 0x00000000;
+       info->trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+       info->trans_enabled = false;
+       drm_crtc_init(dev, crtc, &omap_crtc_funcs);
+       drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
+       omap_plane_install_properties(omap_crtc->plane, &crtc->base);
+       return crtc;
+ fail:
+       if (crtc)
+               omap_crtc_destroy(crtc);
+       return NULL;
+ }
index 0000000000000000000000000000000000000000,3910215371053037aff61580939cd8596298e9eb..9b794c933c811dde3289f760be63bf1e026cd71c
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,991 +1,986 @@@
 -      if (!omap_dmm) {
 -              dev_err(&dev->dev, "failed to allocate driver data section\n");
+ /*
+  * DMM IOMMU driver support functions for TI OMAP processors.
+  *
+  * Author: Rob Clark <rob@ti.com>
+  *         Andy Gross <andy.gross@ti.com>
+  *
+  * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License as
+  * published by the Free Software Foundation version 2.
+  *
+  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+  * kind, whether express or implied; without even the implied warranty
+  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  */
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/platform_device.h> /* platform_device() */
+ #include <linux/errno.h>
+ #include <linux/sched.h>
+ #include <linux/wait.h>
+ #include <linux/interrupt.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+ #include <linux/delay.h>
+ #include <linux/mm.h>
+ #include <linux/time.h>
+ #include <linux/list.h>
+ #include "omap_dmm_tiler.h"
+ #include "omap_dmm_priv.h"
+ #define DMM_DRIVER_NAME "dmm"
+ /* mappings for associating views to luts */
+ static struct tcm *containers[TILFMT_NFORMATS];
+ static struct dmm *omap_dmm;
+ /* global spinlock for protecting lists */
+ static DEFINE_SPINLOCK(list_lock);
+ /* Geometry table */
+ #define GEOM(xshift, yshift, bytes_per_pixel) { \
+               .x_shft = (xshift), \
+               .y_shft = (yshift), \
+               .cpp    = (bytes_per_pixel), \
+               .slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
+               .slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
+       }
+ static const struct {
+       uint32_t x_shft;        /* unused X-bits (as part of bpp) */
+       uint32_t y_shft;        /* unused Y-bits (as part of bpp) */
+       uint32_t cpp;           /* bytes/chars per pixel */
+       uint32_t slot_w;        /* width of each slot (in pixels) */
+       uint32_t slot_h;        /* height of each slot (in pixels) */
+ } geom[TILFMT_NFORMATS] = {
+               [TILFMT_8BIT]  = GEOM(0, 0, 1),
+               [TILFMT_16BIT] = GEOM(0, 1, 2),
+               [TILFMT_32BIT] = GEOM(1, 1, 4),
+               [TILFMT_PAGE]  = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
+ };
+ /* lookup table for registers w/ per-engine instances */
+ static const uint32_t reg[][4] = {
+               [PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
+                               DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
+               [PAT_DESCR]  = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
+                               DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
+ };
+ /* simple allocator to grab next 16 byte aligned memory from txn */
+ static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
+ {
+       void *ptr;
+       struct refill_engine *engine = txn->engine_handle;
+       /* dmm programming requires 16 byte aligned addresses */
+       txn->current_pa = round_up(txn->current_pa, 16);
+       txn->current_va = (void *)round_up((long)txn->current_va, 16);
+       ptr = txn->current_va;
+       *pa = txn->current_pa;
+       txn->current_pa += sz;
+       txn->current_va += sz;
+       BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);
+       return ptr;
+ }
+ /* check status and spin until wait_mask comes true */
+ static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
+ {
+       struct dmm *dmm = engine->dmm;
+       uint32_t r = 0, err, i;
+       i = DMM_FIXED_RETRY_COUNT;
+       while (true) {
+               r = readl(dmm->base + reg[PAT_STATUS][engine->id]);
+               err = r & DMM_PATSTATUS_ERR;
+               if (err)
+                       return -EFAULT;
+               if ((r & wait_mask) == wait_mask)
+                       break;
+               if (--i == 0)
+                       return -ETIMEDOUT;
+               udelay(1);
+       }
+       return 0;
+ }
+ static void release_engine(struct refill_engine *engine)
+ {
+       unsigned long flags;
+       spin_lock_irqsave(&list_lock, flags);
+       list_add(&engine->idle_node, &omap_dmm->idle_head);
+       spin_unlock_irqrestore(&list_lock, flags);
+       atomic_inc(&omap_dmm->engine_counter);
+       wake_up_interruptible(&omap_dmm->engine_queue);
+ }
+ static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
+ {
+       struct dmm *dmm = arg;
+       uint32_t status = readl(dmm->base + DMM_PAT_IRQSTATUS);
+       int i;
+       /* ack IRQ */
+       writel(status, dmm->base + DMM_PAT_IRQSTATUS);
+       for (i = 0; i < dmm->num_engines; i++) {
+               if (status & DMM_IRQSTAT_LST) {
+                       wake_up_interruptible(&dmm->engines[i].wait_for_refill);
+                       if (dmm->engines[i].async)
+                               release_engine(&dmm->engines[i]);
+               }
+               status >>= 8;
+       }
+       return IRQ_HANDLED;
+ }
+ /**
+  * Get a handle for a DMM transaction
+  */
+ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
+ {
+       struct dmm_txn *txn = NULL;
+       struct refill_engine *engine = NULL;
+       int ret;
+       unsigned long flags;
+       /* wait until an engine is available */
+       ret = wait_event_interruptible(omap_dmm->engine_queue,
+               atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
+       if (ret)
+               return ERR_PTR(ret);
+       /* grab an idle engine */
+       spin_lock_irqsave(&list_lock, flags);
+       if (!list_empty(&dmm->idle_head)) {
+               engine = list_entry(dmm->idle_head.next, struct refill_engine,
+                                       idle_node);
+               list_del(&engine->idle_node);
+       }
+       spin_unlock_irqrestore(&list_lock, flags);
+       BUG_ON(!engine);
+       txn = &engine->txn;
+       engine->tcm = tcm;
+       txn->engine_handle = engine;
+       txn->last_pat = NULL;
+       txn->current_va = engine->refill_va;
+       txn->current_pa = engine->refill_pa;
+       return txn;
+ }
+ /**
+  * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
+  * corresponding slot is cleared (ie. dummy_pa is programmed)
+  */
+ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
+               struct page **pages, uint32_t npages, uint32_t roll)
+ {
+       dma_addr_t pat_pa = 0;
+       uint32_t *data;
+       struct pat *pat;
+       struct refill_engine *engine = txn->engine_handle;
+       int columns = (1 + area->x1 - area->x0);
+       int rows = (1 + area->y1 - area->y0);
+       int i = columns*rows;
+       pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);
+       if (txn->last_pat)
+               txn->last_pat->next_pa = (uint32_t)pat_pa;
+       pat->area = *area;
+       /* adjust Y coordinates based off of container parameters */
+       pat->area.y0 += engine->tcm->y_offset;
+       pat->area.y1 += engine->tcm->y_offset;
+       pat->ctrl = (struct pat_ctrl){
+                       .start = 1,
+                       .lut_id = engine->tcm->lut_id,
+               };
+       data = alloc_dma(txn, 4*i, &pat->data_pa);
+       while (i--) {
+               int n = i + roll;
+               if (n >= npages)
+                       n -= npages;
+               data[i] = (pages && pages[n]) ?
+                       page_to_phys(pages[n]) : engine->dmm->dummy_pa;
+       }
+       txn->last_pat = pat;
+       return;
+ }
+ /**
+  * Commit the DMM transaction.
+  */
+ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
+ {
+       int ret = 0;
+       struct refill_engine *engine = txn->engine_handle;
+       struct dmm *dmm = engine->dmm;
+       if (!txn->last_pat) {
+               dev_err(engine->dmm->dev, "need at least one txn\n");
+               ret = -EINVAL;
+               goto cleanup;
+       }
+       txn->last_pat->next_pa = 0;
+       /* write to PAT_DESCR to clear out any pending transaction */
+       writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]);
+       /* wait for engine ready: */
+       ret = wait_status(engine, DMM_PATSTATUS_READY);
+       if (ret) {
+               ret = -EFAULT;
+               goto cleanup;
+       }
+       /* mark whether it is async to denote list management in IRQ handler */
+       engine->async = wait ? false : true;
+       /* kick reload */
+       writel(engine->refill_pa,
+               dmm->base + reg[PAT_DESCR][engine->id]);
+       if (wait) {
+               if (wait_event_interruptible_timeout(engine->wait_for_refill,
+                               wait_status(engine, DMM_PATSTATUS_READY) == 0,
+                               msecs_to_jiffies(1)) <= 0) {
+                       dev_err(dmm->dev, "timed out waiting for done\n");
+                       ret = -ETIMEDOUT;
+               }
+       }
+ cleanup:
+       /* only place engine back on list if we are done with it */
+       if (ret || wait)
+               release_engine(engine);
+       return ret;
+ }
+ /*
+  * DMM programming
+  */
+ static int fill(struct tcm_area *area, struct page **pages,
+               uint32_t npages, uint32_t roll, bool wait)
+ {
+       int ret = 0;
+       struct tcm_area slice, area_s;
+       struct dmm_txn *txn;
+       txn = dmm_txn_init(omap_dmm, area->tcm);
+       if (IS_ERR_OR_NULL(txn))
+               return -ENOMEM;
+       tcm_for_each_slice(slice, *area, area_s) {
+               struct pat_area p_area = {
+                               .x0 = slice.p0.x,  .y0 = slice.p0.y,
+                               .x1 = slice.p1.x,  .y1 = slice.p1.y,
+               };
+               dmm_txn_append(txn, &p_area, pages, npages, roll);
+               roll += tcm_sizeof(slice);
+       }
+       ret = dmm_txn_commit(txn, wait);
+       return ret;
+ }
+ /*
+  * Pin/unpin
+  */
+ /* note: slots for which pages[i] == NULL are filled w/ dummy page
+  */
+ int tiler_pin(struct tiler_block *block, struct page **pages,
+               uint32_t npages, uint32_t roll, bool wait)
+ {
+       int ret;
+       ret = fill(&block->area, pages, npages, roll, wait);
+       if (ret)
+               tiler_unpin(block);
+       return ret;
+ }
+ int tiler_unpin(struct tiler_block *block)
+ {
+       return fill(&block->area, NULL, 0, 0, false);
+ }
+ /*
+  * Reserve/release
+  */
+ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
+               uint16_t h, uint16_t align)
+ {
+       struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+       u32 min_align = 128;
+       int ret;
+       unsigned long flags;
+       BUG_ON(!validfmt(fmt));
+       /* convert width/height to slots */
+       w = DIV_ROUND_UP(w, geom[fmt].slot_w);
+       h = DIV_ROUND_UP(h, geom[fmt].slot_h);
+       /* convert alignment to slots */
+       min_align = max(min_align, (geom[fmt].slot_w * geom[fmt].cpp));
+       align = ALIGN(align, min_align);
+       align /= geom[fmt].slot_w * geom[fmt].cpp;
+       block->fmt = fmt;
+       ret = tcm_reserve_2d(containers[fmt], w, h, align, &block->area);
+       if (ret) {
+               kfree(block);
+               return ERR_PTR(-ENOMEM);
+       }
+       /* add to allocation list */
+       spin_lock_irqsave(&list_lock, flags);
+       list_add(&block->alloc_node, &omap_dmm->alloc_head);
+       spin_unlock_irqrestore(&list_lock, flags);
+       return block;
+ }
+ struct tiler_block *tiler_reserve_1d(size_t size)
+ {
+       struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+       int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       unsigned long flags;
+       if (!block)
+               return ERR_PTR(-ENOMEM);
+       block->fmt = TILFMT_PAGE;
+       if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
+                               &block->area)) {
+               kfree(block);
+               return ERR_PTR(-ENOMEM);
+       }
+       spin_lock_irqsave(&list_lock, flags);
+       list_add(&block->alloc_node, &omap_dmm->alloc_head);
+       spin_unlock_irqrestore(&list_lock, flags);
+       return block;
+ }
+ /* note: if you have pin'd pages, you should have already unpin'd first! */
+ int tiler_release(struct tiler_block *block)
+ {
+       int ret = tcm_free(&block->area);
+       unsigned long flags;
+       if (block->area.tcm)
+               dev_err(omap_dmm->dev, "failed to release block\n");
+       spin_lock_irqsave(&list_lock, flags);
+       list_del(&block->alloc_node);
+       spin_unlock_irqrestore(&list_lock, flags);
+       kfree(block);
+       return ret;
+ }
+ /*
+  * Utils
+  */
+ /* calculate the tiler space address of a pixel in a view orientation...
+  * below description copied from the display subsystem section of TRM:
+  *
+  * When the TILER is addressed, the bits:
+  *   [28:27] = 0x0 for 8-bit tiled
+  *             0x1 for 16-bit tiled
+  *             0x2 for 32-bit tiled
+  *             0x3 for page mode
+  *   [31:29] = 0x0 for 0-degree view
+  *             0x1 for 180-degree view + mirroring
+  *             0x2 for 0-degree view + mirroring
+  *             0x3 for 180-degree view
+  *             0x4 for 270-degree view + mirroring
+  *             0x5 for 270-degree view
+  *             0x6 for 90-degree view
+  *             0x7 for 90-degree view + mirroring
+  * Otherwise the bits indicated the corresponding bit address to access
+  * the SDRAM.
+  */
+ static u32 tiler_get_address(enum tiler_fmt fmt, u32 orient, u32 x, u32 y)
+ {
+       u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;
+       x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
+       y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
+       alignment = geom[fmt].x_shft + geom[fmt].y_shft;
+       /* validate coordinate */
+       x_mask = MASK(x_bits);
+       y_mask = MASK(y_bits);
+       if (x < 0 || x > x_mask || y < 0 || y > y_mask) {
+               DBG("invalid coords: %u < 0 || %u > %u || %u < 0 || %u > %u",
+                               x, x, x_mask, y, y, y_mask);
+               return 0;
+       }
+       /* account for mirroring */
+       if (orient & MASK_X_INVERT)
+               x ^= x_mask;
+       if (orient & MASK_Y_INVERT)
+               y ^= y_mask;
+       /* get coordinate address */
+       if (orient & MASK_XY_FLIP)
+               tmp = ((x << y_bits) + y);
+       else
+               tmp = ((y << x_bits) + x);
+       return TIL_ADDR((tmp << alignment), orient, fmt);
+ }
+ dma_addr_t tiler_ssptr(struct tiler_block *block)
+ {
+       BUG_ON(!validfmt(block->fmt));
+       return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
+                       block->area.p0.x * geom[block->fmt].slot_w,
+                       block->area.p0.y * geom[block->fmt].slot_h);
+ }
+ dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
+               uint32_t x, uint32_t y)
+ {
+       struct tcm_pt *p = &block->area.p0;
+       BUG_ON(!validfmt(block->fmt));
+       return tiler_get_address(block->fmt, orient,
+                       (p->x * geom[block->fmt].slot_w) + x,
+                       (p->y * geom[block->fmt].slot_h) + y);
+ }
+ void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
+ {
+       BUG_ON(!validfmt(fmt));
+       *w = round_up(*w, geom[fmt].slot_w);
+       *h = round_up(*h, geom[fmt].slot_h);
+ }
+ uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
+ {
+       BUG_ON(!validfmt(fmt));
+       if (orient & MASK_XY_FLIP)
+               return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
+       else
+               return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
+ }
+ size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
+ {
+       tiler_align(fmt, &w, &h);
+       return geom[fmt].cpp * w * h;
+ }
+ size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
+ {
+       BUG_ON(!validfmt(fmt));
+       return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
+ }
+ bool dmm_is_available(void)
+ {
+       return omap_dmm ? true : false;
+ }
+ static int omap_dmm_remove(struct platform_device *dev)
+ {
+       struct tiler_block *block, *_block;
+       int i;
+       unsigned long flags;
+       if (omap_dmm) {
+               /* free all area regions */
+               spin_lock_irqsave(&list_lock, flags);
+               list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
+                                       alloc_node) {
+                       list_del(&block->alloc_node);
+                       kfree(block);
+               }
+               spin_unlock_irqrestore(&list_lock, flags);
+               for (i = 0; i < omap_dmm->num_lut; i++)
+                       if (omap_dmm->tcm && omap_dmm->tcm[i])
+                               omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
+               kfree(omap_dmm->tcm);
+               kfree(omap_dmm->engines);
+               if (omap_dmm->refill_va)
+                       dma_free_writecombine(omap_dmm->dev,
+                               REFILL_BUFFER_SIZE * omap_dmm->num_engines,
+                               omap_dmm->refill_va,
+                               omap_dmm->refill_pa);
+               if (omap_dmm->dummy_page)
+                       __free_page(omap_dmm->dummy_page);
+               if (omap_dmm->irq > 0)
+                       free_irq(omap_dmm->irq, omap_dmm);
+               iounmap(omap_dmm->base);
+               kfree(omap_dmm);
+               omap_dmm = NULL;
+       }
+       return 0;
+ }
+ static int omap_dmm_probe(struct platform_device *dev)
+ {
+       int ret = -EFAULT, i;
+       struct tcm_area area = {0};
+       u32 hwinfo, pat_geom;
+       struct resource *mem;
+       omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
 -      }
++      if (!omap_dmm)
+               goto fail;
 -      omap_dmm->engines = kzalloc(
 -                      omap_dmm->num_engines * sizeof(struct refill_engine),
 -                      GFP_KERNEL);
+       /* initialize lists */
+       INIT_LIST_HEAD(&omap_dmm->alloc_head);
+       INIT_LIST_HEAD(&omap_dmm->idle_head);
+       init_waitqueue_head(&omap_dmm->engine_queue);
+       /* lookup hwmod data - base address and irq */
+       mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+       if (!mem) {
+               dev_err(&dev->dev, "failed to get base address resource\n");
+               goto fail;
+       }
+       omap_dmm->base = ioremap(mem->start, SZ_2K);
+       if (!omap_dmm->base) {
+               dev_err(&dev->dev, "failed to get dmm base address\n");
+               goto fail;
+       }
+       omap_dmm->irq = platform_get_irq(dev, 0);
+       if (omap_dmm->irq < 0) {
+               dev_err(&dev->dev, "failed to get IRQ resource\n");
+               goto fail;
+       }
+       omap_dmm->dev = &dev->dev;
+       hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
+       omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
+       omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
+       omap_dmm->container_width = 256;
+       omap_dmm->container_height = 128;
+       atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);
+       /* read out actual LUT width and height */
+       pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
+       omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
+       omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;
+       /* increment LUT by one if on OMAP5 */
+       /* LUT has twice the height, and is split into a separate container */
+       if (omap_dmm->lut_height != omap_dmm->container_height)
+               omap_dmm->num_lut++;
+       /* initialize DMM registers */
+       writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__0);
+       writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__1);
+       writel(0x80808080, omap_dmm->base + DMM_PAT_VIEW_MAP__0);
+       writel(0x80000000, omap_dmm->base + DMM_PAT_VIEW_MAP_BASE);
+       writel(0x88888888, omap_dmm->base + DMM_TILER_OR__0);
+       writel(0x88888888, omap_dmm->base + DMM_TILER_OR__1);
+       ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
+                               "omap_dmm_irq_handler", omap_dmm);
+       if (ret) {
+               dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
+                       omap_dmm->irq, ret);
+               omap_dmm->irq = -1;
+               goto fail;
+       }
+       /* Enable all interrupts for each refill engine except
+        * ERR_LUT_MISS<n> (which is just advisory, and we don't care
+        * about because we want to be able to refill live scanout
+        * buffers for accelerated pan/scroll) and FILL_DSC<n> which
+        * we just generally don't care about.
+        */
+       writel(0x7e7e7e7e, omap_dmm->base + DMM_PAT_IRQENABLE_SET);
+       omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
+       if (!omap_dmm->dummy_page) {
+               dev_err(&dev->dev, "could not allocate dummy page\n");
+               ret = -ENOMEM;
+               goto fail;
+       }
+       /* set dma mask for device */
+       /* NOTE: this is a workaround for the hwmod not initializing properly */
+       dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
+       /* alloc refill memory */
+       omap_dmm->refill_va = dma_alloc_writecombine(&dev->dev,
+                               REFILL_BUFFER_SIZE * omap_dmm->num_engines,
+                               &omap_dmm->refill_pa, GFP_KERNEL);
+       if (!omap_dmm->refill_va) {
+               dev_err(&dev->dev, "could not allocate refill memory\n");
+               goto fail;
+       }
+       /* alloc engines */
 -              dev_err(&dev->dev, "could not allocate engines\n");
++      omap_dmm->engines = kcalloc(omap_dmm->num_engines,
++                                  sizeof(struct refill_engine), GFP_KERNEL);
+       if (!omap_dmm->engines) {
 -      omap_dmm->tcm = kzalloc(omap_dmm->num_lut * sizeof(*omap_dmm->tcm),
+               ret = -ENOMEM;
+               goto fail;
+       }
+       for (i = 0; i < omap_dmm->num_engines; i++) {
+               omap_dmm->engines[i].id = i;
+               omap_dmm->engines[i].dmm = omap_dmm;
+               omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
+                                               (REFILL_BUFFER_SIZE * i);
+               omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
+                                               (REFILL_BUFFER_SIZE * i);
+               init_waitqueue_head(&omap_dmm->engines[i].wait_for_refill);
+               list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
+       }
 -              dev_err(&dev->dev, "failed to allocate lut ptrs\n");
++      omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm),
+                               GFP_KERNEL);
+       if (!omap_dmm->tcm) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+       /* init containers */
+       /* Each LUT is associated with a TCM (container manager).  We use the
+          lut_id to identify the correct LUT for programming during refill
+          operations */
+       for (i = 0; i < omap_dmm->num_lut; i++) {
+               omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
+                                               omap_dmm->container_height,
+                                               NULL);
+               if (!omap_dmm->tcm[i]) {
+                       dev_err(&dev->dev, "failed to allocate container\n");
+                       ret = -ENOMEM;
+                       goto fail;
+               }
+               omap_dmm->tcm[i]->lut_id = i;
+       }
+       /* assign access mode containers to applicable tcm container */
+       /* OMAP 4 has 1 container for all 4 views */
+       /* OMAP 5 has 2 containers, 1 for 2D and 1 for 1D */
+       containers[TILFMT_8BIT] = omap_dmm->tcm[0];
+       containers[TILFMT_16BIT] = omap_dmm->tcm[0];
+       containers[TILFMT_32BIT] = omap_dmm->tcm[0];
+       if (omap_dmm->container_height != omap_dmm->lut_height) {
+               /* second LUT is used for PAGE mode.  Programming must use
+                  y offset that is added to all y coordinates.  LUT id is still
+                  0, because it is the same LUT, just the upper 128 lines */
+               containers[TILFMT_PAGE] = omap_dmm->tcm[1];
+               omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
+               omap_dmm->tcm[1]->lut_id = 0;
+       } else {
+               containers[TILFMT_PAGE] = omap_dmm->tcm[0];
+       }
+       area = (struct tcm_area) {
+               .tcm = NULL,
+               .p1.x = omap_dmm->container_width - 1,
+               .p1.y = omap_dmm->container_height - 1,
+       };
+       /* initialize all LUTs to dummy page entries */
+       for (i = 0; i < omap_dmm->num_lut; i++) {
+               area.tcm = omap_dmm->tcm[i];
+               if (fill(&area, NULL, 0, 0, true))
+                       dev_err(omap_dmm->dev, "refill failed");
+       }
+       dev_info(omap_dmm->dev, "initialized all PAT entries\n");
+       return 0;
+ fail:
+       if (omap_dmm_remove(dev))
+               dev_err(&dev->dev, "cleanup failed\n");
+       return ret;
+ }
+ /*
+  * debugfs support
+  */
+ #ifdef CONFIG_DEBUG_FS
+ static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
+                               "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
+ static const char *special = ".,:;'\"`~!^-+";
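+ /* For reference (illustrative only): in the dump below, each 2D block is
+  * drawn with a character from 'alphabet', 1D blocks appear as '<====>'
+  * runs, and free slots stay blank, e.g.:
+  *
+  *   000:aaaaaaaabbbb        <========>
+  *   001:aaaaaaaabbbb
+  */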
+ static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
+                                                       char c, bool ovw)
+ {
+       int x, y;
+       for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
+               for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
+                       if (map[y][x] == ' ' || ovw)
+                               map[y][x] = c;
+ }
+ static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
+                                                                       char c)
+ {
+       map[p->y / ydiv][p->x / xdiv] = c;
+ }
+ static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
+ {
+       return map[p->y / ydiv][p->x / xdiv];
+ }
+ static int map_width(int xdiv, int x0, int x1)
+ {
+       return (x1 / xdiv) - (x0 / xdiv) + 1;
+ }
+ static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
+ {
+       char *p = map[yd] + (x0 / xdiv);
+       int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
+       if (w >= 0) {
+               p += w;
+               while (*nice)
+                       *p++ = *nice++;
+       }
+ }
+ static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
+                                                       struct tcm_area *a)
+ {
+       sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
+       if (a->p0.y + 1 < a->p1.y) {
+               text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
+                                                       256 - 1);
+       } else if (a->p0.y < a->p1.y) {
+               if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
+                       text_map(map, xdiv, nice, a->p0.y / ydiv,
+                                       a->p0.x + xdiv, 256 - 1);
+               else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
+                       text_map(map, xdiv, nice, a->p1.y / ydiv,
+                                       0, a->p1.y - xdiv);
+       } else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
+               text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
+       }
+ }
+ static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
+                                                       struct tcm_area *a)
+ {
+       sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
+       if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
+               text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
+                                                       a->p0.x, a->p1.x);
+ }
+ int tiler_map_show(struct seq_file *s, void *arg)
+ {
+       int xdiv = 2, ydiv = 1;
+       char **map = NULL, *global_map;
+       struct tiler_block *block;
+       struct tcm_area a, p;
+       int i;
+       const char *m2d = alphabet;
+       const char *a2d = special;
+       const char *m2dp = m2d, *a2dp = a2d;
+       char nice[128];
+       int h_adj;
+       int w_adj;
+       unsigned long flags;
+       int lut_idx;
+       if (!omap_dmm) {
+               /* early return if dmm/tiler device is not initialized */
+               return 0;
+       }
+       h_adj = omap_dmm->container_height / ydiv;
+       w_adj = omap_dmm->container_width / xdiv;
+       map = kmalloc(h_adj * sizeof(*map), GFP_KERNEL);
+       global_map = kmalloc((w_adj + 1) * h_adj, GFP_KERNEL);
+       if (!map || !global_map)
+               goto error;
+       for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
+               memset(map, 0, h_adj * sizeof(*map));
+               memset(global_map, ' ', (w_adj + 1) * h_adj);
+               for (i = 0; i < omap_dmm->container_height; i++) {
+                       map[i] = global_map + i * (w_adj + 1);
+                       map[i][w_adj] = 0;
+               }
+               spin_lock_irqsave(&list_lock, flags);
+               list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
+                       if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
+                               if (block->fmt != TILFMT_PAGE) {
+                                       fill_map(map, xdiv, ydiv, &block->area,
+                                               *m2dp, true);
+                                       if (!*++a2dp)
+                                               a2dp = a2d;
+                                       if (!*++m2dp)
+                                               m2dp = m2d;
+                                       map_2d_info(map, xdiv, ydiv, nice,
+                                                       &block->area);
+                               } else {
+                                       bool start = read_map_pt(map, xdiv,
+                                               ydiv, &block->area.p0) == ' ';
+                                       bool end = read_map_pt(map, xdiv, ydiv,
+                                                       &block->area.p1) == ' ';
+                                       tcm_for_each_slice(a, block->area, p)
+                                               fill_map(map, xdiv, ydiv, &a,
+                                                       '=', true);
+                                       fill_map_pt(map, xdiv, ydiv,
+                                                       &block->area.p0,
+                                                       start ? '<' : 'X');
+                                       fill_map_pt(map, xdiv, ydiv,
+                                                       &block->area.p1,
+                                                       end ? '>' : 'X');
+                                       map_1d_info(map, xdiv, ydiv, nice,
+                                                       &block->area);
+                               }
+                       }
+               }
+               spin_unlock_irqrestore(&list_lock, flags);
+               if (s) {
+                       seq_printf(s, "CONTAINER %d DUMP BEGIN\n", lut_idx);
+                       for (i = 0; i < 128; i++)
+                               seq_printf(s, "%03d:%s\n", i, map[i]);
+                       seq_printf(s, "CONTAINER %d DUMP END\n", lut_idx);
+               } else {
+                       dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n",
+                               lut_idx);
+                       for (i = 0; i < 128; i++)
+                               dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
+                       dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n",
+                               lut_idx);
+               }
+       }
+ error:
+       kfree(map);
+       kfree(global_map);
+       return 0;
+ }
+ #endif
+ #ifdef CONFIG_PM
+ static int omap_dmm_resume(struct device *dev)
+ {
+       struct tcm_area area;
+       int i;
+       if (!omap_dmm)
+               return -ENODEV;
+       area = (struct tcm_area) {
+               .tcm = NULL,
+               .p1.x = omap_dmm->container_width - 1,
+               .p1.y = omap_dmm->container_height - 1,
+       };
+       /* initialize all LUTs to dummy page entries */
+       for (i = 0; i < omap_dmm->num_lut; i++) {
+               area.tcm = omap_dmm->tcm[i];
+               if (fill(&area, NULL, 0, 0, true))
+                       dev_err(dev, "refill failed");
+       }
+       return 0;
+ }
+ static const struct dev_pm_ops omap_dmm_pm_ops = {
+       .resume = omap_dmm_resume,
+ };
+ #endif
+ struct platform_driver omap_dmm_driver = {
+       .probe = omap_dmm_probe,
+       .remove = omap_dmm_remove,
+       .driver = {
+               .owner = THIS_MODULE,
+               .name = DMM_DRIVER_NAME,
+ #ifdef CONFIG_PM
+               .pm = &omap_dmm_pm_ops,
+ #endif
+       },
+ };
+ MODULE_LICENSE("GPL v2");
+ MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
+ MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");
+ MODULE_ALIAS("platform:" DMM_DRIVER_NAME);
index 0000000000000000000000000000000000000000,9083538bd16a8d0017fed2ad1fce4df5356acd62..079c54c6f94c974c966a32524cbd95210ed2572f
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,610 +1,608 @@@
 -      if (!priv) {
 -              dev_err(dev->dev, "could not allocate priv\n");
+ /*
+  * drivers/gpu/drm/omapdrm/omap_drv.c
+  *
+  * Copyright (C) 2011 Texas Instruments
+  * Author: Rob Clark <rob@ti.com>
+  *
+  * This program is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 as published by
+  * the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  * more details.
+  *
+  * You should have received a copy of the GNU General Public License along with
+  * this program.  If not, see <http://www.gnu.org/licenses/>.
+  */
+ #include "omap_drv.h"
+ #include "drm_crtc_helper.h"
+ #include "drm_fb_helper.h"
+ #include "omap_dmm_tiler.h"
+ #define DRIVER_NAME           MODULE_NAME
+ #define DRIVER_DESC           "OMAP DRM"
+ #define DRIVER_DATE           "20110917"
+ #define DRIVER_MAJOR          1
+ #define DRIVER_MINOR          0
+ #define DRIVER_PATCHLEVEL     0
+ static int num_crtc = CONFIG_DRM_OMAP_NUM_CRTCS;
+ MODULE_PARM_DESC(num_crtc, "Number of overlays to use as CRTCs");
+ module_param(num_crtc, int, 0600);
+ /*
+  * mode config funcs
+  */
+ /* Notes about mapping DSS and DRM entities:
+  *    CRTC:        overlay
+  *    encoder:     manager.. with some extension to allow one primary CRTC
+  *                 and zero or more video CRTC's to be mapped to one encoder?
+  *    connector:   dssdev.. manager can be attached/detached from different
+  *                 devices
+  */
+ static void omap_fb_output_poll_changed(struct drm_device *dev)
+ {
+       struct omap_drm_private *priv = dev->dev_private;
+       DBG("dev=%p", dev);
+       if (priv->fbdev)
+               drm_fb_helper_hotplug_event(priv->fbdev);
+ }
+ static const struct drm_mode_config_funcs omap_mode_config_funcs = {
+       .fb_create = omap_framebuffer_create,
+       .output_poll_changed = omap_fb_output_poll_changed,
+ };
+ static int get_connector_type(struct omap_dss_device *dssdev)
+ {
+       switch (dssdev->type) {
+       case OMAP_DISPLAY_TYPE_HDMI:
+               return DRM_MODE_CONNECTOR_HDMIA;
+       case OMAP_DISPLAY_TYPE_DPI:
+               if (!strcmp(dssdev->name, "dvi"))
+                       return DRM_MODE_CONNECTOR_DVID;
+               /* fallthrough */
+       default:
+               return DRM_MODE_CONNECTOR_Unknown;
+       }
+ }
+ static int omap_modeset_init(struct drm_device *dev)
+ {
+       struct omap_drm_private *priv = dev->dev_private;
+       struct omap_dss_device *dssdev = NULL;
+       int num_ovls = dss_feat_get_num_ovls();
+       int id;
+       drm_mode_config_init(dev);
+       omap_drm_irq_install(dev);
+       /*
+        * Create private planes and CRTCs for the last NUM_CRTCs overlay
+        * plus manager:
+        */
+       for (id = 0; id < min(num_crtc, num_ovls); id++) {
+               struct drm_plane *plane;
+               struct drm_crtc *crtc;
+               plane = omap_plane_init(dev, id, true);
+               crtc = omap_crtc_init(dev, plane, pipe2chan(id), id);
+               BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
+               priv->crtcs[id] = crtc;
+               priv->num_crtcs++;
+               priv->planes[id] = plane;
+               priv->num_planes++;
+       }
+       /*
+        * Create normal planes for the remaining overlays:
+        */
+       for (; id < num_ovls; id++) {
+               struct drm_plane *plane = omap_plane_init(dev, id, false);
+               BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
+               priv->planes[priv->num_planes++] = plane;
+       }
+       for_each_dss_dev(dssdev) {
+               struct drm_connector *connector;
+               struct drm_encoder *encoder;
+               if (!dssdev->driver) {
+                       dev_warn(dev->dev, "%s has no driver.. skipping it\n",
+                                       dssdev->name);
+                       return 0;
+               }
+               if (!(dssdev->driver->get_timings ||
+                                       dssdev->driver->read_edid)) {
+                       dev_warn(dev->dev, "%s driver does not support "
+                               "get_timings or read_edid.. skipping it!\n",
+                               dssdev->name);
+                       return 0;
+               }
+               encoder = omap_encoder_init(dev, dssdev);
+               if (!encoder) {
+                       dev_err(dev->dev, "could not create encoder: %s\n",
+                                       dssdev->name);
+                       return -ENOMEM;
+               }
+               connector = omap_connector_init(dev,
+                               get_connector_type(dssdev), dssdev, encoder);
+               if (!connector) {
+                       dev_err(dev->dev, "could not create connector: %s\n",
+                                       dssdev->name);
+                       return -ENOMEM;
+               }
+               BUG_ON(priv->num_encoders >= ARRAY_SIZE(priv->encoders));
+               BUG_ON(priv->num_connectors >= ARRAY_SIZE(priv->connectors));
+               priv->encoders[priv->num_encoders++] = encoder;
+               priv->connectors[priv->num_connectors++] = connector;
+               drm_mode_connector_attach_encoder(connector, encoder);
+               /* figure out which crtc's we can connect the encoder to: */
+               encoder->possible_crtcs = 0;
+               for (id = 0; id < priv->num_crtcs; id++) {
+                       enum omap_dss_output_id supported_outputs =
+                                       dss_feat_get_supported_outputs(pipe2chan(id));
+                       if (supported_outputs & dssdev->output->id)
+                               encoder->possible_crtcs |= (1 << id);
+               }
+       }
+       dev->mode_config.min_width = 32;
+       dev->mode_config.min_height = 32;
+       /* note: eventually will need some cpu_is_omapXYZ() type stuff here
+        * to fill in these limits properly on different OMAP generations..
+        */
+       dev->mode_config.max_width = 2048;
+       dev->mode_config.max_height = 2048;
+       dev->mode_config.funcs = &omap_mode_config_funcs;
+       return 0;
+ }
+ static void omap_modeset_free(struct drm_device *dev)
+ {
+       drm_mode_config_cleanup(dev);
+ }
+ /*
+  * drm ioctl funcs
+  */
+ static int ioctl_get_param(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+ {
+       struct omap_drm_private *priv = dev->dev_private;
+       struct drm_omap_param *args = data;
+       DBG("%p: param=%llu", dev, args->param);
+       switch (args->param) {
+       case OMAP_PARAM_CHIPSET_ID:
+               args->value = priv->omaprev;
+               break;
+       default:
+               DBG("unknown parameter %lld", args->param);
+               return -EINVAL;
+       }
+       return 0;
+ }
+ static int ioctl_set_param(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+ {
+       struct drm_omap_param *args = data;
+       switch (args->param) {
+       default:
+               DBG("unknown parameter %lld", args->param);
+               return -EINVAL;
+       }
+       return 0;
+ }
+ static int ioctl_gem_new(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+ {
+       struct drm_omap_gem_new *args = data;
+       VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
+                       args->size.bytes, args->flags);
+       return omap_gem_new_handle(dev, file_priv, args->size,
+                       args->flags, &args->handle);
+ }
+ static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+ {
+       struct drm_omap_gem_cpu_prep *args = data;
+       struct drm_gem_object *obj;
+       int ret;
+       VERB("%p:%p: handle=%d, op=%x", dev, file_priv, args->handle, args->op);
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (!obj)
+               return -ENOENT;
+       ret = omap_gem_op_sync(obj, args->op);
+       if (!ret)
+               ret = omap_gem_op_start(obj, args->op);
+       drm_gem_object_unreference_unlocked(obj);
+       return ret;
+ }
+ static int ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+ {
+       struct drm_omap_gem_cpu_fini *args = data;
+       struct drm_gem_object *obj;
+       int ret;
+       VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (!obj)
+               return -ENOENT;
+       /* XXX flushy, flushy */
+       ret = 0;
+       if (!ret)
+               ret = omap_gem_op_finish(obj, args->op);
+       drm_gem_object_unreference_unlocked(obj);
+       return ret;
+ }
+ static int ioctl_gem_info(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+ {
+       struct drm_omap_gem_info *args = data;
+       struct drm_gem_object *obj;
+       int ret = 0;
+       VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (!obj)
+               return -ENOENT;
+       args->size = omap_gem_mmap_size(obj);
+       args->offset = omap_gem_mmap_offset(obj);
+       drm_gem_object_unreference_unlocked(obj);
+       return ret;
+ }
+ struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
+       DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH),
+ };
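+ /* Illustrative sketch (not part of the patch): roughly how userspace could
+  * reach the OMAP_GEM_NEW handler above through libdrm.  Flag and struct
+  * names are from the omap_drm uapi header; error handling is omitted.
+  */
+ #if 0   /* userspace example */
+ #include <xf86drm.h>
+ #include <omap_drm.h>
+ 
+ static uint32_t example_gem_new(int fd, uint32_t nbytes)
+ {
+       struct drm_omap_gem_new req = {
+               .size.bytes = nbytes,
+               .flags = OMAP_BO_WC,
+       };
+ 
+       drmCommandWriteRead(fd, DRM_OMAP_GEM_NEW, &req, sizeof(req));
+       return req.handle;
+ }
+ #endif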
+ /*
+  * drm driver funcs
+  */
+ /**
+  * load - setup chip and create an initial config
+  * @dev: DRM device
+  * @flags: startup flags
+  *
+  * The driver load routine has to do several things:
+  *   - initialize the memory manager
+  *   - allocate initial config memory
+  *   - setup the DRM framebuffer with the allocated memory
+  */
+ static int dev_load(struct drm_device *dev, unsigned long flags)
+ {
+       struct omap_drm_platform_data *pdata = dev->dev->platform_data;
+       struct omap_drm_private *priv;
+       int ret;
+       DBG("load: dev=%p", dev);
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 -      }
++      if (!priv)
+               return -ENOMEM;
+       priv->omaprev = pdata->omaprev;
+       dev->dev_private = priv;
+       priv->wq = alloc_ordered_workqueue("omapdrm", 0);
+       INIT_LIST_HEAD(&priv->obj_list);
+       omap_gem_init(dev);
+       ret = omap_modeset_init(dev);
+       if (ret) {
+               dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret);
+               dev->dev_private = NULL;
+               kfree(priv);
+               return ret;
+       }
+       ret = drm_vblank_init(dev, priv->num_crtcs);
+       if (ret)
+               dev_warn(dev->dev, "could not init vblank\n");
+       priv->fbdev = omap_fbdev_init(dev);
+       if (!priv->fbdev) {
+               dev_warn(dev->dev, "omap_fbdev_init failed\n");
+               /* well, limp along without an fbdev.. maybe X11 will work? */
+       }
+       /* store off drm_device for use in pm ops */
+       dev_set_drvdata(dev->dev, dev);
+       drm_kms_helper_poll_init(dev);
+       return 0;
+ }
+ static int dev_unload(struct drm_device *dev)
+ {
+       struct omap_drm_private *priv = dev->dev_private;
+       DBG("unload: dev=%p", dev);
+       drm_kms_helper_poll_fini(dev);
+       drm_vblank_cleanup(dev);
+       omap_drm_irq_uninstall(dev);
+       omap_fbdev_free(dev);
+       omap_modeset_free(dev);
+       omap_gem_deinit(dev);
+       flush_workqueue(priv->wq);
+       destroy_workqueue(priv->wq);
+       kfree(dev->dev_private);
+       dev->dev_private = NULL;
+       dev_set_drvdata(dev->dev, NULL);
+       return 0;
+ }
+ static int dev_open(struct drm_device *dev, struct drm_file *file)
+ {
+       file->driver_priv = NULL;
+       DBG("open: dev=%p, file=%p", dev, file);
+       return 0;
+ }
+ static int dev_firstopen(struct drm_device *dev)
+ {
+       DBG("firstopen: dev=%p", dev);
+       return 0;
+ }
+ /**
+  * lastclose - clean up after all DRM clients have exited
+  * @dev: DRM device
+  *
+  * Take care of cleaning up after all DRM clients have exited.  In the
+  * mode setting case, we want to restore the kernel's initial mode (just
+  * in case the last client left us in a bad state).
+  */
+ static void dev_lastclose(struct drm_device *dev)
+ {
+       int i;
+       /* we don't support vga-switcheroo.. so just make sure the fbdev
+        * mode is active
+        */
+       struct omap_drm_private *priv = dev->dev_private;
+       int ret;
+       DBG("lastclose: dev=%p", dev);
+       if (priv->rotation_prop) {
+               /* need to restore default rotation state.. not sure
+                * if there is a cleaner way to restore properties to
+                * default state?  Maybe a flag that properties should
+                * automatically be restored to default state on
+                * lastclose?
+                */
+               for (i = 0; i < priv->num_crtcs; i++) {
+                       drm_object_property_set_value(&priv->crtcs[i]->base,
+                                       priv->rotation_prop, 0);
+               }
+               for (i = 0; i < priv->num_planes; i++) {
+                       drm_object_property_set_value(&priv->planes[i]->base,
+                                       priv->rotation_prop, 0);
+               }
+       }
+       drm_modeset_lock_all(dev);
+       ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+       drm_modeset_unlock_all(dev);
+       if (ret)
+               DBG("failed to restore crtc mode");
+ }
+ static void dev_preclose(struct drm_device *dev, struct drm_file *file)
+ {
+       DBG("preclose: dev=%p", dev);
+ }
+ static void dev_postclose(struct drm_device *dev, struct drm_file *file)
+ {
+       DBG("postclose: dev=%p, file=%p", dev, file);
+ }
+ static const struct vm_operations_struct omap_gem_vm_ops = {
+       .fault = omap_gem_fault,
+       .open = drm_gem_vm_open,
+       .close = drm_gem_vm_close,
+ };
+ static const struct file_operations omapdriver_fops = {
+               .owner = THIS_MODULE,
+               .open = drm_open,
+               .unlocked_ioctl = drm_ioctl,
+               .release = drm_release,
+               .mmap = omap_gem_mmap,
+               .poll = drm_poll,
+               .fasync = drm_fasync,
+               .read = drm_read,
+               .llseek = noop_llseek,
+ };
+ static struct drm_driver omap_drm_driver = {
+               .driver_features =
+                               DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+               .load = dev_load,
+               .unload = dev_unload,
+               .open = dev_open,
+               .firstopen = dev_firstopen,
+               .lastclose = dev_lastclose,
+               .preclose = dev_preclose,
+               .postclose = dev_postclose,
+               .get_vblank_counter = drm_vblank_count,
+               .enable_vblank = omap_irq_enable_vblank,
+               .disable_vblank = omap_irq_disable_vblank,
+               .irq_preinstall = omap_irq_preinstall,
+               .irq_postinstall = omap_irq_postinstall,
+               .irq_uninstall = omap_irq_uninstall,
+               .irq_handler = omap_irq_handler,
+ #ifdef CONFIG_DEBUG_FS
+               .debugfs_init = omap_debugfs_init,
+               .debugfs_cleanup = omap_debugfs_cleanup,
+ #endif
+               .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+               .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+               .gem_prime_export = omap_gem_prime_export,
+               .gem_prime_import = omap_gem_prime_import,
+               .gem_init_object = omap_gem_init_object,
+               .gem_free_object = omap_gem_free_object,
+               .gem_vm_ops = &omap_gem_vm_ops,
+               .dumb_create = omap_gem_dumb_create,
+               .dumb_map_offset = omap_gem_dumb_map_offset,
+               .dumb_destroy = omap_gem_dumb_destroy,
+               .ioctls = ioctls,
+               .num_ioctls = DRM_OMAP_NUM_IOCTLS,
+               .fops = &omapdriver_fops,
+               .name = DRIVER_NAME,
+               .desc = DRIVER_DESC,
+               .date = DRIVER_DATE,
+               .major = DRIVER_MAJOR,
+               .minor = DRIVER_MINOR,
+               .patchlevel = DRIVER_PATCHLEVEL,
+ };
+ static int pdev_suspend(struct platform_device *pDevice, pm_message_t state)
+ {
+       DBG("");
+       return 0;
+ }
+ static int pdev_resume(struct platform_device *device)
+ {
+       DBG("");
+       return 0;
+ }
+ static void pdev_shutdown(struct platform_device *device)
+ {
+       DBG("");
+ }
+ static int pdev_probe(struct platform_device *device)
+ {
+       DBG("%s", device->name);
+       return drm_platform_init(&omap_drm_driver, device);
+ }
+ static int pdev_remove(struct platform_device *device)
+ {
+       DBG("");
+       drm_platform_exit(&omap_drm_driver, device);
+       platform_driver_unregister(&omap_dmm_driver);
+       return 0;
+ }
+ #ifdef CONFIG_PM
+ static const struct dev_pm_ops omapdrm_pm_ops = {
+       .resume = omap_gem_resume,
+ };
+ #endif
+ struct platform_driver pdev = {
+               .driver = {
+                       .name = DRIVER_NAME,
+                       .owner = THIS_MODULE,
+ #ifdef CONFIG_PM
+                       .pm = &omapdrm_pm_ops,
+ #endif
+               },
+               .probe = pdev_probe,
+               .remove = pdev_remove,
+               .suspend = pdev_suspend,
+               .resume = pdev_resume,
+               .shutdown = pdev_shutdown,
+ };
+ static int __init omap_drm_init(void)
+ {
+       DBG("init");
+       if (platform_driver_register(&omap_dmm_driver)) {
+               /* we can continue on without DMM.. so not fatal */
+               dev_err(NULL, "DMM registration failed\n");
+       }
+       return platform_driver_register(&pdev);
+ }
+ static void __exit omap_drm_fini(void)
+ {
+       DBG("fini");
+       platform_driver_unregister(&pdev);
+ }
+ /* need late_initcall() so we load after dss_driver's are loaded */
+ late_initcall(omap_drm_init);
+ module_exit(omap_drm_fini);
+ MODULE_AUTHOR("Rob Clark <rob@ti.com>");
+ MODULE_DESCRIPTION("OMAP DRM Display Driver");
+ MODULE_ALIAS("platform:" DRIVER_NAME);
+ MODULE_LICENSE("GPL v2");
index 0000000000000000000000000000000000000000,7e1f2ab653729edc653438d43eca1619e8c49011..21d126d0317ebc31ad5b28938ac1ef0452e6bbe1
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,170 +1,168 @@@
 -      if (!omap_encoder) {
 -              dev_err(dev->dev, "could not allocate encoder\n");
+ /*
+  * drivers/gpu/drm/omapdrm/omap_encoder.c
+  *
+  * Copyright (C) 2011 Texas Instruments
+  * Author: Rob Clark <rob@ti.com>
+  *
+  * This program is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 as published by
+  * the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  * more details.
+  *
+  * You should have received a copy of the GNU General Public License along with
+  * this program.  If not, see <http://www.gnu.org/licenses/>.
+  */
+ #include "omap_drv.h"
+ #include "drm_crtc.h"
+ #include "drm_crtc_helper.h"
+ #include <linux/list.h>
+ /*
+  * encoder funcs
+  */
+ #define to_omap_encoder(x) container_of(x, struct omap_encoder, base)
+ /* The encoder and connector both map to same dssdev.. the encoder
+  * handles the 'active' parts, ie. anything that modifies the state
+  * of the hw, and the connector handles the 'read-only' parts, like
+  * detecting connection and reading edid.
+  */
+ struct omap_encoder {
+       struct drm_encoder base;
+       struct omap_dss_device *dssdev;
+ };
+ static void omap_encoder_destroy(struct drm_encoder *encoder)
+ {
+       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+       drm_encoder_cleanup(encoder);
+       kfree(omap_encoder);
+ }
+ static const struct drm_encoder_funcs omap_encoder_funcs = {
+       .destroy = omap_encoder_destroy,
+ };
+ /*
+  * The CRTC drm_crtc_helper_set_mode() doesn't really give us the right
+  * order.. the easiest way to work around this for now is to make all
+  * the encoder-helper hooks no-ops and have the omap_crtc code take care
+  * of the sequencing and call us in the right points.
+  *
+  * Eventually to handle connecting CRTCs to different encoders properly,
+  * either the CRTC helpers need to change or we need to replace
+  * drm_crtc_helper_set_mode(), but lets wait until atomic-modeset for
+  * that.
+  */
+ static void omap_encoder_dpms(struct drm_encoder *encoder, int mode)
+ {
+ }
+ static bool omap_encoder_mode_fixup(struct drm_encoder *encoder,
+                                 const struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+ {
+       return true;
+ }
+ static void omap_encoder_mode_set(struct drm_encoder *encoder,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+ {
+ }
+ static void omap_encoder_prepare(struct drm_encoder *encoder)
+ {
+ }
+ static void omap_encoder_commit(struct drm_encoder *encoder)
+ {
+ }
+ static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
+       .dpms = omap_encoder_dpms,
+       .mode_fixup = omap_encoder_mode_fixup,
+       .mode_set = omap_encoder_mode_set,
+       .prepare = omap_encoder_prepare,
+       .commit = omap_encoder_commit,
+ };
+ /*
+  * Instead of relying on the helpers for modeset, the omap_crtc code
+  * calls these functions in the proper sequence.
+  */
+ int omap_encoder_set_enabled(struct drm_encoder *encoder, bool enabled)
+ {
+       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+       struct omap_dss_device *dssdev = omap_encoder->dssdev;
+       struct omap_dss_driver *dssdrv = dssdev->driver;
+       if (enabled) {
+               return dssdrv->enable(dssdev);
+       } else {
+               dssdrv->disable(dssdev);
+               return 0;
+       }
+ }
+ int omap_encoder_update(struct drm_encoder *encoder,
+               struct omap_overlay_manager *mgr,
+               struct omap_video_timings *timings)
+ {
+       struct drm_device *dev = encoder->dev;
+       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+       struct omap_dss_device *dssdev = omap_encoder->dssdev;
+       struct omap_dss_driver *dssdrv = dssdev->driver;
+       int ret;
+       dssdev->output->manager = mgr;
+       ret = dssdrv->check_timings(dssdev, timings);
+       if (ret) {
+               dev_err(dev->dev, "could not set timings: %d\n", ret);
+               return ret;
+       }
+       dssdrv->set_timings(dssdev, timings);
+       return 0;
+ }
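+ /* Illustrative sketch (not part of the patch): the sequencing the comments
+  * above describe -- the omap_crtc code, not the helper no-ops, drives the
+  * dssdev, roughly (names and error handling omitted):
+  */
+ #if 0
+       omap_encoder_update(encoder, mgr, &timings);
+       omap_encoder_set_enabled(encoder, true);
+ #endif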
+ /* initialize encoder */
+ struct drm_encoder *omap_encoder_init(struct drm_device *dev,
+               struct omap_dss_device *dssdev)
+ {
+       struct drm_encoder *encoder = NULL;
+       struct omap_encoder *omap_encoder;
+       omap_encoder = kzalloc(sizeof(*omap_encoder), GFP_KERNEL);
 -      }
++      if (!omap_encoder)
+               goto fail;
+       omap_encoder->dssdev = dssdev;
+       encoder = &omap_encoder->base;
+       drm_encoder_init(dev, encoder, &omap_encoder_funcs,
+                        DRM_MODE_ENCODER_TMDS);
+       drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);
+       return encoder;
+ fail:
+       if (encoder)
+               omap_encoder_destroy(encoder);
+       return NULL;
+ }
index 0000000000000000000000000000000000000000,9d5f6f696c7275457abf6a34ddc1ec12b76fe04e..8031402e79516ac984ae24c4ca6ec6e74e2dc663
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,472 +1,471 @@@
 -              dev_err(dev->dev, "could not allocate fb\n");
+ /*
+  * drivers/gpu/drm/omapdrm/omap_fb.c
+  *
+  * Copyright (C) 2011 Texas Instruments
+  * Author: Rob Clark <rob@ti.com>
+  *
+  * This program is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 as published by
+  * the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  * more details.
+  *
+  * You should have received a copy of the GNU General Public License along with
+  * this program.  If not, see <http://www.gnu.org/licenses/>.
+  */
+ #include "omap_drv.h"
+ #include "omap_dmm_tiler.h"
+ #include "drm_crtc.h"
+ #include "drm_crtc_helper.h"
+ /*
+  * framebuffer funcs
+  */
+ /* per-format info: */
+ struct format {
+       enum omap_color_mode dss_format;
+       uint32_t pixel_format;
+       struct {
+               int stride_bpp;           /* this times width is stride */
+               int sub_y;                /* sub-sample in y dimension */
+       } planes[4];
+       bool yuv;
+ };
+ static const struct format formats[] = {
+       /* 16bpp [A]RGB: */
+       { OMAP_DSS_COLOR_RGB16,       DRM_FORMAT_RGB565,   {{2, 1}}, false }, /* RGB16-565 */
+       { OMAP_DSS_COLOR_RGB12U,      DRM_FORMAT_RGBX4444, {{2, 1}}, false }, /* RGB12x-4444 */
+       { OMAP_DSS_COLOR_RGBX16,      DRM_FORMAT_XRGB4444, {{2, 1}}, false }, /* xRGB12-4444 */
+       { OMAP_DSS_COLOR_RGBA16,      DRM_FORMAT_RGBA4444, {{2, 1}}, false }, /* RGBA12-4444 */
+       { OMAP_DSS_COLOR_ARGB16,      DRM_FORMAT_ARGB4444, {{2, 1}}, false }, /* ARGB16-4444 */
+       { OMAP_DSS_COLOR_XRGB16_1555, DRM_FORMAT_XRGB1555, {{2, 1}}, false }, /* xRGB15-1555 */
+       { OMAP_DSS_COLOR_ARGB16_1555, DRM_FORMAT_ARGB1555, {{2, 1}}, false }, /* ARGB16-1555 */
+       /* 24bpp RGB: */
+       { OMAP_DSS_COLOR_RGB24P,      DRM_FORMAT_RGB888,   {{3, 1}}, false }, /* RGB24-888 */
+       /* 32bpp [A]RGB: */
+       { OMAP_DSS_COLOR_RGBX32,      DRM_FORMAT_RGBX8888, {{4, 1}}, false }, /* RGBx24-8888 */
+       { OMAP_DSS_COLOR_RGB24U,      DRM_FORMAT_XRGB8888, {{4, 1}}, false }, /* xRGB24-8888 */
+       { OMAP_DSS_COLOR_RGBA32,      DRM_FORMAT_RGBA8888, {{4, 1}}, false }, /* RGBA32-8888 */
+       { OMAP_DSS_COLOR_ARGB32,      DRM_FORMAT_ARGB8888, {{4, 1}}, false }, /* ARGB32-8888 */
+       /* YUV: */
+       { OMAP_DSS_COLOR_NV12,        DRM_FORMAT_NV12,     {{1, 1}, {1, 2}}, true },
+       { OMAP_DSS_COLOR_YUV2,        DRM_FORMAT_YUYV,     {{2, 1}}, true },
+       { OMAP_DSS_COLOR_UYVY,        DRM_FORMAT_UYVY,     {{2, 1}}, true },
+ };
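+ /* Reading the table above (illustrative): the NV12 entry describes two
+  * planes -- Y at 1 byte/pixel with no vertical subsampling, and interleaved
+  * UV at 1 byte/pixel subsampled by 2 in y -- while the single-plane RGB
+  * formats only fill planes[0].
+  */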
+ /* convert from overlay's pixel formats bitmask to an array of fourcc's */
+ uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
+               uint32_t max_formats, enum omap_color_mode supported_modes)
+ {
+       uint32_t nformats = 0;
+       int i = 0;
+       for (i = 0; i < ARRAY_SIZE(formats) && nformats < max_formats; i++)
+               if (formats[i].dss_format & supported_modes)
+                       pixel_formats[nformats++] = formats[i].pixel_format;
+       return nformats;
+ }
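+ /* Illustrative sketch (not part of the patch): typical use of the helper
+  * above when building a plane's format list.  'supported_modes' stands in
+  * for whatever OMAP_DSS_COLOR_* bitmask the DSS reports for the overlay.
+  */
+ #if 0
+       uint32_t fourccs[32];
+       uint32_t nformats;
+ 
+       nformats = omap_framebuffer_get_formats(fourccs, ARRAY_SIZE(fourccs),
+                       supported_modes);
+       /* fourccs[0..nformats) can now be handed to the plane init code */
+ #endif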
+ /* per-plane info for the fb: */
+ struct plane {
+       struct drm_gem_object *bo;
+       uint32_t pitch;
+       uint32_t offset;
+       dma_addr_t paddr;
+ };
+ #define to_omap_framebuffer(x) container_of(x, struct omap_framebuffer, base)
+ struct omap_framebuffer {
+       struct drm_framebuffer base;
+       const struct format *format;
+       struct plane planes[4];
+ };
+ static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
+               struct drm_file *file_priv,
+               unsigned int *handle)
+ {
+       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+       return drm_gem_handle_create(file_priv,
+                       omap_fb->planes[0].bo, handle);
+ }
+ static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
+ {
+       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+       int i, n = drm_format_num_planes(fb->pixel_format);
+       DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
+       drm_framebuffer_cleanup(fb);
+       for (i = 0; i < n; i++) {
+               struct plane *plane = &omap_fb->planes[i];
+               if (plane->bo)
+                       drm_gem_object_unreference_unlocked(plane->bo);
+       }
+       kfree(omap_fb);
+ }
+ static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
+               struct drm_file *file_priv, unsigned flags, unsigned color,
+               struct drm_clip_rect *clips, unsigned num_clips)
+ {
+       int i;
+       for (i = 0; i < num_clips; i++) {
+               omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1,
+                                       clips[i].x2 - clips[i].x1,
+                                       clips[i].y2 - clips[i].y1);
+       }
+       return 0;
+ }
+ static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
+       .create_handle = omap_framebuffer_create_handle,
+       .destroy = omap_framebuffer_destroy,
+       .dirty = omap_framebuffer_dirty,
+ };
+ static uint32_t get_linear_addr(struct plane *plane,
+               const struct format *format, int n, int x, int y)
+ {
+       uint32_t offset;
+       offset = plane->offset +
+                       (x * format->planes[n].stride_bpp) +
+                       (y * plane->pitch / format->planes[n].sub_y);
+       return plane->paddr + offset;
+ }
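+ /* Worked example (illustrative): for an XRGB8888 plane (stride_bpp = 4,
+  * sub_y = 1) with pitch 8192 and a source window starting at (16, 10), the
+  * helper above returns paddr + offset + 16 * 4 + 10 * 8192.
+  */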
+ /* update ovl info for scanout, handles cases of multi-planar fb's, etc.
+  */
+ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
+               struct omap_drm_window *win, struct omap_overlay_info *info)
+ {
+       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+       const struct format *format = omap_fb->format;
+       struct plane *plane = &omap_fb->planes[0];
+       uint32_t x, y, orient = 0;
+       info->color_mode = format->dss_format;
+       info->pos_x      = win->crtc_x;
+       info->pos_y      = win->crtc_y;
+       info->out_width  = win->crtc_w;
+       info->out_height = win->crtc_h;
+       info->width      = win->src_w;
+       info->height     = win->src_h;
+       x = win->src_x;
+       y = win->src_y;
+       if (omap_gem_flags(plane->bo) & OMAP_BO_TILED) {
+               uint32_t w = win->src_w;
+               uint32_t h = win->src_h;
+               switch (win->rotation & 0xf) {
+               default:
+                       dev_err(fb->dev->dev, "invalid rotation: %02x",
+                                       (uint32_t)win->rotation);
+                       /* fallthru to default to no rotation */
+               case 0:
+               case BIT(DRM_ROTATE_0):
+                       orient = 0;
+                       break;
+               case BIT(DRM_ROTATE_90):
+                       orient = MASK_XY_FLIP | MASK_X_INVERT;
+                       break;
+               case BIT(DRM_ROTATE_180):
+                       orient = MASK_X_INVERT | MASK_Y_INVERT;
+                       break;
+               case BIT(DRM_ROTATE_270):
+                       orient = MASK_XY_FLIP | MASK_Y_INVERT;
+                       break;
+               }
+               if (win->rotation & BIT(DRM_REFLECT_X))
+                       orient ^= MASK_X_INVERT;
+               if (win->rotation & BIT(DRM_REFLECT_Y))
+                       orient ^= MASK_Y_INVERT;
+               /* adjust x,y offset for flip/invert: */
+               if (orient & MASK_XY_FLIP)
+                       swap(w, h);
+               if (orient & MASK_Y_INVERT)
+                       y += h - 1;
+               if (orient & MASK_X_INVERT)
+                       x += w - 1;
+               omap_gem_rotated_paddr(plane->bo, orient, x, y, &info->paddr);
+               info->rotation_type = OMAP_DSS_ROT_TILER;
+               info->screen_width  = omap_gem_tiled_stride(plane->bo, orient);
+       } else {
+               info->paddr         = get_linear_addr(plane, format, 0, x, y);
+               info->rotation_type = OMAP_DSS_ROT_DMA;
+               info->screen_width  = plane->pitch;
+       }
+       /* convert to pixels: */
+       info->screen_width /= format->planes[0].stride_bpp;
+       if (format->dss_format == OMAP_DSS_COLOR_NV12) {
+               plane = &omap_fb->planes[1];
+               if (info->rotation_type == OMAP_DSS_ROT_TILER) {
+                       WARN_ON(!(omap_gem_flags(plane->bo) & OMAP_BO_TILED));
+                       omap_gem_rotated_paddr(plane->bo, orient,
+                                       x/2, y/2, &info->p_uv_addr);
+               } else {
+                       info->p_uv_addr = get_linear_addr(plane, format, 1, x, y);
+               }
+       } else {
+               info->p_uv_addr = 0;
+       }
+ }
+ /* Called to unpin 'a' (if not NULL) and pin 'b' (if not NULL).  Note that
+  * buffers to unpin are just pushed to the unpin fifo so that the
+  * caller can defer unpin until vblank.
+  *
+  * Note if this fails (ie. something went very wrong!), all buffers are
+  * unpinned, and the caller disables the overlay.  We could have tried
+  * to revert back to the previous set of pinned buffers but if things are
+  * hosed there is no guarantee that would succeed.
+  */
+ int omap_framebuffer_replace(struct drm_framebuffer *a,
+               struct drm_framebuffer *b, void *arg,
+               void (*unpin)(void *arg, struct drm_gem_object *bo))
+ {
+       int ret = 0, i, na, nb;
+       struct omap_framebuffer *ofba = to_omap_framebuffer(a);
+       struct omap_framebuffer *ofbb = to_omap_framebuffer(b);
+       uint32_t pinned_mask = 0;
+       na = a ? drm_format_num_planes(a->pixel_format) : 0;
+       nb = b ? drm_format_num_planes(b->pixel_format) : 0;
+       for (i = 0; i < max(na, nb); i++) {
+               struct plane *pa, *pb;
+               pa = (i < na) ? &ofba->planes[i] : NULL;
+               pb = (i < nb) ? &ofbb->planes[i] : NULL;
+               if (pa)
+                       unpin(arg, pa->bo);
+               if (pb && !ret) {
+                       ret = omap_gem_get_paddr(pb->bo, &pb->paddr, true);
+                       if (!ret) {
+                               omap_gem_dma_sync(pb->bo, DMA_TO_DEVICE);
+                               pinned_mask |= (1 << i);
+                       }
+               }
+       }
+       if (ret) {
+               /* something went wrong.. unpin what has been pinned */
+               for (i = 0; i < nb; i++) {
+                       if (pinned_mask & (1 << i)) {
+                               struct plane *pb = &ofbb->planes[i];
+                               unpin(arg, pb->bo);
+                       }
+               }
+       }
+       return ret;
+ }
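+ /* Illustrative sketch (not part of the patch): the shape of the 'unpin'
+  * callback expected above.  It should only record the bo so the caller can
+  * defer the real unpin until vblank; 'example_pending' is hypothetical.
+  */
+ #if 0
+ static void example_unpin(void *arg, struct drm_gem_object *bo)
+ {
+       struct example_pending *pending = arg;
+ 
+       /* scanout may still reference this bo; just queue it for later */
+       pending->bos[pending->count++] = bo;
+ }
+ #endif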
+ struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
+ {
+       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+       if (p >= drm_format_num_planes(fb->pixel_format))
+               return NULL;
+       return omap_fb->planes[p].bo;
+ }
+ /* iterate thru all the connectors, returning ones that are attached
+  * to the same fb..
+  */
+ struct drm_connector *omap_framebuffer_get_next_connector(
+               struct drm_framebuffer *fb, struct drm_connector *from)
+ {
+       struct drm_device *dev = fb->dev;
+       struct list_head *connector_list = &dev->mode_config.connector_list;
+       struct drm_connector *connector = from;
+       if (!from)
+               return list_first_entry(connector_list, typeof(*from), head);
+       list_for_each_entry_from(connector, connector_list, head) {
+               if (connector != from) {
+                       struct drm_encoder *encoder = connector->encoder;
+                       struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
+                       if (crtc && crtc->fb == fb)
+                               return connector;
+               }
+       }
+       return NULL;
+ }
+ /* flush an area of the framebuffer (in case of manual update display that
+  * is not automatically flushed)
+  */
+ void omap_framebuffer_flush(struct drm_framebuffer *fb,
+               int x, int y, int w, int h)
+ {
+       struct drm_connector *connector = NULL;
+       VERB("flush: %d,%d %dx%d, fb=%p", x, y, w, h, fb);
+       while ((connector = omap_framebuffer_get_next_connector(fb, connector))) {
+               /* only consider connectors that are part of a chain */
+               if (connector->encoder && connector->encoder->crtc) {
+                       /* TODO: maybe this should propagate thru the crtc who
+                        * could do the coordinate translation..
+                        */
+                       struct drm_crtc *crtc = connector->encoder->crtc;
+                       int cx = max(0, x - crtc->x);
+                       int cy = max(0, y - crtc->y);
+                       int cw = w + (x - crtc->x) - cx;
+                       int ch = h + (y - crtc->y) - cy;
+                       omap_connector_flush(connector, cx, cy, cw, ch);
+               }
+       }
+ }
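+ /* Worked example (illustrative): with a crtc scanning out at fb offset
+  * (crtc->x, crtc->y) = (100, 0), a dirty rect at x = 90, w = 40 becomes
+  * cx = max(0, 90 - 100) = 0 and cw = 40 + (90 - 100) - 0 = 30 before it is
+  * passed to omap_connector_flush().
+  */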
+ #ifdef CONFIG_DEBUG_FS
+ void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
+ {
+       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+       int i, n = drm_format_num_planes(fb->pixel_format);
+       seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
+                       (char *)&fb->pixel_format);
+       for (i = 0; i < n; i++) {
+               struct plane *plane = &omap_fb->planes[i];
+               seq_printf(m, "   %d: offset=%d pitch=%d, obj: ",
+                               i, plane->offset, plane->pitch);
+               omap_gem_describe(plane->bo, m);
+       }
+ }
+ #endif
+ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
+               struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
+ {
+       struct drm_gem_object *bos[4];
+       struct drm_framebuffer *fb;
+       int ret;
+       ret = objects_lookup(dev, file, mode_cmd->pixel_format,
+                       bos, mode_cmd->handles);
+       if (ret)
+               return ERR_PTR(ret);
+       fb = omap_framebuffer_init(dev, mode_cmd, bos);
+       if (IS_ERR(fb)) {
+               int i, n = drm_format_num_planes(mode_cmd->pixel_format);
+               for (i = 0; i < n; i++)
+                       drm_gem_object_unreference_unlocked(bos[i]);
+               return fb;
+       }
+       return fb;
+ }
+ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+               struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
+ {
+       struct omap_framebuffer *omap_fb;
+       struct drm_framebuffer *fb = NULL;
+       const struct format *format = NULL;
+       int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
+       DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
+                       dev, mode_cmd, mode_cmd->width, mode_cmd->height,
+                       (char *)&mode_cmd->pixel_format);
+       for (i = 0; i < ARRAY_SIZE(formats); i++) {
+               if (formats[i].pixel_format == mode_cmd->pixel_format) {
+                       format = &formats[i];
+                       break;
+               }
+       }
+       if (!format) {
+               dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
+                               (char *)&mode_cmd->pixel_format);
+               ret = -EINVAL;
+               goto fail;
+       }
+       omap_fb = kzalloc(sizeof(*omap_fb), GFP_KERNEL);
+       if (!omap_fb) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+       fb = &omap_fb->base;
+       omap_fb->format = format;
+       for (i = 0; i < n; i++) {
+               struct plane *plane = &omap_fb->planes[i];
+               int size, pitch = mode_cmd->pitches[i];
+               if (pitch < (mode_cmd->width * format->planes[i].stride_bpp)) {
+                       dev_err(dev->dev, "provided buffer pitch is too small! %d < %d\n",
+                                       pitch, mode_cmd->width * format->planes[i].stride_bpp);
+                       ret = -EINVAL;
+                       goto fail;
+               }
+               size = pitch * mode_cmd->height / format->planes[i].sub_y;
+               if (size > (omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i])) {
+                       dev_err(dev->dev, "provided buffer object is too small! %d < %d\n",
+                                       bos[i]->size - mode_cmd->offsets[i], size);
+                       ret = -EINVAL;
+                       goto fail;
+               }
+               plane->bo     = bos[i];
+               plane->offset = mode_cmd->offsets[i];
+               plane->pitch  = pitch;
+               plane->paddr  = 0;
+       }
+       drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+       ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
+       if (ret) {
+               dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+               goto fail;
+       }
+       DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+       return fb;
+ fail:
+       if (fb)
+               omap_framebuffer_destroy(fb);
+       return ERR_PTR(ret);
+ }
index 0000000000000000000000000000000000000000,f0033bd3e4ae3b46249e9ee171326736481a24cf..b11ce609fcc218894471b9da564df2943b2cfa06
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,399 +1,397 @@@
 -      if (!fbdev) {
 -              dev_err(dev->dev, "could not allocate fbdev\n");
+ /*
+  * drivers/gpu/drm/omapdrm/omap_fbdev.c
+  *
+  * Copyright (C) 2011 Texas Instruments
+  * Author: Rob Clark <rob@ti.com>
+  *
+  * This program is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 as published by
+  * the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  * more details.
+  *
+  * You should have received a copy of the GNU General Public License along with
+  * this program.  If not, see <http://www.gnu.org/licenses/>.
+  */
+ #include "omap_drv.h"
+ #include "drm_crtc.h"
+ #include "drm_fb_helper.h"
+ MODULE_PARM_DESC(ywrap, "Enable ywrap scrolling (omap44xx and later, default 'y')");
+ static bool ywrap_enabled = true;
+ module_param_named(ywrap, ywrap_enabled, bool, 0644);
+ /*
+  * fbdev funcs, to implement legacy fbdev interface on top of drm driver
+  */
+ #define to_omap_fbdev(x) container_of(x, struct omap_fbdev, base)
+ struct omap_fbdev {
+       struct drm_fb_helper base;
+       struct drm_framebuffer *fb;
+       struct drm_gem_object *bo;
+       bool ywrap_enabled;
+       /* for deferred dmm roll when getting called in atomic ctx */
+       struct work_struct work;
+ };
+ static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h);
+ static struct drm_fb_helper *get_fb(struct fb_info *fbi);
+ static ssize_t omap_fbdev_write(struct fb_info *fbi, const char __user *buf,
+               size_t count, loff_t *ppos)
+ {
+       ssize_t res;
+       res = fb_sys_write(fbi, buf, count, ppos);
+       omap_fbdev_flush(fbi, 0, 0, fbi->var.xres, fbi->var.yres);
+       return res;
+ }
+ static void omap_fbdev_fillrect(struct fb_info *fbi,
+               const struct fb_fillrect *rect)
+ {
+       sys_fillrect(fbi, rect);
+       omap_fbdev_flush(fbi, rect->dx, rect->dy, rect->width, rect->height);
+ }
+ static void omap_fbdev_copyarea(struct fb_info *fbi,
+               const struct fb_copyarea *area)
+ {
+       sys_copyarea(fbi, area);
+       omap_fbdev_flush(fbi, area->dx, area->dy, area->width, area->height);
+ }
+ static void omap_fbdev_imageblit(struct fb_info *fbi,
+               const struct fb_image *image)
+ {
+       sys_imageblit(fbi, image);
+       omap_fbdev_flush(fbi, image->dx, image->dy,
+                               image->width, image->height);
+ }
+ static void pan_worker(struct work_struct *work)
+ {
+       struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work);
+       struct fb_info *fbi = fbdev->base.fbdev;
+       int npages;
+       /* DMM roll shifts in 4K pages: */
+       npages = fbi->fix.line_length >> PAGE_SHIFT;
+       omap_gem_roll(fbdev->bo, fbi->var.yoffset * npages);
+ }
+ static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
+               struct fb_info *fbi)
+ {
+       struct drm_fb_helper *helper = get_fb(fbi);
+       struct omap_fbdev *fbdev = to_omap_fbdev(helper);
+       if (!helper)
+               goto fallback;
+       if (!fbdev->ywrap_enabled)
+               goto fallback;
+       if (drm_can_sleep()) {
+               pan_worker(&fbdev->work);
+       } else {
+               struct omap_drm_private *priv = helper->dev->dev_private;
+               queue_work(priv->wq, &fbdev->work);
+       }
+       return 0;
+ fallback:
+       return drm_fb_helper_pan_display(var, fbi);
+ }
+ static struct fb_ops omap_fb_ops = {
+       .owner = THIS_MODULE,
+       /* Note: to properly handle manual update displays, we wrap the
+        * basic fbdev ops which write to the framebuffer
+        */
+       .fb_read = fb_sys_read,
+       .fb_write = omap_fbdev_write,
+       .fb_fillrect = omap_fbdev_fillrect,
+       .fb_copyarea = omap_fbdev_copyarea,
+       .fb_imageblit = omap_fbdev_imageblit,
+       .fb_check_var = drm_fb_helper_check_var,
+       .fb_set_par = drm_fb_helper_set_par,
+       .fb_pan_display = omap_fbdev_pan_display,
+       .fb_blank = drm_fb_helper_blank,
+       .fb_setcmap = drm_fb_helper_setcmap,
+ };
+ static int omap_fbdev_create(struct drm_fb_helper *helper,
+               struct drm_fb_helper_surface_size *sizes)
+ {
+       struct omap_fbdev *fbdev = to_omap_fbdev(helper);
+       struct drm_device *dev = helper->dev;
+       struct omap_drm_private *priv = dev->dev_private;
+       struct drm_framebuffer *fb = NULL;
+       union omap_gem_size gsize;
+       struct fb_info *fbi = NULL;
+       struct drm_mode_fb_cmd2 mode_cmd = {0};
+       dma_addr_t paddr;
+       int ret;
+       /* only doing ARGB32 since this is what is needed to alpha-blend
+        * with video overlays:
+        */
+       sizes->surface_bpp = 32;
+       sizes->surface_depth = 32;
+       DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
+                       sizes->surface_height, sizes->surface_bpp,
+                       sizes->fb_width, sizes->fb_height);
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+                       sizes->surface_depth);
+       mode_cmd.width = sizes->surface_width;
+       mode_cmd.height = sizes->surface_height;
+       mode_cmd.pitches[0] = align_pitch(
+                       mode_cmd.width * ((sizes->surface_bpp + 7) / 8),
+                       mode_cmd.width, sizes->surface_bpp);
+       fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
+       if (fbdev->ywrap_enabled) {
+               /* need to align pitch to page size if using DMM scrolling */
+               mode_cmd.pitches[0] = ALIGN(mode_cmd.pitches[0], PAGE_SIZE);
+       }
+       /* allocate backing bo */
+       gsize = (union omap_gem_size){
+               .bytes = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height),
+       };
+       DBG("allocating %d bytes for fb %d", gsize.bytes, dev->primary->index);
+       fbdev->bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
+       if (!fbdev->bo) {
+               dev_err(dev->dev, "failed to allocate buffer object\n");
+               ret = -ENOMEM;
+               goto fail;
+       }
+       fb = omap_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
+       if (IS_ERR(fb)) {
+               dev_err(dev->dev, "failed to allocate fb\n");
+               /* note: if fb creation failed, we can't rely on fb destroy
+                * to unref the bo:
+                */
+               drm_gem_object_unreference(fbdev->bo);
+               ret = PTR_ERR(fb);
+               goto fail;
+       }
+       /* note: this keeps the bo pinned.. which is perhaps not ideal,
+        * but is needed as long as we use fb_mmap() to mmap to userspace
+        * (since this happens using fix.smem_start).  Possibly we could
+        * implement our own mmap using GEM mmap support to avoid this
+        * (non-tiled buffer doesn't need to be pinned for fbcon to write
+        * to it).  Then we just need to be sure that we are able to re-
+        * pin it in case of an oops.
+        */
+       ret = omap_gem_get_paddr(fbdev->bo, &paddr, true);
+       if (ret) {
+               dev_err(dev->dev,
+                       "could not map (paddr)!  Skipping framebuffer alloc\n");
+               ret = -ENOMEM;
+               goto fail;
+       }
+       mutex_lock(&dev->struct_mutex);
+       fbi = framebuffer_alloc(0, dev->dev);
+       if (!fbi) {
+               dev_err(dev->dev, "failed to allocate fb info\n");
+               ret = -ENOMEM;
+               goto fail_unlock;
+       }
+       DBG("fbi=%p, dev=%p", fbi, dev);
+       fbdev->fb = fb;
+       helper->fb = fb;
+       helper->fbdev = fbi;
+       fbi->par = helper;
+       fbi->flags = FBINFO_DEFAULT;
+       fbi->fbops = &omap_fb_ops;
+       strcpy(fbi->fix.id, MODULE_NAME);
+       ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+       if (ret) {
+               ret = -ENOMEM;
+               goto fail_unlock;
+       }
+       drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+       drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+       dev->mode_config.fb_base = paddr;
+       fbi->screen_base = omap_gem_vaddr(fbdev->bo);
+       fbi->screen_size = fbdev->bo->size;
+       fbi->fix.smem_start = paddr;
+       fbi->fix.smem_len = fbdev->bo->size;
+       /* if we have DMM, then we can use it for scrolling by just
+        * shuffling pages around in DMM rather than doing sw blit.
+        */
+       if (fbdev->ywrap_enabled) {
+               DRM_INFO("Enabling DMM ywrap scrolling\n");
+               fbi->flags |= FBINFO_HWACCEL_YWRAP | FBINFO_READS_FAST;
+               fbi->fix.ywrapstep = 1;
+       }
+       DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+       DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+ fail_unlock:
+       mutex_unlock(&dev->struct_mutex);
+ fail:
+       if (ret) {
+               if (fbi)
+                       framebuffer_release(fbi);
+               if (fb) {
+                       drm_framebuffer_unregister_private(fb);
+                       drm_framebuffer_remove(fb);
+               }
+       }
+       return ret;
+ }
+ static void omap_crtc_fb_gamma_set(struct drm_crtc *crtc,
+               u16 red, u16 green, u16 blue, int regno)
+ {
+       DBG("fbdev: set gamma");
+ }
+ static void omap_crtc_fb_gamma_get(struct drm_crtc *crtc,
+               u16 *red, u16 *green, u16 *blue, int regno)
+ {
+       DBG("fbdev: get gamma");
+ }
+ static struct drm_fb_helper_funcs omap_fb_helper_funcs = {
+       .gamma_set = omap_crtc_fb_gamma_set,
+       .gamma_get = omap_crtc_fb_gamma_get,
+       .fb_probe = omap_fbdev_create,
+ };
+ static struct drm_fb_helper *get_fb(struct fb_info *fbi)
+ {
+       if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) {
+               /* these are not the fb's you're looking for */
+               return NULL;
+       }
+       return fbi->par;
+ }
+ /* flush an area of the framebuffer (in case of a manual-update display that
+  * is not automatically flushed)
+  */
+ static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h)
+ {
+       struct drm_fb_helper *helper = get_fb(fbi);
+       if (!helper)
+               return;
+       VERB("flush fbdev: %d,%d %dx%d, fbi=%p", x, y, w, h, fbi);
+       omap_framebuffer_flush(helper->fb, x, y, w, h);
+ }
+ /* initialize fbdev helper */
+ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
+ {
+       struct omap_drm_private *priv = dev->dev_private;
+       struct omap_fbdev *fbdev = NULL;
+       struct drm_fb_helper *helper;
+       int ret = 0;
+       fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
 -      }
++      if (!fbdev)
+               goto fail;
+       INIT_WORK(&fbdev->work, pan_worker);
+       helper = &fbdev->base;
+       helper->funcs = &omap_fb_helper_funcs;
+       ret = drm_fb_helper_init(dev, helper,
+                       priv->num_crtcs, priv->num_connectors);
+       if (ret) {
+               dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+               goto fail;
+       }
+       drm_fb_helper_single_add_all_connectors(helper);
+       /* disable all the possible outputs/crtcs before entering KMS mode */
+       drm_helper_disable_unused_functions(dev);
+       drm_fb_helper_initial_config(helper, 32);
+       priv->fbdev = helper;
+       return helper;
+ fail:
+       kfree(fbdev);
+       return NULL;
+ }
+ void omap_fbdev_free(struct drm_device *dev)
+ {
+       struct omap_drm_private *priv = dev->dev_private;
+       struct drm_fb_helper *helper = priv->fbdev;
+       struct omap_fbdev *fbdev;
+       struct fb_info *fbi;
+       DBG();
+       fbi = helper->fbdev;
+       /* only cleanup framebuffer if it is present */
+       if (fbi) {
+               unregister_framebuffer(fbi);
+               framebuffer_release(fbi);
+       }
+       drm_fb_helper_fini(helper);
+       fbdev = to_omap_fbdev(priv->fbdev);
+       /* this will free the backing object */
+       if (fbdev->fb) {
+               drm_framebuffer_unregister_private(fbdev->fb);
+               drm_framebuffer_remove(fbdev->fb);
+       }
+       kfree(fbdev);
+       priv->fbdev = NULL;
+ }
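
The ywrap path above never copies pixels: pan_worker() converts the fbdev y offset into a DMM roll, shuffling whole pages, which is why the pitch is page-aligned whenever ywrap is enabled.  A small sketch of that conversion, assuming the usual 4 KiB pages; yoffset_to_roll() is an illustrative name only.

#include <stdint.h>

#define ASSUMED_PAGE_SHIFT 12   /* 4 KiB pages assumed for illustration */

/* illustrative mirror of pan_worker(): one row of panning = pitch/PAGE_SIZE pages of roll */
static uint32_t yoffset_to_roll(uint32_t yoffset, uint32_t line_length)
{
        uint32_t pages_per_row = line_length >> ASSUMED_PAGE_SHIFT;

        return yoffset * pages_per_row;
}

With a 4096-byte pitch, for example, panning down one line rolls the DMM mapping by exactly one page.
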
index 0000000000000000000000000000000000000000,e8302b02691d5d0a5c0fa345e49f4000aa293611..ebbdf4132e9cb2175fabd6422ac17365eaa10d4a
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,1511 +1,1507 @@@
 -      if (!omap_obj) {
 -              dev_err(dev->dev, "could not allocate GEM object\n");
+ /*
+  * drivers/gpu/drm/omapdrm/omap_gem.c
+  *
+  * Copyright (C) 2011 Texas Instruments
+  * Author: Rob Clark <rob.clark@linaro.org>
+  *
+  * This program is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 as published by
+  * the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  * more details.
+  *
+  * You should have received a copy of the GNU General Public License along with
+  * this program.  If not, see <http://www.gnu.org/licenses/>.
+  */
+ #include <linux/spinlock.h>
+ #include <linux/shmem_fs.h>
+ #include "omap_drv.h"
+ #include "omap_dmm_tiler.h"
+ /* remove these once drm core helpers are merged */
+ struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+ void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+               bool dirty, bool accessed);
+ int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
+ /*
+  * GEM buffer object implementation.
+  */
+ #define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
+ /* note: we use upper 8 bits of flags for driver-internal flags: */
+ #define OMAP_BO_DMA           0x01000000      /* actually is physically contiguous */
+ #define OMAP_BO_EXT_SYNC      0x02000000      /* externally allocated sync object */
+ #define OMAP_BO_EXT_MEM       0x04000000      /* externally allocated memory */
+ struct omap_gem_object {
+       struct drm_gem_object base;
+       struct list_head mm_list;
+       uint32_t flags;
+       /** width/height for tiled formats (rounded up to slot boundaries) */
+       uint16_t width, height;
+       /** roll applied when mapping to DMM */
+       uint32_t roll;
+       /**
+        * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
+        * is set and the paddr is valid.  Also if the buffer is remapped in
+        * TILER and paddr_cnt > 0, then paddr is valid.  But if you are using
+        * the physical address and OMAP_BO_DMA is not set, then you should
+        * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
+        * not removed from under your feet.
+        *
+        * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA-capable
+        * buffer is requested, but doesn't guarantee that it is.  Use the
+        * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
+        * physical address.
+        */
+       dma_addr_t paddr;
+       /**
+        * # of users of paddr
+        */
+       uint32_t paddr_cnt;
+       /**
+        * tiler block used when buffer is remapped in DMM/TILER.
+        */
+       struct tiler_block *block;
+       /**
+        * Array of backing pages, if allocated.  Note that pages are never
+        * allocated for buffers originally allocated from contiguous memory
+        */
+       struct page **pages;
+       /** addresses corresponding to pages in above array */
+       dma_addr_t *addrs;
+       /**
+        * Virtual address, if mapped.
+        */
+       void *vaddr;
+       /**
+        * sync-object allocated on demand (if needed)
+        *
+        * Per-buffer sync-object for tracking pending and completed hw/dma
+        * read and write operations.  The layout in memory is dictated by
+        * the SGX firmware, which uses this information to stall the command
+        * stream if a surface is not ready yet.
+        *
+        * Note that when buffer is used by SGX, the sync-object needs to be
+        * allocated from a special heap of sync-objects.  This way many sync
+        * objects can be packed in a page, and not waste GPU virtual address
+        * space.  Because of this we have to have a omap_gem_set_sync_object()
+        * API to allow replacement of the syncobj after it has (potentially)
+        * already been allocated.  A bit ugly but I haven't thought of a
+        * better alternative.
+        */
+       struct {
+               uint32_t write_pending;
+               uint32_t write_complete;
+               uint32_t read_pending;
+               uint32_t read_complete;
+       } *sync;
+ };
+ static int get_pages(struct drm_gem_object *obj, struct page ***pages);
+ static uint64_t mmap_offset(struct drm_gem_object *obj);
+ /* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
+  * not necessarily pinned in TILER all the time, and (b) when they are
+  * they are not necessarily page aligned, we reserve one or more small
+  * regions in each of the 2d containers to use as a user-GART where we
+  * can create a second page-aligned mapping of parts of the buffer
+  * being accessed from userspace.
+  *
+  * Note that we could optimize slightly when we know that multiple
+  * tiler containers are backed by the same PAT.. but I'll leave that
+  * for later..
+  */
+ #define NUM_USERGART_ENTRIES 2
+ struct usergart_entry {
+       struct tiler_block *block;      /* the reserved tiler block */
+       dma_addr_t paddr;
+       struct drm_gem_object *obj;     /* the current pinned obj */
+       pgoff_t obj_pgoff;              /* page offset of obj currently
+                                          mapped in */
+ };
+ static struct {
+       struct usergart_entry entry[NUM_USERGART_ENTRIES];
+       int height;             /* height in rows */
+       int height_shift;       /* ilog2(height in rows) */
+       int slot_shift;         /* ilog2(width per slot) */
+       int stride_pfn;         /* stride in pages */
+       int last;               /* index of last used entry */
+ } *usergart;
+ static void evict_entry(struct drm_gem_object *obj,
+               enum tiler_fmt fmt, struct usergart_entry *entry)
+ {
+       if (obj->dev->dev_mapping) {
+               struct omap_gem_object *omap_obj = to_omap_bo(obj);
+               int n = usergart[fmt].height;
+               size_t size = PAGE_SIZE * n;
+               loff_t off = mmap_offset(obj) +
+                               (entry->obj_pgoff << PAGE_SHIFT);
+               const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+               if (m > 1) {
+                       int i;
+                       /* if stride > PAGE_SIZE then sparse mapping: */
+                       for (i = n; i > 0; i--) {
+                               unmap_mapping_range(obj->dev->dev_mapping,
+                                               off, PAGE_SIZE, 1);
+                               off += PAGE_SIZE * m;
+                       }
+               } else {
+                       unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
+               }
+       }
+       entry->obj = NULL;
+ }
+ /* Evict a buffer from usergart, if it is mapped there */
+ static void evict(struct drm_gem_object *obj)
+ {
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       if (omap_obj->flags & OMAP_BO_TILED) {
+               enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+               int i;
+               if (!usergart)
+                       return;
+               for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
+                       struct usergart_entry *entry = &usergart[fmt].entry[i];
+                       if (entry->obj == obj)
+                               evict_entry(obj, fmt, entry);
+               }
+       }
+ }
+ /* GEM objects can either be allocated from contiguous memory (in which
+  * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
+  * contiguous buffers can be remapped in TILER/DMM if they need to be
+  * contiguous... but we don't do this all the time to reduce pressure
+  * on TILER/DMM space when we know at allocation time that the buffer
+  * will need to be scanned out.
+  */
+ static inline bool is_shmem(struct drm_gem_object *obj)
+ {
+       return obj->filp != NULL;
+ }
+ /**
+  * shmem buffers that are mapped cached can simulate coherency by using
+  * page faulting to keep track of dirty pages
+  */
+ static inline bool is_cached_coherent(struct drm_gem_object *obj)
+ {
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       return is_shmem(obj) &&
+               ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
+ }
+ static DEFINE_SPINLOCK(sync_lock);
+ /** ensure backing pages are allocated */
+ static int omap_gem_attach_pages(struct drm_gem_object *obj)
+ {
+       struct drm_device *dev = obj->dev;
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       struct page **pages;
+       int npages = obj->size >> PAGE_SHIFT;
+       int i, ret;
+       dma_addr_t *addrs;
+       WARN_ON(omap_obj->pages);
+       /* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
+        * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
+        * we actually want CMA memory for it all anyways..
+        */
+       pages = _drm_gem_get_pages(obj, GFP_KERNEL);
+       if (IS_ERR(pages)) {
+               dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
+               return PTR_ERR(pages);
+       }
+       /* for non-cached buffers, ensure the new pages are clean because
+        * DSS, GPU, etc. are not cache coherent:
+        */
+       if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
+               addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
+               if (!addrs) {
+                       ret = -ENOMEM;
+                       goto free_pages;
+               }
+               for (i = 0; i < npages; i++) {
+                       addrs[i] = dma_map_page(dev->dev, pages[i],
+                                       0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+               }
+       } else {
+               addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
+               if (!addrs) {
+                       ret = -ENOMEM;
+                       goto free_pages;
+               }
+       }
+       omap_obj->addrs = addrs;
+       omap_obj->pages = pages;
+       return 0;
+ free_pages:
+       _drm_gem_put_pages(obj, pages, true, false);
+       return ret;
+ }
+ /** release backing pages */
+ static void omap_gem_detach_pages(struct drm_gem_object *obj)
+ {
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       /* for non-cached buffers, ensure the new pages are clean because
+        * DSS, GPU, etc. are not cache coherent:
+        */
+       if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
+               int i, npages = obj->size >> PAGE_SHIFT;
+               for (i = 0; i < npages; i++) {
+                       dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
+                                       PAGE_SIZE, DMA_BIDIRECTIONAL);
+               }
+       }
+       kfree(omap_obj->addrs);
+       omap_obj->addrs = NULL;
+       _drm_gem_put_pages(obj, omap_obj->pages, true, false);
+       omap_obj->pages = NULL;
+ }
+ /* get buffer flags */
+ uint32_t omap_gem_flags(struct drm_gem_object *obj)
+ {
+       return to_omap_bo(obj)->flags;
+ }
+ /** get mmap offset */
+ static uint64_t mmap_offset(struct drm_gem_object *obj)
+ {
+       struct drm_device *dev = obj->dev;
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+       if (!obj->map_list.map) {
+               /* Make it mmapable */
+               size_t size = omap_gem_mmap_size(obj);
+               int ret = _drm_gem_create_mmap_offset_size(obj, size);
+               if (ret) {
+                       dev_err(dev->dev, "could not allocate mmap offset\n");
+                       return 0;
+               }
+       }
+       return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
+ }
+ uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
+ {
+       uint64_t offset;
+       mutex_lock(&obj->dev->struct_mutex);
+       offset = mmap_offset(obj);
+       mutex_unlock(&obj->dev->struct_mutex);
+       return offset;
+ }
+ /** get mmap size */
+ size_t omap_gem_mmap_size(struct drm_gem_object *obj)
+ {
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       size_t size = obj->size;
+       if (omap_obj->flags & OMAP_BO_TILED) {
+               /* for tiled buffers, the virtual size has stride rounded up
+                * to 4kb.. (to hide the fact that row n+1 might start 16kb or
+                * 32kb later!).  But we don't back the entire buffer with
+                * pages, only the valid picture part.. so need to adjust for
+                * this in the size used to mmap and generate mmap offset
+                */
+               size = tiler_vsize(gem2fmt(omap_obj->flags),
+                               omap_obj->width, omap_obj->height);
+       }
+       return size;
+ }
+ /* get tiled size, returns -EINVAL if not tiled buffer */
+ int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
+ {
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       if (omap_obj->flags & OMAP_BO_TILED) {
+               *w = omap_obj->width;
+               *h = omap_obj->height;
+               return 0;
+       }
+       return -EINVAL;
+ }
+ /* Normal handling for the case of faulting in non-tiled buffers */
+ static int fault_1d(struct drm_gem_object *obj,
+               struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       unsigned long pfn;
+       pgoff_t pgoff;
+       /* We don't use vmf->pgoff since that has the fake offset: */
+       pgoff = ((unsigned long)vmf->virtual_address -
+                       vma->vm_start) >> PAGE_SHIFT;
+       if (omap_obj->pages) {
+               omap_gem_cpu_sync(obj, pgoff);
+               pfn = page_to_pfn(omap_obj->pages[pgoff]);
+       } else {
+               BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
+               pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
+       }
+       VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+                       pfn, pfn << PAGE_SHIFT);
+       return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+ }
+ /* Special handling for the case of faulting in 2d tiled buffers */
+ static int fault_2d(struct drm_gem_object *obj,
+               struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       struct usergart_entry *entry;
+       enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+       struct page *pages[64];  /* XXX is this too much to have on stack? */
+       unsigned long pfn;
+       pgoff_t pgoff, base_pgoff;
+       void __user *vaddr;
+       int i, ret, slots;
+       /*
+        * Note the height of the slot is also equal to the number of pages
+        * that need to be mapped in to fill a 4kb-wide CPU page.  If the slot
+        * height is 64, then 64 pages fill a 4kb wide by 64 row region.
+        */
+       const int n = usergart[fmt].height;
+       const int n_shift = usergart[fmt].height_shift;
+       /*
+        * If buffer width in bytes > PAGE_SIZE then the virtual stride is
+        * rounded up to the next multiple of PAGE_SIZE.. this needs to be taken
+        * into account in some of the math, so figure out virtual stride
+        * in pages
+        */
+       const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+       /* We don't use vmf->pgoff since that has the fake offset: */
+       pgoff = ((unsigned long)vmf->virtual_address -
+                       vma->vm_start) >> PAGE_SHIFT;
+       /*
+        * Actual address we start mapping at is rounded down to previous slot
+        * boundary in the y direction:
+        */
+       base_pgoff = round_down(pgoff, m << n_shift);
+       /* figure out buffer width in slots */
+       slots = omap_obj->width >> usergart[fmt].slot_shift;
+       vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
+       entry = &usergart[fmt].entry[usergart[fmt].last];
+       /* evict previous buffer using this usergart entry, if any: */
+       if (entry->obj)
+               evict_entry(entry->obj, fmt, entry);
+       entry->obj = obj;
+       entry->obj_pgoff = base_pgoff;
+       /* now convert base_pgoff to phys offset from virt offset: */
+       base_pgoff = (base_pgoff >> n_shift) * slots;
+       /* for wider-than 4k.. figure out which part of the slot-row we want: */
+       if (m > 1) {
+               int off = pgoff % m;
+               entry->obj_pgoff += off;
+               base_pgoff /= m;
+               slots = min(slots - (off << n_shift), n);
+               base_pgoff += off << n_shift;
+               vaddr += off << PAGE_SHIFT;
+       }
+       /*
+        * Map in pages. Beyond the valid pixel part of the buffer, we set
+        * pages[i] to NULL to get a dummy page mapped in.. if someone
+        * reads/writes it they will get random/undefined content, but at
+        * least it won't be corrupting whatever other random page used to
+        * be mapped in, or other undefined behavior.
+        */
+       memcpy(pages, &omap_obj->pages[base_pgoff],
+                       sizeof(struct page *) * slots);
+       memset(pages + slots, 0,
+                       sizeof(struct page *) * (n - slots));
+       ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
+       if (ret) {
+               dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
+               return ret;
+       }
+       pfn = entry->paddr >> PAGE_SHIFT;
+       VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+                       pfn, pfn << PAGE_SHIFT);
+       for (i = n; i > 0; i--) {
+               vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
+               pfn += usergart[fmt].stride_pfn;
+               vaddr += PAGE_SIZE * m;
+       }
+       /* simple round-robin: */
+       usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;
+       return 0;
+ }
+ /**
+  * omap_gem_fault             -       pagefault handler for GEM objects
+  * @vma: the VMA of the GEM object
+  * @vmf: fault detail
+  *
+  * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
+  * does most of the work for us including the actual map/unmap calls
+  * but we need to do the actual page work.
+  *
+  * The VMA was set up by GEM. In doing so it also ensured that the
+  * vma->vm_private_data points to the GEM object that is backing this
+  * mapping.
+  */
+ int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
+       struct drm_gem_object *obj = vma->vm_private_data;
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       struct drm_device *dev = obj->dev;
+       struct page **pages;
+       int ret;
+       /* Make sure we don't parallel update on a fault, nor move or remove
+        * something from beneath our feet
+        */
+       mutex_lock(&dev->struct_mutex);
+       /* if a shmem backed object, make sure we have pages attached now */
+       ret = get_pages(obj, &pages);
+       if (ret)
+               goto fail;
+       /* where should we do corresponding put_pages().. we are mapping
+        * the original page, rather than thru a GART, so we can't rely
+        * on eviction to trigger this.  But munmap() of all mappings should
+        * probably trigger put_pages()?
+        */
+       if (omap_obj->flags & OMAP_BO_TILED)
+               ret = fault_2d(obj, vma, vmf);
+       else
+               ret = fault_1d(obj, vma, vmf);
+ fail:
+       mutex_unlock(&dev->struct_mutex);
+       switch (ret) {
+       case 0:
+       case -ERESTARTSYS:
+       case -EINTR:
+               return VM_FAULT_NOPAGE;
+       case -ENOMEM:
+               return VM_FAULT_OOM;
+       default:
+               return VM_FAULT_SIGBUS;
+       }
+ }
+ /** We override mainly to fix up some of the vm mapping flags.. */
+ int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+ {
+       int ret;
+       ret = drm_gem_mmap(filp, vma);
+       if (ret) {
+               DBG("mmap failed: %d", ret);
+               return ret;
+       }
+       return omap_gem_mmap_obj(vma->vm_private_data, vma);
+ }
+ int omap_gem_mmap_obj(struct drm_gem_object *obj,
+               struct vm_area_struct *vma)
+ {
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       vma->vm_flags &= ~VM_PFNMAP;
+       vma->vm_flags |= VM_MIXEDMAP;
+       if (omap_obj->flags & OMAP_BO_WC) {
+               vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+       } else if (omap_obj->flags & OMAP_BO_UNCACHED) {
+               vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+       } else {
+               /*
+                * We do have some private objects, at least for scanout buffers
+                * on hardware without DMM/TILER.  But these are allocated write-
+                * combine
+                */
+               if (WARN_ON(!obj->filp))
+                       return -EINVAL;
+               /*
+                * Shunt off cached objs to shmem file so they have their own
+                * address_space (so unmap_mapping_range does what we want,
+                * in particular in the case of mmap'd dmabufs)
+                */
+               fput(vma->vm_file);
+               vma->vm_pgoff = 0;
+               vma->vm_file  = get_file(obj->filp);
+               vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+       }
+       return 0;
+ }
+ /**
+  * omap_gem_dumb_create       -       create a dumb buffer
+  * @file: our client file
+  * @dev: our device
+  * @args: the requested arguments copied from userspace
+  *
+  * Allocate a buffer suitable for use for a frame buffer of the
+  * form described by user space. Give userspace a handle by which
+  * to reference it.
+  */
+ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+               struct drm_mode_create_dumb *args)
+ {
+       union omap_gem_size gsize;
+       /* in case someone tries to feed us a completely bogus stride: */
+       args->pitch = align_pitch(args->pitch, args->width, args->bpp);
+       args->size = PAGE_ALIGN(args->pitch * args->height);
+       gsize = (union omap_gem_size){
+               .bytes = args->size,
+       };
+       return omap_gem_new_handle(dev, file, gsize,
+                       OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
+ }
+ /**
+  * omap_gem_dumb_destroy      -       destroy a dumb buffer
+  * @file: client file
+  * @dev: our DRM device
+  * @handle: the object handle
+  *
+  * Destroy a handle that was created via omap_gem_dumb_create.
+  */
+ int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+               uint32_t handle)
+ {
+       /* No special work needed, drop the reference and see what falls out */
+       return drm_gem_handle_delete(file, handle);
+ }
+ /**
+  * omap_gem_dumb_map  -       buffer mapping for dumb interface
+  * @file: our drm client file
+  * @dev: drm device
+  * @handle: GEM handle to the object (from dumb_create)
+  *
+  * Do the necessary setup to allow the mapping of the frame buffer
+  * into user memory. We don't have to do much here at the moment.
+  */
+ int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+               uint32_t handle, uint64_t *offset)
+ {
+       struct drm_gem_object *obj;
+       int ret = 0;
+       /* GEM does all our handle to object mapping */
+       obj = drm_gem_object_lookup(dev, file, handle);
+       if (obj == NULL) {
+               ret = -ENOENT;
+               goto fail;
+       }
+       *offset = omap_gem_mmap_offset(obj);
+       drm_gem_object_unreference_unlocked(obj);
+ fail:
+       return ret;
+ }
+ /* Set scrolling position.  This allows us to implement fast scrolling
+  * for console.
+  *
+  * Call only from non-atomic contexts.
+  */
+ int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
+ {
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       uint32_t npages = obj->size >> PAGE_SHIFT;
+       int ret = 0;
+       if (roll > npages) {
+               dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
+               return -EINVAL;
+       }
+       omap_obj->roll = roll;
+       mutex_lock(&obj->dev->struct_mutex);
+       /* if we aren't mapped yet, we don't need to do anything */
+       if (omap_obj->block) {
+               struct page **pages;
+               ret = get_pages(obj, &pages);
+               if (ret)
+                       goto fail;
+               ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
+               if (ret)
+                       dev_err(obj->dev->dev, "could not repin: %d\n", ret);
+       }
+ fail:
+       mutex_unlock(&obj->dev->struct_mutex);
+       return ret;
+ }
+ /* Sync the buffer for CPU access.. note pages should already be
+  * attached, ie. omap_gem_get_pages()
+  */
+ void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
+ {
+       struct drm_device *dev = obj->dev;
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
+               dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
+                               PAGE_SIZE, DMA_BIDIRECTIONAL);
+               omap_obj->addrs[pgoff] = 0;
+       }
+ }
+ /* sync the buffer for DMA access */
+ void omap_gem_dma_sync(struct drm_gem_object *obj,
+               enum dma_data_direction dir)
+ {
+       struct drm_device *dev = obj->dev;
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       if (is_cached_coherent(obj)) {
+               int i, npages = obj->size >> PAGE_SHIFT;
+               struct page **pages = omap_obj->pages;
+               bool dirty = false;
+               for (i = 0; i < npages; i++) {
+                       if (!omap_obj->addrs[i]) {
+                               omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
+                                               PAGE_SIZE, DMA_BIDIRECTIONAL);
+                               dirty = true;
+                       }
+               }
+               if (dirty) {
+                       unmap_mapping_range(obj->filp->f_mapping, 0,
+                                       omap_gem_mmap_size(obj), 1);
+               }
+       }
+ }
+ /* Get physical address for DMA.. if 'remap' is true, and the buffer is not
+  * already contiguous, remap it to pin in physically contiguous memory.. (ie.
+  * map in TILER)
+  */
+ int omap_gem_get_paddr(struct drm_gem_object *obj,
+               dma_addr_t *paddr, bool remap)
+ {
+       struct omap_drm_private *priv = obj->dev->dev_private;
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       int ret = 0;
+       mutex_lock(&obj->dev->struct_mutex);
+       if (remap && is_shmem(obj) && priv->has_dmm) {
+               if (omap_obj->paddr_cnt == 0) {
+                       struct page **pages;
+                       uint32_t npages = obj->size >> PAGE_SHIFT;
+                       enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+                       struct tiler_block *block;
+                       BUG_ON(omap_obj->block);
+                       ret = get_pages(obj, &pages);
+                       if (ret)
+                               goto fail;
+                       if (omap_obj->flags & OMAP_BO_TILED) {
+                               block = tiler_reserve_2d(fmt,
+                                               omap_obj->width,
+                                               omap_obj->height, 0);
+                       } else {
+                               block = tiler_reserve_1d(obj->size);
+                       }
+                       if (IS_ERR(block)) {
+                               ret = PTR_ERR(block);
+                               dev_err(obj->dev->dev,
+                                       "could not remap: %d (%d)\n", ret, fmt);
+                               goto fail;
+                       }
+                       /* TODO: enable async refill.. */
+                       ret = tiler_pin(block, pages, npages,
+                                       omap_obj->roll, true);
+                       if (ret) {
+                               tiler_release(block);
+                               dev_err(obj->dev->dev,
+                                               "could not pin: %d\n", ret);
+                               goto fail;
+                       }
+                       omap_obj->paddr = tiler_ssptr(block);
+                       omap_obj->block = block;
+                       DBG("got paddr: %08x", omap_obj->paddr);
+               }
+               omap_obj->paddr_cnt++;
+               *paddr = omap_obj->paddr;
+       } else if (omap_obj->flags & OMAP_BO_DMA) {
+               *paddr = omap_obj->paddr;
+       } else {
+               ret = -EINVAL;
+               goto fail;
+       }
+ fail:
+       mutex_unlock(&obj->dev->struct_mutex);
+       return ret;
+ }
+ /* Release physical address, when DMA is no longer being performed.. this
+  * could potentially unpin and unmap buffers from TILER
+  */
+ int omap_gem_put_paddr(struct drm_gem_object *obj)
+ {
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       int ret = 0;
+       mutex_lock(&obj->dev->struct_mutex);
+       if (omap_obj->paddr_cnt > 0) {
+               omap_obj->paddr_cnt--;
+               if (omap_obj->paddr_cnt == 0) {
+                       ret = tiler_unpin(omap_obj->block);
+                       if (ret) {
+                               dev_err(obj->dev->dev,
+                                       "could not unpin pages: %d\n", ret);
+                               goto fail;
+                       }
+                       ret = tiler_release(omap_obj->block);
+                       if (ret) {
+                               dev_err(obj->dev->dev,
+                                       "could not release unmap: %d\n", ret);
+                       }
+                       omap_obj->block = NULL;
+               }
+       }
+ fail:
+       mutex_unlock(&obj->dev->struct_mutex);
+       return ret;
+ }
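/*
 * Hypothetical caller, sketched purely to illustrate how the get/put pair
 * above is meant to be used: pin for the duration of the DMA, then drop the
 * pin so any TILER/DMM mapping can be reclaimed.  dma_from_bo_example() is
 * not part of the driver.
 */
static int dma_from_bo_example(struct drm_gem_object *obj)
{
        dma_addr_t paddr;
        int ret;

        ret = omap_gem_get_paddr(obj, &paddr, true);    /* pins, remapping via TILER if needed */
        if (ret)
                return ret;

        /* ... program the DMA engine with paddr and wait for completion ... */

        return omap_gem_put_paddr(obj);                 /* unpin once the hardware is done */
}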
+ /* Get rotated scanout address (only valid if already pinned), at the
+  * specified orientation and x,y offset from top-left corner of buffer
+  * (only valid for tiled 2d buffers)
+  */
+ int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
+               int x, int y, dma_addr_t *paddr)
+ {
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       int ret = -EINVAL;
+       mutex_lock(&obj->dev->struct_mutex);
+       if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
+                       (omap_obj->flags & OMAP_BO_TILED)) {
+               *paddr = tiler_tsptr(omap_obj->block, orient, x, y);
+               ret = 0;
+       }
+       mutex_unlock(&obj->dev->struct_mutex);
+       return ret;
+ }
+ /* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
+ int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
+ {
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       int ret = -EINVAL;
+       if (omap_obj->flags & OMAP_BO_TILED)
+               ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
+       return ret;
+ }
+ /* acquire pages when needed (for example, for DMA where a physically
+  * contiguous buffer is not required)
+  */
+ static int get_pages(struct drm_gem_object *obj, struct page ***pages)
+ {
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       int ret = 0;
+       if (is_shmem(obj) && !omap_obj->pages) {
+               ret = omap_gem_attach_pages(obj);
+               if (ret) {
+                       dev_err(obj->dev->dev, "could not attach pages\n");
+                       return ret;
+               }
+       }
+       /* TODO: even phys-contig.. we should have a list of pages? */
+       *pages = omap_obj->pages;
+       return 0;
+ }
+ /* if !remap, and we don't have pages backing, then fail, rather than
+  * increasing the pin count (which we don't really do yet anyways,
+  * because we don't support swapping pages back out).  And 'remap'
+  * might not be quite the right name, but I wanted to keep it working
+  * similarly to omap_gem_get_paddr().  Note though that mutex is not
+  * acquired if !remap (because this can be called in atomic ctxt),
+  * but probably omap_gem_get_paddr() should be changed to work in the
+  * same way.  If !remap, a matching omap_gem_put_pages() call is not
+  * required (and should not be made).
+  */
+ int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
+               bool remap)
+ {
+       int ret;
+       if (!remap) {
+               struct omap_gem_object *omap_obj = to_omap_bo(obj);
+               if (!omap_obj->pages)
+                       return -ENOMEM;
+               *pages = omap_obj->pages;
+               return 0;
+       }
+       mutex_lock(&obj->dev->struct_mutex);
+       ret = get_pages(obj, pages);
+       mutex_unlock(&obj->dev->struct_mutex);
+       return ret;
+ }
+ /* release pages when DMA no longer being performed */
+ int omap_gem_put_pages(struct drm_gem_object *obj)
+ {
+       /* do something here if we dynamically attach/detach pages.. at
+        * least they would no longer need to be pinned if everyone has
+        * released the pages..
+        */
+       return 0;
+ }
+ /* Get kernel virtual address for CPU access.. this more or less only
+  * exists for omap_fbdev.  This should be called with struct_mutex
+  * held.
+  */
+ void *omap_gem_vaddr(struct drm_gem_object *obj)
+ {
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+       if (!omap_obj->vaddr) {
+               struct page **pages;
+               int ret = get_pages(obj, &pages);
+               if (ret)
+                       return ERR_PTR(ret);
+               omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+                               VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+       }
+       return omap_obj->vaddr;
+ }
+ #ifdef CONFIG_PM
+ /* re-pin objects in DMM in resume path: */
+ int omap_gem_resume(struct device *dev)
+ {
+       struct drm_device *drm_dev = dev_get_drvdata(dev);
+       struct omap_drm_private *priv = drm_dev->dev_private;
+       struct omap_gem_object *omap_obj;
+       int ret = 0;
+       list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
+               if (omap_obj->block) {
+                       struct drm_gem_object *obj = &omap_obj->base;
+                       uint32_t npages = obj->size >> PAGE_SHIFT;
+                       WARN_ON(!omap_obj->pages);  /* this can't happen */
+                       ret = tiler_pin(omap_obj->block,
+                                       omap_obj->pages, npages,
+                                       omap_obj->roll, true);
+                       if (ret) {
+                               dev_err(dev, "could not repin: %d\n", ret);
+                               return ret;
+                       }
+               }
+       }
+       return 0;
+ }
+ #endif
+ #ifdef CONFIG_DEBUG_FS
+ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
+ {
+       struct drm_device *dev = obj->dev;
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       uint64_t off = 0;
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+       if (obj->map_list.map)
+               off = (uint64_t)obj->map_list.hash.key;
+       seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
+                       omap_obj->flags, obj->name, obj->refcount.refcount.counter,
+                       off, omap_obj->paddr, omap_obj->paddr_cnt,
+                       omap_obj->vaddr, omap_obj->roll);
+       if (omap_obj->flags & OMAP_BO_TILED) {
+               seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
+               if (omap_obj->block) {
+                       struct tcm_area *area = &omap_obj->block->area;
+                       seq_printf(m, " (%dx%d, %dx%d)",
+                                       area->p0.x, area->p0.y,
+                                       area->p1.x, area->p1.y);
+               }
+       } else {
+               seq_printf(m, " %d", obj->size);
+       }
+       seq_printf(m, "\n");
+ }
+ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
+ {
+       struct omap_gem_object *omap_obj;
+       int count = 0;
+       size_t size = 0;
+       list_for_each_entry(omap_obj, list, mm_list) {
+               struct drm_gem_object *obj = &omap_obj->base;
+               seq_printf(m, "   ");
+               omap_gem_describe(obj, m);
+               count++;
+               size += obj->size;
+       }
+       seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
+ }
+ #endif
+ /* Buffer Synchronization:
+  */
+ struct omap_gem_sync_waiter {
+       struct list_head list;
+       struct omap_gem_object *omap_obj;
+       enum omap_gem_op op;
+       uint32_t read_target, write_target;
+       /* notify called w/ sync_lock held */
+       void (*notify)(void *arg);
+       void *arg;
+ };
+ /* list of omap_gem_sync_waiter.. the notify fxn gets called back when
+  * the read and/or write target count is achieved, which can call a user
+  * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
+  * cpu access), etc.
+  */
+ static LIST_HEAD(waiters);
+ static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
+ {
+       struct omap_gem_object *omap_obj = waiter->omap_obj;
+       if ((waiter->op & OMAP_GEM_READ) &&
+                       (omap_obj->sync->read_complete < waiter->read_target))
+               return true;
+       if ((waiter->op & OMAP_GEM_WRITE) &&
+                       (omap_obj->sync->write_complete < waiter->write_target))
+               return true;
+       return false;
+ }
+ /* macro for sync debug.. */
+ #define SYNCDBG 0
+ #define SYNC(fmt, ...) do { if (SYNCDBG) \
+               printk(KERN_ERR "%s:%d: "fmt"\n", \
+                               __func__, __LINE__, ##__VA_ARGS__); \
+       } while (0)
+ static void sync_op_update(void)
+ {
+       struct omap_gem_sync_waiter *waiter, *n;
+       list_for_each_entry_safe(waiter, n, &waiters, list) {
+               if (!is_waiting(waiter)) {
+                       list_del(&waiter->list);
+                       SYNC("notify: %p", waiter);
+                       waiter->notify(waiter->arg);
+                       kfree(waiter);
+               }
+       }
+ }
+ static inline int sync_op(struct drm_gem_object *obj,
+               enum omap_gem_op op, bool start)
+ {
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       int ret = 0;
+       spin_lock(&sync_lock);
+       if (!omap_obj->sync) {
+               omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
+               if (!omap_obj->sync) {
+                       ret = -ENOMEM;
+                       goto unlock;
+               }
+       }
+       if (start) {
+               if (op & OMAP_GEM_READ)
+                       omap_obj->sync->read_pending++;
+               if (op & OMAP_GEM_WRITE)
+                       omap_obj->sync->write_pending++;
+       } else {
+               if (op & OMAP_GEM_READ)
+                       omap_obj->sync->read_complete++;
+               if (op & OMAP_GEM_WRITE)
+                       omap_obj->sync->write_complete++;
+               sync_op_update();
+       }
+ unlock:
+       spin_unlock(&sync_lock);
+       return ret;
+ }
+ /* it is a bit lame to handle updates in this sort of polling way, but
+  * in case of PVR, the GPU can directly update read/write complete
+  * values, and not really tell us which ones it updated.. this also
+  * means that sync_lock is not quite sufficient.  So we'll need to
+  * do something a bit better when it comes time to add support for
+  * separate 2d hw..
+  */
+ void omap_gem_op_update(void)
+ {
+       spin_lock(&sync_lock);
+       sync_op_update();
+       spin_unlock(&sync_lock);
+ }
+ /* mark the start of read and/or write operation */
+ int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
+ {
+       return sync_op(obj, op, true);
+ }
+ int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
+ {
+       return sync_op(obj, op, false);
+ }
+ static DECLARE_WAIT_QUEUE_HEAD(sync_event);
+ static void sync_notify(void *arg)
+ {
+       struct task_struct **waiter_task = arg;
+       *waiter_task = NULL;
+       wake_up_all(&sync_event);
+ }
+ int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
+ {
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       int ret = 0;
+       if (omap_obj->sync) {
+               struct task_struct *waiter_task = current;
+               struct omap_gem_sync_waiter *waiter =
+                               kzalloc(sizeof(*waiter), GFP_KERNEL);
+               if (!waiter)
+                       return -ENOMEM;
+               waiter->omap_obj = omap_obj;
+               waiter->op = op;
+               waiter->read_target = omap_obj->sync->read_pending;
+               waiter->write_target = omap_obj->sync->write_pending;
+               waiter->notify = sync_notify;
+               waiter->arg = &waiter_task;
+               spin_lock(&sync_lock);
+               if (is_waiting(waiter)) {
+                       SYNC("waited: %p", waiter);
+                       list_add_tail(&waiter->list, &waiters);
+                       spin_unlock(&sync_lock);
+                       ret = wait_event_interruptible(sync_event,
+                                       (waiter_task == NULL));
+                       spin_lock(&sync_lock);
+                       if (waiter_task) {
+                               SYNC("interrupted: %p", waiter);
+                               /* we were interrupted */
+                               list_del(&waiter->list);
+                               waiter_task = NULL;
+                       } else {
+                               /* freed in sync_op_update() */
+                               waiter = NULL;
+                       }
+               }
+               spin_unlock(&sync_lock);
+               if (waiter)
+                       kfree(waiter);
+       }
+       return ret;
+ }
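/*
 * Hypothetical usage sketch, not driver code: a hardware writer brackets its
 * work with op_start/op_finish, and a CPU reader uses op_sync to wait until
 * every write that was pending at the time of the call has completed.
 */
static int producer_write_example(struct drm_gem_object *obj)
{
        int ret = omap_gem_op_start(obj, OMAP_GEM_WRITE);
        if (ret)
                return ret;

        /* ... kick the hw write and wait for it to finish ... */

        return omap_gem_op_finish(obj, OMAP_GEM_WRITE);
}

static int consumer_cpu_read_example(struct drm_gem_object *obj)
{
        /* blocks until writes pending at call time are marked complete */
        return omap_gem_op_sync(obj, OMAP_GEM_WRITE);
}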
+ /* call fxn(arg), either synchronously or asynchronously if the op
+  * is currently blocked..  fxn() can be called from any context
+  *
+  * (TODO for now fxn is called back from whichever context calls
+  * omap_gem_op_update().. but this could be better defined later
+  * if needed)
+  *
+  * TODO more code in common w/ _sync()..
+  */
+ int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
+               void (*fxn)(void *arg), void *arg)
+ {
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       if (omap_obj->sync) {
+               struct omap_gem_sync_waiter *waiter =
+                               kzalloc(sizeof(*waiter), GFP_ATOMIC);
+               if (!waiter)
+                       return -ENOMEM;
+               waiter->omap_obj = omap_obj;
+               waiter->op = op;
+               waiter->read_target = omap_obj->sync->read_pending;
+               waiter->write_target = omap_obj->sync->write_pending;
+               waiter->notify = fxn;
+               waiter->arg = arg;
+               spin_lock(&sync_lock);
+               if (is_waiting(waiter)) {
+                       SYNC("waited: %p", waiter);
+                       list_add_tail(&waiter->list, &waiters);
+                       spin_unlock(&sync_lock);
+                       return 0;
+               }
+               spin_unlock(&sync_lock);
+       }
+       /* no waiting.. */
+       fxn(arg);
+       return 0;
+ }
+ /* special API so PVR can update the buffer to use a sync-object allocated
+  * from its sync-obj heap.  Only used for a newly allocated (from PVR's
+  * perspective) sync-object, so we overwrite the new syncobj w/ values
+  * from the already allocated syncobj (if there is one)
+  */
+ int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
+ {
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       int ret = 0;
+       spin_lock(&sync_lock);
+       if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
+               /* clearing a previously set syncobj */
+               syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
+                                 GFP_ATOMIC);
+               if (!syncobj) {
+                       ret = -ENOMEM;
+                       goto unlock;
+               }
+               omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
+               omap_obj->sync = syncobj;
+       } else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
+               /* replacing an existing syncobj */
+               if (omap_obj->sync) {
+                       memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
+                       kfree(omap_obj->sync);
+               }
+               omap_obj->flags |= OMAP_BO_EXT_SYNC;
+               omap_obj->sync = syncobj;
+       }
+ unlock:
+       spin_unlock(&sync_lock);
+       return ret;
+ }
+ int omap_gem_init_object(struct drm_gem_object *obj)
+ {
+       return -EINVAL;          /* unused */
+ }
+ /* don't call directly.. called from GEM core when it is time to actually
+  * free the object..
+  */
+ void omap_gem_free_object(struct drm_gem_object *obj)
+ {
+       struct drm_device *dev = obj->dev;
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       evict(obj);
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+       list_del(&omap_obj->mm_list);
+       if (obj->map_list.map)
+               drm_gem_free_mmap_offset(obj);
+       /* this means the object is still pinned.. which really should
+        * not happen.  I think..
+        */
+       WARN_ON(omap_obj->paddr_cnt > 0);
+       /* don't free externally allocated backing memory */
+       if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
+               if (omap_obj->pages)
+                       omap_gem_detach_pages(obj);
+               if (!is_shmem(obj)) {
+                       dma_free_writecombine(dev->dev, obj->size,
+                                       omap_obj->vaddr, omap_obj->paddr);
+               } else if (omap_obj->vaddr) {
+                       vunmap(omap_obj->vaddr);
+               }
+       }
+       /* don't free externally allocated syncobj */
+       if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
+               kfree(omap_obj->sync);
+       drm_gem_object_release(obj);
+       kfree(obj);
+ }
+ /* convenience method to construct a GEM buffer object, and userspace handle */
+ int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+               union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
+ {
+       struct drm_gem_object *obj;
+       int ret;
+       obj = omap_gem_new(dev, gsize, flags);
+       if (!obj)
+               return -ENOMEM;
+       ret = drm_gem_handle_create(file, obj, handle);
+       if (ret) {
+               drm_gem_object_release(obj);
+               kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
+               return ret;
+       }
+       /* drop reference from allocate - handle holds it now */
+       drm_gem_object_unreference_unlocked(obj);
+       return 0;
+ }
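
A usage sketch (illustrative only; the helper name and the dumb-buffer framing are assumptions, not part of this patch): a dumb_create-style allocation can sit directly on top of omap_gem_new_handle(), since on success the handle owns the object's only reference, as the unreference above arranges.

static int example_dumb_create(struct drm_file *file, struct drm_device *dev,
		uint32_t width, uint32_t height, uint32_t bpp, uint32_t *handle)
{
	union omap_gem_size gsize = {
		/* bytes per line * lines, rounded up to whole pages */
		.bytes = PAGE_ALIGN(width * DIV_ROUND_UP(bpp, 8) * height),
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, handle);
}
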
+ /* GEM buffer object constructor */
+ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
+               union omap_gem_size gsize, uint32_t flags)
+ {
+       struct omap_drm_private *priv = dev->dev_private;
+       struct omap_gem_object *omap_obj;
+       struct drm_gem_object *obj = NULL;
+       size_t size;
+       int ret;
+       if (flags & OMAP_BO_TILED) {
+               if (!usergart) {
+                       dev_err(dev->dev, "Tiled buffers require DMM\n");
+                       goto fail;
+               }
+               /* tiled buffers are always shmem page backed.. when they are
+                * scanned out, they are remapped into DMM/TILER
+                */
+               flags &= ~OMAP_BO_SCANOUT;
+               /* currently don't allow cached buffers.. there is some caching
+                * stuff that needs to be handled better
+                */
+               flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED);
+               flags |= OMAP_BO_WC;
+               /* align dimensions to slot boundaries... */
+               tiler_align(gem2fmt(flags),
+                               &gsize.tiled.width, &gsize.tiled.height);
+               /* ...and calculate size based on aligned dimensions */
+               size = tiler_size(gem2fmt(flags),
+                               gsize.tiled.width, gsize.tiled.height);
+       } else {
+               size = PAGE_ALIGN(gsize.bytes);
+       }
+       omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
 -      }
++      if (!omap_obj)
+               goto fail;
 -      usergart = kzalloc(3 * sizeof(*usergart), GFP_KERNEL);
 -      if (!usergart) {
 -              dev_warn(dev->dev, "could not allocate usergart\n");
+       list_add(&omap_obj->mm_list, &priv->obj_list);
+       obj = &omap_obj->base;
+       if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
+               /* attempt to allocate contiguous memory if we don't
+                * have DMM for remapping discontiguous buffers
+                */
+               omap_obj->vaddr =  dma_alloc_writecombine(dev->dev, size,
+                               &omap_obj->paddr, GFP_KERNEL);
+               if (omap_obj->vaddr)
+                       flags |= OMAP_BO_DMA;
+       }
+       omap_obj->flags = flags;
+       if (flags & OMAP_BO_TILED) {
+               omap_obj->width = gsize.tiled.width;
+               omap_obj->height = gsize.tiled.height;
+       }
+       if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
+               ret = drm_gem_private_object_init(dev, obj, size);
+       else
+               ret = drm_gem_object_init(dev, obj, size);
+       if (ret)
+               goto fail;
+       return obj;
+ fail:
+       if (obj)
+               omap_gem_free_object(obj);
+       return NULL;
+ }
+ /* init/cleanup.. if DMM is used, we need to set some stuff up.. */
+ void omap_gem_init(struct drm_device *dev)
+ {
+       struct omap_drm_private *priv = dev->dev_private;
+       const enum tiler_fmt fmts[] = {
+                       TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
+       };
+       int i, j;
+       if (!dmm_is_available()) {
+               /* DMM only supported on OMAP4 and later, so this isn't fatal */
+               dev_warn(dev->dev, "DMM not available, disable DMM support\n");
+               return;
+       }
 -      }
++      usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
++      if (!usergart)
+               return;
+       /* reserve 4k aligned/wide regions for userspace mappings: */
+       for (i = 0; i < ARRAY_SIZE(fmts); i++) {
+               uint16_t h = 1, w = PAGE_SIZE >> i;
+               tiler_align(fmts[i], &w, &h);
+               /* note: since each region is 1 4kb page wide, and minimum
+                * number of rows, the height ends up being the same as the
+                * # of pages in the region
+                */
+               usergart[i].height = h;
+               usergart[i].height_shift = ilog2(h);
+               usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
+               usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
+               for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
+                       struct usergart_entry *entry = &usergart[i].entry[j];
+                       struct tiler_block *block =
+                                       tiler_reserve_2d(fmts[i], w, h,
+                                                       PAGE_SIZE);
+                       if (IS_ERR(block)) {
+                               dev_err(dev->dev,
+                                               "reserve failed: %d, %d, %ld\n",
+                                               i, j, PTR_ERR(block));
+                               return;
+                       }
+                       entry->paddr = tiler_ssptr(block);
+                       entry->block = block;
+                       DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
+                                       entry->paddr,
+                                       usergart[i].stride_pfn << PAGE_SHIFT);
+               }
+       }
+       priv->has_dmm = true;
+ }
+ void omap_gem_deinit(struct drm_device *dev)
+ {
+       /* I believe we can rely on there being no more outstanding GEM
+        * objects which could depend on usergart/dmm at this point.
+        */
+       kfree(usergart);
+ }
index 0000000000000000000000000000000000000000,dd68d14ce61588c1c0ca3f4f4f227a7bf9a06ba4..2882cda6ea19de38039f498a9a7fa31e488d92c7
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,450 +1,448 @@@
 -      if (!omap_plane) {
 -              dev_err(dev->dev, "could not allocate plane\n");
+ /*
+  * drivers/gpu/drm/omapdrm/omap_plane.c
+  *
+  * Copyright (C) 2011 Texas Instruments
+  * Author: Rob Clark <rob.clark@linaro.org>
+  *
+  * This program is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 as published by
+  * the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  * more details.
+  *
+  * You should have received a copy of the GNU General Public License along with
+  * this program.  If not, see <http://www.gnu.org/licenses/>.
+  */
+ #include <linux/kfifo.h>
+ #include "omap_drv.h"
+ #include "omap_dmm_tiler.h"
+ /* some hackery because omapdss has an 'enum omap_plane' (which would be
+  * better named omap_plane_id).. and compiler seems unhappy about having
+  * both a 'struct omap_plane' and 'enum omap_plane'
+  */
+ #define omap_plane _omap_plane
+ /*
+  * plane funcs
+  */
+ struct callback {
+       void (*fxn)(void *);
+       void *arg;
+ };
+ #define to_omap_plane(x) container_of(x, struct omap_plane, base)
+ struct omap_plane {
+       struct drm_plane base;
+       int id;  /* TODO rename omap_plane -> omap_plane_id in omapdss so I can use the enum */
+       const char *name;
+       struct omap_overlay_info info;
+       struct omap_drm_apply apply;
+       /* position/orientation of scanout within the fb: */
+       struct omap_drm_window win;
+       bool enabled;
+       /* last fb that we pinned: */
+       struct drm_framebuffer *pinned_fb;
+       uint32_t nformats;
+       uint32_t formats[32];
+       struct omap_drm_irq error_irq;
+       /* set of bo's pending unpin until next post_apply() */
+       DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *);
+       // XXX maybe get rid of this and handle vblank in crtc too?
+       struct callback apply_done_cb;
+ };
+ static void unpin(void *arg, struct drm_gem_object *bo)
+ {
+       struct drm_plane *plane = arg;
+       struct omap_plane *omap_plane = to_omap_plane(plane);
+       if (kfifo_put(&omap_plane->unpin_fifo,
+                       (const struct drm_gem_object **)&bo)) {
+               /* also hold a ref so it isn't free'd while pinned */
+               drm_gem_object_reference(bo);
+       } else {
+               dev_err(plane->dev->dev, "unpin fifo full!\n");
+               omap_gem_put_paddr(bo);
+       }
+ }
+ /* update which fb (if any) is pinned for scanout */
+ static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
+ {
+       struct omap_plane *omap_plane = to_omap_plane(plane);
+       struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb;
+       if (pinned_fb != fb) {
+               int ret;
+               DBG("%p -> %p", pinned_fb, fb);
+               if (fb)
+                       drm_framebuffer_reference(fb);
+               ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin);
+               if (pinned_fb)
+                       drm_framebuffer_unreference(pinned_fb);
+               if (ret) {
+                       dev_err(plane->dev->dev, "could not swap %p -> %p\n",
+                                       omap_plane->pinned_fb, fb);
+                       if (fb)
+                               drm_framebuffer_unreference(fb);
+                       omap_plane->pinned_fb = NULL;
+                       return ret;
+               }
+               omap_plane->pinned_fb = fb;
+       }
+       return 0;
+ }
+ static void omap_plane_pre_apply(struct omap_drm_apply *apply)
+ {
+       struct omap_plane *omap_plane =
+                       container_of(apply, struct omap_plane, apply);
+       struct omap_drm_window *win = &omap_plane->win;
+       struct drm_plane *plane = &omap_plane->base;
+       struct drm_device *dev = plane->dev;
+       struct omap_overlay_info *info = &omap_plane->info;
+       struct drm_crtc *crtc = plane->crtc;
+       enum omap_channel channel;
+       bool enabled = omap_plane->enabled && crtc;
+       bool ilace, replication;
+       int ret;
+       DBG("%s, enabled=%d", omap_plane->name, enabled);
+       /* if fb has changed, pin new fb: */
+       update_pin(plane, enabled ? plane->fb : NULL);
+       if (!enabled) {
+               dispc_ovl_enable(omap_plane->id, false);
+               return;
+       }
+       channel = omap_crtc_channel(crtc);
+       /* update scanout: */
+       omap_framebuffer_update_scanout(plane->fb, win, info);
+       DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
+                       info->out_width, info->out_height,
+                       info->screen_width);
+       DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
+                       info->paddr, info->p_uv_addr);
+       /* TODO: */
+       ilace = false;
+       replication = false;
+       /* and finally, update omapdss: */
+       ret = dispc_ovl_setup(omap_plane->id, info,
+                       replication, omap_crtc_timings(crtc), false);
+       if (ret) {
+               dev_err(dev->dev, "dispc_ovl_setup failed: %d\n", ret);
+               return;
+       }
+       dispc_ovl_enable(omap_plane->id, true);
+       dispc_ovl_set_channel_out(omap_plane->id, channel);
+ }
+ static void omap_plane_post_apply(struct omap_drm_apply *apply)
+ {
+       struct omap_plane *omap_plane =
+                       container_of(apply, struct omap_plane, apply);
+       struct drm_plane *plane = &omap_plane->base;
+       struct omap_overlay_info *info = &omap_plane->info;
+       struct drm_gem_object *bo = NULL;
+       struct callback cb;
+       cb = omap_plane->apply_done_cb;
+       omap_plane->apply_done_cb.fxn = NULL;
+       while (kfifo_get(&omap_plane->unpin_fifo, &bo)) {
+               omap_gem_put_paddr(bo);
+               drm_gem_object_unreference_unlocked(bo);
+       }
+       if (cb.fxn)
+               cb.fxn(cb.arg);
+       if (omap_plane->enabled) {
+               omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
+                               info->out_width, info->out_height);
+       }
+ }
+ static int apply(struct drm_plane *plane)
+ {
+       if (plane->crtc) {
+               struct omap_plane *omap_plane = to_omap_plane(plane);
+               return omap_crtc_apply(plane->crtc, &omap_plane->apply);
+       }
+       return 0;
+ }
+ int omap_plane_mode_set(struct drm_plane *plane,
+               struct drm_crtc *crtc, struct drm_framebuffer *fb,
+               int crtc_x, int crtc_y,
+               unsigned int crtc_w, unsigned int crtc_h,
+               uint32_t src_x, uint32_t src_y,
+               uint32_t src_w, uint32_t src_h,
+               void (*fxn)(void *), void *arg)
+ {
+       struct omap_plane *omap_plane = to_omap_plane(plane);
+       struct omap_drm_window *win = &omap_plane->win;
+       win->crtc_x = crtc_x;
+       win->crtc_y = crtc_y;
+       win->crtc_w = crtc_w;
+       win->crtc_h = crtc_h;
+       /* src values are in Q16 fixed point, convert to integer: */
+       win->src_x = src_x >> 16;
+       win->src_y = src_y >> 16;
+       win->src_w = src_w >> 16;
+       win->src_h = src_h >> 16;
+       if (fxn) {
+               /* omap_crtc should ensure that a new page flip
+                * isn't permitted while there is one pending:
+                */
+               BUG_ON(omap_plane->apply_done_cb.fxn);
+               omap_plane->apply_done_cb.fxn = fxn;
+               omap_plane->apply_done_cb.arg = arg;
+       }
+       plane->fb = fb;
+       plane->crtc = crtc;
+       return apply(plane);
+ }
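
A short worked example of the fixed-point conversion above (values illustrative, not from the patch):

/* The DRM core passes src_* in 16.16 fixed point, so a 640x480 source
 * rect at the origin arrives as
 *   src_w = 640 << 16 = 0x02800000,  src_h = 480 << 16 = 0x01e00000
 * and the shifts above recover win->src_w = 640, win->src_h = 480;
 * fractional (sub-pixel) bits are truncated.
 */
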
+ static int omap_plane_update(struct drm_plane *plane,
+               struct drm_crtc *crtc, struct drm_framebuffer *fb,
+               int crtc_x, int crtc_y,
+               unsigned int crtc_w, unsigned int crtc_h,
+               uint32_t src_x, uint32_t src_y,
+               uint32_t src_w, uint32_t src_h)
+ {
+       struct omap_plane *omap_plane = to_omap_plane(plane);
+       omap_plane->enabled = true;
+       return omap_plane_mode_set(plane, crtc, fb,
+                       crtc_x, crtc_y, crtc_w, crtc_h,
+                       src_x, src_y, src_w, src_h,
+                       NULL, NULL);
+ }
+ static int omap_plane_disable(struct drm_plane *plane)
+ {
+       struct omap_plane *omap_plane = to_omap_plane(plane);
+       omap_plane->win.rotation = BIT(DRM_ROTATE_0);
+       return omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+ }
+ static void omap_plane_destroy(struct drm_plane *plane)
+ {
+       struct omap_plane *omap_plane = to_omap_plane(plane);
+       DBG("%s", omap_plane->name);
+       omap_irq_unregister(plane->dev, &omap_plane->error_irq);
+       omap_plane_disable(plane);
+       drm_plane_cleanup(plane);
+       WARN_ON(!kfifo_is_empty(&omap_plane->unpin_fifo));
+       kfifo_free(&omap_plane->unpin_fifo);
+       kfree(omap_plane);
+ }
+ int omap_plane_dpms(struct drm_plane *plane, int mode)
+ {
+       struct omap_plane *omap_plane = to_omap_plane(plane);
+       bool enabled = (mode == DRM_MODE_DPMS_ON);
+       int ret = 0;
+       if (enabled != omap_plane->enabled) {
+               omap_plane->enabled = enabled;
+               ret = apply(plane);
+       }
+       return ret;
+ }
+ /* helper to install properties which are common to planes and crtcs */
+ void omap_plane_install_properties(struct drm_plane *plane,
+               struct drm_mode_object *obj)
+ {
+       struct drm_device *dev = plane->dev;
+       struct omap_drm_private *priv = dev->dev_private;
+       struct drm_property *prop;
+       if (priv->has_dmm) {
+               prop = priv->rotation_prop;
+               if (!prop) {
+                       const struct drm_prop_enum_list props[] = {
+                                       { DRM_ROTATE_0,   "rotate-0" },
+                                       { DRM_ROTATE_90,  "rotate-90" },
+                                       { DRM_ROTATE_180, "rotate-180" },
+                                       { DRM_ROTATE_270, "rotate-270" },
+                                       { DRM_REFLECT_X,  "reflect-x" },
+                                       { DRM_REFLECT_Y,  "reflect-y" },
+                       };
+                       prop = drm_property_create_bitmask(dev, 0, "rotation",
+                                       props, ARRAY_SIZE(props));
+                       if (prop == NULL)
+                               return;
+                       priv->rotation_prop = prop;
+               }
+               drm_object_attach_property(obj, prop, 0);
+       }
+       prop = priv->zorder_prop;
+       if (!prop) {
+               prop = drm_property_create_range(dev, 0, "zorder", 0, 3);
+               if (prop == NULL)
+                       return;
+               priv->zorder_prop = prop;
+       }
+       drm_object_attach_property(obj, prop, 0);
+ }
+ int omap_plane_set_property(struct drm_plane *plane,
+               struct drm_property *property, uint64_t val)
+ {
+       struct omap_plane *omap_plane = to_omap_plane(plane);
+       struct omap_drm_private *priv = plane->dev->dev_private;
+       int ret = -EINVAL;
+       if (property == priv->rotation_prop) {
+               DBG("%s: rotation: %02x", omap_plane->name, (uint32_t)val);
+               omap_plane->win.rotation = val;
+               ret = apply(plane);
+       } else if (property == priv->zorder_prop) {
+               DBG("%s: zorder: %02x", omap_plane->name, (uint32_t)val);
+               omap_plane->info.zorder = val;
+               ret = apply(plane);
+       }
+       return ret;
+ }
+ static const struct drm_plane_funcs omap_plane_funcs = {
+               .update_plane = omap_plane_update,
+               .disable_plane = omap_plane_disable,
+               .destroy = omap_plane_destroy,
+               .set_property = omap_plane_set_property,
+ };
+ static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+ {
+       struct omap_plane *omap_plane =
+                       container_of(irq, struct omap_plane, error_irq);
+       DRM_ERROR("%s: errors: %08x\n", omap_plane->name, irqstatus);
+ }
+ static const char *plane_names[] = {
+               [OMAP_DSS_GFX] = "gfx",
+               [OMAP_DSS_VIDEO1] = "vid1",
+               [OMAP_DSS_VIDEO2] = "vid2",
+               [OMAP_DSS_VIDEO3] = "vid3",
+ };
+ static const uint32_t error_irqs[] = {
+               [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
+               [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
+               [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
+               [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
+ };
+ /* initialize plane */
+ struct drm_plane *omap_plane_init(struct drm_device *dev,
+               int id, bool private_plane)
+ {
+       struct omap_drm_private *priv = dev->dev_private;
+       struct drm_plane *plane = NULL;
+       struct omap_plane *omap_plane;
+       struct omap_overlay_info *info;
+       int ret;
+       DBG("%s: priv=%d", plane_names[id], private_plane);
+       omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
 -      }
++      if (!omap_plane)
+               goto fail;
+       ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL);
+       if (ret) {
+               dev_err(dev->dev, "could not allocate unpin FIFO\n");
+               goto fail;
+       }
+       omap_plane->nformats = omap_framebuffer_get_formats(
+                       omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
+                       dss_feat_get_supported_color_modes(id));
+       omap_plane->id = id;
+       omap_plane->name = plane_names[id];
+       plane = &omap_plane->base;
+       omap_plane->apply.pre_apply  = omap_plane_pre_apply;
+       omap_plane->apply.post_apply = omap_plane_post_apply;
+       omap_plane->error_irq.irqmask = error_irqs[id];
+       omap_plane->error_irq.irq = omap_plane_error_irq;
+       omap_irq_register(dev, &omap_plane->error_irq);
+       drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &omap_plane_funcs,
+                       omap_plane->formats, omap_plane->nformats, private_plane);
+       omap_plane_install_properties(plane, &plane->base);
+       /* get our starting configuration, set defaults for parameters
+        * we don't currently use, etc:
+        */
+       info = &omap_plane->info;
+       info->rotation_type = OMAP_DSS_ROT_DMA;
+       info->rotation = OMAP_DSS_ROT_0;
+       info->global_alpha = 0xff;
+       info->mirror = 0;
+       /* Set defaults depending on whether we are a CRTC or overlay
+        * layer.
+        * TODO add ioctl to give userspace an API to change this.. this
+        * will come in a subsequent patch.
+        */
+       if (private_plane)
+               omap_plane->info.zorder = 0;
+       else
+               omap_plane->info.zorder = id;
+       return plane;
+ fail:
+       if (plane)
+               omap_plane_destroy(plane);
+       return NULL;
+ }
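
The evergreen_cs.c hunks that follow rework the DMA packet checker into a switch on the sub-command and, in each case, replace direct ib[idx] reads of user-supplied words with radeon_get_ib_value(p, idx). Every case repeats the same bounds check against the relocated buffer; purely as a reading aid, here is a sketch of that recurring pattern (the helper and its name are hypothetical, not part of the patch; nbytes is count * 4 for the dword-aligned cases and count for the byte-aligned L2L copy).

/* hypothetical consolidation of the repeated size check below */
static int example_dma_check_size(struct radeon_cs_parser *p, const char *what,
		u64 offset, u64 nbytes, struct radeon_bo *bo)
{
	if ((offset + nbytes) > radeon_bo_size(bo)) {
		dev_warn(p->dev, "DMA %s buffer too small (%llu %lu)\n",
				what, offset + nbytes, radeon_bo_size(bo));
		return -EINVAL;
	}
	return 0;
}
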
index ee4cff534f100dae84d64bff2122b9b2bc89a9aa,d8f5d5fcd303f68e1a71694dd067f26adb8caddb..99fb13286fd02d3393ff09e28b9c10e91cb9d9e7
@@@ -2908,19 -2708,27 +2708,27 @@@ int evergreen_dma_cs_parse(struct radeo
                                DRM_ERROR("bad DMA_PACKET_WRITE\n");
                                return -EINVAL;
                        }
-                       if (tiled) {
+                       switch (sub_cmd) {
+                       /* tiled */
+                       case 8:
 -                              dst_offset = ib[idx+1];
 +                              dst_offset = radeon_get_ib_value(p, idx+1);
                                dst_offset <<= 8;
  
                                ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
                                p->idx += count + 7;
-                       } else {
+                               break;
+                       /* linear */
+                       case 0:
 -                              dst_offset = ib[idx+1];
 -                              dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
 +                              dst_offset = radeon_get_ib_value(p, idx+1);
 +                              dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
  
                                ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
                                p->idx += count + 3;
 -                              DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib[idx+0]);
+                               break;
+                       default:
++                              DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
+                               return -EINVAL;
                        }
                        if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
                                dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
                                DRM_ERROR("bad DMA_PACKET_COPY\n");
                                return -EINVAL;
                        }
-                       if (tiled) {
-                               idx_value = radeon_get_ib_value(p, idx + 2);
-                               if (new_cmd) {
-                                       switch (misc) {
-                                       case 0:
-                                               /* L2T, frame to fields */
-                                               if (idx_value & (1 << 31)) {
-                                                       DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
-                                                       return -EINVAL;
-                                               }
-                                               r = r600_dma_cs_next_reloc(p, &dst2_reloc);
-                                               if (r) {
-                                                       DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
-                                                       return -EINVAL;
-                                               }
-                                               dst_offset = radeon_get_ib_value(p, idx+1);
-                                               dst_offset <<= 8;
-                                               dst2_offset = radeon_get_ib_value(p, idx+2);
-                                               dst2_offset <<= 8;
-                                               src_offset = radeon_get_ib_value(p, idx+8);
-                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
-                                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
-                                                       dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
-                                                                src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
-                                                       return -EINVAL;
-                                               }
-                                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
-                                                       dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
-                                                                dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
-                                                       return -EINVAL;
-                                               }
-                                               if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
-                                                       dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
-                                                                dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
-                                                       return -EINVAL;
-                                               }
-                                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
-                                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
-                                               ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-                                               ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-                                               p->idx += 10;
-                                               break;
-                                       case 1:
-                                               /* L2T, T2L partial */
-                                               if (p->family < CHIP_CAYMAN) {
-                                                       DRM_ERROR("L2T, T2L Partial is cayman only !\n");
-                                                       return -EINVAL;
-                                               }
-                                               /* detile bit */
-                                               if (idx_value & (1 << 31)) {
-                                                       /* tiled src, linear dst */
-                                                       ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-                                                       ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-                                                       ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-                                               } else {
-                                                       /* linear src, tiled dst */
-                                                       ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-                                                       ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-                                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
-                                               }
-                                               p->idx += 12;
-                                               break;
-                                       case 3:
-                                               /* L2T, broadcast */
-                                               if (idx_value & (1 << 31)) {
-                                                       DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
-                                                       return -EINVAL;
-                                               }
-                                               r = r600_dma_cs_next_reloc(p, &dst2_reloc);
-                                               if (r) {
-                                                       DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
-                                                       return -EINVAL;
-                                               }
-                                               dst_offset = radeon_get_ib_value(p, idx+1);
-                                               dst_offset <<= 8;
-                                               dst2_offset = radeon_get_ib_value(p, idx+2);
-                                               dst2_offset <<= 8;
-                                               src_offset = radeon_get_ib_value(p, idx+8);
-                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
-                                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
-                                                       dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
-                                                                src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
-                                                       return -EINVAL;
-                                               }
-                                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
-                                                       dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
-                                                                dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
-                                                       return -EINVAL;
-                                               }
-                                               if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
-                                                       dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
-                                                                dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
-                                                       return -EINVAL;
-                                               }
-                                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
-                                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
-                                               ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-                                               ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-                                               p->idx += 10;
-                                               break;
-                                       case 4:
-                                               /* L2T, T2L */
-                                               /* detile bit */
-                                               if (idx_value & (1 << 31)) {
-                                                       /* tiled src, linear dst */
-                                                       src_offset = radeon_get_ib_value(p, idx+1);
-                                                       src_offset <<= 8;
-                                                       ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-                                                       dst_offset = radeon_get_ib_value(p, idx+7);
-                                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
-                                                       ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-                                                       ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-                                               } else {
-                                                       /* linear src, tiled dst */
-                                                       src_offset = radeon_get_ib_value(p, idx+7);
-                                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
-                                                       ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-                                                       ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-                                                       dst_offset = radeon_get_ib_value(p, idx+1);
-                                                       dst_offset <<= 8;
-                                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
-                                               }
-                                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
-                                                       dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
-                                                                src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
-                                                       return -EINVAL;
-                                               }
-                                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
-                                                       dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
-                                                                dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
-                                                       return -EINVAL;
-                                               }
-                                               p->idx += 9;
-                                               break;
-                                       case 5:
-                                               /* T2T partial */
-                                               if (p->family < CHIP_CAYMAN) {
-                                                       DRM_ERROR("L2T, T2L Partial is cayman only !\n");
-                                                       return -EINVAL;
-                                               }
-                                               ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-                                               ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
-                                               p->idx += 13;
-                                               break;
-                                       case 7:
-                                               /* L2T, broadcast */
-                                               if (idx_value & (1 << 31)) {
-                                                       DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
-                                                       return -EINVAL;
-                                               }
-                                               r = r600_dma_cs_next_reloc(p, &dst2_reloc);
-                                               if (r) {
-                                                       DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
-                                                       return -EINVAL;
-                                               }
-                                               dst_offset = radeon_get_ib_value(p, idx+1);
-                                               dst_offset <<= 8;
-                                               dst2_offset = radeon_get_ib_value(p, idx+2);
-                                               dst2_offset <<= 8;
-                                               src_offset = radeon_get_ib_value(p, idx+8);
-                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
-                                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
-                                                       dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
-                                                                src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
-                                                       return -EINVAL;
-                                               }
-                                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
-                                                       dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
-                                                                dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
-                                                       return -EINVAL;
-                                               }
-                                               if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
-                                                       dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
-                                                                dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
-                                                       return -EINVAL;
-                                               }
-                                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
-                                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
-                                               ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-                                               ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-                                               p->idx += 10;
-                                               break;
-                                       default:
-                                               DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
-                                               return -EINVAL;
-                                       }
+                       switch (sub_cmd) {
+                       /* Copy L2L, DW aligned */
+                       case 0x00:
+                               /* L2L, dw */
 -                              src_offset = ib[idx+2];
 -                              src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
 -                              dst_offset = ib[idx+1];
 -                              dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
++                              src_offset = radeon_get_ib_value(p, idx+2);
++                              src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
++                              dst_offset = radeon_get_ib_value(p, idx+1);
++                              dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+                                                       src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+                                                       dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                               ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                               ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                               ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                               p->idx += 5;
+                               break;
+                       /* Copy L2T/T2L */
+                       case 0x08:
+                               /* detile bit */
 -                              if (ib[idx + 2] & (1 << 31)) {
++                              if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+                                       /* tiled src, linear dst */
 -                                      src_offset = ib[idx+1];
++                                      src_offset = radeon_get_ib_value(p, idx+1);
+                                       src_offset <<= 8;
+                                       ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+                                       dst_offset = radeon_get_ib_value(p, idx + 7);
 -                                      dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
++                                      dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+                                       ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                                       ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
                                } else {
-                                       switch (misc) {
-                                       case 0:
-                                               /* detile bit */
-                                               if (idx_value & (1 << 31)) {
-                                                       /* tiled src, linear dst */
-                                                       src_offset = radeon_get_ib_value(p, idx+1);
-                                                       src_offset <<= 8;
-                                                       ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-                                                       dst_offset = radeon_get_ib_value(p, idx+7);
-                                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
-                                                       ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-                                                       ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-                                               } else {
-                                                       /* linear src, tiled dst */
-                                                       src_offset = radeon_get_ib_value(p, idx+7);
-                                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
-                                                       ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-                                                       ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-                                                       dst_offset = radeon_get_ib_value(p, idx+1);
-                                                       dst_offset <<= 8;
-                                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
-                                               }
-                                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
-                                                       dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
-                                                                src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
-                                                       return -EINVAL;
-                                               }
-                                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
-                                                       dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
-                                                                dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
-                                                       return -EINVAL;
-                                               }
-                                               p->idx += 9;
-                                               break;
-                                       default:
-                                               DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
-                                               return -EINVAL;
-                                       }
+                                       /* linear src, tiled dst */
 -                                      src_offset = ib[idx+7];
 -                                      src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
++                                      src_offset = radeon_get_ib_value(p, idx+7);
++                                      src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+                                       ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                       ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 -                                      dst_offset = ib[idx+1];
++                                      dst_offset = radeon_get_ib_value(p, idx+1);
+                                       dst_offset <<= 8;
+                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
                                }
-                       } else {
-                               if (new_cmd) {
-                                       switch (misc) {
-                                       case 0:
-                                               /* L2L, byte */
-                                               src_offset = radeon_get_ib_value(p, idx+2);
-                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
-                                               dst_offset = radeon_get_ib_value(p, idx+1);
-                                               dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
-                                               if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
-                                                       dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
-                                                                src_offset + count, radeon_bo_size(src_reloc->robj));
-                                                       return -EINVAL;
-                                               }
-                                               if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
-                                                       dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
-                                                                dst_offset + count, radeon_bo_size(dst_reloc->robj));
-                                                       return -EINVAL;
-                                               }
-                                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
-                                               ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
-                                               ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-                                               ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-                                               p->idx += 5;
-                                               break;
-                                       case 1:
-                                               /* L2L, partial */
-                                               if (p->family < CHIP_CAYMAN) {
-                                                       DRM_ERROR("L2L Partial is cayman only !\n");
-                                                       return -EINVAL;
-                                               }
-                                               ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
-                                               ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-                                               ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
-                                               ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-                                               p->idx += 9;
-                                               break;
-                                       case 4:
-                                               /* L2L, dw, broadcast */
-                                               r = r600_dma_cs_next_reloc(p, &dst2_reloc);
-                                               if (r) {
-                                                       DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
-                                                       return -EINVAL;
-                                               }
-                                               dst_offset = radeon_get_ib_value(p, idx+1);
-                                               dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
-                                               dst2_offset = radeon_get_ib_value(p, idx+2);
-                                               dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
-                                               src_offset = radeon_get_ib_value(p, idx+3);
-                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
-                                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
-                                                       dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
-                                                                src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
-                                                       return -EINVAL;
-                                               }
-                                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
-                                                       dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
-                                                                dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
-                                                       return -EINVAL;
-                                               }
-                                               if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
-                                                       dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
-                                                                dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
-                                                       return -EINVAL;
-                                               }
-                                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-                                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
-                                               ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-                                               ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-                                               ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
-                                               ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-                                               p->idx += 7;
-                                               break;
-                                       default:
-                                               DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
-                                               return -EINVAL;
-                                       }
+                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
+                                                       src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
+                                                       dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               p->idx += 9;
+                               break;
+                       /* Copy L2L, byte aligned */
+                       case 0x40:
+                               /* L2L, byte */
 -                              src_offset = ib[idx+2];
 -                              src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
 -                              dst_offset = ib[idx+1];
 -                              dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
++                              src_offset = radeon_get_ib_value(p, idx+2);
++                              src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
++                              dst_offset = radeon_get_ib_value(p, idx+1);
++                              dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+                               if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+                                                       src_offset + count, radeon_bo_size(src_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+                                                       dst_offset + count, radeon_bo_size(dst_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+                               ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+                               ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                               ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                               p->idx += 5;
+                               break;
+                       /* Copy L2L, partial */
+                       case 0x41:
+                               /* L2L, partial */
+                               if (p->family < CHIP_CAYMAN) {
+                                       DRM_ERROR("L2L Partial is cayman only !\n");
+                                       return -EINVAL;
+                               }
+                               ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+                               ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                               ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+                               ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                               p->idx += 9;
+                               break;
+                       /* Copy L2L, DW aligned, broadcast */
+                       case 0x44:
+                               /* L2L, dw, broadcast */
+                               r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+                               if (r) {
+                                       DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+                                       return -EINVAL;
+                               }
 -                              dst_offset = ib[idx+1];
 -                              dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
 -                              dst2_offset = ib[idx+2];
 -                              dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
 -                              src_offset = ib[idx+3];
 -                              src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
++                              dst_offset = radeon_get_ib_value(p, idx+1);
++                              dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
++                              dst2_offset = radeon_get_ib_value(p, idx+2);
++                              dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
++                              src_offset = radeon_get_ib_value(p, idx+3);
++                              src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+                                                       src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+                                                       dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+                                                       dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
+                               ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                               ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                               ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
+                               ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                               p->idx += 7;
+                               break;
+                       /* Copy L2T Frame to Field */
+                       case 0x48:
 -                              if (ib[idx + 2] & (1 << 31)) {
++                              if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+                                       DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+                                       return -EINVAL;
+                               }
+                               r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+                               if (r) {
+                                       DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+                                       return -EINVAL;
+                               }
 -                              dst_offset = ib[idx+1];
++                              dst_offset = radeon_get_ib_value(p, idx+1);
+                               dst_offset <<= 8;
 -                              dst2_offset = ib[idx+2];
++                              dst2_offset = radeon_get_ib_value(p, idx+2);
+                               dst2_offset <<= 8;
 -                              src_offset = ib[idx+8];
 -                              src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
++                              src_offset = radeon_get_ib_value(p, idx+8);
++                              src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+                                                       src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+                                                       dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+                                                       dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+                               ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                               ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                               p->idx += 10;
+                               break;
+                       /* Copy L2T/T2L, partial */
+                       case 0x49:
+                               /* L2T, T2L partial */
+                               if (p->family < CHIP_CAYMAN) {
+                                       DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+                                       return -EINVAL;
+                               }
+                               /* detile bit */
 -                              if (ib[idx + 2 ] & (1 << 31)) {
++                              if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+                                       /* tiled src, linear dst */
+                                       ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+                                       ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                                       ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                               } else {
+                                       /* linear src, tiled dst */
+                                       ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                       ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                               }
+                               p->idx += 12;
+                               break;
+                       /* Copy L2T broadcast */
+                       case 0x4b:
+                               /* L2T, broadcast */
 -                              if (ib[idx + 2] & (1 << 31)) {
++                              if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+                                       DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+                                       return -EINVAL;
+                               }
+                               r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+                               if (r) {
+                                       DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+                                       return -EINVAL;
+                               }
 -                              dst_offset = ib[idx+1];
++                              dst_offset = radeon_get_ib_value(p, idx+1);
+                               dst_offset <<= 8;
 -                              dst2_offset = ib[idx+2];
++                              dst2_offset = radeon_get_ib_value(p, idx+2);
+                               dst2_offset <<= 8;
 -                              src_offset = ib[idx+8];
 -                              src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
++                              src_offset = radeon_get_ib_value(p, idx+8);
++                              src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+                                                       src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+                                                       dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+                                                       dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+                               ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                               ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                               p->idx += 10;
+                               break;
+                       /* Copy L2T/T2L (tile units) */
+                       case 0x4c:
+                               /* L2T, T2L */
+                               /* detile bit */
 -                              if (ib[idx + 2] & (1 << 31)) {
++                              if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+                                       /* tiled src, linear dst */
 -                                      src_offset = ib[idx+1];
++                                      src_offset = radeon_get_ib_value(p, idx+1);
+                                       src_offset <<= 8;
+                                       ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
 -                                      dst_offset = ib[idx+7];
 -                                      dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
++                                      dst_offset = radeon_get_ib_value(p, idx+7);
++                                      dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+                                       ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                                       ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
                                } else {
-                                       /* L2L, dw */
-                                       src_offset = radeon_get_ib_value(p, idx+2);
-                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+                                       /* linear src, tiled dst */
 -                                      src_offset = ib[idx+7];
 -                                      src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
++                                      src_offset = radeon_get_ib_value(p, idx+7);
++                                      src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+                                       ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                       ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 -                                      dst_offset = ib[idx+1];
 +                                      dst_offset = radeon_get_ib_value(p, idx+1);
-                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
-                                       if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
-                                               dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
-                                                        src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
-                                               return -EINVAL;
-                                       }
-                                       if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
-                                               dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
-                                                        dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
-                                               return -EINVAL;
-                                       }
-                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-                                       ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-                                       ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-                                       ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-                                       p->idx += 5;
+                                       dst_offset <<= 8;
+                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
                                }
 -                              if (ib[idx + 2] & (1 << 31)) {
+                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+                                                       src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+                                                       dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               p->idx += 9;
+                               break;
+                       /* Copy T2T, partial (tile units) */
+                       case 0x4d:
+                               /* T2T partial */
+                               if (p->family < CHIP_CAYMAN) {
+                                       DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+                                       return -EINVAL;
+                               }
+                               ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+                               ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                               p->idx += 13;
+                               break;
+                       /* Copy L2T broadcast (tile units) */
+                       case 0x4f:
+                               /* L2T, broadcast */
 -                              dst_offset = ib[idx+1];
++                              if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+                                       DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+                                       return -EINVAL;
+                               }
+                               r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+                               if (r) {
+                                       DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+                                       return -EINVAL;
+                               }
 -                              dst2_offset = ib[idx+2];
++                              dst_offset = radeon_get_ib_value(p, idx+1);
+                               dst_offset <<= 8;
 -                              src_offset = ib[idx+8];
 -                              src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
++                              dst2_offset = radeon_get_ib_value(p, idx+2);
+                               dst2_offset <<= 8;
 -                              DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib[idx+0]);
++                              src_offset = radeon_get_ib_value(p, idx+8);
++                              src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+                                                       src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+                                                       dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+                                       dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+                                                       dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+                                       return -EINVAL;
+                               }
+                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+                               ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                               ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                               p->idx += 10;
+                               break;
+                       default:
++                              DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
+                               return -EINVAL;
                        }
                        break;
                case DMA_PACKET_CONSTANT_FILL:
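
(Side note, not part of the diff above: every DMA_PACKET_COPY sub-command in the resolved hunk follows the same three-step pattern -- read the user-supplied dwords through radeon_get_ib_value() instead of indexing ib[] directly, range-check the decoded offsets against the relocated buffer objects, and only then patch the packet with the real GPU addresses. The fragment below is a simplified, hypothetical sketch of that pattern for the dword-aligned L2L copy; the helper name check_l2l_dw_sketch is made up, it assumes the usual radeon CS parser headers, and the real code inlines this logic inside the surrounding DMA packet switch rather than splitting it out.)

    /* Sketch only: mirrors the per-sub-command validation pattern. */
    static int check_l2l_dw_sketch(struct radeon_cs_parser *p, u32 *ib, int idx,
                                   u64 count,
                                   struct radeon_cs_reloc *src_reloc,
                                   struct radeon_cs_reloc *dst_reloc)
    {
            u64 src_offset, dst_offset;

            /* Read user dwords via the accessor, never via raw ib[] loads. */
            src_offset  = radeon_get_ib_value(p, idx+2);
            src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
            dst_offset  = radeon_get_ib_value(p, idx+1);
            dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;

            /* Reject copies that would run past either buffer object. */
            if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj) ||
                (dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj))
                    return -EINVAL;

            /* Patch the packet dwords with the relocated GPU addresses. */
            ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
            ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
            ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
            ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
            return 0;
    }
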
Simple merge
Simple merge
Simple merge
index d4f3fb9f0c298a9e925109db333cf79f70b68af2,0daee8e2578b4d46fe47444cfc9db45d114034b0..bb747f6cd1a44a30e49d0bdac9f607d0841a3f45
  #include <linux/of.h>
  #include <linux/platform_device.h>
  #include <linux/regulator/consumer.h>
 -
 -#include <mach/clk.h>
 +#include <linux/clk/tegra.h>
  
+ #include <drm/drm_edid.h>
  #include "hdmi.h"
  #include "drm.h"
  #include "dc.h"
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc kernel/printk.c
Simple merge