Merge tag 'drm-intel-next-fixes-2014-12-11' of git://anongit.freedesktop.org/drm...
author Dave Airlie <airlied@redhat.com>
Fri, 12 Dec 2014 01:39:49 +0000 (11:39 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 12 Dec 2014 01:39:49 +0000 (11:39 +1000)
Here's a batch of i915 fixes for 3.19.

* tag 'drm-intel-next-fixes-2014-12-11' of git://anongit.freedesktop.org/drm-intel:
  drm/i915: save/restore GMBUS freq across suspend/resume on gen4
  drm/i915: Remove '& 0xffff' from the mask given to WA_REG()
  drm/i915: Invert the mask and val arguments in wa_add() and WA_REG()
  drm/i915/bdw: Fix the write setting up the WIZ hashing mode
  drm/i915: Don't complain about stolen conflicts on gen3
  drm/i915: resume MST after reading back hw state
  drm/i915: Handle inaccurate time conversion issues
  drm/i915: compute wait_ioctl timeout correctly
  drm/i915: don't always do full mode sets when infoframes are enabled

128 files changed:
Documentation/DocBook/drm.tmpl
Documentation/devicetree/bindings/iommu/rockchip,iommu.txt [new file with mode: 0644]
Documentation/devicetree/bindings/video/rockchip-drm.txt [new file with mode: 0644]
Documentation/devicetree/bindings/video/rockchip-vop.txt [new file with mode: 0644]
MAINTAINERS
Makefile
arch/s390/kernel/nmi.c
arch/x86/boot/compressed/Makefile
arch/x86/kernel/cpu/microcode/core.c
block/bio-integrity.c
drivers/acpi/video.c
drivers/ata/ahci.c
drivers/ata/sata_fsl.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/nouveau/nouveau_fence.h
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/cikd.h
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r200.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_cursor.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_trace.h
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/radeon_vce.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/rockchip/Kconfig [new file with mode: 0644]
drivers/gpu/drm/rockchip/Makefile [new file with mode: 0644]
drivers/gpu/drm/rockchip/rockchip_drm_drv.c [new file with mode: 0644]
drivers/gpu/drm/rockchip/rockchip_drm_drv.h [new file with mode: 0644]
drivers/gpu/drm/rockchip/rockchip_drm_fb.c [new file with mode: 0644]
drivers/gpu/drm/rockchip/rockchip_drm_fb.h [new file with mode: 0644]
drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c [new file with mode: 0644]
drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h [new file with mode: 0644]
drivers/gpu/drm/rockchip/rockchip_drm_gem.c [new file with mode: 0644]
drivers/gpu/drm/rockchip/rockchip_drm_gem.h [new file with mode: 0644]
drivers/gpu/drm/rockchip/rockchip_drm_vop.c [new file with mode: 0644]
drivers/gpu/drm/rockchip/rockchip_drm_vop.h [new file with mode: 0644]
drivers/gpu/drm/ttm/ttm_execbuf_util.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
drivers/i2c/busses/i2c-cadence.c
drivers/i2c/busses/i2c-davinci.c
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-omap.c
drivers/input/evdev.c
drivers/iommu/Kconfig
drivers/iommu/Makefile
drivers/iommu/rockchip-iommu.c [new file with mode: 0644]
drivers/media/i2c/smiapp/smiapp-core.c
drivers/media/pci/cx23885/cx23885-core.c
drivers/media/pci/solo6x10/solo6x10-core.c
drivers/media/rc/ir-rc6-decoder.c
drivers/media/usb/s2255/s2255drv.c
drivers/net/bonding/bond_netlink.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/xen-netfront.c
drivers/of/fdt.c
drivers/pci/host/pci-tegra.c
drivers/watchdog/s3c2410_wdt.c
fs/fat/namei_vfat.c
fs/jbd2/journal.c
include/drm/drm_crtc.h
include/drm/drm_displayid.h [new file with mode: 0644]
include/drm/drm_dp_mst_helper.h
include/drm/drm_edid.h
include/drm/drm_fb_helper.h
include/drm/ttm/ttm_execbuf_util.h
include/uapi/linux/Kbuild
ipc/sem.c
kernel/sched/core.c
lib/genalloc.c
lib/show_mem.c
mm/frontswap.c
mm/memory.c
mm/mmap.c
mm/rmap.c
mm/slab.c
mm/vmpressure.c
net/core/rtnetlink.c
security/keys/internal.h
security/keys/keyctl.c
security/keys/keyring.c
security/keys/request_key.c
security/keys/request_key_auth.c
sound/pci/hda/patch_realtek.c

index 56e2a9b65c6857b1788592c985d7aa56180b15c4..4b592ffbafeec8053e09c0c068045505e05757ec 100644 (file)
@@ -1947,10 +1947,16 @@ void intel_crt_init(struct drm_device *dev)
             and then retrieves a list of modes by calling the connector
             <methodname>get_modes</methodname> helper operation.
           </para>
+         <para>
+            If the helper operation returns no mode, and if the connector status
+            is connector_status_connected, standard VESA DMT modes up to
+            1024x768 are automatically added to the modes list by a call to
+            <function>drm_add_modes_noedid</function>.
+          </para>
           <para>
-            The function filters out modes larger than
+            The function then filters out modes larger than
             <parameter>max_width</parameter> and <parameter>max_height</parameter>
-            if specified. It then calls the optional connector
+            if specified. It finally calls the optional connector
             <methodname>mode_valid</methodname> helper operation for each mode in
             the probed list to check whether the mode is valid for the connector.
           </para>
@@ -2090,11 +2096,19 @@ void intel_crt_init(struct drm_device *dev)
           <synopsis>int (*get_modes)(struct drm_connector *connector);</synopsis>
           <para>
             Fill the connector's <structfield>probed_modes</structfield> list
-            by parsing EDID data with <function>drm_add_edid_modes</function> or
-            calling <function>drm_mode_probed_add</function> directly for every
+            by parsing EDID data with <function>drm_add_edid_modes</function>,
+            adding standard VESA DMT modes with <function>drm_add_modes_noedid</function>,
+            or calling <function>drm_mode_probed_add</function> directly for every
             supported mode and return the number of modes it has detected. This
             operation is mandatory.
           </para>
+          <para>
+            Note that the caller function will automatically add standard VESA
+            DMT modes up to 1024x768 if the <methodname>get_modes</methodname>
+            helper operation returns no mode and if the connector status is
+            connector_status_connected. There is no need to call
+            <function>drm_add_modes_noedid</function> manually in that case.
+          </para>
           <para>
             When adding modes manually the driver creates each mode with a call to
             <function>drm_mode_create</function> and must fill the following fields.
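
For illustration, a minimal sketch of a get_modes helper operation that adds
a mode manually (the foo_ prefix and the fixed 1024x768@60 CVT timings are
hypothetical; a real driver would normally parse EDID with
drm_add_edid_modes() instead):

	static int foo_connector_get_modes(struct drm_connector *connector)
	{
		struct drm_display_mode *mode;

		/* Build 1024x768@60 CVT timings; drm_cvt_mode() allocates
		 * the mode with drm_mode_create() internally. */
		mode = drm_cvt_mode(connector->dev, 1024, 768, 60,
				    false, false, false);
		if (!mode)
			return 0;

		mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
		drm_mode_probed_add(connector, mode);

		/* Return the number of modes detected. */
		return 1;
	}
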
@@ -2292,7 +2306,7 @@ void intel_crt_init(struct drm_device *dev)
             <function>drm_helper_probe_single_connector_modes</function>.
           </para>
           <para>
-            When parsing EDID data, <function>drm_add_edid_modes</function> fill the
+            When parsing EDID data, <function>drm_add_edid_modes</function> fills the
             connector <structfield>display_info</structfield>
             <structfield>width_mm</structfield> and
             <structfield>height_mm</structfield> fields. When creating modes
@@ -2412,6 +2426,10 @@ void intel_crt_init(struct drm_device *dev)
 !Edrivers/gpu/drm/drm_plane_helper.c
 !Pdrivers/gpu/drm/drm_plane_helper.c overview
     </sect2>
+    <sect2>
+         <title>Tile group</title>
+!Pdrivers/gpu/drm/drm_crtc.c Tile group
+    </sect2>
   </sect1>
 
   <!-- Internals: kms properties -->
@@ -2546,8 +2564,8 @@ void intel_crt_init(struct drm_device *dev)
        <td valign="top" >Description/Restrictions</td>
        </tr>
        <tr>
-       <td rowspan="23" valign="top" >DRM</td>
-       <td rowspan="3" valign="top" >Generic</td>
+       <td rowspan="25" valign="top" >DRM</td>
+       <td rowspan="4" valign="top" >Generic</td>
        <td valign="top" >“EDID”</td>
        <td valign="top" >BLOB | IMMUTABLE</td>
        <td valign="top" >0</td>
@@ -2569,6 +2587,13 @@ void intel_crt_init(struct drm_device *dev)
        <td valign="top" >Contains topology path to a connector.</td>
        </tr>
        <tr>
+       <td valign="top" >“TILE”</td>
+       <td valign="top" >BLOB | IMMUTABLE</td>
+       <td valign="top" >0</td>
+       <td valign="top" >Connector</td>
+       <td valign="top" >Contains tiling information for a connector.</td>
+       </tr>
+       <tr>
        <td rowspan="1" valign="top" >Plane</td>
        <td valign="top" >“type”</td>
        <td valign="top" >ENUM | IMMUTABLE</td>
diff --git a/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt
new file mode 100644 (file)
index 0000000..9a55ac3
--- /dev/null
@@ -0,0 +1,26 @@
+Rockchip IOMMU
+==============
+
+A Rockchip DRM IOMMU translates I/O virtual addresses to physical addresses
+for its master device.  Each slave device is bound to a single master device,
+and shares its clocks, power domain and IRQ.
+
+Required properties:
+- compatible      : Should be "rockchip,iommu"
+- reg             : Address space for the configuration registers
+- interrupts      : Interrupt specifier for the IOMMU instance
+- interrupt-names : Interrupt name for the IOMMU instance
+- #iommu-cells    : Should be <0>.  This indicates the iommu is a
+                    "single-master" device, and needs no additional information
+                    to associate with its master device.  See:
+                    Documentation/devicetree/bindings/iommu/iommu.txt
+
+Example:
+
+       vopl_mmu: iommu@ff940300 {
+               compatible = "rockchip,iommu";
+               reg = <0xff940300 0x100>;
+               interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-names = "vopl_mmu";
+               #iommu-cells = <0>;
+       };
diff --git a/Documentation/devicetree/bindings/video/rockchip-drm.txt b/Documentation/devicetree/bindings/video/rockchip-drm.txt
new file mode 100644 (file)
index 0000000..7fff582
--- /dev/null
@@ -0,0 +1,19 @@
+Rockchip DRM master device
+==========================
+
+The Rockchip DRM master device is a virtual device needed to list all
+vop devices or other display interface nodes that comprise the
+graphics subsystem.
+
+Required properties:
+- compatible: Should be "rockchip,display-subsystem"
+- ports: Should contain a list of phandles pointing to the display interface
+  ports of VOP devices. VOP definitions are as described in
+  Documentation/devicetree/bindings/video/rockchip-vop.txt
+
+Example:
+
+display-subsystem {
+       compatible = "rockchip,display-subsystem";
+       ports = <&vopl_out>, <&vopb_out>;
+};
diff --git a/Documentation/devicetree/bindings/video/rockchip-vop.txt b/Documentation/devicetree/bindings/video/rockchip-vop.txt
new file mode 100644 (file)
index 0000000..d15351f
--- /dev/null
@@ -0,0 +1,58 @@
+Device-tree bindings for the Rockchip SoC display controller (VOP)
+
+VOP (Visual Output Processor) is the Display Controller for the Rockchip
+series of SoCs which transfers the image data from a video memory
+buffer to an external LCD interface.
+
+Required properties:
+- compatible: value should be one of the following
+               "rockchip,rk3288-vop";
+
+- interrupts: should contain a list of all VOP IP block interrupts in the
+                order: VSYNC, LCD_SYSTEM. The interrupt specifier
+                format depends on the interrupt controller used.
+
+- clocks: must include clock specifiers corresponding to entries in the
+               clock-names property.
+
+- clock-names: Must contain
+               aclk_vop: for DDR buffer transfers.
+               hclk_vop: for the AHB bus used to read/write the PHY registers.
+               dclk_vop: pixel clock.
+
+- resets: Must contain an entry for each entry in reset-names.
+  See ../reset/reset.txt for details.
+- reset-names: Must include the following entries:
+  - axi
+  - ahb
+  - dclk
+
+- iommus: required; a phandle to the IOMMU node
+
+- port: A port node with endpoint definitions as defined in
+  Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Example:
+SoC specific DT entry:
+       vopb: vopb@ff930000 {
+               compatible = "rockchip,rk3288-vop";
+               reg = <0xff930000 0x19c>;
+               interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&cru ACLK_VOP0>, <&cru DCLK_VOP0>, <&cru HCLK_VOP0>;
+               clock-names = "aclk_vop", "dclk_vop", "hclk_vop";
+               resets = <&cru SRST_LCDC1_AXI>, <&cru SRST_LCDC1_AHB>, <&cru SRST_LCDC1_DCLK>;
+               reset-names = "axi", "ahb", "dclk";
+               iommus = <&vopb_mmu>;
+               vopb_out: port {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       vopb_out_edp: endpoint@0 {
+                               reg = <0>;
+                               remote-endpoint=<&edp_in_vopb>;
+                       };
+                       vopb_out_hdmi: endpoint@1 {
+                               reg = <1>;
+                               remote-endpoint=<&hdmi_in_vopb>;
+                       };
+               };
+       };
index 55d3e9b93338b38a05668dff834c33c8fa8428e8..296c02d39c29a3fc48d1296c31f811e1c5311786 100644 (file)
@@ -1838,7 +1838,7 @@ F:        include/net/ax25.h
 F:     net/ax25/
 
 AZ6007 DVB DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -2208,7 +2208,7 @@ F:        Documentation/filesystems/btrfs.txt
 F:     fs/btrfs/
 
 BTTV VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -2729,7 +2729,7 @@ F:        drivers/media/common/cx2341x*
 F:     include/media/cx2341x*
 
 CX88 VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -3419,7 +3419,7 @@ F:        fs/ecryptfs/
 EDAC-CORE
 M:     Doug Thompson <dougthompson@xmission.com>
 M:     Borislav Petkov <bp@alien8.de>
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-edac@vger.kernel.org
 W:     bluesmoke.sourceforge.net
 S:     Supported
@@ -3468,7 +3468,7 @@ S:        Maintained
 F:     drivers/edac/e7xxx_edac.c
 
 EDAC-GHES
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-edac@vger.kernel.org
 W:     bluesmoke.sourceforge.net
 S:     Maintained
@@ -3496,21 +3496,21 @@ S:      Maintained
 F:     drivers/edac/i5000_edac.c
 
 EDAC-I5400
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-edac@vger.kernel.org
 W:     bluesmoke.sourceforge.net
 S:     Maintained
 F:     drivers/edac/i5400_edac.c
 
 EDAC-I7300
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-edac@vger.kernel.org
 W:     bluesmoke.sourceforge.net
 S:     Maintained
 F:     drivers/edac/i7300_edac.c
 
 EDAC-I7CORE
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-edac@vger.kernel.org
 W:     bluesmoke.sourceforge.net
 S:     Maintained
@@ -3553,7 +3553,7 @@ S:        Maintained
 F:     drivers/edac/r82600_edac.c
 
 EDAC-SBRIDGE
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-edac@vger.kernel.org
 W:     bluesmoke.sourceforge.net
 S:     Maintained
@@ -3613,7 +3613,7 @@ S:        Maintained
 F:     drivers/net/ethernet/ibm/ehea/
 
 EM28XX VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -5979,7 +5979,7 @@ S:        Maintained
 F:     drivers/media/radio/radio-maxiradio*
 
 MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 P:     LinuxTV.org Project
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
@@ -8030,7 +8030,7 @@ S:        Odd Fixes
 F:     drivers/media/i2c/saa6588*
 
 SAA7134 VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -8488,7 +8488,7 @@ S:        Maintained
 F:     drivers/media/radio/si4713/radio-usb-si4713.c
 
 SIANO DVB DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -8699,7 +8699,9 @@ S:        Maintained
 F:     drivers/leds/leds-net48xx.c
 
 SOFTLOGIC 6x10 MPEG CODEC
-M:     Ismael Luceno <ismael.luceno@corp.bluecherry.net>
+M:     Bluecherry Maintainers <maintainers@bluecherrydvr.com>
+M:     Andrey Utkin <andrey.utkin@corp.bluecherry.net>
+M:     Andrey Utkin <andrey.krieger.utkin@gmail.com>
 L:     linux-media@vger.kernel.org
 S:     Supported
 F:     drivers/media/pci/solo6x10/
@@ -9173,7 +9175,7 @@ S:        Maintained
 F:     drivers/media/i2c/tda9840*
 
 TEA5761 TUNER DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -9181,7 +9183,7 @@ S:        Odd fixes
 F:     drivers/media/tuners/tea5761.*
 
 TEA5767 TUNER DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -9493,7 +9495,7 @@ F:        include/linux/shmem_fs.h
 F:     mm/shmem.c
 
 TM6000 VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -10314,7 +10316,7 @@ S:      Maintained
 F:     arch/x86/kernel/cpu/mcheck/*
 
 XC2028/3028 TUNER DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
index ce70361f766e783d43cc9b6dab00d1cd34304a6f..fd80c6e9bc2367f79f47edebb15649d23d341791 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Diseased Newt
 
 # *DOCUMENTATION*
index dd1c24ceda50245978c97e13c5b992db3139f329..3f51cf4e8f020e1b972eaa15ef4640ebe028e4e3 100644 (file)
@@ -54,12 +54,8 @@ void s390_handle_mcck(void)
         */
        local_irq_save(flags);
        local_mcck_disable();
-       /*
-        * Ummm... Does this make sense at all? Copying the percpu struct
-        * and then zapping it one statement later?
-        */
-       memcpy(&mcck, this_cpu_ptr(&cpu_mcck), sizeof(mcck));
-       memset(&mcck, 0, sizeof(struct mcck_struct));
+       mcck = *this_cpu_ptr(&cpu_mcck);
+       memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
        clear_cpu_flag(CIF_MCCK_PENDING);
        local_mcck_enable();
        local_irq_restore(flags);
index be1e07d4b596e4bc5a4e17e819f92ae9bebcb57a..45abc363dd3e44dac4b5ced56d28ad9b510241f3 100644 (file)
@@ -76,7 +76,7 @@ suffix-$(CONFIG_KERNEL_XZ)    := xz
 suffix-$(CONFIG_KERNEL_LZO)    := lzo
 suffix-$(CONFIG_KERNEL_LZ4)    := lz4
 
-RUN_SIZE = $(shell objdump -h vmlinux | \
+RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
             perl $(srctree)/arch/x86/tools/calc_run_size.pl)
 quiet_cmd_mkpiggy = MKPIGGY $@
       cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
index 2ce9051174e608381c6a91171e7e3d0d5a6a44ca..08fe6e8a726e5160e51b9c992da765e2fab29348 100644 (file)
@@ -465,6 +465,7 @@ static void mc_bp_resume(void)
 
        if (uci->valid && uci->mc)
                microcode_ops->apply_microcode(cpu);
+#ifdef CONFIG_X86_64
        else if (!uci->mc)
                /*
                 * We might resume and not have applied late microcode but still
@@ -473,6 +474,7 @@ static void mc_bp_resume(void)
                 * applying patches early on the APs.
                 */
                load_ucode_ap();
+#endif
 }
 
 static struct syscore_ops mc_syscore_ops = {
index 0984232e429fb61d427c90340fa0c77e55c1fee2..5cbd5d9ea61dd52969d9c55313dbf703f3c4d24f 100644 (file)
@@ -216,9 +216,10 @@ static int bio_integrity_process(struct bio *bio,
 {
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
        struct blk_integrity_iter iter;
-       struct bio_vec *bv;
+       struct bvec_iter bviter;
+       struct bio_vec bv;
        struct bio_integrity_payload *bip = bio_integrity(bio);
-       unsigned int i, ret = 0;
+       unsigned int ret = 0;
        void *prot_buf = page_address(bip->bip_vec->bv_page) +
                bip->bip_vec->bv_offset;
 
@@ -227,11 +228,11 @@ static int bio_integrity_process(struct bio *bio,
        iter.seed = bip_get_seed(bip);
        iter.prot_buf = prot_buf;
 
-       bio_for_each_segment_all(bv, bio, i) {
-               void *kaddr = kmap_atomic(bv->bv_page);
+       bio_for_each_segment(bv, bio, bviter) {
+               void *kaddr = kmap_atomic(bv.bv_page);
 
-               iter.data_buf = kaddr + bv->bv_offset;
-               iter.data_size = bv->bv_len;
+               iter.data_buf = kaddr + bv.bv_offset;
+               iter.data_size = bv.bv_len;
 
                ret = proc_fn(&iter);
                if (ret) {
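
The switch above from bio_for_each_segment_all(), which walks the bio's
entire bvec array, to bio_for_each_segment(), which honours bi_iter, is what
lets cloned and split bios integrity-process only their own range. A minimal
sketch of the new iteration pattern (surrounding context assumed):

	struct bio_vec bv;	/* yielded by value, hence '.' access */
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter)
		pr_debug("segment: %u bytes at page offset %u\n",
			 bv.bv_len, bv.bv_offset);
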
index 807a88a0f394f8a639cbc3f6e2b78073078986cc..9d75ead2a1f9107e92bf7f6408348add2cd71a7f 100644 (file)
@@ -1164,7 +1164,8 @@ static bool acpi_video_device_in_dod(struct acpi_video_device *device)
                return true;
 
        for (i = 0; i < video->attached_count; i++) {
-               if (video->attached_array[i].bind_info == device)
+               if ((video->attached_array[i].value.int_val & 0xfff) ==
+                   (device->device_id & 0xfff))
                        return true;
        }
 
index e45f83789809a29a722448c589bcc6b5c60f0303..49f1e6890587e0b7f8970fe2dbdb7d879da92871 100644 (file)
@@ -321,6 +321,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
        { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
        { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
+       { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
+       { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
        { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
        { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
        { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
@@ -492,6 +495,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
         * enabled.  https://bugzilla.kernel.org/show_bug.cgi?id=60731
         */
        { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
+       { PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_nomsi },
 
        /* Enmotus */
        { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
index 07bc7e4dbd04b836722d42b1051dc3da33f99a9d..65071591b143f5b0914b3ba37c8a0ac8f16f01fb 100644 (file)
@@ -1488,7 +1488,7 @@ static int sata_fsl_probe(struct platform_device *ofdev)
        host_priv->csr_base = csr_base;
 
        irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-       if (irq < 0) {
+       if (!irq) {
                dev_err(&ofdev->dev, "invalid irq from platform\n");
                goto error_exit_with_cleanup;
        }
index 24c2d7caedd5633e3cd73712478c3f18d48c3ace..c3413b6adb17caec6ac0f273bfa80ef8bacca960 100644 (file)
@@ -167,6 +167,8 @@ config DRM_SAVAGE
 
 source "drivers/gpu/drm/exynos/Kconfig"
 
+source "drivers/gpu/drm/rockchip/Kconfig"
+
 source "drivers/gpu/drm/vmwgfx/Kconfig"
 
 source "drivers/gpu/drm/gma500/Kconfig"
index 47d89869c5df01a06e91b50bea4b48eecc1c8f97..66e40398b3d32220624cb7fad671c86058c84339 100644 (file)
@@ -49,6 +49,7 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
 obj-$(CONFIG_DRM_VIA)  +=via/
 obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
 obj-$(CONFIG_DRM_EXYNOS) +=exynos/
+obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
 obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
 obj-$(CONFIG_DRM_AST) += ast/
index 102cd36799b129d4680567bb3de912a73b3070a9..4f7b275f2f7b0fdbb488055057c3291a1aab3a70 100644 (file)
@@ -102,15 +102,26 @@ struct device *kfd_chardev(void)
 static int kfd_open(struct inode *inode, struct file *filep)
 {
        struct kfd_process *process;
+       bool is_32bit_user_mode;
 
        if (iminor(inode) != 0)
                return -ENODEV;
 
+       is_32bit_user_mode = is_compat_task();
+
+       if (is_32bit_user_mode) {
+               dev_warn(kfd_device,
+                       "Process %d (32-bit) failed to open /dev/kfd\n"
+                       "32-bit processes are not supported by amdkfd\n",
+                       current->pid);
+               return -EPERM;
+       }
+
        process = kfd_create_process(current);
        if (IS_ERR(process))
                return PTR_ERR(process);
 
-       process->is_32bit_user_mode = is_compat_task();
+       process->is_32bit_user_mode = is_32bit_user_mode;
 
        dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
                process->pasid, process->is_32bit_user_mode);
index 9abac48de4995d200a3f490ab2a6d7e4674198f2..93507141072495b9c7815b4d5cad2cf124b9abe3 100644 (file)
@@ -221,8 +221,14 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
                                                        queue_size_dwords;
 
        if (packet_size_in_dwords >= queue_size_dwords ||
-                       packet_size_in_dwords >= available_size)
+                       packet_size_in_dwords >= available_size) {
+               /*
+                * make sure calling functions know
+                * acquire_packet_buffer() failed
+                */
+               *buffer_ptr = NULL;
                return -ENOMEM;
+       }
 
        if (wptr + packet_size_in_dwords >= queue_size_dwords) {
                while (wptr > 0) {
index 2458ab7c0c6e3bbcd8376b203ff9ef51c4407708..71699ad97d74487d532cef2a168f8bb4d4fb5366 100644 (file)
@@ -32,8 +32,7 @@ int kfd_pasid_init(void)
 {
        pasid_limit = max_num_of_processes;
 
-       pasid_bitmap = kzalloc(DIV_ROUND_UP(pasid_limit, BITS_PER_BYTE),
-                               GFP_KERNEL);
+       pasid_bitmap = kzalloc(BITS_TO_LONGS(pasid_limit), GFP_KERNEL);
        if (!pasid_bitmap)
                return -ENOMEM;
 
index b4f49ac13334249d2c7ac370249d8db91858a386..b85eb0b830b41d5d894efebb2a3e6fab83ca7552 100644 (file)
@@ -196,7 +196,7 @@ static void kfd_process_destroy_delayed(struct rcu_head *rcu)
        mmdrop(p->mm);
 
        work = (struct kfd_process_release_work *)
-               kmalloc(sizeof(struct kfd_process_release_work), GFP_KERNEL);
+               kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC);
 
        if (work) {
                INIT_WORK((struct work_struct *) work, kfd_process_wq_release);
index de79283eaea720ba0dc49cab73060a8dfee3d3ca..5213da499d39febae587057ad9b38701e787db32 100644 (file)
@@ -725,6 +725,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
        WARN_ON(crtc->state && !crtc->funcs->atomic_destroy_state);
        if (crtc->state && crtc->funcs->atomic_destroy_state)
                crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+
+       memset(crtc, 0, sizeof(*crtc));
 }
 EXPORT_SYMBOL(drm_crtc_cleanup);
 
@@ -908,6 +910,11 @@ void drm_connector_cleanup(struct drm_connector *connector)
        struct drm_device *dev = connector->dev;
        struct drm_display_mode *mode, *t;
 
+       if (connector->tile_group) {
+               drm_mode_put_tile_group(dev, connector->tile_group);
+               connector->tile_group = NULL;
+       }
+
        list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
                drm_mode_remove(connector, mode);
 
@@ -927,6 +934,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
        if (connector->state && connector->funcs->atomic_destroy_state)
                connector->funcs->atomic_destroy_state(connector,
                                                       connector->state);
+
+       memset(connector, 0, sizeof(*connector));
 }
 EXPORT_SYMBOL(drm_connector_cleanup);
 
@@ -1068,6 +1077,8 @@ void drm_bridge_cleanup(struct drm_bridge *bridge)
        list_del(&bridge->head);
        dev->mode_config.num_bridge--;
        drm_modeset_unlock_all(dev);
+
+       memset(bridge, 0, sizeof(*bridge));
 }
 EXPORT_SYMBOL(drm_bridge_cleanup);
 
@@ -1134,10 +1145,11 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
        drm_modeset_lock_all(dev);
        drm_mode_object_put(dev, &encoder->base);
        kfree(encoder->name);
-       encoder->name = NULL;
        list_del(&encoder->head);
        dev->mode_config.num_encoder--;
        drm_modeset_unlock_all(dev);
+
+       memset(encoder, 0, sizeof(*encoder));
 }
 EXPORT_SYMBOL(drm_encoder_cleanup);
 
@@ -1257,6 +1269,8 @@ void drm_plane_cleanup(struct drm_plane *plane)
        WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
        if (plane->state && plane->funcs->atomic_destroy_state)
                plane->funcs->atomic_destroy_state(plane, plane->state);
+
+       memset(plane, 0, sizeof(*plane));
 }
 EXPORT_SYMBOL(drm_plane_cleanup);
 
@@ -1339,6 +1353,11 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
                                       "PATH", 0);
        dev->mode_config.path_property = dev_path;
 
+       dev->mode_config.tile_property = drm_property_create(dev,
+                                                            DRM_MODE_PROP_BLOB |
+                                                            DRM_MODE_PROP_IMMUTABLE,
+                                                            "TILE", 0);
+
        return 0;
 }
 
@@ -3444,7 +3463,7 @@ void drm_fb_release(struct drm_file *priv)
 
        /*
         * When the file gets released that means no one else can access the fb
-        * list any more, so no need to grab fpriv->fbs_lock. And we need to to
+        * list any more, so no need to grab fpriv->fbs_lock. And we need to
         * avoid upsetting lockdep since the universal cursor code adds a
         * framebuffer while holding mutex locks.
         *
@@ -4082,6 +4101,52 @@ int drm_mode_connector_set_path_property(struct drm_connector *connector,
 }
 EXPORT_SYMBOL(drm_mode_connector_set_path_property);
 
+/**
+ * drm_mode_connector_set_tile_property - set tile property on connector
+ * @connector: connector to set property on.
+ *
+ * This looks up the tile information for a connector, and creates a
+ * property for userspace to parse if it exists. The property is a
+ * string of 8 integers using ':' as a separator.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_connector_set_tile_property(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       int ret, size;
+       char tile[256];
+
+       if (connector->tile_blob_ptr)
+               drm_property_destroy_blob(dev, connector->tile_blob_ptr);
+
+       if (!connector->has_tile) {
+               connector->tile_blob_ptr = NULL;
+               ret = drm_object_property_set_value(&connector->base,
+                                                   dev->mode_config.tile_property, 0);
+               return ret;
+       }
+
+       snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
+                connector->tile_group->id, connector->tile_is_single_monitor,
+                connector->num_h_tile, connector->num_v_tile,
+                connector->tile_h_loc, connector->tile_v_loc,
+                connector->tile_h_size, connector->tile_v_size);
+       size = strlen(tile) + 1;
+
+       connector->tile_blob_ptr = drm_property_create_blob(connector->dev,
+                                                           size, tile);
+       if (!connector->tile_blob_ptr)
+               return -EINVAL;
+
+       ret = drm_object_property_set_value(&connector->base,
+                                           dev->mode_config.tile_property,
+                                           connector->tile_blob_ptr->base.id);
+       return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
+
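
A hypothetical userspace-side sketch of decoding the TILE blob created above
(blob_data is an assumed pointer to the fetched, NUL-terminated blob
contents):

	int group_id, single_monitor, num_h, num_v;
	int h_loc, v_loc, h_size, v_size;

	/* Eight ':'-separated integers, in the order written by
	 * drm_mode_connector_set_tile_property(). */
	if (sscanf(blob_data, "%d:%d:%d:%d:%d:%d:%d:%d",
		   &group_id, &single_monitor, &num_h, &num_v,
		   &h_loc, &v_loc, &h_size, &v_size) != 8)
		return -EINVAL;
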
 /**
  * drm_mode_connector_update_edid_property - update the edid property of a connector
  * @connector: drm connector
@@ -5152,6 +5217,7 @@ void drm_mode_config_init(struct drm_device *dev)
        INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
        INIT_LIST_HEAD(&dev->mode_config.plane_list);
        idr_init(&dev->mode_config.crtc_idr);
+       idr_init(&dev->mode_config.tile_idr);
 
        drm_modeset_lock_all(dev);
        drm_mode_create_standard_connector_properties(dev);
@@ -5239,6 +5305,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
                crtc->funcs->destroy(crtc);
        }
 
+       idr_destroy(&dev->mode_config.tile_idr);
        idr_destroy(&dev->mode_config.crtc_idr);
        drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
 }
@@ -5261,3 +5328,100 @@ struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
                                           supported_rotations);
 }
 EXPORT_SYMBOL(drm_mode_create_rotation_property);
+
+/**
+ * DOC: Tile group
+ *
+ * Tile groups are used to represent tiled monitors with a unique
+ * integer identifier. Tiled monitors using DisplayID v1.3 have
+ * a unique 8-byte handle, we store this in a tile group, so we
+ * have a common identifier for all tiles in a monitor group.
+ */
+static void drm_tile_group_free(struct kref *kref)
+{
+       struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount);
+       struct drm_device *dev = tg->dev;
+       mutex_lock(&dev->mode_config.idr_mutex);
+       idr_remove(&dev->mode_config.tile_idr, tg->id);
+       mutex_unlock(&dev->mode_config.idr_mutex);
+       kfree(tg);
+}
+
+/**
+ * drm_mode_put_tile_group - drop a reference to a tile group.
+ * @dev: DRM device
+ * @tg: tile group to drop reference to.
+ *
+ * Drop a reference to the tile group; it is freed when the refcount hits zero.
+ */
+void drm_mode_put_tile_group(struct drm_device *dev,
+                            struct drm_tile_group *tg)
+{
+       kref_put(&tg->refcount, drm_tile_group_free);
+}
+
+/**
+ * drm_mode_get_tile_group - get a reference to an existing tile group
+ * @dev: DRM device
+ * @topology: 8-byte identifier, unique per monitor.
+ *
+ * Use the unique bytes to get a reference to an existing tile group.
+ *
+ * RETURNS:
+ * tile group or NULL if not found.
+ */
+struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
+                                              char topology[8])
+{
+       struct drm_tile_group *tg;
+       int id;
+       mutex_lock(&dev->mode_config.idr_mutex);
+       idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
+               if (!memcmp(tg->group_data, topology, 8)) {
+                       if (!kref_get_unless_zero(&tg->refcount))
+                               tg = NULL;
+                       mutex_unlock(&dev->mode_config.idr_mutex);
+                       return tg;
+               }
+       }
+       mutex_unlock(&dev->mode_config.idr_mutex);
+       return NULL;
+}
+
+/**
+ * drm_mode_create_tile_group - create a tile group from a displayid description
+ * @dev: DRM device
+ * @topology: 8-byte identifier, unique per monitor.
+ *
+ * Create a tile group for the unique monitor, and get a unique
+ * identifier for the tile group.
+ *
+ * RETURNS:
+ * new tile group or error.
+ */
+struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+                                                 char topology[8])
+{
+       struct drm_tile_group *tg;
+       int ret;
+
+       tg = kzalloc(sizeof(*tg), GFP_KERNEL);
+       if (!tg)
+               return ERR_PTR(-ENOMEM);
+
+       kref_init(&tg->refcount);
+       memcpy(tg->group_data, topology, 8);
+       tg->dev = dev;
+
+       mutex_lock(&dev->mode_config.idr_mutex);
+       ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
+       if (ret >= 0) {
+               tg->id = ret;
+       } else {
+               kfree(tg);
+               tg = ERR_PTR(ret);
+       }
+
+       mutex_unlock(&dev->mode_config.idr_mutex);
+       return tg;
+}
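
A sketch of the intended lifetime handling (caller context assumed): look up
an existing group first, create one only on a miss, and hold exactly one
reference per connector:

	struct drm_tile_group *tg;

	tg = drm_mode_get_tile_group(dev, topology); /* takes a ref on a hit */
	if (!tg)
		tg = drm_mode_create_tile_group(dev, topology); /* ref is 1 */
	if (IS_ERR(tg))
		return PTR_ERR(tg);

	connector->tile_group = tg;
	/* ... and on teardown or topology change: */
	drm_mode_put_tile_group(dev, connector->tile_group);
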
index 959e2074b0d4299c10d45311fbce06c13ab58d0c..79968e39c8d09909395c5ad40165d0062c8227ac 100644 (file)
@@ -186,10 +186,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
 
        /*
         * The specification doesn't give any recommendation on how often to
-        * retry native transactions, so retry 7 times like for I2C-over-AUX
-        * transactions.
+        * retry native transactions. We used to retry 7 times like for
+        * AUX i2c transactions, but on real-world devices this wasn't
+        * sufficient, so bump to 32, which makes Dell 4k monitors happier.
         */
-       for (retry = 0; retry < 7; retry++) {
+       for (retry = 0; retry < 32; retry++) {
 
                mutex_lock(&aux->hw_mutex);
                err = aux->transfer(aux, &msg);
index 5682d7e9f1ec28a825263097d7009fdbf5b5cdd5..9a5b68717ec8c31fbd27bfa313833b568444fce9 100644 (file)
@@ -839,6 +839,8 @@ static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
 
 static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
 {
+       struct drm_dp_mst_branch *mstb;
+
        switch (old_pdt) {
        case DP_PEER_DEVICE_DP_LEGACY_CONV:
        case DP_PEER_DEVICE_SST_SINK:
@@ -846,8 +848,9 @@ static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
                drm_dp_mst_unregister_i2c_bus(&port->aux);
                break;
        case DP_PEER_DEVICE_MST_BRANCHING:
-               drm_dp_put_mst_branch_device(port->mstb);
+               mstb = port->mstb;
                port->mstb = NULL;
+               drm_dp_put_mst_branch_device(mstb);
                break;
        }
 }
@@ -858,6 +861,8 @@ static void drm_dp_destroy_port(struct kref *kref)
        struct drm_dp_mst_topology_mgr *mgr = port->mgr;
        if (!port->input) {
                port->vcpi.num_slots = 0;
+
+               kfree(port->cached_edid);
                if (port->connector)
                        (*port->mgr->cbs->destroy_connector)(mgr, port->connector);
                drm_dp_port_teardown_pdt(port, port->pdt);
@@ -1097,6 +1102,10 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
                char proppath[255];
                build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
                port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+
+               if (port->port_num >= 8) {
+                       port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+               }
        }
 
        /* put reference to this port */
@@ -2167,7 +2176,8 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
  * This returns the current connection state for a port. It validates the
  * port pointer still exists so the caller doesn't require a reference
  */
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
+                                                struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
 {
        enum drm_connector_status status = connector_status_disconnected;
 
@@ -2186,6 +2196,10 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr
 
        case DP_PEER_DEVICE_SST_SINK:
                status = connector_status_connected;
+               /* for logical ports - cache the EDID */
+               if (port->port_num >= 8 && !port->cached_edid) {
+                       port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
+               }
                break;
        case DP_PEER_DEVICE_DP_LEGACY_CONV:
                if (port->ldps)
@@ -2217,7 +2231,12 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
        if (!port)
                return NULL;
 
-       edid = drm_get_edid(connector, &port->aux.ddc);
+       if (port->cached_edid)
+               edid = drm_edid_duplicate(port->cached_edid);
+       else
+               edid = drm_get_edid(connector, &port->aux.ddc);
+
+       drm_mode_connector_set_tile_property(connector);
        drm_dp_put_port(port);
        return edid;
 }
index a7b5a71856a72a3956cc4f3c6fbe90442e1a535f..53bc7a628909453868aa4fd3e10f7c670d738698 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <drm/drmP.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_displayid.h>
 
 #define version_greater(edid, maj, min) \
        (((edid)->version > (maj)) || \
@@ -1014,6 +1015,27 @@ module_param_named(edid_fixup, edid_fixup, int, 0400);
 MODULE_PARM_DESC(edid_fixup,
                 "Minimum number of valid EDID header bytes (0-8, default 6)");
 
+static void drm_get_displayid(struct drm_connector *connector,
+                             struct edid *edid);
+
+static int drm_edid_block_checksum(const u8 *raw_edid)
+{
+       int i;
+       u8 csum = 0;
+       for (i = 0; i < EDID_LENGTH; i++)
+               csum += raw_edid[i];
+
+       return csum;
+}
+
+static bool drm_edid_is_zero(const u8 *in_edid, int length)
+{
+       if (memchr_inv(in_edid, 0, length))
+               return false;
+
+       return true;
+}
+
 /**
  * drm_edid_block_valid - Sanity check the EDID block (base or extension)
  * @raw_edid: pointer to raw EDID block
@@ -1027,8 +1049,7 @@ MODULE_PARM_DESC(edid_fixup,
  */
 bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
 {
-       int i;
-       u8 csum = 0;
+       u8 csum;
        struct edid *edid = (struct edid *)raw_edid;
 
        if (WARN_ON(!raw_edid))
@@ -1048,8 +1069,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
                }
        }
 
-       for (i = 0; i < EDID_LENGTH; i++)
-               csum += raw_edid[i];
+       csum = drm_edid_block_checksum(raw_edid);
        if (csum) {
                if (print_bad_edid) {
                        DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
@@ -1080,9 +1100,13 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
 
 bad:
        if (print_bad_edid) {
-               printk(KERN_ERR "Raw EDID:\n");
-               print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
+               if (drm_edid_is_zero(raw_edid, EDID_LENGTH)) {
+                       printk(KERN_ERR "EDID block is all zeroes\n");
+               } else {
+                       printk(KERN_ERR "Raw EDID:\n");
+                       print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
                               raw_edid, EDID_LENGTH, false);
+               }
        }
        return false;
 }
@@ -1115,7 +1139,7 @@ EXPORT_SYMBOL(drm_edid_is_valid);
 #define DDC_SEGMENT_ADDR 0x30
 /**
  * drm_do_probe_ddc_edid() - get EDID information via I2C
- * @adapter: I2C device adaptor
+ * @data: I2C device adapter
  * @buf: EDID data buffer to be filled
  * @block: 128 byte EDID block to start fetching from
  * @len: EDID data buffer length to fetch
@@ -1176,14 +1200,6 @@ drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
        return ret == xfers ? 0 : -1;
 }
 
-static bool drm_edid_is_zero(u8 *in_edid, int length)
-{
-       if (memchr_inv(in_edid, 0, length))
-               return false;
-
-       return true;
-}
-
 /**
  * drm_do_get_edid - get EDID data using a custom EDID block read function
  * @connector: connector we're probing
@@ -1308,10 +1324,15 @@ EXPORT_SYMBOL(drm_probe_ddc);
 struct edid *drm_get_edid(struct drm_connector *connector,
                          struct i2c_adapter *adapter)
 {
+       struct edid *edid;
+
        if (!drm_probe_ddc(adapter))
                return NULL;
 
-       return drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
+       edid = drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
+       if (edid)
+               drm_get_displayid(connector, edid);
+       return edid;
 }
 EXPORT_SYMBOL(drm_get_edid);
 
@@ -2406,7 +2427,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 /*
  * Search EDID for CEA extension block.
  */
-static u8 *drm_find_cea_extension(struct edid *edid)
+static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
 {
        u8 *edid_ext = NULL;
        int i;
@@ -2418,7 +2439,7 @@ static u8 *drm_find_cea_extension(struct edid *edid)
        /* Find CEA extension */
        for (i = 0; i < edid->extensions; i++) {
                edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
-               if (edid_ext[0] == CEA_EXT)
+               if (edid_ext[0] == ext_id)
                        break;
        }
 
@@ -2428,6 +2449,16 @@ static u8 *drm_find_cea_extension(struct edid *edid)
        return edid_ext;
 }
 
+static u8 *drm_find_cea_extension(struct edid *edid)
+{
+       return drm_find_edid_extension(edid, CEA_EXT);
+}
+
+static u8 *drm_find_displayid_extension(struct edid *edid)
+{
+       return drm_find_edid_extension(edid, DISPLAYID_EXT);
+}
+
 /*
  * Calculate the alternate clock for the CEA mode
  * (60Hz vs. 59.94Hz etc.)
@@ -3888,3 +3919,123 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
        return 0;
 }
 EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
+
+static int drm_parse_display_id(struct drm_connector *connector,
+                               u8 *displayid, int length,
+                               bool is_edid_extension)
+{
+       /* if this is an EDID extension the first byte will be 0x70 */
+       int idx = 0;
+       struct displayid_hdr *base;
+       struct displayid_block *block;
+       u8 csum = 0;
+       int i;
+
+       if (is_edid_extension)
+               idx = 1;
+
+       base = (struct displayid_hdr *)&displayid[idx];
+
+       DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
+                     base->rev, base->bytes, base->prod_id, base->ext_count);
+
+       if (base->bytes + 5 > length - idx)
+               return -EINVAL;
+
+       for (i = idx; i <= base->bytes + 5; i++) {
+               csum += displayid[i];
+       }
+       if (csum) {
+               DRM_ERROR("DisplayID checksum invalid, remainder is %d\n", csum);
+               return -EINVAL;
+       }
+
+       block = (struct displayid_block *)&displayid[idx + 4];
+       DRM_DEBUG_KMS("block id %d, rev %d, len %d\n",
+                     block->tag, block->rev, block->num_bytes);
+
+       switch (block->tag) {
+       case DATA_BLOCK_TILED_DISPLAY: {
+               struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
+
+               u16 w, h;
+               u8 tile_v_loc, tile_h_loc;
+               u8 num_v_tile, num_h_tile;
+               struct drm_tile_group *tg;
+
+               w = tile->tile_size[0] | tile->tile_size[1] << 8;
+               h = tile->tile_size[2] | tile->tile_size[3] << 8;
+
+               num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30);
+               num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30);
+               tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4);
+               tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4);
+
+               connector->has_tile = true;
+               if (tile->tile_cap & 0x80)
+                       connector->tile_is_single_monitor = true;
+
+               connector->num_h_tile = num_h_tile + 1;
+               connector->num_v_tile = num_v_tile + 1;
+               connector->tile_h_loc = tile_h_loc;
+               connector->tile_v_loc = tile_v_loc;
+               connector->tile_h_size = w + 1;
+               connector->tile_v_size = h + 1;
+
+               DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap);
+               DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1);
+               DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n",
+                      num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc);
+               DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
+
+               tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
+               if (!tg) {
+                       tg = drm_mode_create_tile_group(connector->dev, tile->topology_id);
+               }
+               if (!tg)
+                       return -ENOMEM;
+
+               if (connector->tile_group != tg) {
+                       /* if we haven't got a pointer, take the reference
+                        * and drop the ref to the old tile group */
+                       if (connector->tile_group) {
+                               drm_mode_put_tile_group(connector->dev, connector->tile_group);
+                       }
+                       connector->tile_group = tg;
+               } else
+                       /* if same tile group, then release the ref we just took. */
+                       drm_mode_put_tile_group(connector->dev, tg);
+       }
+               break;
+       default:
+               printk("unknown displayid tag %d\n", block->tag);
+               break;
+       }
+       return 0;
+}
+
+static void drm_get_displayid(struct drm_connector *connector,
+                             struct edid *edid)
+{
+       void *displayid = NULL;
+       int ret;
+       connector->has_tile = false;
+       displayid = drm_find_displayid_extension(edid);
+       if (!displayid) {
+               /* drop reference to any tile group we had */
+               goto out_drop_ref;
+       }
+
+       ret = drm_parse_display_id(connector, displayid, EDID_LENGTH, true);
+       if (ret < 0)
+               goto out_drop_ref;
+       if (!connector->has_tile)
+               goto out_drop_ref;
+       return;
+out_drop_ref:
+       if (connector->tile_group) {
+               drm_mode_put_tile_group(connector->dev, connector->tile_group);
+               connector->tile_group = NULL;
+       }
+       return;
+}
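
A worked example of the topology decoding above, with all values assumed for
illustration: a 3840x4320 monitor built from four 1920x2160 tiles would
describe the tile at horizontal location 0, vertical location 1 as

	tile_size[] = { 0x7f, 0x07, 0x6f, 0x08 }  /* 1919 = 0x077f, 2159 = 0x086f */
	topo[]      = { 0x11, 0x01, 0x00 }

which the code decodes to num_h_tile = num_v_tile = 1 (reported as a 2x2
topology after the +1), tile_h_loc = 0, tile_v_loc = 1, and a 1920x2160 tile
size.
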
index 09d47e9ba02612642a19987f9cea6dc39b795f82..52ce26d6b4fb8aeda59bb6358f62a659bc0464b7 100644 (file)
@@ -347,9 +347,18 @@ bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
 {
        struct drm_device *dev = fb_helper->dev;
        bool ret;
+       bool do_delayed = false;
+
        drm_modeset_lock_all(dev);
        ret = restore_fbdev_mode(fb_helper);
+
+       do_delayed = fb_helper->delayed_hotplug;
+       if (do_delayed)
+               fb_helper->delayed_hotplug = false;
        drm_modeset_unlock_all(dev);
+
+       if (do_delayed)
+               drm_fb_helper_hotplug_event(fb_helper);
        return ret;
 }
 EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
@@ -888,10 +897,6 @@ int drm_fb_helper_set_par(struct fb_info *info)
 
        drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
 
-       if (fb_helper->delayed_hotplug) {
-               fb_helper->delayed_hotplug = false;
-               drm_fb_helper_hotplug_event(fb_helper);
-       }
        return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_set_par);
@@ -995,19 +1000,21 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
        crtc_count = 0;
        for (i = 0; i < fb_helper->crtc_count; i++) {
                struct drm_display_mode *desired_mode;
+               int x, y;
                desired_mode = fb_helper->crtc_info[i].desired_mode;
-
+               x = fb_helper->crtc_info[i].x;
+               y = fb_helper->crtc_info[i].y;
                if (desired_mode) {
                        if (gamma_size == 0)
                                gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
-                       if (desired_mode->hdisplay < sizes.fb_width)
-                               sizes.fb_width = desired_mode->hdisplay;
-                       if (desired_mode->vdisplay < sizes.fb_height)
-                               sizes.fb_height = desired_mode->vdisplay;
-                       if (desired_mode->hdisplay > sizes.surface_width)
-                               sizes.surface_width = desired_mode->hdisplay;
-                       if (desired_mode->vdisplay > sizes.surface_height)
-                               sizes.surface_height = desired_mode->vdisplay;
+                       if (desired_mode->hdisplay + x < sizes.fb_width)
+                               sizes.fb_width = desired_mode->hdisplay + x;
+                       if (desired_mode->vdisplay + y < sizes.fb_height)
+                               sizes.fb_height = desired_mode->vdisplay + y;
+                       if (desired_mode->hdisplay + x > sizes.surface_width)
+                               sizes.surface_width = desired_mode->hdisplay + x;
+                       if (desired_mode->vdisplay + y > sizes.surface_height)
+                               sizes.surface_height = desired_mode->vdisplay + y;
                        crtc_count++;
                }
        }
@@ -1307,6 +1314,7 @@ static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
 
 static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
                              struct drm_display_mode **modes,
+                             struct drm_fb_offset *offsets,
                              bool *enabled, int width, int height)
 {
        int count, i, j;
@@ -1378,27 +1386,88 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
        return false;
 }
 
+static int drm_get_tile_offsets(struct drm_fb_helper *fb_helper,
+                               struct drm_display_mode **modes,
+                               struct drm_fb_offset *offsets,
+                               int idx,
+                               int h_idx, int v_idx)
+{
+       struct drm_fb_helper_connector *fb_helper_conn;
+       int i;
+       int hoffset = 0, voffset = 0;
+
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               fb_helper_conn = fb_helper->connector_info[i];
+               if (!fb_helper_conn->connector->has_tile)
+                       continue;
+
+               if (!modes[i] && (h_idx || v_idx)) {
+                       DRM_DEBUG_KMS("no modes for connector tiled %d %d\n", i,
+                                     fb_helper_conn->connector->base.id);
+                       continue;
+               }
+               if (fb_helper_conn->connector->tile_h_loc < h_idx)
+                       hoffset += modes[i]->hdisplay;
+
+               if (fb_helper_conn->connector->tile_v_loc < v_idx)
+                       voffset += modes[i]->vdisplay;
+       }
+       offsets[idx].x = hoffset;
+       offsets[idx].y = voffset;
+       DRM_DEBUG_KMS("returned %d %d for %d %d\n", hoffset, voffset, h_idx, v_idx);
+       return 0;
+}
+
 static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
                                 struct drm_display_mode **modes,
+                                struct drm_fb_offset *offsets,
                                 bool *enabled, int width, int height)
 {
        struct drm_fb_helper_connector *fb_helper_conn;
        int i;
-
+       uint64_t conn_configured = 0, mask;
+       int tile_pass = 0;
+       mask = (1 << fb_helper->connector_count) - 1;
+retry:
        for (i = 0; i < fb_helper->connector_count; i++) {
                fb_helper_conn = fb_helper->connector_info[i];
 
-               if (enabled[i] == false)
+               if (conn_configured & (1 << i))
                        continue;
 
+               if (enabled[i] == false) {
+                       conn_configured |= (1 << i);
+                       continue;
+               }
+
+               /* first pass over all the untiled connectors */
+               if (tile_pass == 0 && fb_helper_conn->connector->has_tile)
+                       continue;
+
+               if (tile_pass == 1) {
+                       if (fb_helper_conn->connector->tile_h_loc != 0 ||
+                           fb_helper_conn->connector->tile_v_loc != 0)
+                               continue;
+
+               } else {
+                       /* if this tile_pass doesn't cover any of the tiles - keep going */
+                       if (fb_helper_conn->connector->tile_h_loc != tile_pass - 1 &&
+                           fb_helper_conn->connector->tile_v_loc != tile_pass - 1)
+                               continue;
+
+                       /* find the tile offsets for this pass - need
+                          to find all tiles left and above */
+                       drm_get_tile_offsets(fb_helper, modes, offsets,
+                                            i, fb_helper_conn->connector->tile_h_loc, fb_helper_conn->connector->tile_v_loc);
+               }
                DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
                              fb_helper_conn->connector->base.id);
 
                /* go for command line mode first */
                modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
                if (!modes[i]) {
-                       DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
-                                     fb_helper_conn->connector->base.id);
+                       DRM_DEBUG_KMS("looking for preferred mode on connector %d %d\n",
+                                     fb_helper_conn->connector->base.id, fb_helper_conn->connector->tile_group ? fb_helper_conn->connector->tile_group->id : 0);
                        modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
                }
                /* No preferred modes, pick one off the list */
@@ -1408,6 +1477,12 @@ static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
                }
                DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
                          "none");
+               conn_configured |= (1 << i);
+       }
+
+       if ((conn_configured & mask) != mask) {
+               tile_pass++;
+               goto retry;
        }
        return true;
 }
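
drm_target_preferred() now runs in passes: pass 0 picks modes for the untiled connectors, later passes sweep up the tiles, and the conn_configured bitmask guarantees each connector is visited exactly once before the retry loop terminates. drm_get_tile_offsets() then shifts each tile by the modes of every tile strictly left of or above it, which is exact for the single-row and single-column tile groups this targets. A stand-alone model of both pieces (all names invented, logic mirrored from the hunks above):

    #include <stdint.h>
    #include <stdio.h>

    struct conn { int has_tile, h_loc, v_loc, hdisplay, vdisplay; };

    static void tile_offset(const struct conn *c, int n, int idx,
                            int *x, int *y)
    {
            int i;

            *x = *y = 0;
            for (i = 0; i < n; i++) {
                    if (!c[i].has_tile)
                            continue;
                    if (c[i].h_loc < c[idx].h_loc)
                            *x += c[i].hdisplay;    /* tiles to the left */
                    if (c[i].v_loc < c[idx].v_loc)
                            *y += c[i].vdisplay;    /* tiles above */
            }
    }

    int main(void)
    {
            struct conn c[] = {
                    { 0, 0, 0, 1024,  768 },        /* untiled */
                    { 1, 0, 0, 1920, 2160 },        /* tile (0,0) */
                    { 1, 1, 0, 1920, 2160 },        /* tile (1,0) */
            };
            int n = 3, pass = 0, x, y, i;
            uint64_t configured = 0, mask = (1ULL << n) - 1;

            while ((configured & mask) != mask) {
                    for (i = 0; i < n; i++) {
                            if (configured & (1ULL << i))
                                    continue;
                            if (pass == 0 && c[i].has_tile)
                                    continue;       /* untiled first */
                            if (pass > 0 && c[i].h_loc != pass - 1 &&
                                c[i].v_loc != pass - 1)
                                    continue;       /* not this pass's tiles */
                            tile_offset(c, n, i, &x, &y);
                            printf("pass %d: conn %d at (%d,%d)\n",
                                   pass, i, x, y);
                            configured |= 1ULL << i;
                    }
                    pass++;
            }
            return 0;
    }
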
@@ -1497,6 +1572,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
        struct drm_device *dev = fb_helper->dev;
        struct drm_fb_helper_crtc **crtcs;
        struct drm_display_mode **modes;
+       struct drm_fb_offset *offsets;
        struct drm_mode_set *modeset;
        bool *enabled;
        int width, height;
@@ -1511,9 +1587,11 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
                        sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
        modes = kcalloc(dev->mode_config.num_connector,
                        sizeof(struct drm_display_mode *), GFP_KERNEL);
+       offsets = kcalloc(dev->mode_config.num_connector,
+                         sizeof(struct drm_fb_offset), GFP_KERNEL);
        enabled = kcalloc(dev->mode_config.num_connector,
                          sizeof(bool), GFP_KERNEL);
-       if (!crtcs || !modes || !enabled) {
+       if (!crtcs || !modes || !enabled || !offsets) {
                DRM_ERROR("Memory allocation failed\n");
                goto out;
        }
@@ -1523,14 +1601,16 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
 
        if (!(fb_helper->funcs->initial_config &&
              fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
+                                              offsets,
                                               enabled, width, height))) {
                memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
                memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
+               memset(offsets, 0, dev->mode_config.num_connector*sizeof(offsets[0]));
 
-               if (!drm_target_cloned(fb_helper,
-                                      modes, enabled, width, height) &&
-                   !drm_target_preferred(fb_helper,
-                                         modes, enabled, width, height))
+               if (!drm_target_cloned(fb_helper, modes, offsets,
+                                      enabled, width, height) &&
+                   !drm_target_preferred(fb_helper, modes, offsets,
+                                         enabled, width, height))
                        DRM_ERROR("Unable to find initial modes\n");
 
                DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
@@ -1550,18 +1630,23 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
        for (i = 0; i < fb_helper->connector_count; i++) {
                struct drm_display_mode *mode = modes[i];
                struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
+               struct drm_fb_offset *offset = &offsets[i];
                modeset = &fb_crtc->mode_set;
 
                if (mode && fb_crtc) {
-                       DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
-                                     mode->name, fb_crtc->mode_set.crtc->base.id);
+                       DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
+                                     mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y);
                        fb_crtc->desired_mode = mode;
+                       fb_crtc->x = offset->x;
+                       fb_crtc->y = offset->y;
                        if (modeset->mode)
                                drm_mode_destroy(dev, modeset->mode);
                        modeset->mode = drm_mode_duplicate(dev,
                                                           fb_crtc->desired_mode);
                        modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
                        modeset->fb = fb_helper->fb;
+                       modeset->x = offset->x;
+                       modeset->y = offset->y;
                }
        }
 
@@ -1578,6 +1663,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
 out:
        kfree(crtcs);
        kfree(modes);
+       kfree(offsets);
        kfree(enabled);
 }
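
The offsets array joins the existing per-connector allocations and is freed on the shared out: path; since kfree(NULL) is a no-op, a partially failed allocation needs no special casing. The pattern in miniature, a kernel-style sketch with illustrative names:

    #include <linux/errno.h>
    #include <linux/slab.h>

    static int setup_arrays(unsigned int n)
    {
            int *a = kcalloc(n, sizeof(*a), GFP_KERNEL);
            int *b = kcalloc(n, sizeof(*b), GFP_KERNEL);
            int ret = -ENOMEM;

            if (!a || !b)
                    goto out;               /* kfree(NULL) below is safe */

            /* ... use a and b ... */
            ret = 0;
    out:
            kfree(a);
            kfree(b);
            return ret;
    }
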
 
index 91e1105f28006eae6a4e1e1a654a20c1315cff80..0b9514b6cd640793b7f73e8737bf310cb28b23ee 100644 (file)
@@ -527,6 +527,7 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
                if (copy_to_user(buffer + total,
                                 e->event, e->event->length)) {
                        total = -EFAULT;
+                       e->destroy(e);
                        break;
                }
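
The added e->destroy(e) plugs a leak in drm_read(): the event has already been unlinked from the file's queue, so when the copy to userspace faults it must still be destroyed on the way out. A reduced stand-alone model of the rule (names invented, memcpy plus a fault flag standing in for copy_to_user()):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    struct event {
            size_t length;
            const char *payload;
            void (*destroy)(struct event *e);
    };

    static void destroy_event(struct event *e)
    {
            printf("event %p destroyed\n", (void *)e);
    }

    static int copy_out(char *dst, struct event *e, int fault)
    {
            if (fault)
                    return -EFAULT;         /* simulated bad user pointer */
            memcpy(dst, e->payload, e->length);
            return 0;
    }

    int main(void)
    {
            struct event e = { 5, "hello", destroy_event };
            char buf[16];

            /* the event is already off the queue at this point */
            if (copy_out(buf, &e, 1)) {
                    e.destroy(&e);          /* the added cleanup */
                    return 1;               /* -EFAULT in the real code */
            }
            e.destroy(&e);
            return 0;
    }
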
 
index 0e47df4ef24efa2226de1f1fa8b4ea26c76e55cb..f5a5f18efa5bf230d810d8c031bb30cfddd0390a 100644 (file)
@@ -166,7 +166,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
        spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
 
        /*
-        * If the vblank interrupt was already disbled update the count
+        * If the vblank interrupt was already disabled update the count
         * and timestamp to maintain the appearance that the counter
         * has been ticking all along until this time. This makes the
         * count account for the entire time between drm_vblank_on() and
index 03d0b0cb8e05f0a3598ff7ae088b43f17d7dfe63..fb3e3d429191247c5041af8ca0212c6ce1e2f705 100644 (file)
@@ -4565,7 +4565,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
                ironlake_fdi_disable(crtc);
 
                ironlake_disable_pch_transcoder(dev_priv, pipe);
-               intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 
                if (HAS_PCH_CPT(dev)) {
                        /* disable TRANS_DP_CTL */
@@ -4636,8 +4635,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 
        if (intel_crtc->config.has_pch_encoder) {
                lpt_disable_pch_transcoder(dev_priv);
-               intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
-                                                     true);
                intel_ddi_fdi_disable(crtc);
        }
 
index bfe359506377852154b43c0307fb7e61c300277c..7f8c6a66680a611c3aeff7171c8a1b0839daa8b9 100644 (file)
@@ -283,7 +283,7 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_dp *intel_dp = intel_connector->mst_port;
 
-       return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port);
+       return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
 }
 
 static int
@@ -414,6 +414,8 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
        intel_dp_add_properties(intel_dp, connector);
 
        drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
+       drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
+
        drm_mode_connector_set_path_property(connector, pathprop);
        drm_reinit_primary_mode_group(dev);
        mutex_lock(&dev->mode_config.mutex);
index f2183b554cbc7708db5567f4cb65e32061de594d..850cf7d6578cef54dd2792852e247322995e5a68 100644 (file)
@@ -324,6 +324,7 @@ intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
 static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                                    struct drm_fb_helper_crtc **crtcs,
                                    struct drm_display_mode **modes,
+                                   struct drm_fb_offset *offsets,
                                    bool *enabled, int width, int height)
 {
        struct drm_device *dev = fb_helper->dev;
@@ -332,6 +333,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
        bool fallback = true;
        int num_connectors_enabled = 0;
        int num_connectors_detected = 0;
+       uint64_t conn_configured = 0, mask;
+       int pass = 0;
 
        save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
                               GFP_KERNEL);
@@ -339,7 +342,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                return false;
 
        memcpy(save_enabled, enabled, dev->mode_config.num_connector);
-
+       mask = (1 << fb_helper->connector_count) - 1;
+retry:
        for (i = 0; i < fb_helper->connector_count; i++) {
                struct drm_fb_helper_connector *fb_conn;
                struct drm_connector *connector;
@@ -349,12 +353,19 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                fb_conn = fb_helper->connector_info[i];
                connector = fb_conn->connector;
 
+               if (conn_configured & (1 << i))
+                       continue;
+
+               if (pass == 0 && !connector->has_tile)
+                       continue;
+
                if (connector->status == connector_status_connected)
                        num_connectors_detected++;
 
                if (!enabled[i]) {
                        DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
                                      connector->name);
+                       conn_configured |= (1 << i);
                        continue;
                }
 
@@ -373,6 +384,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                        DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
                                      connector->name);
                        enabled[i] = false;
+                       conn_configured |= (1 << i);
                        continue;
                }
 
@@ -400,8 +412,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
 
                /* try for preferred next */
                if (!modes[i]) {
-                       DRM_DEBUG_KMS("looking for preferred mode on connector %s\n",
-                                     connector->name);
+                       DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
+                                     connector->name, connector->has_tile);
                        modes[i] = drm_has_preferred_mode(fb_conn, width,
                                                          height);
                }
@@ -444,6 +456,12 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                              modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
 
                fallback = false;
+               conn_configured |= (1 << i);
+       }
+
+       if ((conn_configured & mask) != mask) {
+               pass++;
+               goto retry;
        }
 
        /*
index c03d457a5150422e170ece9a69c86914ee1e3a3a..14654d628ca42ad5fc3bd00e4ee56196c254ebf2 100644 (file)
@@ -899,6 +899,17 @@ void intel_lvds_init(struct drm_device *dev)
        int pipe;
        u8 pin;
 
+       /*
+        * Unlock registers and just leave them unlocked. Do this before
+        * checking quirk lists to avoid bogus WARNINGs.
+        */
+       if (HAS_PCH_SPLIT(dev)) {
+               I915_WRITE(PCH_PP_CONTROL,
+                          I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+       } else {
+               I915_WRITE(PP_CONTROL,
+                          I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+       }
        if (!intel_lvds_supported(dev))
                return;
 
@@ -1097,17 +1108,6 @@ out:
        lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
                                 LVDS_A3_POWER_MASK;
 
-       /*
-        * Unlock registers and just
-        * leave them unlocked
-        */
-       if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(PCH_PP_CONTROL,
-                          I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
-       } else {
-               I915_WRITE(PP_CONTROL,
-                          I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
-       }
        lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
        if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
                DRM_DEBUG_KMS("lid notifier registration failed\n");
index cd05677ad4b7a128a087f4b611f73b5ab4978006..72a40f95d048e06c9bc625ed2e0575edbce6e933 100644 (file)
@@ -218,7 +218,6 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
-               device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
                device->oclass[NVDEV_ENGINE_DISP   ] =  nva3_disp_oclass;
                device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
                break;
index 5ae6a43893b5996c0e13981851c71ed30947c5ad..1931057f996205d68ca93c94805211536c31aa4d 100644 (file)
@@ -551,8 +551,8 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
                        }
 
                        if (status & 0x40000000) {
-                               nouveau_fifo_uevent(&priv->base);
                                nv_wr32(priv, 0x002100, 0x40000000);
+                               nouveau_fifo_uevent(&priv->base);
                                status &= ~0x40000000;
                        }
                }
index 1fe1f8fbda0c0ab279d29630e7582fa48c607593..074d434c3077a53b74410b033a495ae58c3cd666 100644 (file)
@@ -740,6 +740,8 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
        u32 inte = nv_rd32(priv, 0x002628);
        u32 unkn;
 
+       nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
+
        for (unkn = 0; unkn < 8; unkn++) {
                u32 ints = (intr >> (unkn * 0x04)) & inte;
                if (ints & 0x1) {
@@ -751,8 +753,6 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
                        nv_mask(priv, 0x002628, ints, 0);
                }
        }
-
-       nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
 }
 
 static void
index fc9ef663f25ac0f3368b0c694091505796ff7d9c..6a8db7c80bd148e150e6c4497c43f5bedc6249fe 100644 (file)
@@ -982,8 +982,8 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
        }
 
        if (stat & 0x80000000) {
-               nve0_fifo_intr_engine(priv);
                nv_wr32(priv, 0x002100, 0x80000000);
+               nve0_fifo_intr_engine(priv);
                stat &= ~0x80000000;
        }
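
The three fifo hunks above make the same ordering change: write-to-clear the pending bit first, then dispatch. An event that arrives while the handler runs re-asserts the bit instead of being wiped out by a late acknowledge. A stand-alone model of why the order matters (all names invented):

    #include <stdio.h>

    #define EVENT_BIT 0x40000000u

    static unsigned int pending = EVENT_BIT;        /* fake status register */

    static void dispatch(void)
    {
            pending |= EVENT_BIT;   /* a new event fires mid-handler */
    }

    int main(void)
    {
            unsigned int stat = pending;

            if (stat & EVENT_BIT) {
                    pending &= ~EVENT_BIT;  /* ack first (the nv_wr32) */
                    dispatch();             /* then signal the uevent */
                    stat &= ~EVENT_BIT;
            }
            /* with the old order a late ack would clear this again */
            printf("still pending: %#x\n", pending & EVENT_BIT);
            return 0;
    }
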
 
index afb93bb72f97949135e38b352b902b5d31a66034..65910e3aed0c86e1240273f274f42ea947373d58 100644 (file)
@@ -664,7 +664,6 @@ nouveau_pmops_suspend(struct device *dev)
 
        pci_save_state(pdev);
        pci_disable_device(pdev);
-       pci_ignore_hotplug(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
 }
@@ -732,6 +731,7 @@ nouveau_pmops_runtime_suspend(struct device *dev)
        ret = nouveau_do_suspend(drm_dev, true);
        pci_save_state(pdev);
        pci_disable_device(pdev);
+       pci_ignore_hotplug(pdev);
        pci_set_power_state(pdev, PCI_D3cold);
        drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
        return ret;
index 515cd9aebb9982d1c0c0c06f1deaf4d0c7bf3e5c..f32a434724e307f5da958de0bdf26bb09d115f4b 100644 (file)
@@ -52,20 +52,24 @@ nouveau_fctx(struct nouveau_fence *fence)
        return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
 }
 
-static void
+static int
 nouveau_fence_signal(struct nouveau_fence *fence)
 {
+       int drop = 0;
+
        fence_signal_locked(&fence->base);
        list_del(&fence->head);
+       rcu_assign_pointer(fence->channel, NULL);
 
        if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
                struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
 
                if (!--fctx->notify_ref)
-                       nvif_notify_put(&fctx->notify);
+                       drop = 1;
        }
 
        fence_put(&fence->base);
+       return drop;
 }
 
 static struct nouveau_fence *
@@ -88,16 +92,23 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
 {
        struct nouveau_fence *fence;
 
-       nvif_notify_fini(&fctx->notify);
-
        spin_lock_irq(&fctx->lock);
        while (!list_empty(&fctx->pending)) {
                fence = list_entry(fctx->pending.next, typeof(*fence), head);
 
-               nouveau_fence_signal(fence);
-               fence->channel = NULL;
+               if (nouveau_fence_signal(fence))
+                       nvif_notify_put(&fctx->notify);
        }
        spin_unlock_irq(&fctx->lock);
+
+       nvif_notify_fini(&fctx->notify);
+       fctx->dead = 1;
+
+       /*
+        * Ensure that all accesses to fence->channel complete before freeing
+        * the channel.
+        */
+       synchronize_rcu();
 }
 
 static void
@@ -112,21 +123,23 @@ nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
        kref_put(&fctx->fence_ref, nouveau_fence_context_put);
 }
 
-static void
+static int
 nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
 {
        struct nouveau_fence *fence;
-
+       int drop = 0;
        u32 seq = fctx->read(chan);
 
        while (!list_empty(&fctx->pending)) {
                fence = list_entry(fctx->pending.next, typeof(*fence), head);
 
                if ((int)(seq - fence->base.seqno) < 0)
-                       return;
+                       break;
 
-               nouveau_fence_signal(fence);
+               drop |= nouveau_fence_signal(fence);
        }
+
+       return drop;
 }
 
 static int
@@ -135,18 +148,21 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
        struct nouveau_fence_chan *fctx =
                container_of(notify, typeof(*fctx), notify);
        unsigned long flags;
+       int ret = NVIF_NOTIFY_KEEP;
 
        spin_lock_irqsave(&fctx->lock, flags);
        if (!list_empty(&fctx->pending)) {
                struct nouveau_fence *fence;
+               struct nouveau_channel *chan;
 
                fence = list_entry(fctx->pending.next, typeof(*fence), head);
-               nouveau_fence_update(fence->channel, fctx);
+               chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+               if (nouveau_fence_update(chan, fctx))
+                       ret = NVIF_NOTIFY_DROP;
        }
        spin_unlock_irqrestore(&fctx->lock, flags);
 
-       /* Always return keep here. NVIF refcount is handled with nouveau_fence_update */
-       return NVIF_NOTIFY_KEEP;
+       return ret;
 }
 
 void
@@ -262,7 +278,10 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
        if (!ret) {
                fence_get(&fence->base);
                spin_lock_irq(&fctx->lock);
-               nouveau_fence_update(chan, fctx);
+
+               if (nouveau_fence_update(chan, fctx))
+                       nvif_notify_put(&fctx->notify);
+
                list_add_tail(&fence->head, &fctx->pending);
                spin_unlock_irq(&fctx->lock);
        }
@@ -276,13 +295,16 @@ nouveau_fence_done(struct nouveau_fence *fence)
        if (fence->base.ops == &nouveau_fence_ops_legacy ||
            fence->base.ops == &nouveau_fence_ops_uevent) {
                struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+               struct nouveau_channel *chan;
                unsigned long flags;
 
                if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
                        return true;
 
                spin_lock_irqsave(&fctx->lock, flags);
-               nouveau_fence_update(fence->channel, fctx);
+               chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+               if (chan && nouveau_fence_update(chan, fctx))
+                       nvif_notify_put(&fctx->notify);
                spin_unlock_irqrestore(&fctx->lock, flags);
        }
        return fence_is_signaled(&fence->base);
@@ -387,12 +409,18 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 
        if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
                struct nouveau_channel *prev = NULL;
+               bool must_wait = true;
 
                f = nouveau_local_fence(fence, chan->drm);
-               if (f)
-                       prev = f->channel;
+               if (f) {
+                       rcu_read_lock();
+                       prev = rcu_dereference(f->channel);
+                       if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+                               must_wait = false;
+                       rcu_read_unlock();
+               }
 
-               if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+               if (must_wait)
                        ret = fence_wait(fence, intr);
 
                return ret;
@@ -403,19 +431,22 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 
        for (i = 0; i < fobj->shared_count && !ret; ++i) {
                struct nouveau_channel *prev = NULL;
+               bool must_wait = true;
 
                fence = rcu_dereference_protected(fobj->shared[i],
                                                reservation_object_held(resv));
 
                f = nouveau_local_fence(fence, chan->drm);
-               if (f)
-                       prev = f->channel;
+               if (f) {
+                       rcu_read_lock();
+                       prev = rcu_dereference(f->channel);
+                       if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+                               must_wait = false;
+                       rcu_read_unlock();
+               }
 
-               if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+               if (must_wait)
                        ret = fence_wait(fence, intr);
-
-               if (ret)
-                       break;
        }
 
        return ret;
@@ -463,7 +494,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f)
        struct nouveau_fence *fence = from_fence(f);
        struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
 
-       return fence->channel ? fctx->name : "dead channel";
+       return !fctx->dead ? fctx->name : "dead channel";
 }
 
 /*
@@ -476,9 +507,16 @@ static bool nouveau_fence_is_signaled(struct fence *f)
 {
        struct nouveau_fence *fence = from_fence(f);
        struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
-       struct nouveau_channel *chan = fence->channel;
+       struct nouveau_channel *chan;
+       bool ret = false;
+
+       rcu_read_lock();
+       chan = rcu_dereference(fence->channel);
+       if (chan)
+               ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
+       rcu_read_unlock();
 
-       return (int)(fctx->read(chan) - fence->base.seqno) >= 0;
+       return ret;
 }
 
 static bool nouveau_fence_no_signaling(struct fence *f)
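
The fence rework turns fence->channel into an __rcu pointer: readers such as nouveau_fence_is_signaled() sample it under rcu_read_lock() and tolerate NULL, the signal path unpublishes it with rcu_assign_pointer(), and context teardown calls synchronize_rcu() so every reader drains before the channel memory can go away. A kernel-style sketch of that lifetime rule, reduced to invented names:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct chan { int seq; };

    struct obj {
            struct chan __rcu *channel;
    };

    /* reader: may race with teardown, must tolerate a vanished channel */
    static bool obj_done(struct obj *o, int seqno)
    {
            struct chan *chan;
            bool ret = false;

            rcu_read_lock();
            chan = rcu_dereference(o->channel);
            if (chan)
                    ret = chan->seq - seqno >= 0;
            rcu_read_unlock();
            return ret;
    }

    /* teardown: unpublish, wait out all readers, only then free */
    static void obj_teardown(struct obj *o)
    {
            struct chan *chan = rcu_dereference_protected(o->channel, 1);

            rcu_assign_pointer(o->channel, NULL);
            synchronize_rcu();
            kfree(chan);
    }
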
index 943b0b17b1fc760296eccea9b409f03696a644fc..96e461c6f68fac370602777d8e5bd363100fa0a5 100644 (file)
@@ -14,7 +14,7 @@ struct nouveau_fence {
 
        bool sysmem;
 
-       struct nouveau_channel *channel;
+       struct nouveau_channel __rcu *channel;
        unsigned long timeout;
 };
 
@@ -47,7 +47,7 @@ struct nouveau_fence_chan {
        char name[32];
 
        struct nvif_notify notify;
-       int notify_ref;
+       int notify_ref, dead;
 };
 
 struct nouveau_fence_priv {
index 446e71ca36cb111c5d47aad5b4928eb6548a68c5..d9b25684ac989d73341b7cddd70bb181b28eee2b 100644 (file)
@@ -264,7 +264,8 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
        if (list_is_singular(&release->bos))
                return 0;
 
-       ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, !no_intr);
+       ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
+                                    !no_intr, NULL);
        if (ret)
                return ret;
 
index 30d242b25078e1f5c35d63384961db4279c7ffd4..d59ec491dbb9cba64d76369e62ef61b836c094d8 100644 (file)
@@ -2039,6 +2039,7 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
        atombios_crtc_set_base(crtc, x, y, old_fb);
        atombios_overscan_setup(crtc, mode, adjusted_mode);
        atombios_scaler_setup(crtc);
+       radeon_cursor_reset(crtc);
        /* update the hw version for dpm */
        radeon_crtc->hw_mode = *adjusted_mode;
 
index 3f898d020ae691f9bc38b482fec6a77a05a44843..f373a81ba3d5f8f969bf810d3d373decf3889170 100644 (file)
@@ -937,7 +937,7 @@ static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
        tmp |= TMIN(0);
        WREG32_SMC(CG_FDO_CTRL2, tmp);
 
-       tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
+       tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
        tmp |= FDO_PWM_MODE(mode);
        WREG32_SMC(CG_FDO_CTRL2, tmp);
 }
@@ -1162,7 +1162,7 @@ static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
        tmp |= TARGET_PERIOD(tach_period);
        WREG32_SMC(CG_TACH_CTRL, tmp);
 
-       ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+       ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
 
        return 0;
 }
@@ -1178,7 +1178,7 @@ static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
                tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
                WREG32_SMC(CG_FDO_CTRL2, tmp);
 
-               tmp = RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK;
+               tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
                tmp |= TMIN(pi->t_min);
                WREG32_SMC(CG_FDO_CTRL2, tmp);
                pi->fan_ctrl_is_in_default_mode = true;
@@ -5849,7 +5849,6 @@ int ci_dpm_init(struct radeon_device *rdev)
                        rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
 
        pi->fan_ctrl_is_in_default_mode = true;
-       rdev->pm.dpm.fan.ucode_fan_control = false;
 
        return 0;
 }
index e4e88ca8b82eb29c5a4f637c12d8b701a88b4d1c..ba85986febea5054762615ee7fc5edbaa275f2cd 100644 (file)
 
 #define        CG_FDO_CTRL0                                    0xC0300064
 #define                FDO_STATIC_DUTY(x)                      ((x) << 0)
-#define                FDO_STATIC_DUTY_MASK                    0x0000000F
+#define                FDO_STATIC_DUTY_MASK                    0x000000FF
 #define                FDO_STATIC_DUTY_SHIFT                   0
 #define        CG_FDO_CTRL1                                    0xC0300068
 #define                FMAX_DUTY100(x)                         ((x) << 0)
-#define                FMAX_DUTY100_MASK                       0x0000000F
+#define                FMAX_DUTY100_MASK                       0x000000FF
 #define                FMAX_DUTY100_SHIFT                      0
 #define        CG_FDO_CTRL2                                    0xC030006C
 #define                TMIN(x)                                 ((x) << 0)
-#define                TMIN_MASK                               0x0000000F
+#define                TMIN_MASK                               0x000000FF
 #define                TMIN_SHIFT                              0
 #define                FDO_PWM_MODE(x)                         ((x) << 11)
-#define                FDO_PWM_MODE_MASK                       (3 << 11)
+#define                FDO_PWM_MODE_MASK                       (7 << 11)
 #define                FDO_PWM_MODE_SHIFT                      11
 #define                TACH_PWM_RESP_RATE(x)                   ((x) << 25)
 #define                TACH_PWM_RESP_RATE_MASK                 (0x7f << 25)
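
Two related fixes meet here: the ci_dpm.c hunks restore the read-modify-write idiom (clear the field with the inverted mask before OR-ing in the new value; masking without the inversion kept only the old field and zeroed every other bit), and the cikd.h hunks widen the masks to the fields' real widths, e.g. three bits at position 11 for FDO_PWM_MODE. A minimal illustration using the corrected mask (values copied from above, helper name invented):

    #include <stdint.h>
    #include <stdio.h>

    #define FDO_PWM_MODE(x)         ((uint32_t)(x) << 11)
    #define FDO_PWM_MODE_MASK       (7u << 11)

    static uint32_t set_pwm_mode(uint32_t reg, uint32_t mode)
    {
            reg &= ~FDO_PWM_MODE_MASK;      /* clear just this field */
            reg |= FDO_PWM_MODE(mode);      /* insert the new value */
            return reg;
    }

    int main(void)
    {
            /* every bit outside the field survives the update */
            printf("%#x\n", set_pwm_mode(0xffffffffu, 1));  /* 0xffffcfff */
            return 0;
    }
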
index 5c8b358f9fbad903fb8615fd7806235fec0e5bf4..924b1b7ab455537755ebf0209ae5d7e2aa9a9e0e 100644 (file)
@@ -35,7 +35,7 @@
 #define MIN(a,b)                   (((a)<(b))?(a):(b))
 
 int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
-                          struct radeon_cs_reloc **cs_reloc);
+                          struct radeon_bo_list **cs_reloc);
 struct evergreen_cs_track {
        u32                     group_size;
        u32                     nbanks;
@@ -1094,7 +1094,7 @@ static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
 static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 {
        struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        u32 last_reg;
        u32 m, i, tmp, *ib;
        int r;
@@ -1792,7 +1792,7 @@ static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                   struct radeon_cs_packet *pkt)
 {
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        struct evergreen_cs_track *track;
        volatile u32 *ib;
        unsigned idx;
@@ -2661,7 +2661,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
                        p->track = NULL;
                        return r;
                }
-       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+       } while (p->idx < p->chunk_ib->length_dw);
 #if 0
        for (r = 0; r < p->ib.length_dw; r++) {
                printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
@@ -2684,8 +2684,8 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
  **/
 int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 {
-       struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
-       struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
+       struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
+       struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc;
        u32 header, cmd, count, sub_cmd;
        volatile u32 *ib = p->ib.ptr;
        u32 idx;
@@ -3100,7 +3100,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                        DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
                        return -EINVAL;
                }
-       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+       } while (p->idx < p->chunk_ib->length_dw);
 #if 0
        for (r = 0; r < p->ib->length_dw; r++) {
                printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
index b53b31a7b76fd67f17614a2889c5f00a1e35441e..74f06d5405913a7e78c18af9d365c6549b591f45 100644 (file)
@@ -1254,7 +1254,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
        int r;
        u32 tile_flags = 0;
        u32 tmp;
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        u32 value;
 
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
@@ -1293,7 +1293,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
                             int idx)
 {
        unsigned c, i;
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        struct r100_cs_track *track;
        int r = 0;
        volatile uint32_t *ib;
@@ -1542,7 +1542,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt,
                              unsigned idx, unsigned reg)
 {
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        uint32_t tmp;
@@ -1901,7 +1901,7 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
 static int r100_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
 {
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        struct r100_cs_track *track;
        unsigned idx;
        volatile uint32_t *ib;
@@ -2061,7 +2061,7 @@ int r100_cs_parse(struct radeon_cs_parser *p)
                }
                if (r)
                        return r;
-       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+       } while (p->idx < p->chunk_ib->length_dw);
        return 0;
 }
 
index 732d4938aab75f91d1666960de2c0275de38d4a2..c70e6d5bcd198ce139f4eae2c842044da998b0e3 100644 (file)
@@ -146,7 +146,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                       struct radeon_cs_packet *pkt,
                       unsigned idx, unsigned reg)
 {
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        uint32_t tmp;
index 1bc4704034ce9d90f9563f598bb6135b44878f06..064ad5569ccaac826612aedd8d035db3996106db 100644 (file)
@@ -598,7 +598,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                struct radeon_cs_packet *pkt,
                unsigned idx, unsigned reg)
 {
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        uint32_t tmp, tile_flags = 0;
@@ -1142,7 +1142,7 @@ fail:
 static int r300_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
 {
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        unsigned idx;
@@ -1283,7 +1283,7 @@ int r300_cs_parse(struct radeon_cs_parser *p)
                if (r) {
                        return r;
                }
-       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+       } while (p->idx < p->chunk_ib->length_dw);
        return 0;
 }
 
index c47537a1ddbad19cf49ad9cdae7cf6ef8438142d..acc1f99c84d993cd2b25b7379df36d55c7afa2e1 100644 (file)
@@ -969,7 +969,7 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
 static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 {
        struct r600_cs_track *track = (struct r600_cs_track *)p->track;
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        u32 m, i, tmp, *ib;
        int r;
 
@@ -1626,7 +1626,7 @@ static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 static int r600_packet3_check(struct radeon_cs_parser *p,
                                struct radeon_cs_packet *pkt)
 {
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        struct r600_cs_track *track;
        volatile u32 *ib;
        unsigned idx;
@@ -2316,7 +2316,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
                        p->track = NULL;
                        return r;
                }
-       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+       } while (p->idx < p->chunk_ib->length_dw);
 #if 0
        for (r = 0; r < p->ib.length_dw; r++) {
                printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
@@ -2351,10 +2351,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 
 static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
 {
-       if (p->chunk_relocs_idx == -1) {
+       if (p->chunk_relocs == NULL) {
                return 0;
        }
-       p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+       p->relocs = kzalloc(sizeof(struct radeon_bo_list), GFP_KERNEL);
        if (p->relocs == NULL) {
                return -ENOMEM;
        }
@@ -2398,7 +2398,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
        /* Copy the packet into the IB, the parser will read from the
         * input memory (cached) and write to the IB (which can be
         * uncached). */
-       ib_chunk = &parser.chunks[parser.chunk_ib_idx];
+       ib_chunk = parser.chunk_ib;
        parser.ib.length_dw = ib_chunk->length_dw;
        *l = parser.ib.length_dw;
        if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
@@ -2435,24 +2435,24 @@ void r600_cs_legacy_init(void)
  * GPU offset using the provided start.
  **/
 int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
-                          struct radeon_cs_reloc **cs_reloc)
+                          struct radeon_bo_list **cs_reloc)
 {
        struct radeon_cs_chunk *relocs_chunk;
        unsigned idx;
 
        *cs_reloc = NULL;
-       if (p->chunk_relocs_idx == -1) {
+       if (p->chunk_relocs == NULL) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }
-       relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+       relocs_chunk = p->chunk_relocs;
        idx = p->dma_reloc_idx;
        if (idx >= p->nrelocs) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                          idx, p->nrelocs);
                return -EINVAL;
        }
-       *cs_reloc = p->relocs_ptr[idx];
+       *cs_reloc = &p->relocs[idx];
        p->dma_reloc_idx++;
        return 0;
 }
@@ -2472,8 +2472,8 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
  **/
 int r600_dma_cs_parse(struct radeon_cs_parser *p)
 {
-       struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
-       struct radeon_cs_reloc *src_reloc, *dst_reloc;
+       struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
+       struct radeon_bo_list *src_reloc, *dst_reloc;
        u32 header, cmd, count, tiled;
        volatile u32 *ib = p->ib.ptr;
        u32 idx, idx_value;
@@ -2619,7 +2619,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
                        DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
                        return -EINVAL;
                }
-       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+       } while (p->idx < p->chunk_ib->length_dw);
 #if 0
        for (r = 0; r < p->ib->length_dw; r++) {
                printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
index 3207bb60715e961b72f083a0fbbb034a3ee0b2a2..54529b837afaa1d76147f764664047af5f09b5f6 100644 (file)
@@ -450,6 +450,15 @@ struct radeon_mman {
 #endif
 };
 
+struct radeon_bo_list {
+       struct radeon_bo                *robj;
+       struct ttm_validate_buffer      tv;
+       uint64_t                        gpu_offset;
+       unsigned                        prefered_domains;
+       unsigned                        allowed_domains;
+       uint32_t                        tiling_flags;
+};
+
 /* bo virtual address in a specific vm */
 struct radeon_bo_va {
        /* protected by bo being reserved */
@@ -920,6 +929,9 @@ struct radeon_vm {
 
        struct rb_root          va;
 
+       /* protecting invalidated and freed */
+       spinlock_t              status_lock;
+
        /* BOs moved, but not yet updated in the PT */
        struct list_head        invalidated;
 
@@ -1044,19 +1056,7 @@ void cayman_dma_fini(struct radeon_device *rdev);
 /*
  * CS.
  */
-struct radeon_cs_reloc {
-       struct drm_gem_object           *gobj;
-       struct radeon_bo                *robj;
-       struct ttm_validate_buffer      tv;
-       uint64_t                        gpu_offset;
-       unsigned                        prefered_domains;
-       unsigned                        allowed_domains;
-       uint32_t                        tiling_flags;
-       uint32_t                        handle;
-};
-
 struct radeon_cs_chunk {
-       uint32_t                chunk_id;
        uint32_t                length_dw;
        uint32_t                *kdata;
        void __user             *user_ptr;
@@ -1074,16 +1074,15 @@ struct radeon_cs_parser {
        unsigned                idx;
        /* relocations */
        unsigned                nrelocs;
-       struct radeon_cs_reloc  *relocs;
-       struct radeon_cs_reloc  **relocs_ptr;
-       struct radeon_cs_reloc  *vm_bos;
+       struct radeon_bo_list   *relocs;
+       struct radeon_bo_list   *vm_bos;
        struct list_head        validated;
        unsigned                dma_reloc_idx;
        /* indices of various chunks */
-       int                     chunk_ib_idx;
-       int                     chunk_relocs_idx;
-       int                     chunk_flags_idx;
-       int                     chunk_const_ib_idx;
+       struct radeon_cs_chunk  *chunk_ib;
+       struct radeon_cs_chunk  *chunk_relocs;
+       struct radeon_cs_chunk  *chunk_flags;
+       struct radeon_cs_chunk  *chunk_const_ib;
        struct radeon_ib        ib;
        struct radeon_ib        const_ib;
        void                    *track;
@@ -1097,7 +1096,7 @@ struct radeon_cs_parser {
 
 static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
 {
-       struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+       struct radeon_cs_chunk *ibc = p->chunk_ib;
 
        if (ibc->kdata)
                return ibc->kdata[idx];
@@ -2975,7 +2974,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev);
 void radeon_vm_manager_fini(struct radeon_device *rdev);
 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
-struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
+struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
                                          struct radeon_vm *vm,
                                           struct list_head *head);
 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
@@ -3089,7 +3088,7 @@ bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
 void radeon_cs_dump_packet(struct radeon_cs_parser *p,
                           struct radeon_cs_packet *pkt);
 int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
-                               struct radeon_cs_reloc **cs_reloc,
+                               struct radeon_bo_list **cs_reloc,
                                int nomm);
 int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
                               uint32_t *vline_start_end,
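
The parser rework visible across the radeon files replaces the chunk_*_idx integers with direct struct radeon_cs_chunk pointers and folds struct radeon_cs_reloc into struct radeon_bo_list. The pointers stay valid because the chunks array is allocated once and never moved, and the old -1 'absent' sentinel becomes NULL. A reduced sketch of the conversion (names modeled on the parser, not exact):

    #include <stdlib.h>

    struct chunk { unsigned int length_dw; };

    struct parser {
            struct chunk *chunks;           /* allocated once, never moved */
            struct chunk *chunk_ib;         /* was: int chunk_ib_idx */
    };

    static int parser_init(struct parser *p, unsigned int n, int ib_idx)
    {
            p->chunks = calloc(n, sizeof(*p->chunks));
            if (!p->chunks)
                    return -1;
            /* NULL now plays the role the -1 sentinel used to */
            p->chunk_ib = ib_idx >= 0 ? &p->chunks[ib_idx] : NULL;
            return 0;
    }
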
index 75f22e5e999fc455565f9bad0a7e2c2322199a9f..c830863bc98aa0cb55e84aedfbbc76606945ee01 100644 (file)
@@ -77,22 +77,18 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
        struct drm_device *ddev = p->rdev->ddev;
        struct radeon_cs_chunk *chunk;
        struct radeon_cs_buckets buckets;
-       unsigned i, j;
-       bool duplicate, need_mmap_lock = false;
+       unsigned i;
+       bool need_mmap_lock = false;
        int r;
 
-       if (p->chunk_relocs_idx == -1) {
+       if (p->chunk_relocs == NULL) {
                return 0;
        }
-       chunk = &p->chunks[p->chunk_relocs_idx];
+       chunk = p->chunk_relocs;
        p->dma_reloc_idx = 0;
        /* FIXME: we assume that each relocs use 4 dwords */
        p->nrelocs = chunk->length_dw / 4;
-       p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
-       if (p->relocs_ptr == NULL) {
-               return -ENOMEM;
-       }
-       p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+       p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL);
        if (p->relocs == NULL) {
                return -ENOMEM;
        }
@@ -101,31 +97,17 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 
        for (i = 0; i < p->nrelocs; i++) {
                struct drm_radeon_cs_reloc *r;
+               struct drm_gem_object *gobj;
                unsigned priority;
 
-               duplicate = false;
                r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
-               for (j = 0; j < i; j++) {
-                       if (r->handle == p->relocs[j].handle) {
-                               p->relocs_ptr[i] = &p->relocs[j];
-                               duplicate = true;
-                               break;
-                       }
-               }
-               if (duplicate) {
-                       p->relocs[i].handle = 0;
-                       continue;
-               }
-
-               p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
-                                                         r->handle);
-               if (p->relocs[i].gobj == NULL) {
+               gobj = drm_gem_object_lookup(ddev, p->filp, r->handle);
+               if (gobj == NULL) {
                        DRM_ERROR("gem object lookup failed 0x%x\n",
                                  r->handle);
                        return -ENOENT;
                }
-               p->relocs_ptr[i] = &p->relocs[i];
-               p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
+               p->relocs[i].robj = gem_to_radeon_bo(gobj);
 
                /* The userspace buffer priorities are from 0 to 15. A higher
                 * number means the buffer is more important.
@@ -184,7 +166,6 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 
                p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
                p->relocs[i].tv.shared = !r->write_domain;
-               p->relocs[i].handle = r->handle;
 
                radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
                                      priority);
@@ -251,22 +232,19 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
 
 static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 {
-       int i, r = 0;
+       struct radeon_bo_list *reloc;
+       int r;
 
-       for (i = 0; i < p->nrelocs; i++) {
+       list_for_each_entry(reloc, &p->validated, tv.head) {
                struct reservation_object *resv;
 
-               if (!p->relocs[i].robj)
-                       continue;
-
-               resv = p->relocs[i].robj->tbo.resv;
+               resv = reloc->robj->tbo.resv;
                r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
-                                    p->relocs[i].tv.shared);
-
+                                    reloc->tv.shared);
                if (r)
-                       break;
+                       return r;
        }
-       return r;
+       return 0;
 }
 
 /* XXX: note that this is called from the legacy UMS CS ioctl as well */
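
radeon_cs_sync_rings() now walks the validated list through the embedded tv.head instead of indexing the reloc array, dropping the per-entry NULL check and covering exactly the buffers that passed validation. A kernel-style sketch of the iteration (entry layout and sync_one() are invented stand-ins):

    #include <linux/list.h>

    struct entry {
            struct list_head head;          /* stands in for tv.head */
            bool shared;
    };

    static int sync_one(struct entry *e)
    {
            return 0;                       /* stand-in for radeon_sync_resv() */
    }

    static int sync_all(struct list_head *validated)
    {
            struct entry *e;
            int r;

            list_for_each_entry(e, validated, head) {
                    r = sync_one(e);
                    if (r)
                            return r;       /* fail fast, as above */
            }
            return 0;
    }
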
@@ -286,10 +264,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
        p->idx = 0;
        p->ib.sa_bo = NULL;
        p->const_ib.sa_bo = NULL;
-       p->chunk_ib_idx = -1;
-       p->chunk_relocs_idx = -1;
-       p->chunk_flags_idx = -1;
-       p->chunk_const_ib_idx = -1;
+       p->chunk_ib = NULL;
+       p->chunk_relocs = NULL;
+       p->chunk_flags = NULL;
+       p->chunk_const_ib = NULL;
        p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
        if (p->chunks_array == NULL) {
                return -ENOMEM;
@@ -316,24 +294,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                        return -EFAULT;
                }
                p->chunks[i].length_dw = user_chunk.length_dw;
-               p->chunks[i].chunk_id = user_chunk.chunk_id;
-               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
-                       p->chunk_relocs_idx = i;
+               if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
+                       p->chunk_relocs = &p->chunks[i];
                }
-               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
-                       p->chunk_ib_idx = i;
+               if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
+                       p->chunk_ib = &p->chunks[i];
                        /* zero length IB isn't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
-               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
-                       p->chunk_const_ib_idx = i;
+               if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
+                       p->chunk_const_ib = &p->chunks[i];
                        /* zero length CONST IB isn't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
-               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
-                       p->chunk_flags_idx = i;
+               if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
+                       p->chunk_flags = &p->chunks[i];
                        /* zero length flags aren't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
@@ -342,10 +319,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                size = p->chunks[i].length_dw;
                cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
                p->chunks[i].user_ptr = cdata;
-               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
+               if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
                        continue;
 
-               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+               if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
                        if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
                                continue;
                }
@@ -358,7 +335,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
                        return -EFAULT;
                }
-               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+               if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
                        p->cs_flags = p->chunks[i].kdata[0];
                        if (p->chunks[i].length_dw > 1)
                                ring = p->chunks[i].kdata[1];
@@ -399,8 +376,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 static int cmp_size_smaller_first(void *priv, struct list_head *a,
                                  struct list_head *b)
 {
-       struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
-       struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);
+       struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
+       struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
 
        /* Sort A before B if A is smaller. */
        return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
@@ -441,13 +418,15 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
 
        if (parser->relocs != NULL) {
                for (i = 0; i < parser->nrelocs; i++) {
-                       if (parser->relocs[i].gobj)
-                               drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
+                       struct radeon_bo *bo = parser->relocs[i].robj;
+                       if (bo == NULL)
+                               continue;
+
+                       drm_gem_object_unreference_unlocked(&bo->gem_base);
                }
        }
        kfree(parser->track);
        kfree(parser->relocs);
-       kfree(parser->relocs_ptr);
        drm_free_large(parser->vm_bos);
        for (i = 0; i < parser->nchunks; i++)
                drm_free_large(parser->chunks[i].kdata);
@@ -462,7 +441,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 {
        int r;
 
-       if (parser->chunk_ib_idx == -1)
+       if (parser->chunk_ib == NULL)
                return 0;
 
        if (parser->cs_flags & RADEON_CS_USE_VM)
@@ -505,9 +484,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
        if (r)
                return r;
 
-       radeon_sync_resv(p->rdev, &p->ib.sync, vm->page_directory->tbo.resv,
-                        true);
-
        r = radeon_vm_clear_freed(rdev, vm);
        if (r)
                return r;
@@ -525,10 +501,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
        for (i = 0; i < p->nrelocs; i++) {
                struct radeon_bo *bo;
 
-               /* ignore duplicates */
-               if (p->relocs_ptr[i] != &p->relocs[i])
-                       continue;
-
                bo = p->relocs[i].robj;
                bo_va = radeon_vm_bo_find(vm, bo);
                if (bo_va == NULL) {
@@ -553,7 +525,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
        struct radeon_vm *vm = &fpriv->vm;
        int r;
 
-       if (parser->chunk_ib_idx == -1)
+       if (parser->chunk_ib == NULL)
                return 0;
        if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
                return 0;
@@ -587,7 +559,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
        }
 
        if ((rdev->family >= CHIP_TAHITI) &&
-           (parser->chunk_const_ib_idx != -1)) {
+           (parser->chunk_const_ib != NULL)) {
                r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
        } else {
                r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
@@ -614,7 +586,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
        struct radeon_vm *vm = NULL;
        int r;
 
-       if (parser->chunk_ib_idx == -1)
+       if (parser->chunk_ib == NULL)
                return 0;
 
        if (parser->cs_flags & RADEON_CS_USE_VM) {
@@ -622,8 +594,8 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
                vm = &fpriv->vm;
 
                if ((rdev->family >= CHIP_TAHITI) &&
-                   (parser->chunk_const_ib_idx != -1)) {
-                       ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
+                   (parser->chunk_const_ib != NULL)) {
+                       ib_chunk = parser->chunk_const_ib;
                        if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
                                DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
                                return -EINVAL;
@@ -642,13 +614,13 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
                                return -EFAULT;
                }
 
-               ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+               ib_chunk = parser->chunk_ib;
                if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
                        DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
                        return -EINVAL;
                }
        }
-       ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+       ib_chunk = parser->chunk_ib;
 
        r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
                           vm, ib_chunk->length_dw * 4);
@@ -740,7 +712,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
                           struct radeon_cs_packet *pkt,
                           unsigned idx)
 {
-       struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+       struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
        struct radeon_device *rdev = p->rdev;
        uint32_t header;
 
@@ -834,7 +806,7 @@ void radeon_cs_dump_packet(struct radeon_cs_parser *p,
  * GPU offset using the provided start.
  **/
 int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
-                               struct radeon_cs_reloc **cs_reloc,
+                               struct radeon_bo_list **cs_reloc,
                                int nomm)
 {
        struct radeon_cs_chunk *relocs_chunk;
@@ -842,12 +814,12 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
        unsigned idx;
        int r;
 
-       if (p->chunk_relocs_idx == -1) {
+       if (p->chunk_relocs == NULL) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }
        *cs_reloc = NULL;
-       relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+       relocs_chunk = p->chunk_relocs;
        r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
        if (r)
                return r;
@@ -873,6 +845,6 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
                        (u64)relocs_chunk->kdata[idx + 3] << 32;
                (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
        } else
-               *cs_reloc = p->relocs_ptr[(idx / 4)];
+               *cs_reloc = &p->relocs[(idx / 4)];
        return 0;
 }
index 85f38ee1188887722f70ed2e6d9886fcd9193e54..45e54060ee97eea33cd005b993c6ca704cf80a16 100644 (file)
@@ -227,11 +227,24 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
        return ret;
 }
 
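+/*
+ * radeon_set_cursor - pin the cursor BO and program its GPU address
+ *
+ * Pins the cursor object into VRAM (restricted to a 27 bit offset on
+ * pre-AVIVO parts) and writes the resulting address to the cursor
+ * registers.  On failure the reference on the object is dropped.
+ */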
-static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
-                             uint64_t gpu_addr, int hot_x, int hot_y)
+static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct radeon_device *rdev = crtc->dev->dev_private;
+       struct radeon_bo *robj = gem_to_radeon_bo(obj);
+       uint64_t gpu_addr;
+       int ret;
+
+       ret = radeon_bo_reserve(robj, false);
+       if (unlikely(ret != 0))
+               goto fail;
+       /* Only 27 bit offset for legacy cursor */
+       ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+                                      ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+                                      &gpu_addr);
+       radeon_bo_unreserve(robj);
+       if (ret)
+               goto fail;
 
        if (ASIC_IS_DCE4(rdev)) {
                WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
@@ -253,18 +266,12 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
                WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
        }
 
-       if (hot_x != radeon_crtc->cursor_hot_x ||
-           hot_y != radeon_crtc->cursor_hot_y) {
-               int x, y;
-
-               x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x;
-               y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y;
+       return 0;
 
-               radeon_cursor_move_locked(crtc, x, y);
+fail:
+       drm_gem_object_unreference_unlocked(obj);
 
-               radeon_crtc->cursor_hot_x = hot_x;
-               radeon_crtc->cursor_hot_y = hot_y;
-       }
+       return ret;
 }
 
 int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
@@ -276,10 +283,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
                            int32_t hot_y)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-       struct radeon_device *rdev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
-       struct radeon_bo *robj;
-       uint64_t gpu_addr;
        int ret;
 
        if (!handle) {
@@ -301,41 +305,76 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
                return -ENOENT;
        }
 
-       robj = gem_to_radeon_bo(obj);
-       ret = radeon_bo_reserve(robj, false);
-       if (unlikely(ret != 0))
-               goto fail;
-       /* Only 27 bit offset for legacy cursor */
-       ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
-                                      ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
-                                      &gpu_addr);
-       radeon_bo_unreserve(robj);
-       if (ret)
-               goto fail;
-
        radeon_crtc->cursor_width = width;
        radeon_crtc->cursor_height = height;
 
        radeon_lock_cursor(crtc, true);
-       radeon_set_cursor(crtc, obj, gpu_addr, hot_x, hot_y);
-       radeon_show_cursor(crtc);
+
+       if (hot_x != radeon_crtc->cursor_hot_x ||
+           hot_y != radeon_crtc->cursor_hot_y) {
+               int x, y;
+
+               x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x;
+               y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y;
+
+               radeon_cursor_move_locked(crtc, x, y);
+
+               radeon_crtc->cursor_hot_x = hot_x;
+               radeon_crtc->cursor_hot_y = hot_y;
+       }
+
+       ret = radeon_set_cursor(crtc, obj);
+
+       if (ret)
+               DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
+                         ret);
+       else
+               radeon_show_cursor(crtc);
+
        radeon_lock_cursor(crtc, false);
 
 unpin:
        if (radeon_crtc->cursor_bo) {
-               robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+               struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
                ret = radeon_bo_reserve(robj, false);
                if (likely(ret == 0)) {
                        radeon_bo_unpin(robj);
                        radeon_bo_unreserve(robj);
                }
-               drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+               if (radeon_crtc->cursor_bo != obj)
+                       drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
        }
 
        radeon_crtc->cursor_bo = obj;
        return 0;
-fail:
-       drm_gem_object_unreference_unlocked(obj);
+}
 
-       return ret;
+/**
+ * radeon_cursor_reset - Re-set the current cursor, if any.
+ *
+ * @crtc: drm crtc
+ *
+ * If the CRTC passed in currently has a cursor assigned, this function
+ * makes sure it's visible.
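+ *
+ * Intended to be called after a mode set, which may have clobbered the
+ * cursor registers (see radeon_crtc_mode_set()).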
+ */
+void radeon_cursor_reset(struct drm_crtc *crtc)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       int ret;
+
+       if (radeon_crtc->cursor_bo) {
+               radeon_lock_cursor(crtc, true);
+
+               radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
+                                         radeon_crtc->cursor_y);
+
+               ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
+               if (ret)
+                       DRM_ERROR("radeon_set_cursor returned %d, not showing "
+                                 "cursor\n", ret);
+               else
+                       radeon_show_cursor(crtc);
+
+               radeon_lock_cursor(crtc, false);
+       }
 }
index 0ea1db83d57390e70874f3182893b277b56c6f94..29b9220ec3998daee56bac0969de169268964c13 100644 (file)
@@ -48,10 +48,40 @@ struct radeon_fbdev {
        struct radeon_device *rdev;
 };
 
+/**
+ * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev.
+ *
+ * @info: fbdev info
+ *
+ * This function hides the cursor on all CRTCs used by fbdev.
+ */
+static int radeon_fb_helper_set_par(struct fb_info *info)
+{
+       int ret;
+
+       ret = drm_fb_helper_set_par(info);
+
+       /* XXX: with universal plane support fbdev will automatically disable
+        * all non-primary planes (including the cursor)
+        */
+       if (ret == 0) {
+               struct drm_fb_helper *fb_helper = info->par;
+               int i;
+
+               for (i = 0; i < fb_helper->crtc_count; i++) {
+                       struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc;
+
+                       radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
+               }
+       }
+
+       return ret;
+}
+
 static struct fb_ops radeonfb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
-       .fb_set_par = drm_fb_helper_set_par,
+       .fb_set_par = radeon_fb_helper_set_par,
        .fb_fillrect = cfb_fillrect,
        .fb_copyarea = cfb_copyarea,
        .fb_imageblit = cfb_imageblit,
index 12cfaeac12056182c46223c3b4d3d05c3b0be876..fe48f229043e33720c26bcdd40949d7a8fd41b00 100644 (file)
@@ -548,7 +548,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
                                    struct radeon_bo_va *bo_va)
 {
        struct ttm_validate_buffer tv, *entry;
-       struct radeon_cs_reloc *vm_bos;
+       struct radeon_bo_list *vm_bos;
        struct ww_acquire_ctx ticket;
        struct list_head list;
        unsigned domain;
@@ -564,7 +564,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
        if (!vm_bos)
                return;
 
-       r = ttm_eu_reserve_buffers(&ticket, &list, true);
+       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
        if (r)
                goto error_free;
 
index f4dd26ae33e569073bfaaeb86be8495f20aa4a7c..3cf9c1fa64756fb6b4430d5e351f21aee874ad1d 100644 (file)
@@ -800,6 +800,8 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
 
        /* Get associated drm_crtc: */
+       if (!rdev->mode_info.crtcs[crtc])
+               return -EINVAL;
        drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
 
        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
index cafb1ccf2ec3213b2dc394314dbece153d88ef00..678b4386540d52ea22a5f2cb3b9e583f87232cfe 100644 (file)
@@ -1054,6 +1054,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
                        DRM_ERROR("Mode need scaling but only first crtc can do that.\n");
                }
        }
+       radeon_cursor_reset(crtc);
        return 0;
 }
 
index f3d87cdd5c9dcafbae4850919ea5a9bd50dc7a00..390db897f322b48b2cd31399e0fddf5264535780 100644 (file)
@@ -818,6 +818,7 @@ extern int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
                                   int32_t hot_y);
 extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
                                   int x, int y);
+extern void radeon_cursor_reset(struct drm_crtc *crtc);
 
 extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
                                      unsigned int flags,
index 87b00d902bf756d362ec12249105b44e1ae64ac9..7d68223eb4692a026fbad21b68bbe9362ee6d87a 100644 (file)
@@ -233,6 +233,13 @@ int radeon_bo_create(struct radeon_device *rdev,
        if (!(rdev->flags & RADEON_IS_PCIE))
                bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
 
+#ifdef CONFIG_X86_32
+       /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
+        * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
+        */
+       bo->flags &= ~RADEON_GEM_GTT_WC;
+#endif
+
        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocation are uninterruptible */
        down_read(&rdev->pm.mclk_lock);
@@ -502,19 +509,20 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
                            struct ww_acquire_ctx *ticket,
                            struct list_head *head, int ring)
 {
-       struct radeon_cs_reloc *lobj;
-       struct radeon_bo *bo;
+       struct radeon_bo_list *lobj;
+       struct list_head duplicates;
        int r;
        u64 bytes_moved = 0, initial_bytes_moved;
        u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
 
-       r = ttm_eu_reserve_buffers(ticket, head, true);
+       INIT_LIST_HEAD(&duplicates);
+       r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
        if (unlikely(r != 0)) {
                return r;
        }
 
        list_for_each_entry(lobj, head, tv.head) {
-               bo = lobj->robj;
+               struct radeon_bo *bo = lobj->robj;
                if (!bo->pin_count) {
                        u32 domain = lobj->prefered_domains;
                        u32 allowed = lobj->allowed_domains;
@@ -562,6 +570,12 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
                lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                lobj->tiling_flags = bo->tiling_flags;
        }
+
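+       /*
+        * Duplicate entries share a BO that was already validated above,
+        * so just copy over the GPU offset and tiling flags.
+        */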
+       list_for_each_entry(lobj, &duplicates, tv.head) {
+               lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
+               lobj->tiling_flags = lobj->robj->tiling_flags;
+       }
+
        return 0;
 }
 
index 9db74a96ef617d7d66292023d8250ca3821e236a..ce075cb08cb2b7b618a78c58d74915f06df3c8ff 100644 (file)
@@ -38,7 +38,7 @@ TRACE_EVENT(radeon_cs,
 
            TP_fast_assign(
                           __entry->ring = p->ring;
-                          __entry->dw = p->chunks[p->chunk_ib_idx].length_dw;
+                          __entry->dw = p->chunk_ib->length_dw;
                           __entry->fences = radeon_fence_count_emitted(
                                p->rdev, p->ring);
                           ),
index cbe7b32d181c2f3bf2972b14f12c72597d3e15f5..d02aa1d0f5885408c877056bd4ac1ab0e1ed6f12 100644 (file)
@@ -196,7 +196,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
        rbo = container_of(bo, struct radeon_bo, tbo);
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
-               if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
+               if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
                else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
                         bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
index 11b6624692536c13dbea4cc4e9b4c75b46902d1d..c10b2aec6450fa8ceb366a691ea0898aff14dcbc 100644 (file)
@@ -488,12 +488,12 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
                               unsigned buf_sizes[], bool *has_msg_cmd)
 {
        struct radeon_cs_chunk *relocs_chunk;
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        unsigned idx, cmd, offset;
        uint64_t start, end;
        int r;
 
-       relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+       relocs_chunk = p->chunk_relocs;
        offset = radeon_get_ib_value(p, data0);
        idx = radeon_get_ib_value(p, data1);
        if (idx >= relocs_chunk->length_dw) {
@@ -502,7 +502,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
                return -EINVAL;
        }
 
-       reloc = p->relocs_ptr[(idx / 4)];
+       reloc = &p->relocs[(idx / 4)];
        start = reloc->gpu_offset;
        end = start + radeon_bo_size(reloc->robj);
        start += offset;
@@ -610,13 +610,13 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
                [0x00000003]    =       2048,
        };
 
-       if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
+       if (p->chunk_ib->length_dw % 16) {
                DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
-                         p->chunks[p->chunk_ib_idx].length_dw);
+                         p->chunk_ib->length_dw);
                return -EINVAL;
        }
 
-       if (p->chunk_relocs_idx == -1) {
+       if (p->chunk_relocs == NULL) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }
@@ -640,7 +640,7 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
                        DRM_ERROR("Unknown packet type %d !\n", pkt.type);
                        return -EINVAL;
                }
-       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+       } while (p->idx < p->chunk_ib->length_dw);
 
        if (!has_msg_cmd) {
                DRM_ERROR("UVD-IBs need a msg command!\n");
index 9e85757d55991a6cd0e077cada024d87f0484c08..976fe432f4e26fe83682d655ea983817cc19f05f 100644 (file)
@@ -453,11 +453,11 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
                        unsigned size)
 {
        struct radeon_cs_chunk *relocs_chunk;
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        uint64_t start, end, offset;
        unsigned idx;
 
-       relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+       relocs_chunk = p->chunk_relocs;
        offset = radeon_get_ib_value(p, lo);
        idx = radeon_get_ib_value(p, hi);
 
@@ -467,7 +467,7 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
                return -EINVAL;
        }
 
-       reloc = p->relocs_ptr[(idx / 4)];
+       reloc = &p->relocs[(idx / 4)];
        start = reloc->gpu_offset;
        end = start + radeon_bo_size(reloc->robj);
        start += offset;
@@ -534,7 +534,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
        uint32_t *size = &tmp;
        int i, r;
 
-       while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
+       while (p->idx < p->chunk_ib->length_dw) {
                uint32_t len = radeon_get_ib_value(p, p->idx);
                uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);
 
index 0b10f3a03ce2f20ac66103c932f4d6a51c8fd743..cde48c42b30ad4b63c27dde6741f8637840670fc 100644 (file)
@@ -125,41 +125,37 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
  * Add the page directory to the list of BOs to
  * validate for command submission (cayman+).
  */
-struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
+struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
                                          struct radeon_vm *vm,
                                          struct list_head *head)
 {
-       struct radeon_cs_reloc *list;
+       struct radeon_bo_list *list;
        unsigned i, idx;
 
        list = drm_malloc_ab(vm->max_pde_used + 2,
-                            sizeof(struct radeon_cs_reloc));
+                            sizeof(struct radeon_bo_list));
        if (!list)
                return NULL;
 
        /* add the vm page table to the list */
-       list[0].gobj = NULL;
        list[0].robj = vm->page_directory;
        list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
        list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
        list[0].tv.bo = &vm->page_directory->tbo;
        list[0].tv.shared = true;
        list[0].tiling_flags = 0;
-       list[0].handle = 0;
        list_add(&list[0].tv.head, head);
 
        for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
                if (!vm->page_tables[i].bo)
                        continue;
 
-               list[idx].gobj = NULL;
                list[idx].robj = vm->page_tables[i].bo;
                list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
                list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
                list[idx].tv.bo = &list[idx].robj->tbo;
                list[idx].tv.shared = true;
                list[idx].tiling_flags = 0;
-               list[idx].handle = 0;
                list_add(&list[idx++].tv.head, head);
        }
 
@@ -491,7 +487,9 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                        tmp->vm = vm;
                        tmp->addr = bo_va->addr;
                        tmp->bo = radeon_bo_ref(bo_va->bo);
+                       spin_lock(&vm->status_lock);
                        list_add(&tmp->vm_status, &vm->freed);
+                       spin_unlock(&vm->status_lock);
                }
 
                interval_tree_remove(&bo_va->it, &vm->va);
@@ -802,11 +800,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
  *
  * Global and local mutex must be locked!
  */
-static void radeon_vm_update_ptes(struct radeon_device *rdev,
-                                 struct radeon_vm *vm,
-                                 struct radeon_ib *ib,
-                                 uint64_t start, uint64_t end,
-                                 uint64_t dst, uint32_t flags)
+static int radeon_vm_update_ptes(struct radeon_device *rdev,
+                                struct radeon_vm *vm,
+                                struct radeon_ib *ib,
+                                uint64_t start, uint64_t end,
+                                uint64_t dst, uint32_t flags)
 {
        uint64_t mask = RADEON_VM_PTE_COUNT - 1;
        uint64_t last_pte = ~0, last_dst = ~0;
@@ -819,8 +817,12 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
                struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
                unsigned nptes;
                uint64_t pte;
+               int r;
 
                radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
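+               /* reserve a slot for the shared fence added after the update */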
+               r = reservation_object_reserve_shared(pt->tbo.resv);
+               if (r)
+                       return r;
 
                if ((addr & ~mask) == (end & ~mask))
                        nptes = end - addr;
@@ -854,6 +856,8 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
                                    last_pte + 8 * count,
                                    last_dst, flags);
        }
+
+       return 0;
 }
 
 /**
@@ -878,7 +882,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
        end >>= radeon_vm_block_size;
 
        for (i = start; i <= end; ++i)
-               radeon_bo_fence(vm->page_tables[i].bo, fence, false);
+               radeon_bo_fence(vm->page_tables[i].bo, fence, true);
 }
 
 /**
@@ -911,7 +915,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
                return -EINVAL;
        }
 
+       spin_lock(&vm->status_lock);
        list_del_init(&bo_va->vm_status);
+       spin_unlock(&vm->status_lock);
 
        bo_va->flags &= ~RADEON_VM_PAGE_VALID;
        bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
@@ -987,9 +993,13 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
                        radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use);
        }
 
-       radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
-                             bo_va->it.last + 1, addr,
-                             radeon_vm_page_flags(bo_va->flags));
+       r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
+                                 bo_va->it.last + 1, addr,
+                                 radeon_vm_page_flags(bo_va->flags));
+       if (r) {
+               radeon_ib_free(rdev, &ib);
+               return r;
+       }
 
        radeon_asic_vm_pad_ib(rdev, &ib);
        WARN_ON(ib.length_dw > ndw);
@@ -1022,17 +1032,25 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 int radeon_vm_clear_freed(struct radeon_device *rdev,
                          struct radeon_vm *vm)
 {
-       struct radeon_bo_va *bo_va, *tmp;
+       struct radeon_bo_va *bo_va;
        int r;
 
-       list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+       spin_lock(&vm->status_lock);
+       while (!list_empty(&vm->freed)) {
+               bo_va = list_first_entry(&vm->freed,
+                       struct radeon_bo_va, vm_status);
+               spin_unlock(&vm->status_lock);
+
                r = radeon_vm_bo_update(rdev, bo_va, NULL);
                radeon_bo_unref(&bo_va->bo);
                radeon_fence_unref(&bo_va->last_pt_update);
                kfree(bo_va);
                if (r)
                        return r;
+
+               spin_lock(&vm->status_lock);
        }
+       spin_unlock(&vm->status_lock);
        return 0;
 
 }
@@ -1051,14 +1069,23 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
 int radeon_vm_clear_invalids(struct radeon_device *rdev,
                             struct radeon_vm *vm)
 {
-       struct radeon_bo_va *bo_va, *tmp;
+       struct radeon_bo_va *bo_va;
        int r;
 
-       list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) {
+       spin_lock(&vm->status_lock);
+       while (!list_empty(&vm->invalidated)) {
+               bo_va = list_first_entry(&vm->invalidated,
+                       struct radeon_bo_va, vm_status);
+               spin_unlock(&vm->status_lock);
+
                r = radeon_vm_bo_update(rdev, bo_va, NULL);
                if (r)
                        return r;
+
+               spin_lock(&vm->status_lock);
        }
+       spin_unlock(&vm->status_lock);
+
        return 0;
 }
 
@@ -1081,6 +1108,7 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
 
        mutex_lock(&vm->mutex);
        interval_tree_remove(&bo_va->it, &vm->va);
+       spin_lock(&vm->status_lock);
        list_del(&bo_va->vm_status);
 
        if (bo_va->addr) {
@@ -1090,6 +1118,7 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
                radeon_fence_unref(&bo_va->last_pt_update);
                kfree(bo_va);
        }
+       spin_unlock(&vm->status_lock);
 
        mutex_unlock(&vm->mutex);
 }
@@ -1110,10 +1139,10 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
 
        list_for_each_entry(bo_va, &bo->va, bo_list) {
                if (bo_va->addr) {
-                       mutex_lock(&bo_va->vm->mutex);
+                       spin_lock(&bo_va->vm->status_lock);
                        list_del(&bo_va->vm_status);
                        list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
-                       mutex_unlock(&bo_va->vm->mutex);
+                       spin_unlock(&bo_va->vm->status_lock);
                }
        }
 }
@@ -1141,6 +1170,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
        }
        mutex_init(&vm->mutex);
        vm->va = RB_ROOT;
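+       /* status_lock protects the invalidated and freed lists below */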
+       spin_lock_init(&vm->status_lock);
        INIT_LIST_HEAD(&vm->invalidated);
        INIT_LIST_HEAD(&vm->freed);
 
index cf4c420b55727083696a589631b0ac405b5567fc..32e354b8b0aba6b96cfc8e94b1519b7fca4f47fb 100644 (file)
@@ -5893,7 +5893,7 @@ static void si_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
        tmp |= TMIN(0);
        WREG32(CG_FDO_CTRL2, tmp);
 
-       tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
+       tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
        tmp |= FDO_PWM_MODE(mode);
        WREG32(CG_FDO_CTRL2, tmp);
 }
@@ -6098,7 +6098,7 @@ static int si_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
        tmp |= TARGET_PERIOD(tach_period);
        WREG32(CG_TACH_CTRL, tmp);
 
-       si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+       si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
 
        return 0;
 }
@@ -6114,7 +6114,7 @@ static void si_fan_ctrl_set_default_mode(struct radeon_device *rdev)
                tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
                WREG32(CG_FDO_CTRL2, tmp);
 
-               tmp = RREG32(CG_FDO_CTRL2) & TMIN_MASK;
+               tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
                tmp |= TMIN(si_pi->t_min);
                WREG32(CG_FDO_CTRL2, tmp);
                si_pi->fan_ctrl_is_in_default_mode = true;
index c549c16a4fe48f2f7d28f3d340a9eca43d3a2249..4069be89e5852852ff4e884630571908dda6e387 100644 (file)
 
 #define        CG_FDO_CTRL0                                    0x754
 #define                FDO_STATIC_DUTY(x)                      ((x) << 0)
-#define                FDO_STATIC_DUTY_MASK                    0x0000000F
+#define                FDO_STATIC_DUTY_MASK                    0x000000FF
 #define                FDO_STATIC_DUTY_SHIFT                   0
 #define        CG_FDO_CTRL1                                    0x758
 #define                FMAX_DUTY100(x)                         ((x) << 0)
-#define                FMAX_DUTY100_MASK                       0x0000000F
+#define                FMAX_DUTY100_MASK                       0x000000FF
 #define                FMAX_DUTY100_SHIFT                      0
 #define        CG_FDO_CTRL2                                    0x75C
 #define                TMIN(x)                                 ((x) << 0)
-#define                TMIN_MASK                               0x0000000F
+#define                TMIN_MASK                               0x000000FF
 #define                TMIN_SHIFT                              0
 #define                FDO_PWM_MODE(x)                         ((x) << 11)
-#define                FDO_PWM_MODE_MASK                       (3 << 11)
+#define                FDO_PWM_MODE_MASK                       (7 << 11)
 #define                FDO_PWM_MODE_SHIFT                      11
 #define                TACH_PWM_RESP_RATE(x)                   ((x) << 25)
 #define                TACH_PWM_RESP_RATE_MASK                 (0x7f << 25)
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
new file mode 100644 (file)
index 0000000..ca9f085
--- /dev/null
@@ -0,0 +1,17 @@
+config DRM_ROCKCHIP
+       tristate "DRM Support for Rockchip"
+       depends on DRM && ROCKCHIP_IOMMU
+       select DRM_KMS_HELPER
+       select DRM_KMS_FB_HELPER
+       select DRM_PANEL
+       select FB_CFB_FILLRECT
+       select FB_CFB_COPYAREA
+       select FB_CFB_IMAGEBLIT
+       select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
+       select VIDEOMODE_HELPERS
+       help
+         Choose this option if you have a Rockchip SoC.
+         This driver provides kernel mode setting and buffer
+         management to userspace. This driver does not provide
+         2D or 3D acceleration; acceleration is performed by other
+         IP found on the SoC.
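+
+         To compile this driver as a module, choose M here: the module
+         will be called rockchipdrm.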
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
new file mode 100644 (file)
index 0000000..2cb0672
--- /dev/null
@@ -0,0 +1,8 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o rockchip_drm_fbdev.o \
+               rockchip_drm_gem.o
+
+obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_drm_vop.o
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
new file mode 100644 (file)
index 0000000..a798c7c
--- /dev/null
@@ -0,0 +1,551 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * based on exynos_drm_drv.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/dma-iommu.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_graph.h>
+#include <linux/component.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_fb.h"
+#include "rockchip_drm_fbdev.h"
+#include "rockchip_drm_gem.h"
+
+#define DRIVER_NAME    "rockchip"
+#define DRIVER_DESC    "RockChip Soc DRM"
+#define DRIVER_DATE    "20140818"
+#define DRIVER_MAJOR   1
+#define DRIVER_MINOR   0
+
+/*
+ * Attach a (component) device to the shared drm dma mapping from master drm
+ * device.  This is used by the VOPs to map GEM buffers to a common DMA
+ * mapping.
+ */
+int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
+                                  struct device *dev)
+{
+       struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping;
+       int ret;
+
+       ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
+
+       dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+       return arm_iommu_attach_device(dev, mapping);
+}
+EXPORT_SYMBOL_GPL(rockchip_drm_dma_attach_device);
+
+void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
+                                   struct device *dev)
+{
+       arm_iommu_detach_device(dev);
+}
+EXPORT_SYMBOL_GPL(rockchip_drm_dma_detach_device);
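+
+/*
+ * A minimal sketch of how a component sub-driver would use the pair
+ * above (vop_bind is hypothetical, not part of this file):
+ *
+ *     static int vop_bind(struct device *dev, struct device *master,
+ *                         void *data)
+ *     {
+ *             struct drm_device *drm_dev = data;
+ *
+ *             return rockchip_drm_dma_attach_device(drm_dev, dev);
+ *     }
+ *
+ * with a matching rockchip_drm_dma_detach_device() call on unbind.
+ */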
+
+int rockchip_register_crtc_funcs(struct drm_device *dev,
+                                const struct rockchip_crtc_funcs *crtc_funcs,
+                                int pipe)
+{
+       struct rockchip_drm_private *priv = dev->dev_private;
+
+       if (pipe >= ROCKCHIP_MAX_CRTC)
+               return -EINVAL;
+
+       priv->crtc_funcs[pipe] = crtc_funcs;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rockchip_register_crtc_funcs);
+
+void rockchip_unregister_crtc_funcs(struct drm_device *dev, int pipe)
+{
+       struct rockchip_drm_private *priv = dev->dev_private;
+
+       if (pipe >= ROCKCHIP_MAX_CRTC)
+               return;
+
+       priv->crtc_funcs[pipe] = NULL;
+}
+EXPORT_SYMBOL_GPL(rockchip_unregister_crtc_funcs);
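+
+/*
+ * A CRTC implementation registers its vblank hooks through this
+ * interface; a minimal sketch (the vop_* names are hypothetical):
+ *
+ *     static const struct rockchip_crtc_funcs vop_crtc_funcs = {
+ *             .enable_vblank = vop_enable_vblank,
+ *             .disable_vblank = vop_disable_vblank,
+ *     };
+ *
+ *     ret = rockchip_register_crtc_funcs(drm_dev, &vop_crtc_funcs, pipe);
+ */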
+
+static struct drm_crtc *rockchip_crtc_from_pipe(struct drm_device *drm,
+                                               int pipe)
+{
+       struct drm_crtc *crtc;
+       int i = 0;
+
+       list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+               if (i++ == pipe)
+                       return crtc;
+
+       return NULL;
+}
+
+static int rockchip_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
+{
+       struct rockchip_drm_private *priv = dev->dev_private;
+       struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe);
+
+       if (crtc && priv->crtc_funcs[pipe] &&
+           priv->crtc_funcs[pipe]->enable_vblank)
+               return priv->crtc_funcs[pipe]->enable_vblank(crtc);
+
+       return 0;
+}
+
+static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
+{
+       struct rockchip_drm_private *priv = dev->dev_private;
+       struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe);
+
+       if (crtc && priv->crtc_funcs[pipe] &&
+           priv->crtc_funcs[pipe]->disable_vblank)
+               priv->crtc_funcs[pipe]->disable_vblank(crtc);
+}
+
+static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
+{
+       struct rockchip_drm_private *private;
+       struct dma_iommu_mapping *mapping;
+       struct device *dev = drm_dev->dev;
+       int ret;
+
+       private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
+       if (!private)
+               return -ENOMEM;
+
+       drm_dev->dev_private = private;
+
+       drm_mode_config_init(drm_dev);
+
+       rockchip_drm_mode_config_init(drm_dev);
+
+       dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+                                     GFP_KERNEL);
+       if (!dev->dma_parms) {
+               ret = -ENOMEM;
+               goto err_config_cleanup;
+       }
+
+       /* TODO(djkurtz): fetch the mapping start/size from somewhere */
+       mapping = arm_iommu_create_mapping(&platform_bus_type, 0x00000000,
+                                          SZ_2G);
+       if (IS_ERR(mapping)) {
+               ret = PTR_ERR(mapping);
+               goto err_config_cleanup;
+       }
+
+       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto err_release_mapping;
+
+       dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+       ret = arm_iommu_attach_device(dev, mapping);
+       if (ret)
+               goto err_release_mapping;
+
+       /* Try to bind all sub drivers. */
+       ret = component_bind_all(dev, drm_dev);
+       if (ret)
+               goto err_detach_device;
+
+       /* init kms poll for handling hpd */
+       drm_kms_helper_poll_init(drm_dev);
+
+       /*
+        * Enable drm irq mode: with irq_enabled = true the vblank
+        * machinery can be used.
+        */
+       drm_dev->irq_enabled = true;
+
+       ret = drm_vblank_init(drm_dev, ROCKCHIP_MAX_CRTC);
+       if (ret)
+               goto err_kms_helper_poll_fini;
+
+       /*
+        * With vblank_disable_allowed = true, the vblank interrupt is
+        * disabled by the drm timer once the last process gives up
+        * ownership of the vblank event (after drm_vblank_put() is called).
+        */
+       drm_dev->vblank_disable_allowed = true;
+
+       ret = rockchip_drm_fbdev_init(drm_dev);
+       if (ret)
+               goto err_vblank_cleanup;
+
+       return 0;
+err_vblank_cleanup:
+       drm_vblank_cleanup(drm_dev);
+err_kms_helper_poll_fini:
+       drm_kms_helper_poll_fini(drm_dev);
+       component_unbind_all(dev, drm_dev);
+err_detach_device:
+       arm_iommu_detach_device(dev);
+err_release_mapping:
+       arm_iommu_release_mapping(dev->archdata.mapping);
+err_config_cleanup:
+       drm_mode_config_cleanup(drm_dev);
+       drm_dev->dev_private = NULL;
+       return ret;
+}
+
+static int rockchip_drm_unload(struct drm_device *drm_dev)
+{
+       struct device *dev = drm_dev->dev;
+
+       rockchip_drm_fbdev_fini(drm_dev);
+       drm_vblank_cleanup(drm_dev);
+       drm_kms_helper_poll_fini(drm_dev);
+       component_unbind_all(dev, drm_dev);
+       arm_iommu_detach_device(dev);
+       arm_iommu_release_mapping(dev->archdata.mapping);
+       drm_mode_config_cleanup(drm_dev);
+       drm_dev->dev_private = NULL;
+
+       return 0;
+}
+
+static void rockchip_drm_lastclose(struct drm_device *dev)
+{
+       struct rockchip_drm_private *priv = dev->dev_private;
+
+       drm_fb_helper_restore_fbdev_mode_unlocked(&priv->fbdev_helper);
+}
+
+static const struct file_operations rockchip_drm_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .mmap = rockchip_gem_mmap,
+       .poll = drm_poll,
+       .read = drm_read,
+       .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = drm_compat_ioctl,
+#endif
+       .release = drm_release,
+};
+
+static const struct vm_operations_struct rockchip_drm_vm_ops = {
+       .open = drm_gem_vm_open,
+       .close = drm_gem_vm_close,
+};
+
+static struct drm_driver rockchip_drm_driver = {
+       .driver_features        = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+       .load                   = rockchip_drm_load,
+       .unload                 = rockchip_drm_unload,
+       .lastclose              = rockchip_drm_lastclose,
+       .get_vblank_counter     = drm_vblank_count,
+       .enable_vblank          = rockchip_drm_crtc_enable_vblank,
+       .disable_vblank         = rockchip_drm_crtc_disable_vblank,
+       .gem_vm_ops             = &rockchip_drm_vm_ops,
+       .gem_free_object        = rockchip_gem_free_object,
+       .dumb_create            = rockchip_gem_dumb_create,
+       .dumb_map_offset        = rockchip_gem_dumb_map_offset,
+       .dumb_destroy           = drm_gem_dumb_destroy,
+       .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
+       .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
+       .gem_prime_import       = drm_gem_prime_import,
+       .gem_prime_export       = drm_gem_prime_export,
+       .gem_prime_get_sg_table = rockchip_gem_prime_get_sg_table,
+       .gem_prime_vmap         = rockchip_gem_prime_vmap,
+       .gem_prime_vunmap       = rockchip_gem_prime_vunmap,
+       .gem_prime_mmap         = rockchip_gem_mmap_buf,
+       .fops                   = &rockchip_drm_driver_fops,
+       .name   = DRIVER_NAME,
+       .desc   = DRIVER_DESC,
+       .date   = DRIVER_DATE,
+       .major  = DRIVER_MAJOR,
+       .minor  = DRIVER_MINOR,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int rockchip_drm_sys_suspend(struct device *dev)
+{
+       struct drm_device *drm = dev_get_drvdata(dev);
+       struct drm_connector *connector;
+
+       if (!drm)
+               return 0;
+
+       drm_modeset_lock_all(drm);
+       list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+               int old_dpms = connector->dpms;
+
+               if (connector->funcs->dpms)
+                       connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
+
+               /* Set the old mode back to the connector for resume */
+               connector->dpms = old_dpms;
+       }
+       drm_modeset_unlock_all(drm);
+
+       return 0;
+}
+
+static int rockchip_drm_sys_resume(struct device *dev)
+{
+       struct drm_device *drm = dev_get_drvdata(dev);
+       struct drm_connector *connector;
+       enum drm_connector_status status;
+       bool changed = false;
+
+       if (!drm)
+               return 0;
+
+       drm_modeset_lock_all(drm);
+       list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+               int desired_mode = connector->dpms;
+
+               /*
+                * At suspend time we turned the connector off but kept the
+                * desired DPMS mode in connector->dpms, so at this point
+                * the actual hardware state must be DRM_MODE_DPMS_OFF.
+                */
+               connector->dpms = DRM_MODE_DPMS_OFF;
+
+               /*
+                * If the connector has been disconnected during suspend,
+                * disconnect it from the encoder and leave it off. We'll notify
+                * userspace at the end.
+                */
+               if (desired_mode == DRM_MODE_DPMS_ON) {
+                       status = connector->funcs->detect(connector, true);
+                       if (status == connector_status_disconnected) {
+                               connector->encoder = NULL;
+                               connector->status = status;
+                               changed = true;
+                               continue;
+                       }
+               }
+               if (connector->funcs->dpms)
+                       connector->funcs->dpms(connector, desired_mode);
+       }
+       drm_modeset_unlock_all(drm);
+
+       drm_helper_resume_force_mode(drm);
+
+       if (changed)
+               drm_kms_helper_hotplug_event(drm);
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops rockchip_drm_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(rockchip_drm_sys_suspend,
+                               rockchip_drm_sys_resume)
+};
+
+/*
+ * rockchip_drm_encoder_get_mux_id - find the mux id of the endpoint that
+ * connects an encoder to its current CRTC
+ *
+ * @node: device tree node containing encoder input ports
+ * @encoder: drm_encoder
+ *
+ * Returns the endpoint id on success, or a negative error code on failure.
+ */
+int rockchip_drm_encoder_get_mux_id(struct device_node *node,
+                                   struct drm_encoder *encoder)
+{
+       struct device_node *ep = NULL;
+       struct drm_crtc *crtc = encoder->crtc;
+       struct of_endpoint endpoint;
+       struct device_node *port;
+       int ret;
+
+       if (!node || !crtc)
+               return -EINVAL;
+
+       do {
+               ep = of_graph_get_next_endpoint(node, ep);
+               if (!ep)
+                       break;
+
+               port = of_graph_get_remote_port(ep);
+               of_node_put(port);
+               if (port == crtc->port) {
+                       ret = of_graph_parse_endpoint(ep, &endpoint);
+                       return ret ?: endpoint.id;
+               }
+       } while (ep);
+
+       return -EINVAL;
+}
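+
+/*
+ * For example (sketch only), an encoder driver could use the returned id
+ * to select which VOP output feeds it:
+ *
+ *     mux = rockchip_drm_encoder_get_mux_id(dev->of_node, encoder);
+ *     if (mux < 0)
+ *             return mux;
+ */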
+
+static int compare_of(struct device *dev, void *data)
+{
+       struct device_node *np = data;
+
+       return dev->of_node == np;
+}
+
+static void rockchip_add_endpoints(struct device *dev,
+                                  struct component_match **match,
+                                  struct device_node *port)
+{
+       struct device_node *ep, *remote;
+
+       for_each_child_of_node(port, ep) {
+               remote = of_graph_get_remote_port_parent(ep);
+               if (!remote || !of_device_is_available(remote)) {
+                       of_node_put(remote);
+                       continue;
+               } else if (!of_device_is_available(remote->parent)) {
+                       dev_warn(dev, "parent device of %s is not available\n",
+                                remote->full_name);
+                       of_node_put(remote);
+                       continue;
+               }
+
+               component_match_add(dev, match, compare_of, remote);
+               of_node_put(remote);
+       }
+}
+
+static int rockchip_drm_bind(struct device *dev)
+{
+       struct drm_device *drm;
+       int ret;
+
+       drm = drm_dev_alloc(&rockchip_drm_driver, dev);
+       if (!drm)
+               return -ENOMEM;
+
+       ret = drm_dev_set_unique(drm, "%s", dev_name(dev));
+       if (ret)
+               goto err_free;
+
+       ret = drm_dev_register(drm, 0);
+       if (ret)
+               goto err_free;
+
+       dev_set_drvdata(dev, drm);
+
+       return 0;
+
+err_free:
+       drm_dev_unref(drm);
+       return ret;
+}
+
+static void rockchip_drm_unbind(struct device *dev)
+{
+       struct drm_device *drm = dev_get_drvdata(dev);
+
+       drm_dev_unregister(drm);
+       drm_dev_unref(drm);
+       dev_set_drvdata(dev, NULL);
+}
+
+static const struct component_master_ops rockchip_drm_ops = {
+       .bind = rockchip_drm_bind,
+       .unbind = rockchip_drm_unbind,
+};
+
+static int rockchip_drm_platform_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct component_match *match = NULL;
+       struct device_node *np = dev->of_node;
+       struct device_node *port;
+       int i;
+
+       if (!np)
+               return -ENODEV;
+       /*
+        * Bind the crtc ports first, so that
+        * drm_of_find_possible_crtcs called from encoder .bind callbacks
+        * works as expected.
+        */
+       for (i = 0;; i++) {
+               port = of_parse_phandle(np, "ports", i);
+               if (!port)
+                       break;
+
+               if (!of_device_is_available(port->parent)) {
+                       of_node_put(port);
+                       continue;
+               }
+
+               component_match_add(dev, &match, compare_of, port->parent);
+               of_node_put(port);
+       }
+
+       if (i == 0) {
+               dev_err(dev, "missing 'ports' property\n");
+               return -ENODEV;
+       }
+
+       if (!match) {
+               dev_err(dev, "No available vop found for display-subsystem.\n");
+               return -ENODEV;
+       }
+       /*
+        * For each bound crtc, bind the encoders attached to its
+        * remote endpoint.
+        */
+       for (i = 0;; i++) {
+               port = of_parse_phandle(np, "ports", i);
+               if (!port)
+                       break;
+
+               if (!of_device_is_available(port->parent)) {
+                       of_node_put(port);
+                       continue;
+               }
+
+               rockchip_add_endpoints(dev, &match, port);
+               of_node_put(port);
+       }
+
+       return component_master_add_with_match(dev, &rockchip_drm_ops, match);
+}
+
+static int rockchip_drm_platform_remove(struct platform_device *pdev)
+{
+       component_master_del(&pdev->dev, &rockchip_drm_ops);
+
+       return 0;
+}
+
+static const struct of_device_id rockchip_drm_dt_ids[] = {
+       { .compatible = "rockchip,display-subsystem", },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
+
+static struct platform_driver rockchip_drm_platform_driver = {
+       .probe = rockchip_drm_platform_probe,
+       .remove = rockchip_drm_platform_remove,
+       .driver = {
+               .owner = THIS_MODULE,
+               .name = "rockchip-drm",
+               .of_match_table = rockchip_drm_dt_ids,
+               .pm = &rockchip_drm_pm_ops,
+       },
+};
+
+module_platform_driver(rockchip_drm_platform_driver);
+
+MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>");
+MODULE_DESCRIPTION("ROCKCHIP DRM Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
new file mode 100644 (file)
index 0000000..dc4e5f0
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * based on exynos_drm_drv.h
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_DRM_DRV_H
+#define _ROCKCHIP_DRM_DRV_H
+
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+
+#include <linux/module.h>
+#include <linux/component.h>
+
+#define ROCKCHIP_MAX_FB_BUFFER 3
+#define ROCKCHIP_MAX_CONNECTOR 2
+#define ROCKCHIP_MAX_CRTC      2
+
+struct drm_device;
+struct drm_connector;
+
+/*
+ * Rockchip drm private crtc funcs.
+ * @enable_vblank: enable crtc vblank irq.
+ * @disable_vblank: disable crtc vblank irq.
+ */
+struct rockchip_crtc_funcs {
+       int (*enable_vblank)(struct drm_crtc *crtc);
+       void (*disable_vblank)(struct drm_crtc *crtc);
+};
+
+/*
+ * Rockchip drm private structure.
+ *
+ * @fbdev_helper: drm fb helper used by the fbdev emulation.
+ * @fbdev_bo: GEM object backing the fbdev framebuffer.
+ * @crtc_funcs: per-pipe private crtc callbacks, indexed by pipe id.
+ */
+struct rockchip_drm_private {
+       struct drm_fb_helper fbdev_helper;
+       struct drm_gem_object *fbdev_bo;
+       const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
+};
+
+int rockchip_register_crtc_funcs(struct drm_device *dev,
+                                const struct rockchip_crtc_funcs *crtc_funcs,
+                                int pipe);
+void rockchip_unregister_crtc_funcs(struct drm_device *dev, int pipe);
+int rockchip_drm_encoder_get_mux_id(struct device_node *node,
+                                   struct drm_encoder *encoder);
+int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, int connector_type,
+                                 int out_mode);
+int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
+                                  struct device *dev);
+void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
+                                   struct device *dev);
+
+#endif /* _ROCKCHIP_DRM_DRV_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
new file mode 100644 (file)
index 0000000..77d5289
--- /dev/null
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <drm/drm.h>
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_gem.h"
+
+#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
+
+struct rockchip_drm_fb {
+       struct drm_framebuffer fb;
+       struct drm_gem_object *obj[ROCKCHIP_MAX_FB_BUFFER];
+};
+
+struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
+                                              unsigned int plane)
+{
+       struct rockchip_drm_fb *rk_fb = to_rockchip_fb(fb);
+
+       if (plane >= ROCKCHIP_MAX_FB_BUFFER)
+               return NULL;
+
+       return rk_fb->obj[plane];
+}
+EXPORT_SYMBOL_GPL(rockchip_fb_get_gem_obj);
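+
+/*
+ * Plane and CRTC code can use this to reach the GEM object backing a
+ * framebuffer plane, e.g. (sketch):
+ *
+ *     struct drm_gem_object *obj = rockchip_fb_get_gem_obj(fb, 0);
+ *
+ *     if (!obj)
+ *             return -EINVAL;
+ */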
+
+static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
+{
+       struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
+       struct drm_gem_object *obj;
+       int i;
+
+       for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++) {
+               obj = rockchip_fb->obj[i];
+               if (obj)
+                       drm_gem_object_unreference_unlocked(obj);
+       }
+
+       drm_framebuffer_cleanup(fb);
+       kfree(rockchip_fb);
+}
+
+static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb,
+                                        struct drm_file *file_priv,
+                                        unsigned int *handle)
+{
+       struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
+
+       return drm_gem_handle_create(file_priv,
+                                    rockchip_fb->obj[0], handle);
+}
+
+static struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
+       .destroy        = rockchip_drm_fb_destroy,
+       .create_handle  = rockchip_drm_fb_create_handle,
+};
+
+static struct rockchip_drm_fb *
+rockchip_fb_alloc(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd,
+                 struct drm_gem_object **obj, unsigned int num_planes)
+{
+       struct rockchip_drm_fb *rockchip_fb;
+       int ret;
+       int i;
+
+       rockchip_fb = kzalloc(sizeof(*rockchip_fb), GFP_KERNEL);
+       if (!rockchip_fb)
+               return ERR_PTR(-ENOMEM);
+
+       drm_helper_mode_fill_fb_struct(&rockchip_fb->fb, mode_cmd);
+
+       for (i = 0; i < num_planes; i++)
+               rockchip_fb->obj[i] = obj[i];
+
+       ret = drm_framebuffer_init(dev, &rockchip_fb->fb,
+                                  &rockchip_drm_fb_funcs);
+       if (ret) {
+               dev_err(dev->dev, "Failed to initialize framebuffer: %d\n",
+                       ret);
+               kfree(rockchip_fb);
+               return ERR_PTR(ret);
+       }
+
+       return rockchip_fb;
+}
+
+static struct drm_framebuffer *
+rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+                       struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       struct rockchip_drm_fb *rockchip_fb;
+       struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER];
+       struct drm_gem_object *obj;
+       unsigned int hsub;
+       unsigned int vsub;
+       int num_planes;
+       int ret;
+       int i;
+
+       hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
+       vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+       num_planes = min(drm_format_num_planes(mode_cmd->pixel_format),
+                        ROCKCHIP_MAX_FB_BUFFER);
+
+       for (i = 0; i < num_planes; i++) {
+               unsigned int width = mode_cmd->width / (i ? hsub : 1);
+               unsigned int height = mode_cmd->height / (i ? vsub : 1);
+               unsigned int min_size;
+
+               obj = drm_gem_object_lookup(dev, file_priv,
+                                           mode_cmd->handles[i]);
+               if (!obj) {
+                       dev_err(dev->dev, "Failed to lookup GEM object\n");
+                       ret = -ENXIO;
+                       goto err_gem_object_unreference;
+               }
+
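+               /*
+                * Smallest buffer that can back this plane: a full pitch
+                * for every scanline but the last, one scanline of pixels,
+                * plus the plane's byte offset into the buffer.
+                */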
+               min_size = (height - 1) * mode_cmd->pitches[i] +
+                       mode_cmd->offsets[i] +
+                       width * drm_format_plane_cpp(mode_cmd->pixel_format, i);
+
+               if (obj->size < min_size) {
+                       drm_gem_object_unreference_unlocked(obj);
+                       ret = -EINVAL;
+                       goto err_gem_object_unreference;
+               }
+               objs[i] = obj;
+       }
+
+       rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
+       if (IS_ERR(rockchip_fb)) {
+               ret = PTR_ERR(rockchip_fb);
+               goto err_gem_object_unreference;
+       }
+
+       return &rockchip_fb->fb;
+
+err_gem_object_unreference:
+       for (i--; i >= 0; i--)
+               drm_gem_object_unreference_unlocked(objs[i]);
+       return ERR_PTR(ret);
+}
+
+static void rockchip_drm_output_poll_changed(struct drm_device *dev)
+{
+       struct rockchip_drm_private *private = dev->dev_private;
+       struct drm_fb_helper *fb_helper = &private->fbdev_helper;
+
+       drm_fb_helper_hotplug_event(fb_helper);
+}
+
+static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
+       .fb_create = rockchip_user_fb_create,
+       .output_poll_changed = rockchip_drm_output_poll_changed,
+};
+
+struct drm_framebuffer *
+rockchip_drm_framebuffer_init(struct drm_device *dev,
+                             struct drm_mode_fb_cmd2 *mode_cmd,
+                             struct drm_gem_object *obj)
+{
+       struct rockchip_drm_fb *rockchip_fb;
+
+       rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
+       if (IS_ERR(rockchip_fb))
+               return ERR_CAST(rockchip_fb);
+
+       return &rockchip_fb->fb;
+}
+
+void rockchip_drm_mode_config_init(struct drm_device *dev)
+{
+       dev->mode_config.min_width = 0;
+       dev->mode_config.min_height = 0;
+
+       /*
+        * Set the max width and height as default values (4096x4096).
+        * These values are used to check framebuffer size limits
+        * in drm_mode_addfb().
+        */
+       dev->mode_config.max_width = 4096;
+       dev->mode_config.max_height = 4096;
+
+       dev->mode_config.funcs = &rockchip_drm_mode_config_funcs;
+}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
new file mode 100644 (file)
index 0000000..09574d4
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_DRM_FB_H
+#define _ROCKCHIP_DRM_FB_H
+
+struct drm_framebuffer *
+rockchip_drm_framebuffer_init(struct drm_device *dev,
+                             struct drm_mode_fb_cmd2 *mode_cmd,
+                             struct drm_gem_object *obj);
+void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb);
+
+void rockchip_drm_mode_config_init(struct drm_device *dev);
+
+struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
+                                              unsigned int plane);
+#endif /* _ROCKCHIP_DRM_FB_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
new file mode 100644 (file)
index 0000000..a5d889a
--- /dev/null
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm.h>
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_gem.h"
+#include "rockchip_drm_fb.h"
+
+#define PREFERRED_BPP          32
+#define to_drm_private(x) \
+               container_of(x, struct rockchip_drm_private, fbdev_helper)
+
+static int rockchip_fbdev_mmap(struct fb_info *info,
+                              struct vm_area_struct *vma)
+{
+       struct drm_fb_helper *helper = info->par;
+       struct rockchip_drm_private *private = to_drm_private(helper);
+
+       return rockchip_gem_mmap_buf(private->fbdev_bo, vma);
+}
+
+static struct fb_ops rockchip_drm_fbdev_ops = {
+       .owner          = THIS_MODULE,
+       .fb_mmap        = rockchip_fbdev_mmap,
+       .fb_fillrect    = cfb_fillrect,
+       .fb_copyarea    = cfb_copyarea,
+       .fb_imageblit   = cfb_imageblit,
+       .fb_check_var   = drm_fb_helper_check_var,
+       .fb_set_par     = drm_fb_helper_set_par,
+       .fb_blank       = drm_fb_helper_blank,
+       .fb_pan_display = drm_fb_helper_pan_display,
+       .fb_setcmap     = drm_fb_helper_setcmap,
+};
+
+static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
+                                    struct drm_fb_helper_surface_size *sizes)
+{
+       struct rockchip_drm_private *private = to_drm_private(helper);
+       struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+       struct drm_device *dev = helper->dev;
+       struct rockchip_gem_object *rk_obj;
+       struct drm_framebuffer *fb;
+       unsigned int bytes_per_pixel;
+       unsigned long offset;
+       struct fb_info *fbi;
+       size_t size;
+       int ret;
+
+       bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+       mode_cmd.width = sizes->surface_width;
+       mode_cmd.height = sizes->surface_height;
+       mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+               sizes->surface_depth);
+
+       size = mode_cmd.pitches[0] * mode_cmd.height;
+
+       rk_obj = rockchip_gem_create_object(dev, size);
+       if (IS_ERR(rk_obj))
+               return PTR_ERR(rk_obj);
+
+       private->fbdev_bo = &rk_obj->base;
+
+       fbi = framebuffer_alloc(0, dev->dev);
+       if (!fbi) {
+               dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
+               ret = -ENOMEM;
+               goto err_rockchip_gem_free_object;
+       }
+
+       helper->fb = rockchip_drm_framebuffer_init(dev, &mode_cmd,
+                                                  private->fbdev_bo);
+       if (IS_ERR(helper->fb)) {
+               dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
+               ret = PTR_ERR(helper->fb);
+               goto err_framebuffer_release;
+       }
+
+       helper->fbdev = fbi;
+
+       fbi->par = helper;
+       fbi->flags = FBINFO_FLAG_DEFAULT;
+       fbi->fbops = &rockchip_drm_fbdev_ops;
+
+       ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+       if (ret) {
+               dev_err(dev->dev, "Failed to allocate color map.\n");
+               goto err_drm_framebuffer_unref;
+       }
+
+       fb = helper->fb;
+       drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+       drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+
+       offset = fbi->var.xoffset * bytes_per_pixel;
+       offset += fbi->var.yoffset * fb->pitches[0];
+
+       dev->mode_config.fb_base = 0;
+       fbi->screen_base = rk_obj->kvaddr + offset;
+       fbi->screen_size = rk_obj->base.size;
+       fbi->fix.smem_len = rk_obj->base.size;
+
+       DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%d\n",
+                     fb->width, fb->height, fb->depth, rk_obj->kvaddr,
+                     offset, size);
+       return 0;
+
+err_drm_framebuffer_unref:
+       drm_framebuffer_unreference(helper->fb);
+err_framebuffer_release:
+       framebuffer_release(fbi);
+err_rockchip_gem_free_object:
+       rockchip_gem_free_object(&rk_obj->base);
+       return ret;
+}
+
+static const struct drm_fb_helper_funcs rockchip_drm_fb_helper_funcs = {
+       .fb_probe = rockchip_drm_fbdev_create,
+};
+
+int rockchip_drm_fbdev_init(struct drm_device *dev)
+{
+       struct rockchip_drm_private *private = dev->dev_private;
+       struct drm_fb_helper *helper;
+       unsigned int num_crtc;
+       int ret;
+
+       if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
+               return -EINVAL;
+
+       num_crtc = dev->mode_config.num_crtc;
+
+       helper = &private->fbdev_helper;
+
+       drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs);
+
+       ret = drm_fb_helper_init(dev, helper, num_crtc, ROCKCHIP_MAX_CONNECTOR);
+       if (ret < 0) {
+               dev_err(dev->dev, "Failed to initialize drm fb helper - %d.\n",
+                       ret);
+               return ret;
+       }
+
+       ret = drm_fb_helper_single_add_all_connectors(helper);
+       if (ret < 0) {
+               dev_err(dev->dev, "Failed to add connectors - %d.\n", ret);
+               goto err_drm_fb_helper_fini;
+       }
+
+       /* disable all the possible outputs/crtcs before entering KMS mode */
+       drm_helper_disable_unused_functions(dev);
+
+       ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
+       if (ret < 0) {
+               dev_err(dev->dev, "Failed to set initial hw config - %d.\n",
+                       ret);
+               goto err_drm_fb_helper_fini;
+       }
+
+       return 0;
+
+err_drm_fb_helper_fini:
+       drm_fb_helper_fini(helper);
+       return ret;
+}
+
+void rockchip_drm_fbdev_fini(struct drm_device *dev)
+{
+       struct rockchip_drm_private *private = dev->dev_private;
+       struct drm_fb_helper *helper;
+
+       helper = &private->fbdev_helper;
+
+       if (helper->fbdev) {
+               struct fb_info *info;
+               int ret;
+
+               info = helper->fbdev;
+               ret = unregister_framebuffer(info);
+               if (ret < 0)
+                       DRM_DEBUG_KMS("failed unregister_framebuffer() - %d\n",
+                                     ret);
+
+               if (info->cmap.len)
+                       fb_dealloc_cmap(&info->cmap);
+
+               framebuffer_release(info);
+       }
+
+       if (helper->fb)
+               drm_framebuffer_unreference(helper->fb);
+
+       drm_fb_helper_fini(helper);
+}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
new file mode 100644 (file)
index 0000000..50432e9
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_DRM_FBDEV_H
+#define _ROCKCHIP_DRM_FBDEV_H
+
+int rockchip_drm_fbdev_init(struct drm_device *dev);
+void rockchip_drm_fbdev_fini(struct drm_device *dev);
+
+#endif /* _ROCKCHIP_DRM_FBDEV_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
new file mode 100644 (file)
index 0000000..bc98a22
--- /dev/null
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm.h>
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_vma_manager.h>
+
+#include <linux/dma-attrs.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_gem.h"
+
+static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj)
+{
+       struct drm_gem_object *obj = &rk_obj->base;
+       struct drm_device *drm = obj->dev;
+
+       init_dma_attrs(&rk_obj->dma_attrs);
+       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs);
+
+       /* TODO(djkurtz): Use DMA_ATTR_NO_KERNEL_MAPPING except for fbdev */
+       rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
+                                        &rk_obj->dma_addr, GFP_KERNEL,
+                                        &rk_obj->dma_attrs);
+       if (IS_ERR(rk_obj->kvaddr)) {
+               int ret = PTR_ERR(rk_obj->kvaddr);
+
+               DRM_ERROR("failed to allocate %#x byte dma buffer, %d",
+                         obj->size, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
+{
+       struct drm_gem_object *obj = &rk_obj->base;
+       struct drm_device *drm = obj->dev;
+
+       dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
+                      &rk_obj->dma_attrs);
+}
+
+int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
+                         struct vm_area_struct *vma)
+{
+       struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+       struct drm_device *drm = obj->dev;
+       unsigned long vm_size;
+
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+       vm_size = vma->vm_end - vma->vm_start;
+
+       if (vm_size > obj->size)
+               return -EINVAL;
+
+       return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
+                            obj->size, &rk_obj->dma_attrs);
+}
+
+/* drm driver mmap file operations */
+int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_gem_object *obj;
+       struct drm_vma_offset_node *node;
+       int ret;
+
+       if (drm_device_is_unplugged(dev))
+               return -ENODEV;
+
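+       /*
+        * The fake mmap offset in vma->vm_pgoff identifies the object: look
+        * it up in the vma offset manager and verify that this file is
+        * allowed to map it.
+        */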
+       mutex_lock(&dev->struct_mutex);
+
+       node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
+                                          vma->vm_pgoff,
+                                          vma_pages(vma));
+       if (!node) {
+               mutex_unlock(&dev->struct_mutex);
+               DRM_ERROR("failed to find vma node.\n");
+               return -EINVAL;
+       } else if (!drm_vma_node_is_allowed(node, filp)) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EACCES;
+       }
+
+       obj = container_of(node, struct drm_gem_object, vma_node);
+       ret = rockchip_gem_mmap_buf(obj, vma);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+struct rockchip_gem_object *
+       rockchip_gem_create_object(struct drm_device *drm, unsigned int size)
+{
+       struct rockchip_gem_object *rk_obj;
+       struct drm_gem_object *obj;
+       int ret;
+
+       size = round_up(size, PAGE_SIZE);
+
+       rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
+       if (!rk_obj)
+               return ERR_PTR(-ENOMEM);
+
+       obj = &rk_obj->base;
+
+       drm_gem_private_object_init(drm, obj, size);
+
+       ret = rockchip_gem_alloc_buf(rk_obj);
+       if (ret)
+               goto err_free_rk_obj;
+
+       return rk_obj;
+
+err_free_rk_obj:
+       kfree(rk_obj);
+       return ERR_PTR(ret);
+}
+
+/*
+ * rockchip_gem_free_object - (struct drm_driver)->gem_free_object callback
+ * function
+ */
+void rockchip_gem_free_object(struct drm_gem_object *obj)
+{
+       struct rockchip_gem_object *rk_obj;
+
+       drm_gem_free_mmap_offset(obj);
+
+       rk_obj = to_rockchip_obj(obj);
+
+       rockchip_gem_free_buf(rk_obj);
+
+       kfree(rk_obj);
+}
+
+/*
+ * rockchip_gem_create_with_handle - allocate an object with the given
+ * size and create a gem handle on it
+ *
+ * returns a struct rockchip_gem_object* on success or ERR_PTR values
+ * on failure.
+ */
+static struct rockchip_gem_object *
+rockchip_gem_create_with_handle(struct drm_file *file_priv,
+                               struct drm_device *drm, unsigned int size,
+                               unsigned int *handle)
+{
+       struct rockchip_gem_object *rk_obj;
+       struct drm_gem_object *obj;
+       int ret;
+
+       rk_obj = rockchip_gem_create_object(drm, size);
+       if (IS_ERR(rk_obj))
+               return ERR_CAST(rk_obj);
+
+       obj = &rk_obj->base;
+
+       /*
+        * Allocate an id from the idr table where the obj is registered;
+        * the returned handle is the id that userspace sees.
+        */
+       ret = drm_gem_handle_create(file_priv, obj, handle);
+       if (ret)
+               goto err_handle_create;
+
+       /* drop reference from allocate - handle holds it now. */
+       drm_gem_object_unreference_unlocked(obj);
+
+       return rk_obj;
+
+err_handle_create:
+       rockchip_gem_free_object(obj);
+
+       return ERR_PTR(ret);
+}
+
+int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
+                                struct drm_device *dev, uint32_t handle,
+                                uint64_t *offset)
+{
+       struct drm_gem_object *obj;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+
+       obj = drm_gem_object_lookup(dev, file_priv, handle);
+       if (!obj) {
+               DRM_ERROR("failed to lookup gem object.\n");
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       ret = drm_gem_create_mmap_offset(obj);
+       if (ret)
+               goto out;
+
+       *offset = drm_vma_node_offset_addr(&obj->vma_node);
+       DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);
+
+out:
+       drm_gem_object_unreference(obj);
+unlock:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+/*
+ * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
+ * function
+ *
+ * This aligns the pitch and size arguments to the minimum required.  Wrap
+ * this in your own function if you need bigger alignment.
+ */
+int rockchip_gem_dumb_create(struct drm_file *file_priv,
+                            struct drm_device *dev,
+                            struct drm_mode_create_dumb *args)
+{
+       struct rockchip_gem_object *rk_obj;
+       int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+       /*
+        * Align to 64 bytes, since the Mali GPU requires it.
+        */
+       min_pitch = ALIGN(min_pitch, 64);
+
+       if (args->pitch < min_pitch)
+               args->pitch = min_pitch;
+
+       if (args->size < args->pitch * args->height)
+               args->size = args->pitch * args->height;
+
+       rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
+                                                &args->handle);
+
+       return PTR_ERR_OR_ZERO(rk_obj);
+}
+
+/*
+ * Allocate a sg_table for this GEM object.
+ * Note: Both the table's contents, and the sg_table itself must be freed by
+ *       the caller.
+ * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
+ */
+struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+       struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+       struct drm_device *drm = obj->dev;
+       struct sg_table *sgt;
+       int ret;
+
+       sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+       if (!sgt)
+               return ERR_PTR(-ENOMEM);
+
+       ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
+                                   rk_obj->dma_addr, obj->size,
+                                   &rk_obj->dma_attrs);
+       if (ret) {
+               DRM_ERROR("failed to allocate sgt, %d\n", ret);
+               kfree(sgt);
+               return ERR_PTR(ret);
+       }
+
+       return sgt;
+}
+
+void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
+{
+       struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+
+       return rk_obj->kvaddr;
+}
+
+void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+       /* Nothing to do */
+}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
new file mode 100644 (file)
index 0000000..67bcebe
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_DRM_GEM_H
+#define _ROCKCHIP_DRM_GEM_H
+
+#define to_rockchip_obj(x) container_of(x, struct rockchip_gem_object, base)
+
+struct rockchip_gem_object {
+       struct drm_gem_object base;
+       unsigned int flags;
+
+       void *kvaddr;
+       dma_addr_t dma_addr;
+       struct dma_attrs dma_attrs;
+};
+
+struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *
+rockchip_gem_prime_import_sg_table(struct drm_device *dev, size_t size,
+                                  struct sg_table *sgt);
+void *rockchip_gem_prime_vmap(struct drm_gem_object *obj);
+void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+
+/* drm driver mmap file operations */
+int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* mmap a gem object to userspace. */
+int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
+                         struct vm_area_struct *vma);
+
+struct rockchip_gem_object *
+       rockchip_gem_create_object(struct drm_device *drm, unsigned int size);
+
+void rockchip_gem_free_object(struct drm_gem_object *obj);
+
+int rockchip_gem_dumb_create(struct drm_file *file_priv,
+                            struct drm_device *dev,
+                            struct drm_mode_create_dumb *args);
+int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
+                                struct drm_device *dev, uint32_t handle,
+                                uint64_t *offset);
+#endif /* _ROCKCHIP_DRM_GEM_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
new file mode 100644 (file)
index 0000000..e7ca25b
--- /dev/null
@@ -0,0 +1,1455 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/component.h>
+
+#include <linux/reset.h>
+#include <linux/delay.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_gem.h"
+#include "rockchip_drm_fb.h"
+#include "rockchip_drm_vop.h"
+
+#define VOP_REG(off, _mask, s) \
+               {.offset = off, \
+                .mask = _mask, \
+                .shift = s,}
+
+#define __REG_SET_RELAXED(x, off, mask, shift, v) \
+               vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift)
+#define __REG_SET_NORMAL(x, off, mask, shift, v) \
+               vop_mask_write(x, off, (mask) << shift, (v) << shift)
+
+#define REG_SET(x, base, reg, v, mode) \
+               __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
+
+#define VOP_WIN_SET(x, win, name, v) \
+               REG_SET(x, win->base, win->phy->name, v, RELAXED)
+#define VOP_CTRL_SET(x, name, v) \
+               REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)
+
+#define VOP_WIN_GET(x, win, name) \
+               vop_read_reg(x, win->base, &win->phy->name)
+
+#define VOP_WIN_GET_YRGBADDR(vop, win) \
+               vop_readl(vop, win->base + win->phy->yrgb_mst.offset)
+
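+/*
+ * Illustrative expansion (for reference only): VOP_WIN_SET(vop, win, enable, 1)
+ * becomes vop_mask_write_relaxed(vop, win->base + win->phy->enable.offset,
+ * mask << shift, 1 << shift), i.e. a masked update of a single field within
+ * the window's register block.
+ */
+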
+#define to_vop(x) container_of(x, struct vop, crtc)
+#define to_vop_win(x) container_of(x, struct vop_win, base)
+
+struct vop_win_state {
+       struct list_head head;
+       struct drm_framebuffer *fb;
+       dma_addr_t yrgb_mst;
+       struct drm_pending_vblank_event *event;
+};
+
+struct vop_win {
+       struct drm_plane base;
+       const struct vop_win_data *data;
+       struct vop *vop;
+
+       struct list_head pending;
+       struct vop_win_state *active;
+};
+
+struct vop {
+       struct drm_crtc crtc;
+       struct device *dev;
+       struct drm_device *drm_dev;
+       unsigned int dpms;
+
+       int connector_type;
+       int connector_out_mode;
+
+       /* protects the vsync work state below */
+       struct mutex vsync_mutex;
+       bool vsync_work_pending;
+
+       const struct vop_data *data;
+
+       uint32_t *regsbak;
+       void __iomem *regs;
+
+       /* length of the mapped vop register region */
+       uint32_t len;
+
+       /* only one context at a time may configure the registers */
+       spinlock_t reg_lock;
+       /* protects the vop interrupt registers */
+       spinlock_t irq_lock;
+
+       unsigned int irq;
+
+       /* vop AHB clk */
+       struct clk *hclk;
+       /* vop dclk */
+       struct clk *dclk;
+       /* vop shared memory (axi) clk */
+       struct clk *aclk;
+
+       /* vop dclk reset */
+       struct reset_control *dclk_rst;
+
+       int pipe;
+
+       struct vop_win win[];
+};
+
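+/*
+ * These values are written directly into a window's format field via
+ * VOP_WIN_SET(..., format, ...); the explicit assignments, including the
+ * gap at 3, presumably follow the hardware encoding.
+ */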
+enum vop_data_format {
+       VOP_FMT_ARGB8888 = 0,
+       VOP_FMT_RGB888,
+       VOP_FMT_RGB565,
+       VOP_FMT_YUV420SP = 4,
+       VOP_FMT_YUV422SP,
+       VOP_FMT_YUV444SP,
+};
+
+struct vop_reg_data {
+       uint32_t offset;
+       uint32_t value;
+};
+
+struct vop_reg {
+       uint32_t offset;
+       uint32_t shift;
+       uint32_t mask;
+};
+
+struct vop_ctrl {
+       struct vop_reg standby;
+       struct vop_reg data_blank;
+       struct vop_reg gate_en;
+       struct vop_reg mmu_en;
+       struct vop_reg rgb_en;
+       struct vop_reg edp_en;
+       struct vop_reg hdmi_en;
+       struct vop_reg mipi_en;
+       struct vop_reg out_mode;
+       struct vop_reg dither_down;
+       struct vop_reg dither_up;
+       struct vop_reg pin_pol;
+
+       struct vop_reg htotal_pw;
+       struct vop_reg hact_st_end;
+       struct vop_reg vtotal_pw;
+       struct vop_reg vact_st_end;
+       struct vop_reg hpost_st_end;
+       struct vop_reg vpost_st_end;
+};
+
+struct vop_win_phy {
+       const uint32_t *data_formats;
+       uint32_t nformats;
+
+       struct vop_reg enable;
+       struct vop_reg format;
+       struct vop_reg act_info;
+       struct vop_reg dsp_info;
+       struct vop_reg dsp_st;
+       struct vop_reg yrgb_mst;
+       struct vop_reg uv_mst;
+       struct vop_reg yrgb_vir;
+       struct vop_reg uv_vir;
+
+       struct vop_reg dst_alpha_ctl;
+       struct vop_reg src_alpha_ctl;
+};
+
+struct vop_win_data {
+       uint32_t base;
+       const struct vop_win_phy *phy;
+       enum drm_plane_type type;
+};
+
+struct vop_data {
+       const struct vop_reg_data *init_table;
+       unsigned int table_size;
+       const struct vop_ctrl *ctrl;
+       const struct vop_win_data *win;
+       unsigned int win_size;
+};
+
+static const uint32_t formats_01[] = {
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_RGB888,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_NV12,
+       DRM_FORMAT_NV16,
+       DRM_FORMAT_NV24,
+};
+
+static const uint32_t formats_234[] = {
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_RGB888,
+       DRM_FORMAT_RGB565,
+};
+
+static const struct vop_win_phy win01_data = {
+       .data_formats = formats_01,
+       .nformats = ARRAY_SIZE(formats_01),
+       .enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
+       .format = VOP_REG(WIN0_CTRL0, 0x7, 1),
+       .act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0),
+       .dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0),
+       .dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0),
+       .yrgb_mst = VOP_REG(WIN0_YRGB_MST, 0xffffffff, 0),
+       .uv_mst = VOP_REG(WIN0_CBR_MST, 0xffffffff, 0),
+       .yrgb_vir = VOP_REG(WIN0_VIR, 0x3fff, 0),
+       .uv_vir = VOP_REG(WIN0_VIR, 0x3fff, 16),
+       .src_alpha_ctl = VOP_REG(WIN0_SRC_ALPHA_CTRL, 0xff, 0),
+       .dst_alpha_ctl = VOP_REG(WIN0_DST_ALPHA_CTRL, 0xff, 0),
+};
+
+static const struct vop_win_phy win23_data = {
+       .data_formats = formats_234,
+       .nformats = ARRAY_SIZE(formats_234),
+       .enable = VOP_REG(WIN2_CTRL0, 0x1, 0),
+       .format = VOP_REG(WIN2_CTRL0, 0x7, 1),
+       .dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0),
+       .dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0),
+       .yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0),
+       .yrgb_vir = VOP_REG(WIN2_VIR0_1, 0x1fff, 0),
+       .src_alpha_ctl = VOP_REG(WIN2_SRC_ALPHA_CTRL, 0xff, 0),
+       .dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0),
+};
+
+static const struct vop_win_phy cursor_data = {
+       .data_formats = formats_234,
+       .nformats = ARRAY_SIZE(formats_234),
+       .enable = VOP_REG(HWC_CTRL0, 0x1, 0),
+       .format = VOP_REG(HWC_CTRL0, 0x7, 1),
+       .dsp_st = VOP_REG(HWC_DSP_ST, 0x1fff1fff, 0),
+       .yrgb_mst = VOP_REG(HWC_MST, 0xffffffff, 0),
+};
+
+static const struct vop_ctrl ctrl_data = {
+       .standby = VOP_REG(SYS_CTRL, 0x1, 22),
+       .gate_en = VOP_REG(SYS_CTRL, 0x1, 23),
+       .mmu_en = VOP_REG(SYS_CTRL, 0x1, 20),
+       .rgb_en = VOP_REG(SYS_CTRL, 0x1, 12),
+       .hdmi_en = VOP_REG(SYS_CTRL, 0x1, 13),
+       .edp_en = VOP_REG(SYS_CTRL, 0x1, 14),
+       .mipi_en = VOP_REG(SYS_CTRL, 0x1, 15),
+       .dither_down = VOP_REG(DSP_CTRL1, 0xf, 1),
+       .dither_up = VOP_REG(DSP_CTRL1, 0x1, 6),
+       .data_blank = VOP_REG(DSP_CTRL0, 0x1, 19),
+       .out_mode = VOP_REG(DSP_CTRL0, 0xf, 0),
+       .pin_pol = VOP_REG(DSP_CTRL0, 0xf, 4),
+       .htotal_pw = VOP_REG(DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
+       .hact_st_end = VOP_REG(DSP_HACT_ST_END, 0x1fff1fff, 0),
+       .vtotal_pw = VOP_REG(DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
+       .vact_st_end = VOP_REG(DSP_VACT_ST_END, 0x1fff1fff, 0),
+       .hpost_st_end = VOP_REG(POST_DSP_HACT_INFO, 0x1fff1fff, 0),
+       .vpost_st_end = VOP_REG(POST_DSP_VACT_INFO, 0x1fff1fff, 0),
+};
+
+static const struct vop_reg_data vop_init_reg_table[] = {
+       {SYS_CTRL, 0x00c00000},
+       {DSP_CTRL0, 0x00000000},
+       {WIN0_CTRL0, 0x00000080},
+       {WIN1_CTRL0, 0x00000080},
+};
+
+/*
+ * Note: rk3288 has a dedicated 'cursor' window, however, that window requires
+ * special support to get alpha blending working.  For now, just use overlay
+ * window 1 for the drm cursor.
+ */
+static const struct vop_win_data rk3288_vop_win_data[] = {
+       { .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY },
+       { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_CURSOR },
+       { .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
+       { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
+       { .base = 0x00, .phy = &cursor_data, .type = DRM_PLANE_TYPE_OVERLAY },
+};
+
+static const struct vop_data rk3288_vop = {
+       .init_table = vop_init_reg_table,
+       .table_size = ARRAY_SIZE(vop_init_reg_table),
+       .ctrl = &ctrl_data,
+       .win = rk3288_vop_win_data,
+       .win_size = ARRAY_SIZE(rk3288_vop_win_data),
+};
+
+static const struct of_device_id vop_driver_dt_match[] = {
+       { .compatible = "rockchip,rk3288-vop",
+         .data = &rk3288_vop },
+       {},
+};
+
+static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
+{
+       writel(v, vop->regs + offset);
+       vop->regsbak[offset >> 2] = v;
+}
+
+static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
+{
+       return readl(vop->regs + offset);
+}
+
+static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
+                                   const struct vop_reg *reg)
+{
+       return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
+}
+
+static inline void vop_cfg_done(struct vop *vop)
+{
+       writel(0x01, vop->regs + REG_CFG_DONE);
+}
+
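+/*
+ * Masked read-modify-write of a vop register.  The current value is taken
+ * from the regsbak shadow copy (kept up to date by every write path) rather
+ * than read back from the hardware.
+ */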
+static inline void vop_mask_write(struct vop *vop, uint32_t offset,
+                                 uint32_t mask, uint32_t v)
+{
+       if (mask) {
+               uint32_t cached_val = vop->regsbak[offset >> 2];
+
+               cached_val = (cached_val & ~mask) | v;
+               writel(cached_val, vop->regs + offset);
+               vop->regsbak[offset >> 2] = cached_val;
+       }
+}
+
+static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
+                                         uint32_t mask, uint32_t v)
+{
+       if (mask) {
+               uint32_t cached_val = vop->regsbak[offset >> 2];
+
+               cached_val = (cached_val & ~mask) | v;
+               writel_relaxed(cached_val, vop->regs + offset);
+               vop->regsbak[offset >> 2] = cached_val;
+       }
+}
+
+static int vop_convert_format(uint32_t format)
+{
+       switch (format) {
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
+               return VOP_FMT_ARGB8888;
+       case DRM_FORMAT_RGB888:
+               return VOP_FMT_RGB888;
+       case DRM_FORMAT_RGB565:
+               return VOP_FMT_RGB565;
+       case DRM_FORMAT_NV12:
+               return VOP_FMT_YUV420SP;
+       case DRM_FORMAT_NV16:
+               return VOP_FMT_YUV422SP;
+       case DRM_FORMAT_NV24:
+               return VOP_FMT_YUV444SP;
+       default:
+               DRM_ERROR("unsupport format[%08x]\n", format);
+               return -EINVAL;
+       }
+}
+
+static bool is_alpha_support(uint32_t format)
+{
+       switch (format) {
+       case DRM_FORMAT_ARGB8888:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static void vop_enable(struct drm_crtc *crtc)
+{
+       struct vop *vop = to_vop(crtc);
+       int ret;
+
+       ret = clk_enable(vop->hclk);
+       if (ret < 0) {
+               dev_err(vop->dev, "failed to enable hclk - %d\n", ret);
+               return;
+       }
+
+       ret = clk_enable(vop->dclk);
+       if (ret < 0) {
+               dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
+               goto err_disable_hclk;
+       }
+
+       ret = clk_enable(vop->aclk);
+       if (ret < 0) {
+               dev_err(vop->dev, "failed to enable aclk - %d\n", ret);
+               goto err_disable_dclk;
+       }
+
+       /*
+        * The slave iommu shares power, irq and clock with the vop.  It was
+        * associated with this master device automatically by common driver
+        * code.  Now that the clocks are enabled, attach it to the shared
+        * drm mapping.
+        */
+       ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
+       if (ret) {
+               dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
+               goto err_disable_aclk;
+       }
+
+       spin_lock(&vop->reg_lock);
+
+       VOP_CTRL_SET(vop, standby, 0);
+
+       spin_unlock(&vop->reg_lock);
+
+       enable_irq(vop->irq);
+
+       drm_vblank_on(vop->drm_dev, vop->pipe);
+
+       return;
+
+err_disable_aclk:
+       clk_disable(vop->aclk);
+err_disable_dclk:
+       clk_disable(vop->dclk);
+err_disable_hclk:
+       clk_disable(vop->hclk);
+}
+
+static void vop_disable(struct drm_crtc *crtc)
+{
+       struct vop *vop = to_vop(crtc);
+
+       drm_vblank_off(crtc->dev, vop->pipe);
+
+       disable_irq(vop->irq);
+
+       /*
+        * TODO: Since standby doesn't take effect until the next vblank,
+        * when we turn off dclk below, the vop is probably still active.
+        */
+       spin_lock(&vop->reg_lock);
+
+       VOP_CTRL_SET(vop, standby, 1);
+
+       spin_unlock(&vop->reg_lock);
+       /*
+        * Disable dclk to stop the frame scan, so that we can safely detach
+        * the iommu.
+        */
+       clk_disable(vop->dclk);
+
+       rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
+
+       clk_disable(vop->aclk);
+       clk_disable(vop->hclk);
+}
+
+/*
+ * Caller must hold vsync_mutex.
+ */
+static struct drm_framebuffer *vop_win_last_pending_fb(struct vop_win *vop_win)
+{
+       struct vop_win_state *last;
+       struct vop_win_state *active = vop_win->active;
+
+       if (list_empty(&vop_win->pending))
+               return active ? active->fb : NULL;
+
+       last = list_last_entry(&vop_win->pending, struct vop_win_state, head);
+       return last ? last->fb : NULL;
+}
+
+/*
+ * Caller must hold vsync_mutex.
+ */
+static int vop_win_queue_fb(struct vop_win *vop_win,
+                           struct drm_framebuffer *fb, dma_addr_t yrgb_mst,
+                           struct drm_pending_vblank_event *event)
+{
+       struct vop_win_state *state;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return -ENOMEM;
+
+       state->fb = fb;
+       state->yrgb_mst = yrgb_mst;
+       state->event = event;
+
+       list_add_tail(&state->head, &vop_win->pending);
+
+       return 0;
+}
+
+static int vop_update_plane_event(struct drm_plane *plane,
+                                 struct drm_crtc *crtc,
+                                 struct drm_framebuffer *fb, int crtc_x,
+                                 int crtc_y, unsigned int crtc_w,
+                                 unsigned int crtc_h, uint32_t src_x,
+                                 uint32_t src_y, uint32_t src_w,
+                                 uint32_t src_h,
+                                 struct drm_pending_vblank_event *event)
+{
+       struct vop_win *vop_win = to_vop_win(plane);
+       const struct vop_win_data *win = vop_win->data;
+       struct vop *vop = to_vop(crtc);
+       struct drm_gem_object *obj;
+       struct rockchip_gem_object *rk_obj;
+       unsigned long offset;
+       unsigned int actual_w;
+       unsigned int actual_h;
+       unsigned int dsp_stx;
+       unsigned int dsp_sty;
+       unsigned int y_vir_stride;
+       dma_addr_t yrgb_mst;
+       int format;
+       uint32_t val;
+       bool is_alpha;
+       bool visible;
+       int ret;
+       struct drm_rect dest = {
+               .x1 = crtc_x,
+               .y1 = crtc_y,
+               .x2 = crtc_x + crtc_w,
+               .y2 = crtc_y + crtc_h,
+       };
+       struct drm_rect src = {
+               /* 16.16 fixed point */
+               .x1 = src_x,
+               .y1 = src_y,
+               .x2 = src_x + src_w,
+               .y2 = src_y + src_h,
+       };
+       const struct drm_rect clip = {
+               .x2 = crtc->mode.hdisplay,
+               .y2 = crtc->mode.vdisplay,
+       };
+       bool can_position = plane->type != DRM_PLANE_TYPE_PRIMARY;
+
+       ret = drm_plane_helper_check_update(plane, crtc, fb,
+                                           &src, &dest, &clip,
+                                           DRM_PLANE_HELPER_NO_SCALING,
+                                           DRM_PLANE_HELPER_NO_SCALING,
+                                           can_position, false, &visible);
+       if (ret)
+               return ret;
+
+       if (!visible)
+               return 0;
+
+       is_alpha = is_alpha_support(fb->pixel_format);
+       format = vop_convert_format(fb->pixel_format);
+       if (format < 0)
+               return format;
+
+       obj = rockchip_fb_get_gem_obj(fb, 0);
+       if (!obj) {
+               DRM_ERROR("fail to get rockchip gem object from framebuffer\n");
+               return -EINVAL;
+       }
+
+       rk_obj = to_rockchip_obj(obj);
+
+       actual_w = (src.x2 - src.x1) >> 16;
+       actual_h = (src.y2 - src.y1) >> 16;
+       crtc_x = max(0, crtc_x);
+       crtc_y = max(0, crtc_y);
+
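+       /*
+        * Display start position: the crtc coordinate plus the blanking
+        * between sync start and active start (htotal - hsync_start and
+        * vtotal - vsync_start), matching hact_st/vact_st in
+        * vop_crtc_mode_set().
+        */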
+       dsp_stx = crtc_x + crtc->mode.htotal - crtc->mode.hsync_start;
+       dsp_sty = crtc_y + crtc->mode.vtotal - crtc->mode.vsync_start;
+
+       offset = (src.x1 >> 16) * (fb->bits_per_pixel >> 3);
+       offset += (src.y1 >> 16) * fb->pitches[0];
+       yrgb_mst = rk_obj->dma_addr + offset;
+
+       y_vir_stride = fb->pitches[0] / (fb->bits_per_pixel >> 3);
+
+       /*
+        * If this plane update changes the plane's framebuffer (or, more
+        * precisely, if this update has a different framebuffer than the
+        * last update), enqueue it so we can track when it completes.
+        *
+        * Only once we discover that this update has completed can we
+        * unreference any previous framebuffers.
+        */
+       mutex_lock(&vop->vsync_mutex);
+       if (fb != vop_win_last_pending_fb(vop_win)) {
+               ret = drm_vblank_get(plane->dev, vop->pipe);
+               if (ret) {
+                       DRM_ERROR("failed to get vblank, %d\n", ret);
+                       mutex_unlock(&vop->vsync_mutex);
+                       return ret;
+               }
+
+               drm_framebuffer_reference(fb);
+
+               ret = vop_win_queue_fb(vop_win, fb, yrgb_mst, event);
+               if (ret) {
+                       drm_vblank_put(plane->dev, vop->pipe);
+                       mutex_unlock(&vop->vsync_mutex);
+                       return ret;
+               }
+
+               vop->vsync_work_pending = true;
+       }
+       mutex_unlock(&vop->vsync_mutex);
+
+       spin_lock(&vop->reg_lock);
+
+       VOP_WIN_SET(vop, win, format, format);
+       VOP_WIN_SET(vop, win, yrgb_vir, y_vir_stride);
+       VOP_WIN_SET(vop, win, yrgb_mst, yrgb_mst);
+       val = (actual_h - 1) << 16;
+       val |= (actual_w - 1) & 0xffff;
+       VOP_WIN_SET(vop, win, act_info, val);
+       VOP_WIN_SET(vop, win, dsp_info, val);
+       val = (dsp_sty - 1) << 16;
+       val |= (dsp_stx - 1) & 0xffff;
+       VOP_WIN_SET(vop, win, dsp_st, val);
+
+       if (is_alpha) {
+               VOP_WIN_SET(vop, win, dst_alpha_ctl,
+                           DST_FACTOR_M0(ALPHA_SRC_INVERSE));
+               val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
+                       SRC_ALPHA_M0(ALPHA_STRAIGHT) |
+                       SRC_BLEND_M0(ALPHA_PER_PIX) |
+                       SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
+                       SRC_FACTOR_M0(ALPHA_ONE);
+               VOP_WIN_SET(vop, win, src_alpha_ctl, val);
+       } else {
+               VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
+       }
+
+       VOP_WIN_SET(vop, win, enable, 1);
+
+       vop_cfg_done(vop);
+       spin_unlock(&vop->reg_lock);
+
+       return 0;
+}
+
+static int vop_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+                           struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+                           unsigned int crtc_w, unsigned int crtc_h,
+                           uint32_t src_x, uint32_t src_y, uint32_t src_w,
+                           uint32_t src_h)
+{
+       return vop_update_plane_event(plane, crtc, fb, crtc_x, crtc_y, crtc_w,
+                                     crtc_h, src_x, src_y, src_w, src_h,
+                                     NULL);
+}
+
+static int vop_update_primary_plane(struct drm_crtc *crtc,
+                                   struct drm_pending_vblank_event *event)
+{
+       unsigned int crtc_w, crtc_h;
+
+       crtc_w = crtc->primary->fb->width - crtc->x;
+       crtc_h = crtc->primary->fb->height - crtc->y;
+
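+       /* plane src coordinates are 16.16 fixed point, hence the << 16 */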
+       return vop_update_plane_event(crtc->primary, crtc, crtc->primary->fb,
+                                     0, 0, crtc_w, crtc_h, crtc->x << 16,
+                                     crtc->y << 16, crtc_w << 16,
+                                     crtc_h << 16, event);
+}
+
+static int vop_disable_plane(struct drm_plane *plane)
+{
+       struct vop_win *vop_win = to_vop_win(plane);
+       const struct vop_win_data *win = vop_win->data;
+       struct vop *vop;
+       int ret;
+
+       if (!plane->crtc)
+               return 0;
+
+       vop = to_vop(plane->crtc);
+
+       ret = drm_vblank_get(plane->dev, vop->pipe);
+       if (ret) {
+               DRM_ERROR("failed to get vblank, %d\n", ret);
+               return ret;
+       }
+
+       mutex_lock(&vop->vsync_mutex);
+
+       ret = vop_win_queue_fb(vop_win, NULL, 0, NULL);
+       if (ret) {
+               drm_vblank_put(plane->dev, vop->pipe);
+               mutex_unlock(&vop->vsync_mutex);
+               return ret;
+       }
+
+       vop->vsync_work_pending = true;
+       mutex_unlock(&vop->vsync_mutex);
+
+       spin_lock(&vop->reg_lock);
+       VOP_WIN_SET(vop, win, enable, 0);
+       vop_cfg_done(vop);
+       spin_unlock(&vop->reg_lock);
+
+       return 0;
+}
+
+static void vop_plane_destroy(struct drm_plane *plane)
+{
+       vop_disable_plane(plane);
+       drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs vop_plane_funcs = {
+       .update_plane = vop_update_plane,
+       .disable_plane = vop_disable_plane,
+       .destroy = vop_plane_destroy,
+};
+
+int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc,
+                                 int connector_type,
+                                 int out_mode)
+{
+       struct vop *vop = to_vop(crtc);
+
+       vop->connector_type = connector_type;
+       vop->connector_out_mode = out_mode;
+
+       return 0;
+}
+
+static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+       struct vop *vop = to_vop(crtc);
+       unsigned long flags;
+
+       if (vop->dpms != DRM_MODE_DPMS_ON)
+               return -EPERM;
+
+       spin_lock_irqsave(&vop->irq_lock, flags);
+
+       vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(1));
+
+       spin_unlock_irqrestore(&vop->irq_lock, flags);
+
+       return 0;
+}
+
+static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+       struct vop *vop = to_vop(crtc);
+       unsigned long flags;
+
+       if (vop->dpms != DRM_MODE_DPMS_ON)
+               return;
+       spin_lock_irqsave(&vop->irq_lock, flags);
+       vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(0));
+       spin_unlock_irqrestore(&vop->irq_lock, flags);
+}
+
+static const struct rockchip_crtc_funcs private_crtc_funcs = {
+       .enable_vblank = vop_crtc_enable_vblank,
+       .disable_vblank = vop_crtc_disable_vblank,
+};
+
+static void vop_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct vop *vop = to_vop(crtc);
+
+       DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
+
+       if (vop->dpms == mode) {
+               DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+               return;
+       }
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               vop_enable(crtc);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               vop_disable(crtc);
+               break;
+       default:
+               DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+               break;
+       }
+
+       vop->dpms = mode;
+}
+
+static void vop_crtc_prepare(struct drm_crtc *crtc)
+{
+       vop_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
+                               const struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+{
+       if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0)
+               return false;
+
+       return true;
+}
+
+static int vop_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+                                 struct drm_framebuffer *old_fb)
+{
+       int ret;
+
+       crtc->x = x;
+       crtc->y = y;
+
+       ret = vop_update_primary_plane(crtc, NULL);
+       if (ret < 0) {
+               DRM_ERROR("fail to update plane\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static int vop_crtc_mode_set(struct drm_crtc *crtc,
+                            struct drm_display_mode *mode,
+                            struct drm_display_mode *adjusted_mode,
+                            int x, int y, struct drm_framebuffer *fb)
+{
+       struct vop *vop = to_vop(crtc);
+       u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
+       u16 hdisplay = adjusted_mode->hdisplay;
+       u16 htotal = adjusted_mode->htotal;
+       u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
+       u16 hact_end = hact_st + hdisplay;
+       u16 vdisplay = adjusted_mode->vdisplay;
+       u16 vtotal = adjusted_mode->vtotal;
+       u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
+       u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
+       u16 vact_end = vact_st + vdisplay;
+       int ret;
+       uint32_t val;
+
+       /*
+        * Disable dclk to stop the frame scan, so that we can safely
+        * configure the mode and enable the iommu.
+        */
+       clk_disable(vop->dclk);
+
+       switch (vop->connector_type) {
+       case DRM_MODE_CONNECTOR_LVDS:
+               VOP_CTRL_SET(vop, rgb_en, 1);
+               break;
+       case DRM_MODE_CONNECTOR_eDP:
+               VOP_CTRL_SET(vop, edp_en, 1);
+               break;
+       case DRM_MODE_CONNECTOR_HDMIA:
+               VOP_CTRL_SET(vop, hdmi_en, 1);
+               break;
+       default:
+               DRM_ERROR("unsupport connector_type[%d]\n",
+                         vop->connector_type);
+               return -EINVAL;
+       };
+       VOP_CTRL_SET(vop, out_mode, vop->connector_out_mode);
+
+       val = 0x8;
+       val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
+       val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? (1 << 1) : 0;
+       VOP_CTRL_SET(vop, pin_pol, val);
+
+       VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
+       val = hact_st << 16;
+       val |= hact_end;
+       VOP_CTRL_SET(vop, hact_st_end, val);
+       VOP_CTRL_SET(vop, hpost_st_end, val);
+
+       VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len);
+       val = vact_st << 16;
+       val |= vact_end;
+       VOP_CTRL_SET(vop, vact_st_end, val);
+       VOP_CTRL_SET(vop, vpost_st_end, val);
+
+       ret = vop_crtc_mode_set_base(crtc, x, y, fb);
+       if (ret)
+               return ret;
+
+       /*
+        * Reset dclk so that all of the mode configuration takes effect and
+        * the clock starts on a clean frame.
+        */
+       reset_control_assert(vop->dclk_rst);
+       usleep_range(10, 20);
+       reset_control_deassert(vop->dclk_rst);
+
+       clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
+       ret = clk_enable(vop->dclk);
+       if (ret < 0) {
+               dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void vop_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
+       .dpms = vop_crtc_dpms,
+       .prepare = vop_crtc_prepare,
+       .mode_fixup = vop_crtc_mode_fixup,
+       .mode_set = vop_crtc_mode_set,
+       .mode_set_base = vop_crtc_mode_set_base,
+       .commit = vop_crtc_commit,
+};
+
+static int vop_crtc_page_flip(struct drm_crtc *crtc,
+                             struct drm_framebuffer *fb,
+                             struct drm_pending_vblank_event *event,
+                             uint32_t page_flip_flags)
+{
+       struct vop *vop = to_vop(crtc);
+       struct drm_framebuffer *old_fb = crtc->primary->fb;
+       int ret;
+
+       /* when the page flip is requested, crtc's dpms should be on */
+       if (vop->dpms > DRM_MODE_DPMS_ON) {
+               DRM_DEBUG("failed page flip request at dpms[%d].\n", vop->dpms);
+               return 0;
+       }
+
+       crtc->primary->fb = fb;
+
+       ret = vop_update_primary_plane(crtc, event);
+       if (ret)
+               crtc->primary->fb = old_fb;
+
+       return ret;
+}
+
+static void vop_win_state_complete(struct vop_win *vop_win,
+                                  struct vop_win_state *state)
+{
+       struct vop *vop = vop_win->vop;
+       struct drm_crtc *crtc = &vop->crtc;
+       struct drm_device *drm = crtc->dev;
+       unsigned long flags;
+
+       if (state->event) {
+               spin_lock_irqsave(&drm->event_lock, flags);
+               drm_send_vblank_event(drm, -1, state->event);
+               spin_unlock_irqrestore(&drm->event_lock, flags);
+       }
+
+       list_del(&state->head);
+       drm_vblank_put(crtc->dev, vop->pipe);
+}
+
+static void vop_crtc_destroy(struct drm_crtc *crtc)
+{
+       drm_crtc_cleanup(crtc);
+}
+
+static const struct drm_crtc_funcs vop_crtc_funcs = {
+       .set_config = drm_crtc_helper_set_config,
+       .page_flip = vop_crtc_page_flip,
+       .destroy = vop_crtc_destroy,
+};
+
+static bool vop_win_state_is_active(struct vop_win *vop_win,
+                                   struct vop_win_state *state)
+{
+       bool active = false;
+
+       if (state->fb) {
+               dma_addr_t yrgb_mst;
+
+               /* check yrgb_mst to tell if pending_fb is now front */
+               yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);
+
+               active = (yrgb_mst == state->yrgb_mst);
+       } else {
+               bool enabled;
+
+               /* if enable bit is clear, plane is now disabled */
+               enabled = VOP_WIN_GET(vop_win->vop, vop_win->data, enable);
+
+               active = (enabled == 0);
+       }
+
+       return active;
+}
+
+static void vop_win_state_destroy(struct vop_win_state *state)
+{
+       struct drm_framebuffer *fb = state->fb;
+
+       if (fb)
+               drm_framebuffer_unreference(fb);
+
+       kfree(state);
+}
+
+static void vop_win_update_state(struct vop_win *vop_win)
+{
+       struct vop_win_state *state, *n, *new_active = NULL;
+
+       /* Check if any pending states are now active */
+       list_for_each_entry(state, &vop_win->pending, head)
+               if (vop_win_state_is_active(vop_win, state)) {
+                       new_active = state;
+                       break;
+               }
+
+       if (!new_active)
+               return;
+
+       /*
+        * Destroy any 'skipped' pending states - states that were queued
+        * before the newly active state.
+        */
+       list_for_each_entry_safe(state, n, &vop_win->pending, head) {
+               if (state == new_active)
+                       break;
+               vop_win_state_complete(vop_win, state);
+               vop_win_state_destroy(state);
+       }
+
+       vop_win_state_complete(vop_win, new_active);
+
+       if (vop_win->active)
+               vop_win_state_destroy(vop_win->active);
+       vop_win->active = new_active;
+}
+
+static bool vop_win_has_pending_state(struct vop_win *vop_win)
+{
+       return !list_empty(&vop_win->pending);
+}
+
+static irqreturn_t vop_isr_thread(int irq, void *data)
+{
+       struct vop *vop = data;
+       const struct vop_data *vop_data = vop->data;
+       unsigned int i;
+
+       mutex_lock(&vop->vsync_mutex);
+
+       if (!vop->vsync_work_pending)
+               goto done;
+
+       vop->vsync_work_pending = false;
+
+       for (i = 0; i < vop_data->win_size; i++) {
+               struct vop_win *vop_win = &vop->win[i];
+
+               vop_win_update_state(vop_win);
+               if (vop_win_has_pending_state(vop_win))
+                       vop->vsync_work_pending = true;
+       }
+
+done:
+       mutex_unlock(&vop->vsync_mutex);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t vop_isr(int irq, void *data)
+{
+       struct vop *vop = data;
+       uint32_t intr0_reg, active_irqs;
+       unsigned long flags;
+
+       /*
+        * The INTR_CTRL0 register holds the interrupt status, enable and
+        * clear bits; irq_lock must be held to avoid racing with
+        * enable/disable_vblank().
+        */
+       spin_lock_irqsave(&vop->irq_lock, flags);
+       intr0_reg = vop_readl(vop, INTR_CTRL0);
+       active_irqs = intr0_reg & INTR_MASK;
+       /* Clear all active interrupt sources */
+       if (active_irqs)
+               vop_writel(vop, INTR_CTRL0,
+                          intr0_reg | (active_irqs << INTR_CLR_SHIFT));
+       spin_unlock_irqrestore(&vop->irq_lock, flags);
+
+       /* This is expected for vop iommu irqs, since the irq is shared */
+       if (!active_irqs)
+               return IRQ_NONE;
+
+       /* Only Frame Start Interrupt is enabled; other irqs are spurious. */
+       if (!(active_irqs & FS_INTR)) {
+               DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);
+               return IRQ_NONE;
+       }
+
+       drm_handle_vblank(vop->drm_dev, vop->pipe);
+
+       return (vop->vsync_work_pending) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+}
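For orientation, a minimal sketch (not part of this patch) of the matching unmask path an enable_vblank hook would take, reusing the irq_lock discipline described above; vop_enable_fs_irq is a hypothetical name, and FS_INTR_EN() comes from rockchip_drm_vop.h below:

static void vop_enable_fs_irq(struct vop *vop)
{
	unsigned long flags;

	/* INTR_CTRL0 mixes status/enable/clear bits, so take irq_lock */
	spin_lock_irqsave(&vop->irq_lock, flags);
	vop_writel(vop, INTR_CTRL0,
		   vop_readl(vop, INTR_CTRL0) | FS_INTR_EN(1));
	spin_unlock_irqrestore(&vop->irq_lock, flags);
}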
+
+static int vop_create_crtc(struct vop *vop)
+{
+       const struct vop_data *vop_data = vop->data;
+       struct device *dev = vop->dev;
+       struct drm_device *drm_dev = vop->drm_dev;
+       struct drm_plane *primary = NULL, *cursor = NULL, *plane;
+       struct drm_crtc *crtc = &vop->crtc;
+       struct device_node *port;
+       int ret;
+       int i;
+
+       /*
+        * Create drm_plane for primary and cursor planes first, since we need
+        * to pass them to drm_crtc_init_with_planes, which sets the
+        * "possible_crtcs" to the newly initialized crtc.
+        */
+       for (i = 0; i < vop_data->win_size; i++) {
+               struct vop_win *vop_win = &vop->win[i];
+               const struct vop_win_data *win_data = vop_win->data;
+
+               if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
+                   win_data->type != DRM_PLANE_TYPE_CURSOR)
+                       continue;
+
+               ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
+                                              0, &vop_plane_funcs,
+                                              win_data->phy->data_formats,
+                                              win_data->phy->nformats,
+                                              win_data->type);
+               if (ret) {
+                       DRM_ERROR("failed to initialize plane\n");
+                       goto err_cleanup_planes;
+               }
+
+               plane = &vop_win->base;
+               if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+                       primary = plane;
+               else if (plane->type == DRM_PLANE_TYPE_CURSOR)
+                       cursor = plane;
+       }
+
+       ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
+                                       &vop_crtc_funcs);
+       if (ret)
+               return ret;
+
+       drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
+
+       /*
+        * Create drm_planes for overlay windows with possible_crtcs restricted
+        * to the newly created crtc.
+        */
+       for (i = 0; i < vop_data->win_size; i++) {
+               struct vop_win *vop_win = &vop->win[i];
+               const struct vop_win_data *win_data = vop_win->data;
+               unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);
+
+               if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
+                       continue;
+
+               ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
+                                              possible_crtcs,
+                                              &vop_plane_funcs,
+                                              win_data->phy->data_formats,
+                                              win_data->phy->nformats,
+                                              win_data->type);
+               if (ret) {
+                       DRM_ERROR("failed to initialize overlay plane\n");
+                       goto err_cleanup_crtc;
+               }
+       }
+
+       port = of_get_child_by_name(dev->of_node, "port");
+       if (!port) {
+               DRM_ERROR("no port node found in %s\n",
+                         dev->of_node->full_name);
+               ret = -ENOENT;
+               goto err_cleanup_crtc;
+       }
+
+       crtc->port = port;
+       vop->pipe = drm_crtc_index(crtc);
+       rockchip_register_crtc_funcs(drm_dev, &private_crtc_funcs, vop->pipe);
+
+       return 0;
+
+err_cleanup_crtc:
+       drm_crtc_cleanup(crtc);
+err_cleanup_planes:
+       list_for_each_entry(plane, &drm_dev->mode_config.plane_list, head)
+               drm_plane_cleanup(plane);
+       return ret;
+}
+
+static void vop_destroy_crtc(struct vop *vop)
+{
+       struct drm_crtc *crtc = &vop->crtc;
+
+       rockchip_unregister_crtc_funcs(vop->drm_dev, vop->pipe);
+       of_node_put(crtc->port);
+       drm_crtc_cleanup(crtc);
+}
+
+static int vop_initial(struct vop *vop)
+{
+       const struct vop_data *vop_data = vop->data;
+       const struct vop_reg_data *init_table = vop_data->init_table;
+       struct reset_control *ahb_rst;
+       int i, ret;
+
+       vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
+       if (IS_ERR(vop->hclk)) {
+               dev_err(vop->dev, "failed to get hclk source\n");
+               return PTR_ERR(vop->hclk);
+       }
+       vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
+       if (IS_ERR(vop->aclk)) {
+               dev_err(vop->dev, "failed to get aclk source\n");
+               return PTR_ERR(vop->aclk);
+       }
+       vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
+       if (IS_ERR(vop->dclk)) {
+               dev_err(vop->dev, "failed to get dclk source\n");
+               return PTR_ERR(vop->dclk);
+       }
+
+       ret = clk_prepare(vop->hclk);
+       if (ret < 0) {
+               dev_err(vop->dev, "failed to prepare hclk\n");
+               return ret;
+       }
+
+       ret = clk_prepare(vop->dclk);
+       if (ret < 0) {
+               dev_err(vop->dev, "failed to prepare dclk\n");
+               goto err_unprepare_hclk;
+       }
+
+       ret = clk_prepare(vop->aclk);
+       if (ret < 0) {
+               dev_err(vop->dev, "failed to prepare aclk\n");
+               goto err_unprepare_dclk;
+       }
+
+       /*
+        * enable hclk so that we can configure vop registers.
+        */
+       ret = clk_enable(vop->hclk);
+       if (ret < 0) {
+               dev_err(vop->dev, "failed to enable hclk\n");
+               goto err_unprepare_aclk;
+       }
+       /*
+        * do ahb reset to reset all vop registers.
+        */
+       ahb_rst = devm_reset_control_get(vop->dev, "ahb");
+       if (IS_ERR(ahb_rst)) {
+               dev_err(vop->dev, "failed to get ahb reset\n");
+               ret = PTR_ERR(ahb_rst);
+               goto err_disable_hclk;
+       }
+       reset_control_assert(ahb_rst);
+       usleep_range(10, 20);
+       reset_control_deassert(ahb_rst);
+
+       memcpy(vop->regsbak, vop->regs, vop->len);
+
+       for (i = 0; i < vop_data->table_size; i++)
+               vop_writel(vop, init_table[i].offset, init_table[i].value);
+
+       for (i = 0; i < vop_data->win_size; i++) {
+               const struct vop_win_data *win = &vop_data->win[i];
+
+               VOP_WIN_SET(vop, win, enable, 0);
+       }
+
+       vop_cfg_done(vop);
+
+       /*
+        * do dclk reset so that all of the above config takes effect.
+        */
+       vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
+       if (IS_ERR(vop->dclk_rst)) {
+               dev_err(vop->dev, "failed to get dclk reset\n");
+               ret = PTR_ERR(vop->dclk_rst);
+               goto err_disable_hclk;
+       }
+       reset_control_assert(vop->dclk_rst);
+       usleep_range(10, 20);
+       reset_control_deassert(vop->dclk_rst);
+
+       clk_disable(vop->hclk);
+
+       vop->dpms = DRM_MODE_DPMS_OFF;
+
+       return 0;
+
+err_disable_hclk:
+       clk_disable(vop->hclk);
+err_unprepare_aclk:
+       clk_unprepare(vop->aclk);
+err_unprepare_dclk:
+       clk_unprepare(vop->dclk);
+err_unprepare_hclk:
+       clk_unprepare(vop->hclk);
+       return ret;
+}
+
+/*
+ * Initialize the vop->win array elements.
+ */
+static void vop_win_init(struct vop *vop)
+{
+       const struct vop_data *vop_data = vop->data;
+       unsigned int i;
+
+       for (i = 0; i < vop_data->win_size; i++) {
+               struct vop_win *vop_win = &vop->win[i];
+               const struct vop_win_data *win_data = &vop_data->win[i];
+
+               vop_win->data = win_data;
+               vop_win->vop = vop;
+               INIT_LIST_HEAD(&vop_win->pending);
+       }
+}
+
+static int vop_bind(struct device *dev, struct device *master, void *data)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       const struct of_device_id *of_id;
+       const struct vop_data *vop_data;
+       struct drm_device *drm_dev = data;
+       struct vop *vop;
+       struct resource *res;
+       size_t alloc_size;
+       int ret;
+
+       of_id = of_match_device(vop_driver_dt_match, dev);
+       vop_data = of_id->data;
+       if (!vop_data)
+               return -ENODEV;
+
+       /* Allocate vop struct and its vop_win array */
+       alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
+       vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
+       if (!vop)
+               return -ENOMEM;
+
+       vop->dev = dev;
+       vop->data = vop_data;
+       vop->drm_dev = drm_dev;
+       dev_set_drvdata(dev, vop);
+
+       vop_win_init(vop);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       vop->regs = devm_ioremap_resource(dev, res);
+       if (IS_ERR(vop->regs))
+               return PTR_ERR(vop->regs);
+       vop->len = resource_size(res);
+
+       vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
+       if (!vop->regsbak)
+               return -ENOMEM;
+
+       ret = vop_initial(vop);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "cannot initialize vop device - err %d\n", ret);
+               return ret;
+       }
+
+       vop->irq = platform_get_irq(pdev, 0);
+       if (vop->irq < 0) {
+               dev_err(dev, "cannot find irq for vop\n");
+               return vop->irq;
+       }
+
+       spin_lock_init(&vop->reg_lock);
+       spin_lock_init(&vop->irq_lock);
+
+       mutex_init(&vop->vsync_mutex);
+
+       ret = devm_request_threaded_irq(dev, vop->irq, vop_isr, vop_isr_thread,
+                                       IRQF_SHARED, dev_name(dev), vop);
+       if (ret)
+               return ret;
+
+       /* IRQ is initially disabled; it gets enabled in power_on */
+       disable_irq(vop->irq);
+
+       ret = vop_create_crtc(vop);
+       if (ret)
+               return ret;
+
+       pm_runtime_enable(&pdev->dev);
+       return 0;
+}
+
+static void vop_unbind(struct device *dev, struct device *master, void *data)
+{
+       struct vop *vop = dev_get_drvdata(dev);
+
+       pm_runtime_disable(dev);
+       vop_destroy_crtc(vop);
+}
+
+static const struct component_ops vop_component_ops = {
+       .bind = vop_bind,
+       .unbind = vop_unbind,
+};
+
+static int vop_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+
+       if (!dev->of_node) {
+               dev_err(dev, "can't find vop devices\n");
+               return -ENODEV;
+       }
+
+       return component_add(dev, &vop_component_ops);
+}
+
+static int vop_remove(struct platform_device *pdev)
+{
+       component_del(&pdev->dev, &vop_component_ops);
+
+       return 0;
+}
+
+struct platform_driver vop_platform_driver = {
+       .probe = vop_probe,
+       .remove = vop_remove,
+       .driver = {
+               .name = "rockchip-vop",
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(vop_driver_dt_match),
+       },
+};
+
+module_platform_driver(vop_platform_driver);
+
+MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>");
+MODULE_DESCRIPTION("ROCKCHIP VOP Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
new file mode 100644 (file)
index 0000000..63e9b3a
--- /dev/null
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_DRM_VOP_H
+#define _ROCKCHIP_DRM_VOP_H
+
+/* register definition */
+#define REG_CFG_DONE                   0x0000
+#define VERSION_INFO                   0x0004
+#define SYS_CTRL                       0x0008
+#define SYS_CTRL1                      0x000c
+#define DSP_CTRL0                      0x0010
+#define DSP_CTRL1                      0x0014
+#define DSP_BG                         0x0018
+#define MCU_CTRL                       0x001c
+#define INTR_CTRL0                     0x0020
+#define INTR_CTRL1                     0x0024
+#define WIN0_CTRL0                     0x0030
+#define WIN0_CTRL1                     0x0034
+#define WIN0_COLOR_KEY                 0x0038
+#define WIN0_VIR                       0x003c
+#define WIN0_YRGB_MST                  0x0040
+#define WIN0_CBR_MST                   0x0044
+#define WIN0_ACT_INFO                  0x0048
+#define WIN0_DSP_INFO                  0x004c
+#define WIN0_DSP_ST                    0x0050
+#define WIN0_SCL_FACTOR_YRGB           0x0054
+#define WIN0_SCL_FACTOR_CBR            0x0058
+#define WIN0_SCL_OFFSET                        0x005c
+#define WIN0_SRC_ALPHA_CTRL            0x0060
+#define WIN0_DST_ALPHA_CTRL            0x0064
+#define WIN0_FADING_CTRL               0x0068
+/* win1 register */
+#define WIN1_CTRL0                     0x0070
+#define WIN1_CTRL1                     0x0074
+#define WIN1_COLOR_KEY                 0x0078
+#define WIN1_VIR                       0x007c
+#define WIN1_YRGB_MST                  0x0080
+#define WIN1_CBR_MST                   0x0084
+#define WIN1_ACT_INFO                  0x0088
+#define WIN1_DSP_INFO                  0x008c
+#define WIN1_DSP_ST                    0x0090
+#define WIN1_SCL_FACTOR_YRGB           0x0094
+#define WIN1_SCL_FACTOR_CBR            0x0098
+#define WIN1_SCL_OFFSET                        0x009c
+#define WIN1_SRC_ALPHA_CTRL            0x00a0
+#define WIN1_DST_ALPHA_CTRL            0x00a4
+#define WIN1_FADING_CTRL               0x00a8
+/* win2 register */
+#define WIN2_CTRL0                     0x00b0
+#define WIN2_CTRL1                     0x00b4
+#define WIN2_VIR0_1                    0x00b8
+#define WIN2_VIR2_3                    0x00bc
+#define WIN2_MST0                      0x00c0
+#define WIN2_DSP_INFO0                 0x00c4
+#define WIN2_DSP_ST0                   0x00c8
+#define WIN2_COLOR_KEY                 0x00cc
+#define WIN2_MST1                      0x00d0
+#define WIN2_DSP_INFO1                 0x00d4
+#define WIN2_DSP_ST1                   0x00d8
+#define WIN2_SRC_ALPHA_CTRL            0x00dc
+#define WIN2_MST2                      0x00e0
+#define WIN2_DSP_INFO2                 0x00e4
+#define WIN2_DSP_ST2                   0x00e8
+#define WIN2_DST_ALPHA_CTRL            0x00ec
+#define WIN2_MST3                      0x00f0
+#define WIN2_DSP_INFO3                 0x00f4
+#define WIN2_DSP_ST3                   0x00f8
+#define WIN2_FADING_CTRL               0x00fc
+/* win3 register */
+#define WIN3_CTRL0                     0x0100
+#define WIN3_CTRL1                     0x0104
+#define WIN3_VIR0_1                    0x0108
+#define WIN3_VIR2_3                    0x010c
+#define WIN3_MST0                      0x0110
+#define WIN3_DSP_INFO0                 0x0114
+#define WIN3_DSP_ST0                   0x0118
+#define WIN3_COLOR_KEY                 0x011c
+#define WIN3_MST1                      0x0120
+#define WIN3_DSP_INFO1                 0x0124
+#define WIN3_DSP_ST1                   0x0128
+#define WIN3_SRC_ALPHA_CTRL            0x012c
+#define WIN3_MST2                      0x0130
+#define WIN3_DSP_INFO2                 0x0134
+#define WIN3_DSP_ST2                   0x0138
+#define WIN3_DST_ALPHA_CTRL            0x013c
+#define WIN3_MST3                      0x0140
+#define WIN3_DSP_INFO3                 0x0144
+#define WIN3_DSP_ST3                   0x0148
+#define WIN3_FADING_CTRL               0x014c
+/* hwc register */
+#define HWC_CTRL0                      0x0150
+#define HWC_CTRL1                      0x0154
+#define HWC_MST                                0x0158
+#define HWC_DSP_ST                     0x015c
+#define HWC_SRC_ALPHA_CTRL             0x0160
+#define HWC_DST_ALPHA_CTRL             0x0164
+#define HWC_FADING_CTRL                        0x0168
+/* post process register */
+#define POST_DSP_HACT_INFO             0x0170
+#define POST_DSP_VACT_INFO             0x0174
+#define POST_SCL_FACTOR_YRGB           0x0178
+#define POST_SCL_CTRL                  0x0180
+#define POST_DSP_VACT_INFO_F1          0x0184
+#define DSP_HTOTAL_HS_END              0x0188
+#define DSP_HACT_ST_END                        0x018c
+#define DSP_VTOTAL_VS_END              0x0190
+#define DSP_VACT_ST_END                        0x0194
+#define DSP_VS_ST_END_F1               0x0198
+#define DSP_VACT_ST_END_F1             0x019c
+/* register definition end */
+
+/* interrupt define */
+#define DSP_HOLD_VALID_INTR            (1 << 0)
+#define FS_INTR                                (1 << 1)
+#define LINE_FLAG_INTR                 (1 << 2)
+#define BUS_ERROR_INTR                 (1 << 3)
+
+#define INTR_MASK                      (DSP_HOLD_VALID_INTR | FS_INTR | \
+                                        LINE_FLAG_INTR | BUS_ERROR_INTR)
+
+#define DSP_HOLD_VALID_INTR_EN(x)      ((x) << 4)
+#define FS_INTR_EN(x)                  ((x) << 5)
+#define LINE_FLAG_INTR_EN(x)           ((x) << 6)
+#define BUS_ERROR_INTR_EN(x)           ((x) << 7)
+#define DSP_HOLD_VALID_INTR_MASK       (1 << 4)
+#define FS_INTR_MASK                   (1 << 5)
+#define LINE_FLAG_INTR_MASK            (1 << 6)
+#define BUS_ERROR_INTR_MASK            (1 << 7)
+
+#define INTR_CLR_SHIFT                 8
+#define DSP_HOLD_VALID_INTR_CLR                (1 << (INTR_CLR_SHIFT + 0))
+#define FS_INTR_CLR                    (1 << (INTR_CLR_SHIFT + 1))
+#define LINE_FLAG_INTR_CLR             (1 << (INTR_CLR_SHIFT + 2))
+#define BUS_ERROR_INTR_CLR             (1 << (INTR_CLR_SHIFT + 3))
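The clear bits are simply the status bits shifted up by INTR_CLR_SHIFT (e.g. FS_INTR_CLR == FS_INTR << INTR_CLR_SHIFT), which is exactly how vop_isr() above acknowledges the active sources:

	vop_writel(vop, INTR_CTRL0, intr0_reg | (active_irqs << INTR_CLR_SHIFT));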
+
+#define DSP_LINE_NUM(x)                        (((x) & 0x1fff) << 12)
+#define DSP_LINE_NUM_MASK              (0x1fff << 12)
+
+/* src alpha ctrl define */
+#define SRC_FADING_VALUE(x)            (((x) & 0xff) << 24)
+#define SRC_GLOBAL_ALPHA(x)            (((x) & 0xff) << 16)
+#define SRC_FACTOR_M0(x)               (((x) & 0x7) << 6)
+#define SRC_ALPHA_CAL_M0(x)            (((x) & 0x1) << 5)
+#define SRC_BLEND_M0(x)                        (((x) & 0x3) << 3)
+#define SRC_ALPHA_M0(x)                        (((x) & 0x1) << 2)
+#define SRC_COLOR_M0(x)                        (((x) & 0x1) << 1)
+#define SRC_ALPHA_EN(x)                        (((x) & 0x1) << 0)
+/* dst alpha ctrl define */
+#define DST_FACTOR_M0(x)               (((x) & 0x7) << 6)
+
+/*
+ * display output interfaces supported by rockchip lcdc
+ */
+#define ROCKCHIP_OUT_MODE_P888 0
+#define ROCKCHIP_OUT_MODE_P666 1
+#define ROCKCHIP_OUT_MODE_P565 2
+/* for special output interfaces */
+#define ROCKCHIP_OUT_MODE_AAAA 15
+
+enum alpha_mode {
+       ALPHA_STRAIGHT,
+       ALPHA_INVERSE,
+};
+
+enum global_blend_mode {
+       ALPHA_GLOBAL,
+       ALPHA_PER_PIX,
+       ALPHA_PER_PIX_GLOBAL,
+};
+
+enum alpha_cal_mode {
+       ALPHA_SATURATION,
+       ALPHA_NO_SATURATION,
+};
+
+enum color_mode {
+       ALPHA_SRC_PRE_MUL,
+       ALPHA_SRC_NO_PRE_MUL,
+};
+
+enum factor_mode {
+       ALPHA_ZERO,
+       ALPHA_ONE,
+       ALPHA_SRC,
+       ALPHA_SRC_INVERSE,
+       ALPHA_SRC_GLOBAL,
+};
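A hedged illustration of how these enums pair with the *_M0 field macros above (the pairing is inferred from the names; this patch does not spell it out): a straight, per-pixel alpha setup for win0 might be composed as

	u32 val = SRC_ALPHA_EN(1) |
		  SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
		  SRC_ALPHA_M0(ALPHA_STRAIGHT) |
		  SRC_BLEND_M0(ALPHA_PER_PIX) |
		  SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
		  SRC_FACTOR_M0(ALPHA_ONE);

	vop_writel(vop, WIN0_SRC_ALPHA_CTRL, val);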
+
+#endif /* _ROCKCHIP_DRM_VOP_H */
index 8ce508e76208a0f5ce8c12b1da9e54111aac7ad6..3820ae97a03039cf0d95ee3b473642b589262299 100644 (file)
@@ -93,7 +93,8 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
  */
 
 int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
-                          struct list_head *list, bool intr)
+                          struct list_head *list, bool intr,
+                          struct list_head *dups)
 {
        struct ttm_bo_global *glob;
        struct ttm_validate_buffer *entry;
@@ -117,6 +118,13 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                        __ttm_bo_unreserve(bo);
 
                        ret = -EBUSY;
+
+               } else if (ret == -EALREADY && dups) {
+                       struct ttm_validate_buffer *safe = entry;
+                       entry = list_prev_entry(entry, head);
+                       list_del(&safe->head);
+                       list_add(&safe->head, dups);
+                       continue;
                }
 
                if (!ret) {
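A hedged caller sketch (identifiers hypothetical): a driver whose validation list can name the same buffer twice now passes a third list so duplicates are set aside instead of the whole reservation failing with -EALREADY:

	LIST_HEAD(duplicates);
	int ret;

	ret = ttm_eu_reserve_buffers(&ticket, &validate_list, true, &duplicates);
	/* entries moved onto 'duplicates' are already reserved under 'ticket' */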
index db7621828bc7724205e2481ee4c031f7700f0422..7b5d22110f25e7619c37eac7fd66858fa25b93e8 100644 (file)
@@ -1062,8 +1062,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
 
        vmaster = vmw_master_check(dev, file_priv, flags);
        if (unlikely(IS_ERR(vmaster))) {
-               DRM_INFO("IOCTL ERROR %d\n", nr);
-               return PTR_ERR(vmaster);
+               ret = PTR_ERR(vmaster);
+
+               if (ret != -ERESTARTSYS)
+                       DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
+                                nr, ret);
+               return ret;
        }
 
        ret = ioctl_func(filp, cmd, arg);
index 596cd6dafd338c2f2e21a6461eb9c93676c487b3..33176d05db3542903f1c919b59f68cad2da22044 100644 (file)
@@ -2487,7 +2487,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        if (unlikely(ret != 0))
                goto out_err_nores;
 
-       ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true);
+       ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
+                                    true, NULL);
        if (unlikely(ret != 0))
                goto out_err;
 
@@ -2677,7 +2678,8 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
        query_val.shared = false;
        list_add_tail(&query_val.head, &validate_list);
 
-       ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
+       ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
+                                    false, NULL);
        if (unlikely(ret != 0)) {
                vmw_execbuf_unpin_panic(dev_priv);
                goto out_no_reserve;
index 197164fd7803730c1759728ae9d53fa586f739da..b7594cb758afc4299493122f4f6e44c68ef4d919 100644 (file)
@@ -545,35 +545,19 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
 
 static void vmw_fence_destroy(struct vmw_fence_obj *fence)
 {
-       struct vmw_fence_manager *fman = fman_from_fence(fence);
-
        fence_free(&fence->base);
-
-       /*
-        * Free kernel space accounting.
-        */
-       ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
-                           fman->fence_size);
 }
 
 int vmw_fence_create(struct vmw_fence_manager *fman,
                     uint32_t seqno,
                     struct vmw_fence_obj **p_fence)
 {
-       struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
        struct vmw_fence_obj *fence;
        int ret;
 
-       ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
-                                  false, false);
-       if (unlikely(ret != 0))
-               return ret;
-
        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
-       if (unlikely(fence == NULL)) {
-               ret = -ENOMEM;
-               goto out_no_object;
-       }
+       if (unlikely(fence == NULL))
+               return -ENOMEM;
 
        ret = vmw_fence_obj_init(fman, fence, seqno,
                                 vmw_fence_destroy);
@@ -585,8 +569,6 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
 
 out_err_init:
        kfree(fence);
-out_no_object:
-       ttm_mem_global_free(mem_glob, fman->fence_size);
        return ret;
 }
 
@@ -1105,6 +1087,8 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
        if (ret != 0)
                goto out_no_queue;
 
+       return 0;
+
 out_no_queue:
        event->base.destroy(&event->base);
 out_no_event:
@@ -1180,17 +1164,10 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 
        BUG_ON(fence == NULL);
 
-       if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
-               ret = vmw_event_fence_action_create(file_priv, fence,
-                                                   arg->flags,
-                                                   arg->user_data,
-                                                   true);
-       else
-               ret = vmw_event_fence_action_create(file_priv, fence,
-                                                   arg->flags,
-                                                   arg->user_data,
-                                                   true);
-
+       ret = vmw_event_fence_action_create(file_priv, fence,
+                                           arg->flags,
+                                           arg->user_data,
+                                           true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Failed to attach event to fence.\n");
index 026de7cea0f662bd75e92cf4ad1dfde057c7775e..210ef15b1d0919c59e8d7eb8aff72b588f2f6ea6 100644 (file)
@@ -1222,7 +1222,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
        val_buf->bo = ttm_bo_reference(&res->backup->base);
        val_buf->shared = false;
        list_add_tail(&val_buf->head, &val_list);
-       ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
+       ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
        if (unlikely(ret != 0))
                goto out_no_reserve;
 
index 8719fb3cccc93fb8b282c5022f9cdd669dc3e270..6a4584a43aa6cec2d29d35471e6c25559897b3a6 100644 (file)
@@ -198,7 +198,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        cmd->body.mobid = bo->mem.start;
-       cmd->body.offsetInBytes = 0;
+       cmd->body.offsetInBytes = res->backup_offset;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
index 63f3f03ecc9b069f899eccbe9524168bbef3656c..c604f4c3ac0dd53d036e28d839f81a87dbfea913 100644 (file)
 #define CDNS_I2C_DIVA_MAX      4
 #define CDNS_I2C_DIVB_MAX      64
 
+#define CDNS_I2C_TIMEOUT_MAX   0xFF
+
 #define cdns_i2c_readreg(offset)       readl_relaxed(id->membase + offset)
 #define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset)
 
@@ -852,6 +854,15 @@ static int cdns_i2c_probe(struct platform_device *pdev)
                goto err_clk_dis;
        }
 
+       /*
+        * Cadence I2C controller has a bug wherein it generates
+        * invalid read transaction after HW timeout in master receiver mode.
+        * HW timeout is not used by this driver and the interrupt is disabled.
+        * But the feature itself cannot be disabled. Hence maximum value
+        * is written to this register to reduce the chances of error.
+        */
+       cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
+
        dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
                 id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
 
index d15b7c9b9219147f05f33e2516aad2d8a4a7f667..01f0cd87a4a5b6d40fb0c01b17d5d589cc2d4ac4 100644 (file)
@@ -407,11 +407,9 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
        if (dev->cmd_err & DAVINCI_I2C_STR_NACK) {
                if (msg->flags & I2C_M_IGNORE_NAK)
                        return msg->len;
-               if (stop) {
-                       w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
-                       w |= DAVINCI_I2C_MDR_STP;
-                       davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
-               }
+               w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
+               w |= DAVINCI_I2C_MDR_STP;
+               davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
                return -EREMOTEIO;
        }
        return -EIO;
index edca99dbba23dcd042a54bd6c7a5b5ad13e0b41e..23628b7bfb8d8df208c6e434efb95e887dfad6e6 100644 (file)
@@ -359,7 +359,7 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
        }
 
        /* Configure Tx/Rx FIFO threshold levels */
-       dw_writel(dev, dev->tx_fifo_depth - 1, DW_IC_TX_TL);
+       dw_writel(dev, dev->tx_fifo_depth / 2, DW_IC_TX_TL);
        dw_writel(dev, 0, DW_IC_RX_TL);
 
        /* configure the i2c master */
index 26942c159de1252c867d1b63ba7f6820c60c1402..277a2288d4a86285c3335ba19fdabf01cd85aa74 100644 (file)
@@ -922,14 +922,12 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
                if (stat & OMAP_I2C_STAT_NACK) {
                        err |= OMAP_I2C_STAT_NACK;
                        omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
-                       break;
                }
 
                if (stat & OMAP_I2C_STAT_AL) {
                        dev_err(dev->dev, "Arbitration lost\n");
                        err |= OMAP_I2C_STAT_AL;
                        omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
-                       break;
                }
 
                /*
@@ -954,11 +952,13 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
                        if (dev->fifo_size)
                                num_bytes = dev->buf_len;
 
-                       omap_i2c_receive_data(dev, num_bytes, true);
-
-                       if (dev->errata & I2C_OMAP_ERRATA_I207)
+                       if (dev->errata & I2C_OMAP_ERRATA_I207) {
                                i2c_omap_errata_i207(dev, stat);
+                               num_bytes = (omap_i2c_read_reg(dev,
+                                       OMAP_I2C_BUFSTAT_REG) >> 8) & 0x3F;
+                       }
 
+                       omap_i2c_receive_data(dev, num_bytes, true);
                        omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
                        continue;
                }
index bc203485716d870205a22d38add99ad6c1a9ea1e..8afa28e4570ed099bb3fb9fc4b2d7e1c1a5ba9d6 100644 (file)
@@ -421,7 +421,7 @@ static int evdev_open(struct inode *inode, struct file *file)
 
  err_free_client:
        evdev_detach_client(evdev, client);
-       kfree(client);
+       kvfree(client);
        return error;
 }
 
index dd5112265cc9f20d138ae7d70afedbc8476ffacf..d0a1261eb1ba7295f8fd9c8712b02a006699973a 100644 (file)
@@ -152,6 +152,18 @@ config OMAP_IOMMU_DEBUG
 
          Say N unless you know you need this.
 
+config ROCKCHIP_IOMMU
+       bool "Rockchip IOMMU Support"
+       depends on ARCH_ROCKCHIP
+       select IOMMU_API
+       select ARM_DMA_USE_IOMMU
+       help
+         Support for IOMMUs found on Rockchip rk32xx SoCs.
+         These IOMMUs allow virtualization of the address space used by most
+         cores within the multimedia subsystem.
+         Say Y here if you are using a Rockchip SoC that includes an IOMMU
+         device.
+
 config TEGRA_IOMMU_GART
        bool "Tegra GART IOMMU Support"
        depends on ARCH_TEGRA_2x_SOC
index 16edef74b8ee6373d281a34ed5c7ae9c56fc2437..3e47ef35a35f552d17e963996641d3e330f974ef 100644 (file)
@@ -13,6 +13,7 @@ obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o
 obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
+obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
 obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
 obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
 obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
new file mode 100644 (file)
index 0000000..b2023af
--- /dev/null
@@ -0,0 +1,1038 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/* MMU register offsets */
+#define RK_MMU_DTE_ADDR                0x00    /* Directory table address */
+#define RK_MMU_STATUS          0x04
+#define RK_MMU_COMMAND         0x08
+#define RK_MMU_PAGE_FAULT_ADDR 0x0C    /* IOVA of last page fault */
+#define RK_MMU_ZAP_ONE_LINE    0x10    /* Shootdown one IOTLB entry */
+#define RK_MMU_INT_RAWSTAT     0x14    /* IRQ status ignoring mask */
+#define RK_MMU_INT_CLEAR       0x18    /* Acknowledge and re-arm irq */
+#define RK_MMU_INT_MASK                0x1C    /* IRQ enable */
+#define RK_MMU_INT_STATUS      0x20    /* IRQ status after masking */
+#define RK_MMU_AUTO_GATING     0x24
+
+#define DTE_ADDR_DUMMY         0xCAFEBABE
+#define FORCE_RESET_TIMEOUT    100     /* ms */
+
+/* RK_MMU_STATUS fields */
+#define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
+#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
+#define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
+#define RK_MMU_STATUS_IDLE                 BIT(3)
+#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
+#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
+#define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)
+
+/* RK_MMU_COMMAND command values */
+#define RK_MMU_CMD_ENABLE_PAGING    0  /* Enable memory translation */
+#define RK_MMU_CMD_DISABLE_PAGING   1  /* Disable memory translation */
+#define RK_MMU_CMD_ENABLE_STALL     2  /* Stall paging to allow other cmds */
+#define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stalling, re-enable paging */
+#define RK_MMU_CMD_ZAP_CACHE        4  /* Shoot down entire IOTLB */
+#define RK_MMU_CMD_PAGE_FAULT_DONE  5  /* Clear page fault */
+#define RK_MMU_CMD_FORCE_RESET      6  /* Reset all registers */
+
+/* RK_MMU_INT_* register fields */
+#define RK_MMU_IRQ_PAGE_FAULT    0x01  /* page fault */
+#define RK_MMU_IRQ_BUS_ERROR     0x02  /* bus read error */
+#define RK_MMU_IRQ_MASK          (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)
+
+#define NUM_DT_ENTRIES 1024
+#define NUM_PT_ENTRIES 1024
+
+#define SPAGE_ORDER 12
+#define SPAGE_SIZE (1 << SPAGE_ORDER)
+
+/*
+ * Support mapping any size that fits in one page table:
+ *   4 KiB to 4 MiB
+ */
+#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
+
+#define IOMMU_REG_POLL_COUNT_FAST 1000
+
+struct rk_iommu_domain {
+       struct list_head iommus;
+       u32 *dt; /* page directory table */
+       spinlock_t iommus_lock; /* lock for iommus list */
+       spinlock_t dt_lock; /* lock for modifying page directory table */
+};
+
+struct rk_iommu {
+       struct device *dev;
+       void __iomem *base;
+       int irq;
+       struct list_head node; /* entry in rk_iommu_domain.iommus */
+       struct iommu_domain *domain; /* domain to which iommu is attached */
+};
+
+static inline void rk_table_flush(u32 *va, unsigned int count)
+{
+       phys_addr_t pa_start = virt_to_phys(va);
+       phys_addr_t pa_end = virt_to_phys(va + count);
+       size_t size = pa_end - pa_start;
+
+       __cpuc_flush_dcache_area(va, size);
+       outer_flush_range(pa_start, pa_end);
+}
+
+/*
+ * Inspired by _wait_for in intel_drv.h
+ * This is NOT safe for use in interrupt context.
+ *
+ * Note that it's important that we check the condition again after having
+ * timed out, since the timeout could be due to preemption or similar and
+ * we've never had a chance to check the condition before the timeout.
+ */
+#define rk_wait_for(COND, MS) ({ \
+       unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;   \
+       int ret__ = 0;                                                  \
+       while (!(COND)) {                                               \
+               if (time_after(jiffies, timeout__)) {                   \
+                       ret__ = (COND) ? 0 : -ETIMEDOUT;                \
+                       break;                                          \
+               }                                                       \
+               usleep_range(50, 100);                                  \
+       }                                                               \
+       ret__;                                                          \
+})
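Usage, as in rk_iommu_enable_stall() further down:

	ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);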
+
+/*
+ * The Rockchip rk3288 iommu uses a 2-level page table.
+ * The first level is the "Directory Table" (DT).
+ * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
+ * to a "Page Table".
+ * The second level is the 1024 Page Tables (PT).
+ * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
+ * a 4 KB page of physical memory.
+ *
+ * The DT and each PT fits in a single 4 KB page (4-bytes * 1024 entries).
+ * Each iommu device has a MMU_DTE_ADDR register that contains the physical
+ * address of the start of the DT page.
+ *
+ * The structure of the page table is as follows:
+ *
+ *                   DT
+ * MMU_DTE_ADDR -> +-----+
+ *                 |     |
+ *                 +-----+     PT
+ *                 | DTE | -> +-----+
+ *                 +-----+    |     |     Memory
+ *                 |     |    +-----+     Page
+ *                 |     |    | PTE | -> +-----+
+ *                 +-----+    +-----+    |     |
+ *                            |     |    |     |
+ *                            |     |    |     |
+ *                            +-----+    |     |
+ *                                       |     |
+ *                                       |     |
+ *                                       +-----+
+ */
+
+/*
+ * Each DTE has a PT address and a valid bit:
+ * +---------------------+-----------+-+
+ * | PT address          | Reserved  |V|
+ * +---------------------+-----------+-+
+ *  31:12 - PT address (PTs always starts on a 4 KB boundary)
+ *  11: 1 - Reserved
+ *      0 - 1 if PT @ PT address is valid
+ */
+#define RK_DTE_PT_ADDRESS_MASK    0xfffff000
+#define RK_DTE_PT_VALID           BIT(0)
+
+static inline phys_addr_t rk_dte_pt_address(u32 dte)
+{
+       return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
+}
+
+static inline bool rk_dte_is_pt_valid(u32 dte)
+{
+       return dte & RK_DTE_PT_VALID;
+}
+
+static u32 rk_mk_dte(u32 *pt)
+{
+       phys_addr_t pt_phys = virt_to_phys(pt);
+       return (pt_phys & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
+}
+
+/*
+ * Each PTE has a Page address, some flags and a valid bit:
+ * +---------------------+---+-------+-+
+ * | Page address        |Rsv| Flags |V|
+ * +---------------------+---+-------+-+
+ *  31:12 - Page address (Pages always start on a 4 KB boundary)
+ *  11: 9 - Reserved
+ *   8: 1 - Flags
+ *      8 - Read allocate - allocate cache space on read misses
+ *      7 - Read cache - enable cache & prefetch of data
+ *      6 - Write buffer - enable delaying writes on their way to memory
+ *      5 - Write allocate - allocate cache space on write misses
+ *      4 - Write cache - different writes can be merged together
+ *      3 - Override cache attributes
+ *          if 1, bits 4-8 control cache attributes
+ *          if 0, the system bus defaults are used
+ *      2 - Writable
+ *      1 - Readable
+ *      0 - 1 if Page @ Page address is valid
+ */
+#define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
+#define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
+#define RK_PTE_PAGE_WRITABLE      BIT(2)
+#define RK_PTE_PAGE_READABLE      BIT(1)
+#define RK_PTE_PAGE_VALID         BIT(0)
+
+static inline phys_addr_t rk_pte_page_address(u32 pte)
+{
+       return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
+}
+
+static inline bool rk_pte_is_page_valid(u32 pte)
+{
+       return pte & RK_PTE_PAGE_VALID;
+}
+
+/* TODO: set cache flags per prot IOMMU_CACHE */
+static u32 rk_mk_pte(phys_addr_t page, int prot)
+{
+       u32 flags = 0;
+       flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
+       flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
+       page &= RK_PTE_PAGE_ADDRESS_MASK;
+       return page | flags | RK_PTE_PAGE_VALID;
+}
+
+static u32 rk_mk_pte_invalid(u32 pte)
+{
+       return pte & ~RK_PTE_PAGE_VALID;
+}
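A worked example of the encoding, using an arbitrary page address:

	/*
	 * rk_mk_pte(0x12345000, IOMMU_READ | IOMMU_WRITE) == 0x12345007:
	 * page address 0x12345000, writable (bit 2), readable (bit 1),
	 * valid (bit 0); rk_mk_pte_invalid() just clears bit 0 again.
	 */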
+
+/*
+ * rk3288 iova (IOMMU Virtual Address) format
+ *  31       22.21       12.11          0
+ * +-----------+-----------+-------------+
+ * | DTE index | PTE index | Page offset |
+ * +-----------+-----------+-------------+
+ *  31:22 - DTE index   - index of DTE in DT
+ *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
+ *  11: 0 - Page offset - offset into page @ PTE.page_address
+ */
+#define RK_IOVA_DTE_MASK    0xffc00000
+#define RK_IOVA_DTE_SHIFT   22
+#define RK_IOVA_PTE_MASK    0x003ff000
+#define RK_IOVA_PTE_SHIFT   12
+#define RK_IOVA_PAGE_MASK   0x00000fff
+#define RK_IOVA_PAGE_SHIFT  0
+
+static u32 rk_iova_dte_index(dma_addr_t iova)
+{
+       return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
+}
+
+static u32 rk_iova_pte_index(dma_addr_t iova)
+{
+       return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
+}
+
+static u32 rk_iova_page_offset(dma_addr_t iova)
+{
+       return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
+}
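A worked decomposition of an arbitrary sample address:

	/*
	 * For iova 0x12345678:
	 *   rk_iova_dte_index()   == 0x048 (bits 31:22)
	 *   rk_iova_pte_index()   == 0x345 (bits 21:12)
	 *   rk_iova_page_offset() == 0x678 (bits 11:0)
	 * i.e. the walk reads dt[0x048], then entry 0x345 of the page
	 * table it points to, and adds 0x678 to the PTE's page address.
	 */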
+
+static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset)
+{
+       return readl(iommu->base + offset);
+}
+
+static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value)
+{
+       writel(value, iommu->base + offset);
+}
+
+static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
+{
+       writel(command, iommu->base + RK_MMU_COMMAND);
+}
+
+static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
+                              size_t size)
+{
+       dma_addr_t iova_end = iova + size;
+       /*
+        * TODO(djkurtz): Figure out when it is more efficient to shootdown the
+        * entire iotlb rather than iterate over individual iovas.
+        */
+       for (; iova < iova_end; iova += SPAGE_SIZE)
+               rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova);
+}
+
+static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
+{
+       return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE;
+}
+
+static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
+{
+       return rk_iommu_read(iommu, RK_MMU_STATUS) &
+                            RK_MMU_STATUS_PAGING_ENABLED;
+}
+
+static int rk_iommu_enable_stall(struct rk_iommu *iommu)
+{
+       int ret;
+
+       if (rk_iommu_is_stall_active(iommu))
+               return 0;
+
+       /* Stall can only be enabled if paging is enabled */
+       if (!rk_iommu_is_paging_enabled(iommu))
+               return 0;
+
+       rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
+
+       ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
+       if (ret)
+               dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
+                       rk_iommu_read(iommu, RK_MMU_STATUS));
+
+       return ret;
+}
+
+static int rk_iommu_disable_stall(struct rk_iommu *iommu)
+{
+       int ret;
+
+       if (!rk_iommu_is_stall_active(iommu))
+               return 0;
+
+       rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
+
+       ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
+       if (ret)
+               dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
+                       rk_iommu_read(iommu, RK_MMU_STATUS));
+
+       return ret;
+}
+
+static int rk_iommu_enable_paging(struct rk_iommu *iommu)
+{
+       int ret;
+
+       if (rk_iommu_is_paging_enabled(iommu))
+               return 0;
+
+       rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
+
+       ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
+       if (ret)
+               dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
+                       rk_iommu_read(iommu, RK_MMU_STATUS));
+
+       return ret;
+}
+
+static int rk_iommu_disable_paging(struct rk_iommu *iommu)
+{
+       int ret;
+
+       if (!rk_iommu_is_paging_enabled(iommu))
+               return 0;
+
+       rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
+
+       ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
+       if (ret)
+               dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
+                       rk_iommu_read(iommu, RK_MMU_STATUS));
+
+       return ret;
+}
+
+static int rk_iommu_force_reset(struct rk_iommu *iommu)
+{
+       int ret;
+       u32 dte_addr;
+
+       /*
+        * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
+        * and verifying that the upper 5 nybbles are read back.
+        */
+       rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
+
+       dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
+       if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
+               dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
+               return -EFAULT;
+       }
+
+       rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
+
+       ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000,
+                         FORCE_RESET_TIMEOUT);
+       if (ret)
+               dev_err(iommu->dev, "FORCE_RESET command timed out\n");
+
+       return ret;
+}
+
+static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
+{
+       u32 dte_index, pte_index, page_offset;
+       u32 mmu_dte_addr;
+       phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
+       u32 *dte_addr;
+       u32 dte;
+       phys_addr_t pte_addr_phys = 0;
+       u32 *pte_addr = NULL;
+       u32 pte = 0;
+       phys_addr_t page_addr_phys = 0;
+       u32 page_flags = 0;
+
+       dte_index = rk_iova_dte_index(iova);
+       pte_index = rk_iova_pte_index(iova);
+       page_offset = rk_iova_page_offset(iova);
+
+       mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
+       mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
+
+       dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
+       dte_addr = phys_to_virt(dte_addr_phys);
+       dte = *dte_addr;
+
+       if (!rk_dte_is_pt_valid(dte))
+               goto print_it;
+
+       pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
+       pte_addr = phys_to_virt(pte_addr_phys);
+       pte = *pte_addr;
+
+       if (!rk_pte_is_page_valid(pte))
+               goto print_it;
+
+       page_addr_phys = rk_pte_page_address(pte) + page_offset;
+       page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
+
+print_it:
+       dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
+               &iova, dte_index, pte_index, page_offset);
+       dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
+               &mmu_dte_addr_phys, &dte_addr_phys, dte,
+               rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
+               rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
+}
+
+static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
+{
+       struct rk_iommu *iommu = dev_id;
+       u32 status;
+       u32 int_status;
+       dma_addr_t iova;
+
+       int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS);
+       if (int_status == 0)
+               return IRQ_NONE;
+
+       iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR);
+
+       if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
+               int flags;
+
+               status = rk_iommu_read(iommu, RK_MMU_STATUS);
+               flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
+                               IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+
+               dev_err(iommu->dev, "Page fault at %pad of type %s\n",
+                       &iova,
+                       (flags == IOMMU_FAULT_WRITE) ? "write" : "read");
+
+               log_iova(iommu, iova);
+
+               /*
+                * Report page fault to any installed handlers.
+                * Ignore the return code, though, since we always zap cache
+                * and clear the page fault anyway.
+                */
+               if (iommu->domain)
+                       report_iommu_fault(iommu->domain, iommu->dev, iova,
+                                          flags);
+               else
+                       dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
+
+               rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
+               rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE);
+       }
+
+       if (int_status & RK_MMU_IRQ_BUS_ERROR)
+               dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
+
+       if (int_status & ~RK_MMU_IRQ_MASK)
+               dev_err(iommu->dev, "unexpected int_status: %#08x\n",
+                       int_status);
+
+       rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status);
+
+       return IRQ_HANDLED;
+}
+
+static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
+                                        dma_addr_t iova)
+{
+       struct rk_iommu_domain *rk_domain = domain->priv;
+       unsigned long flags;
+       phys_addr_t pt_phys, phys = 0;
+       u32 dte, pte;
+       u32 *page_table;
+
+       spin_lock_irqsave(&rk_domain->dt_lock, flags);
+
+       dte = rk_domain->dt[rk_iova_dte_index(iova)];
+       if (!rk_dte_is_pt_valid(dte))
+               goto out;
+
+       pt_phys = rk_dte_pt_address(dte);
+       page_table = (u32 *)phys_to_virt(pt_phys);
+       pte = page_table[rk_iova_pte_index(iova)];
+       if (!rk_pte_is_page_valid(pte))
+               goto out;
+
+       phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
+out:
+       spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+
+       return phys;
+}
+
+static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
+                             dma_addr_t iova, size_t size)
+{
+       struct list_head *pos;
+       unsigned long flags;
+
+       /* shoot down this iova range on all iommus using this domain */
+       spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+       list_for_each(pos, &rk_domain->iommus) {
+               struct rk_iommu *iommu;
+               iommu = list_entry(pos, struct rk_iommu, node);
+               rk_iommu_zap_lines(iommu, iova, size);
+       }
+       spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+}
+
+static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
+                                 dma_addr_t iova)
+{
+       u32 *page_table, *dte_addr;
+       u32 dte;
+       phys_addr_t pt_phys;
+
+       assert_spin_locked(&rk_domain->dt_lock);
+
+       dte_addr = &rk_domain->dt[rk_iova_dte_index(iova)];
+       dte = *dte_addr;
+       if (rk_dte_is_pt_valid(dte))
+               goto done;
+
+       page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+       if (!page_table)
+               return ERR_PTR(-ENOMEM);
+
+       dte = rk_mk_dte(page_table);
+       *dte_addr = dte;
+
+       rk_table_flush(page_table, NUM_PT_ENTRIES);
+       rk_table_flush(dte_addr, 1);
+
+       /*
+        * Zap the first iova of the newly allocated page table so the iommu
+        * evicts any stale cached value of the new dte from its iotlb.
+        */
+       rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
+
+done:
+       pt_phys = rk_dte_pt_address(dte);
+       return (u32 *)phys_to_virt(pt_phys);
+}
+
+static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
+                                 u32 *pte_addr, dma_addr_t iova, size_t size)
+{
+       unsigned int pte_count;
+       unsigned int pte_total = size / SPAGE_SIZE;
+
+       assert_spin_locked(&rk_domain->dt_lock);
+
+       for (pte_count = 0; pte_count < pte_total; pte_count++) {
+               u32 pte = pte_addr[pte_count];
+               if (!rk_pte_is_page_valid(pte))
+                       break;
+
+               pte_addr[pte_count] = rk_mk_pte_invalid(pte);
+       }
+
+       rk_table_flush(pte_addr, pte_count);
+
+       return pte_count * SPAGE_SIZE;
+}
+
+static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
+                            dma_addr_t iova, phys_addr_t paddr, size_t size,
+                            int prot)
+{
+       unsigned int pte_count;
+       unsigned int pte_total = size / SPAGE_SIZE;
+       phys_addr_t page_phys;
+
+       assert_spin_locked(&rk_domain->dt_lock);
+
+       for (pte_count = 0; pte_count < pte_total; pte_count++) {
+               u32 pte = pte_addr[pte_count];
+
+               if (rk_pte_is_page_valid(pte))
+                       goto unwind;
+
+               pte_addr[pte_count] = rk_mk_pte(paddr, prot);
+
+               paddr += SPAGE_SIZE;
+       }
+
+       rk_table_flush(pte_addr, pte_count);
+
+       return 0;
+unwind:
+       /* Unmap the range of iovas that we just mapped */
+       rk_iommu_unmap_iova(rk_domain, pte_addr, iova, pte_count * SPAGE_SIZE);
+
+       iova += pte_count * SPAGE_SIZE;
+       page_phys = rk_pte_page_address(pte_addr[pte_count]);
+       pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
+              &iova, &page_phys, &paddr, prot);
+
+       return -EADDRINUSE;
+}
+
+static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
+                       phys_addr_t paddr, size_t size, int prot)
+{
+       struct rk_iommu_domain *rk_domain = domain->priv;
+       unsigned long flags;
+       dma_addr_t iova = (dma_addr_t)_iova;
+       u32 *page_table, *pte_addr;
+       int ret;
+
+       spin_lock_irqsave(&rk_domain->dt_lock, flags);
+
+       /*
+        * pgsize_bitmap specifies iova sizes that fit in one page table
+        * (1024 4-KiB pages = 4 MiB).
+        * So, size will always be 4096 <= size <= 4194304.
+        * Since iommu_map() guarantees that both iova and size will be
+        * aligned, we will always only be mapping from a single dte here.
+        */
+       page_table = rk_dte_get_page_table(rk_domain, iova);
+       if (IS_ERR(page_table)) {
+               spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+               return PTR_ERR(page_table);
+       }
+
+       pte_addr = &page_table[rk_iova_pte_index(iova)];
+       ret = rk_iommu_map_iova(rk_domain, pte_addr, iova, paddr, size, prot);
+       spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+
+       return ret;
+}
+
+static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
+                            size_t size)
+{
+       struct rk_iommu_domain *rk_domain = domain->priv;
+       unsigned long flags;
+       dma_addr_t iova = (dma_addr_t)_iova;
+       phys_addr_t pt_phys;
+       u32 dte;
+       u32 *pte_addr;
+       size_t unmap_size;
+
+       spin_lock_irqsave(&rk_domain->dt_lock, flags);
+
+       /*
+        * pgsize_bitmap specifies iova sizes that fit in one page table
+        * (1024 4-KiB pages = 4 MiB).
+        * So, size will always be 4096 <= size <= 4194304.
+        * Since iommu_unmap() guarantees that both iova and size will be
+        * aligned, we will always only be unmapping from a single dte here.
+        */
+       dte = rk_domain->dt[rk_iova_dte_index(iova)];
+       /* Just return 0 if iova is unmapped */
+       if (!rk_dte_is_pt_valid(dte)) {
+               spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+               return 0;
+       }
+
+       pt_phys = rk_dte_pt_address(dte);
+       pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
+       unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, iova, size);
+
+       spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+
+       /* Shootdown iotlb entries for iova range that was just unmapped */
+       rk_iommu_zap_iova(rk_domain, iova, unmap_size);
+
+       return unmap_size;
+}
+
+static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
+{
+       struct iommu_group *group;
+       struct device *iommu_dev;
+       struct rk_iommu *rk_iommu;
+
+       group = iommu_group_get(dev);
+       if (!group)
+               return NULL;
+       iommu_dev = iommu_group_get_iommudata(group);
+       rk_iommu = dev_get_drvdata(iommu_dev);
+       iommu_group_put(group);
+
+       return rk_iommu;
+}
+
+static int rk_iommu_attach_device(struct iommu_domain *domain,
+                                 struct device *dev)
+{
+       struct rk_iommu *iommu;
+       struct rk_iommu_domain *rk_domain = domain->priv;
+       unsigned long flags;
+       int ret;
+       phys_addr_t dte_addr;
+
+       /*
+        * Allow 'virtual devices' (e.g., drm) to attach to domain.
+        * Such a device does not belong to an iommu group.
+        */
+       iommu = rk_iommu_from_dev(dev);
+       if (!iommu)
+               return 0;
+
+       ret = rk_iommu_enable_stall(iommu);
+       if (ret)
+               return ret;
+
+       ret = rk_iommu_force_reset(iommu);
+       if (ret)
+               return ret;
+
+       iommu->domain = domain;
+
+       ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq,
+                              IRQF_SHARED, dev_name(dev), iommu);
+       if (ret)
+               return ret;
+
+       dte_addr = virt_to_phys(rk_domain->dt);
+       rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr);
+       rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
+       rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+
+       ret = rk_iommu_enable_paging(iommu);
+       if (ret)
+               return ret;
+
+       spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+       list_add_tail(&iommu->node, &rk_domain->iommus);
+       spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
+       dev_info(dev, "Attached to iommu domain\n");
+
+       rk_iommu_disable_stall(iommu);
+
+       return 0;
+}
+
+static void rk_iommu_detach_device(struct iommu_domain *domain,
+                                  struct device *dev)
+{
+       struct rk_iommu *iommu;
+       struct rk_iommu_domain *rk_domain = domain->priv;
+       unsigned long flags;
+
+       /* Allow 'virtual devices' (e.g., drm) to detach from domain */
+       iommu = rk_iommu_from_dev(dev);
+       if (!iommu)
+               return;
+
+       spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+       list_del_init(&iommu->node);
+       spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
+       /* Ignore error while disabling, just keep going */
+       rk_iommu_enable_stall(iommu);
+       rk_iommu_disable_paging(iommu);
+       rk_iommu_write(iommu, RK_MMU_INT_MASK, 0);
+       rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0);
+       rk_iommu_disable_stall(iommu);
+
+       devm_free_irq(dev, iommu->irq, iommu);
+
+       iommu->domain = NULL;
+
+       dev_info(dev, "Detached from iommu domain\n");
+}
+
+static int rk_iommu_domain_init(struct iommu_domain *domain)
+{
+       struct rk_iommu_domain *rk_domain;
+
+       rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
+       if (!rk_domain)
+               return -ENOMEM;
+
+       /*
+        * rk32xx iommus use a two-level pagetable.
+        * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
+        * Allocate one 4 KiB page for each table.
+        */
+       rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
+       if (!rk_domain->dt)
+               goto err_dt;
+
+       rk_table_flush(rk_domain->dt, NUM_DT_ENTRIES);
+
+       spin_lock_init(&rk_domain->iommus_lock);
+       spin_lock_init(&rk_domain->dt_lock);
+       INIT_LIST_HEAD(&rk_domain->iommus);
+
+       domain->priv = rk_domain;
+
+       return 0;
+err_dt:
+       kfree(rk_domain);
+       return -ENOMEM;
+}
+
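
A quick sanity check on the sizes claimed in the comment above: 1024 entries of 4 bytes put the dt (and each pt) at exactly one 4 KiB page, and a full two-level walk spans 1024 x 1024 x 4 KiB = 4 GiB of iova space, matching the 32-bit dma_addr_t the hardware walks; GFP_DMA32 keeps the dt page itself below 4 GiB so its physical address fits the DTE_ADDR register. A compile-time sketch of that arithmetic, with hypothetical local constants standing in for the driver's:

#include <stdint.h>

#define NUM_DT_ENTRIES 1024   /* same name as the driver's constant */
#define NUM_PT_ENTRIES 1024   /* hypothetical; per the comment above */
#define SPAGE_SIZE     4096   /* 4 KiB small page */

_Static_assert(NUM_DT_ENTRIES * 4 == 4096, "dt fits in one 4 KiB page");
_Static_assert(NUM_PT_ENTRIES * 4 == 4096, "each pt fits in one 4 KiB page");
_Static_assert((uint64_t)NUM_DT_ENTRIES * NUM_PT_ENTRIES * SPAGE_SIZE ==
	       (uint64_t)1 << 32, "a full walk covers a 4 GiB iova space");

int main(void) { return 0; }
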
+static void rk_iommu_domain_destroy(struct iommu_domain *domain)
+{
+       struct rk_iommu_domain *rk_domain = domain->priv;
+       int i;
+
+       WARN_ON(!list_empty(&rk_domain->iommus));
+
+       for (i = 0; i < NUM_DT_ENTRIES; i++) {
+               u32 dte = rk_domain->dt[i];
+               if (rk_dte_is_pt_valid(dte)) {
+                       phys_addr_t pt_phys = rk_dte_pt_address(dte);
+                       u32 *page_table = phys_to_virt(pt_phys);
+                       free_page((unsigned long)page_table);
+               }
+       }
+
+       free_page((unsigned long)rk_domain->dt);
+       kfree(domain->priv);
+       domain->priv = NULL;
+}
+
+static bool rk_iommu_is_dev_iommu_master(struct device *dev)
+{
+       struct device_node *np = dev->of_node;
+       int ret;
+
+       /*
+        * An iommu master has an iommus property containing a list of phandles
+        * to iommu nodes, each with an #iommu-cells property with value 0.
+        */
+       ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
+       return (ret > 0);
+}
+
+static int rk_iommu_group_set_iommudata(struct iommu_group *group,
+                                       struct device *dev)
+{
+       struct device_node *np = dev->of_node;
+       struct platform_device *pd;
+       int ret;
+       struct of_phandle_args args;
+
+       /*
+        * An iommu master has an iommus property containing a list of phandles
+        * to iommu nodes, each with an #iommu-cells property with value 0.
+        */
+       ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
+                                        &args);
+       if (ret) {
+               dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n",
+                       np->full_name, ret);
+               return ret;
+       }
+       if (args.args_count != 0) {
+               dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n",
+                       args.np->full_name, args.args_count);
+               return -EINVAL;
+       }
+
+       pd = of_find_device_by_node(args.np);
+       if (!pd) {
+               dev_err(dev, "iommu %s not found\n", args.np->full_name);
+               of_node_put(args.np);
+               return -EPROBE_DEFER;
+       }
+       of_node_put(args.np);
+
+       /* TODO(djkurtz): handle multiple slave iommus for a single master */
+       iommu_group_set_iommudata(group, &pd->dev, NULL);
+
+       return 0;
+}
+
+static int rk_iommu_add_device(struct device *dev)
+{
+       struct iommu_group *group;
+       int ret;
+
+       if (!rk_iommu_is_dev_iommu_master(dev))
+               return -ENODEV;
+
+       group = iommu_group_get(dev);
+       if (!group) {
+               group = iommu_group_alloc();
+               if (IS_ERR(group)) {
+                       dev_err(dev, "Failed to allocate IOMMU group\n");
+                       return PTR_ERR(group);
+               }
+       }
+
+       ret = iommu_group_add_device(group, dev);
+       if (ret)
+               goto err_put_group;
+
+       ret = rk_iommu_group_set_iommudata(group, dev);
+       if (ret)
+               goto err_remove_device;
+
+       iommu_group_put(group);
+
+       return 0;
+
+err_remove_device:
+       iommu_group_remove_device(dev);
+err_put_group:
+       iommu_group_put(group);
+       return ret;
+}
+
+static void rk_iommu_remove_device(struct device *dev)
+{
+       if (!rk_iommu_is_dev_iommu_master(dev))
+               return;
+
+       iommu_group_remove_device(dev);
+}
+
+static const struct iommu_ops rk_iommu_ops = {
+       .domain_init = rk_iommu_domain_init,
+       .domain_destroy = rk_iommu_domain_destroy,
+       .attach_dev = rk_iommu_attach_device,
+       .detach_dev = rk_iommu_detach_device,
+       .map = rk_iommu_map,
+       .unmap = rk_iommu_unmap,
+       .add_device = rk_iommu_add_device,
+       .remove_device = rk_iommu_remove_device,
+       .iova_to_phys = rk_iommu_iova_to_phys,
+       .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
+};
+
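
These callbacks are not invoked directly; once rk_iommu_init() below registers them with bus_set_iommu(), they are reached through the generic IOMMU API. A hedged sketch of a consumer of that API as it looked in this era, with an arbitrary iova, example prot flags, and error handling trimmed:

#include <linux/iommu.h>
#include <linux/platform_device.h>

static int example_use(struct device *master, phys_addr_t buf, size_t len)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&platform_bus_type); /* -> rk_iommu_domain_init() */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, master);       /* -> rk_iommu_attach_device() */
	if (ret)
		goto free_domain;

	ret = iommu_map(domain, 0x10000000, buf, len,    /* -> rk_iommu_map() */
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto detach;

	/* ... the master can now DMA through iova 0x10000000 ... */

	iommu_unmap(domain, 0x10000000, len);            /* -> rk_iommu_unmap() */
detach:
	iommu_detach_device(domain, master);             /* -> rk_iommu_detach_device() */
free_domain:
	iommu_domain_free(domain);                       /* -> rk_iommu_domain_destroy() */
	return ret;
}
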
+static int rk_iommu_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct rk_iommu *iommu;
+       struct resource *res;
+
+       iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
+       if (!iommu)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, iommu);
+       iommu->dev = dev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       iommu->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(iommu->base))
+               return PTR_ERR(iommu->base);
+
+       iommu->irq = platform_get_irq(pdev, 0);
+       if (iommu->irq < 0) {
+               dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq);
+               return -ENXIO;
+       }
+
+       return 0;
+}
+
+static int rk_iommu_remove(struct platform_device *pdev)
+{
+       return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id rk_iommu_dt_ids[] = {
+       { .compatible = "rockchip,iommu" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
+#endif
+
+static struct platform_driver rk_iommu_driver = {
+       .probe = rk_iommu_probe,
+       .remove = rk_iommu_remove,
+       .driver = {
+                  .name = "rk_iommu",
+                  .owner = THIS_MODULE,
+                  .of_match_table = of_match_ptr(rk_iommu_dt_ids),
+       },
+};
+
+static int __init rk_iommu_init(void)
+{
+       int ret;
+
+       ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
+       if (ret)
+               return ret;
+
+       return platform_driver_register(&rk_iommu_driver);
+}
+static void __exit rk_iommu_exit(void)
+{
+       platform_driver_unregister(&rk_iommu_driver);
+}
+
+subsys_initcall(rk_iommu_init);
+module_exit(rk_iommu_exit);
+
+MODULE_DESCRIPTION("IOMMU API for Rockchip");
+MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
+MODULE_ALIAS("platform:rockchip-iommu");
+MODULE_LICENSE("GPL v2");
index 932ed9be9ff3d756fb35948e8183e09faaa60c34..b10aaeda2bb4599576849a8a5b786dcc2542aa49 100644 (file)
@@ -2190,7 +2190,7 @@ static int smiapp_set_selection(struct v4l2_subdev *subdev,
                ret = smiapp_set_compose(subdev, fh, sel);
                break;
        default:
-               BUG();
+               ret = -EINVAL;
        }
 
        mutex_unlock(&sensor->mutex);
index 331eddac7222ae7f62fb757148f90c8881b5ef4c..3bd386c371f74ab260b00cac9dcfaa9eadecc962 100644 (file)
@@ -1078,7 +1078,7 @@ static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
        for (line = 0; line < lines; line++) {
                while (offset && offset >= sg_dma_len(sg)) {
                        offset -= sg_dma_len(sg);
-                       sg++;
+                       sg = sg_next(sg);
                }
 
                if (lpi && line > 0 && !(line % lpi))
@@ -1101,14 +1101,14 @@ static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
                        *(rp++) = cpu_to_le32(0); /* bits 63-32 */
                        todo -= (sg_dma_len(sg)-offset);
                        offset = 0;
-                       sg++;
+                       sg = sg_next(sg);
                        while (todo > sg_dma_len(sg)) {
                                *(rp++) = cpu_to_le32(RISC_WRITE|
                                                    sg_dma_len(sg));
                                *(rp++) = cpu_to_le32(sg_dma_address(sg));
                                *(rp++) = cpu_to_le32(0); /* bits 63-32 */
                                todo -= sg_dma_len(sg);
-                               sg++;
+                               sg = sg_next(sg);
                        }
                        *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
                        *(rp++) = cpu_to_le32(sg_dma_address(sg));
index 172583d736fef31c30ab7338405e3d221d17211b..8cbe6b49f4c238de365b0a231eef8f4dff2425ff 100644 (file)
@@ -105,11 +105,8 @@ static irqreturn_t solo_isr(int irq, void *data)
        if (!status)
                return IRQ_NONE;
 
-       if (status & ~solo_dev->irq_mask) {
-               solo_reg_write(solo_dev, SOLO_IRQ_STAT,
-                              status & ~solo_dev->irq_mask);
-               status &= solo_dev->irq_mask;
-       }
+       /* Acknowledge all interrupts immediately */
+       solo_reg_write(solo_dev, SOLO_IRQ_STAT, status);
 
        if (status & SOLO_IRQ_PCI_ERR)
                solo_p2m_error_isr(solo_dev);
@@ -132,9 +129,6 @@ static irqreturn_t solo_isr(int irq, void *data)
        if (status & SOLO_IRQ_G723)
                solo_g723_isr(solo_dev);
 
-       /* Clear all interrupts handled */
-       solo_reg_write(solo_dev, SOLO_IRQ_STAT, status);
-
        return IRQ_HANDLED;
 }
 
index f1f098e22f7ea9f19b458223f226d5e73077bb2d..d16bc67af732251998fb280b86d887835cf39e1b 100644 (file)
@@ -259,8 +259,8 @@ again:
                        case 32:
                                if ((scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) {
                                        protocol = RC_TYPE_RC6_MCE;
-                                       scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
                                        toggle = !!(scancode & RC6_6A_MCE_TOGGLE_MASK);
+                                       scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
                                } else {
                                        protocol = RC_BIT_RC6_6A_32;
                                        toggle = 0;
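
The swap above matters because the old order cleared the toggle bit before sampling it, so toggle evaluated to 0 for every MCE scancode. A standalone demonstration of the order-of-operations bug; the mask and scancode values here are placeholders rather than the decoder's real ones:

#include <stdio.h>

#define MCE_TOGGLE_MASK 0x8000u  /* placeholder, not the real RC6 mask */

int main(void)
{
	unsigned int scancode = 0x800f0422u | MCE_TOGGLE_MASK;
	unsigned int stripped;
	int toggle;

	/* old order: strip first, sample second -> always 0 */
	stripped = scancode & ~MCE_TOGGLE_MASK;
	toggle = !!(stripped & MCE_TOGGLE_MASK);
	printf("old order: toggle = %d\n", toggle);  /* 0 */

	/* new order: sample first, then strip the bit */
	toggle = !!(scancode & MCE_TOGGLE_MASK);
	scancode &= ~MCE_TOGGLE_MASK;
	printf("new order: toggle = %d\n", toggle);  /* 1 */
	return 0;
}
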
index ccc00099b26144e4bd1c5b99cbfccbf266a50ad7..1c0dbf428a3af83c4460e51615232fd96a2180ab 100644 (file)
@@ -632,7 +632,7 @@ static void s2255_fillbuff(struct s2255_vc *vc,
                        break;
                case V4L2_PIX_FMT_JPEG:
                case V4L2_PIX_FMT_MJPEG:
-                       buf->vb.v4l2_buf.length = jpgsize;
+                       vb2_set_plane_payload(&buf->vb, 0, jpgsize);
                        memcpy(vbuf, tmpbuf, jpgsize);
                        break;
                case V4L2_PIX_FMT_YUV422P:
index c13d83e15ace440fc624ff6913d617fdabd2699f..45f09a66e6c96662febe3015fad41b4e5e6251bf 100644 (file)
@@ -225,7 +225,12 @@ static int bond_changelink(struct net_device *bond_dev,
 
                bond_option_arp_ip_targets_clear(bond);
                nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
-                       __be32 target = nla_get_be32(attr);
+                       __be32 target;
+
+                       if (nla_len(attr) < sizeof(target))
+                               return -EINVAL;
+
+                       target = nla_get_be32(attr);
 
                        bond_opt_initval(&newval, (__force u64)target);
                        err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
index 8520d5529df872fad60a377231f5154fcf91a4e3..279873cb6e3ac33b196d4689daeefdb7c1bf87f5 100644 (file)
@@ -2442,9 +2442,13 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
                     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
                     SUPPORTED_10000baseKX4_Full;
        else if (type == FW_PORT_TYPE_FIBER_XFI ||
-                type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
+                type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) {
                v |= SUPPORTED_FIBRE;
-       else if (type == FW_PORT_TYPE_BP40_BA)
+               if (caps & FW_PORT_CAP_SPEED_1G)
+                       v |= SUPPORTED_1000baseT_Full;
+               if (caps & FW_PORT_CAP_SPEED_10G)
+                       v |= SUPPORTED_10000baseT_Full;
+       } else if (type == FW_PORT_TYPE_BP40_BA)
                v |= SUPPORTED_40000baseSR4_Full;
 
        if (caps & FW_PORT_CAP_ANEG)
index 60e9c2cd051e98bd9a1466f32e7344c52f241572..b5db6b3f939fc00884f313829cb9c1ffff118981 100644 (file)
@@ -917,21 +917,13 @@ static int sh_eth_reset(struct net_device *ndev)
        return ret;
 }
 
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
 static void sh_eth_set_receive_align(struct sk_buff *skb)
 {
-       int reserve;
+       uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
 
-       reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
        if (reserve)
-               skb_reserve(skb, reserve);
+               skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
 }
-#else
-static void sh_eth_set_receive_align(struct sk_buff *skb)
-{
-       skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
-}
-#endif
 
 
 /* CPU <-> EDMAC endian convert */
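
The rewritten helper reserves only as many bytes as it takes to round skb->data up to the next SH_ETH_RX_ALIGN boundary, and nothing when the data is already aligned (the old SH4 variant reserved a full 32 bytes in that case); the extra SH_ETH_RX_ALIGN - 1 bytes now requested at allocation time give the reservation its headroom. A standalone check of the rounding arithmetic:

#include <stdio.h>

#define SH_ETH_RX_ALIGN 32

static unsigned long rx_align(unsigned long data)
{
	unsigned long reserve = data & (SH_ETH_RX_ALIGN - 1);

	return reserve ? data + (SH_ETH_RX_ALIGN - reserve) : data;
}

int main(void)
{
	printf("%#lx -> %#lx\n", 0x1004UL, rx_align(0x1004UL));  /* 0x1020 */
	printf("%#lx -> %#lx\n", 0x1020UL, rx_align(0x1020UL));  /* unchanged */
	return 0;
}
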
@@ -1119,6 +1111,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
        struct sh_eth_txdesc *txdesc = NULL;
        int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
        int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
+       int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
 
        mdp->cur_rx = 0;
        mdp->cur_tx = 0;
@@ -1131,21 +1124,21 @@ static void sh_eth_ring_format(struct net_device *ndev)
        for (i = 0; i < mdp->num_rx_ring; i++) {
                /* skb */
                mdp->rx_skbuff[i] = NULL;
-               skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
+               skb = netdev_alloc_skb(ndev, skbuff_size);
                mdp->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
-               dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
-                              DMA_FROM_DEVICE);
                sh_eth_set_receive_align(skb);
 
                /* RX descriptor */
                rxdesc = &mdp->rx_ring[i];
+               /* The size of the buffer is a multiple of 16 bytes. */
+               rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+               dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
+                              DMA_FROM_DEVICE);
                rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
                rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 
-               /* The size of the buffer is 16 byte boundary. */
-               rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
                /* Rx descriptor address set */
                if (i == 0) {
                        sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
@@ -1397,6 +1390,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
        struct sk_buff *skb;
        u16 pkt_len = 0;
        u32 desc_status;
+       int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
 
        rxdesc = &mdp->rx_ring[entry];
        while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
@@ -1448,7 +1442,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                        if (mdp->cd->rpadir)
                                skb_reserve(skb, NET_IP_ALIGN);
                        dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
-                                               mdp->rx_buf_sz,
+                                               ALIGN(mdp->rx_buf_sz, 16),
                                                DMA_FROM_DEVICE);
                        skb_put(skb, pkt_len);
                        skb->protocol = eth_type_trans(skb, ndev);
@@ -1468,13 +1462,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
 
                if (mdp->rx_skbuff[entry] == NULL) {
-                       skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
+                       skb = netdev_alloc_skb(ndev, skbuff_size);
                        mdp->rx_skbuff[entry] = skb;
                        if (skb == NULL)
                                break;  /* Better luck next round. */
-                       dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
-                                      DMA_FROM_DEVICE);
                        sh_eth_set_receive_align(skb);
+                       dma_map_single(&ndev->dev, skb->data,
+                                      rxdesc->buffer_length, DMA_FROM_DEVICE);
 
                        skb_checksum_none_assert(skb);
                        rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
@@ -2042,6 +2036,8 @@ static int sh_eth_open(struct net_device *ndev)
        if (ret)
                goto out_free_irq;
 
+       mdp->is_opened = 1;
+
        return ret;
 
 out_free_irq:
@@ -2131,6 +2127,36 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        return NETDEV_TX_OK;
 }
 
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+
+       if (sh_eth_is_rz_fast_ether(mdp))
+               return &ndev->stats;
+
+       if (!mdp->is_opened)
+               return &ndev->stats;
+
+       ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
+       sh_eth_write(ndev, 0, TROCR);   /* (write clear) */
+       ndev->stats.collisions += sh_eth_read(ndev, CDCR);
+       sh_eth_write(ndev, 0, CDCR);    /* (write clear) */
+       ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
+       sh_eth_write(ndev, 0, LCCR);    /* (write clear) */
+
+       if (sh_eth_is_gether(mdp)) {
+               ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
+               sh_eth_write(ndev, 0, CERCR);   /* (write clear) */
+               ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
+               sh_eth_write(ndev, 0, CEECR);   /* (write clear) */
+       } else {
+               ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
+               sh_eth_write(ndev, 0, CNDCR);   /* (write clear) */
+       }
+
+       return &ndev->stats;
+}
+
 /* device close function */
 static int sh_eth_close(struct net_device *ndev)
 {
@@ -2145,6 +2171,7 @@ static int sh_eth_close(struct net_device *ndev)
        sh_eth_write(ndev, 0, EDTRR);
        sh_eth_write(ndev, 0, EDRRR);
 
+       sh_eth_get_stats(ndev);
        /* PHY Disconnect */
        if (mdp->phydev) {
                phy_stop(mdp->phydev);
@@ -2163,36 +2190,9 @@ static int sh_eth_close(struct net_device *ndev)
 
        pm_runtime_put_sync(&mdp->pdev->dev);
 
-       return 0;
-}
-
-static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-
-       if (sh_eth_is_rz_fast_ether(mdp))
-               return &ndev->stats;
+       mdp->is_opened = 0;
 
-       pm_runtime_get_sync(&mdp->pdev->dev);
-
-       ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
-       sh_eth_write(ndev, 0, TROCR);   /* (write clear) */
-       ndev->stats.collisions += sh_eth_read(ndev, CDCR);
-       sh_eth_write(ndev, 0, CDCR);    /* (write clear) */
-       ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
-       sh_eth_write(ndev, 0, LCCR);    /* (write clear) */
-       if (sh_eth_is_gether(mdp)) {
-               ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
-               sh_eth_write(ndev, 0, CERCR);   /* (write clear) */
-               ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
-               sh_eth_write(ndev, 0, CEECR);   /* (write clear) */
-       } else {
-               ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
-               sh_eth_write(ndev, 0, CNDCR);   /* (write clear) */
-       }
-       pm_runtime_put_sync(&mdp->pdev->dev);
-
-       return &ndev->stats;
+       return 0;
 }
 
 /* ioctl to device function */
index b37c427144ee0a1010794ff2beff42bc298fd1e9..22301bf9c21daeb925d75aa7ce5c7a588d977e11 100644 (file)
@@ -162,9 +162,9 @@ enum {
 
 /* Driver's parameters */
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
-#define SH4_SKB_RX_ALIGN       32
+#define SH_ETH_RX_ALIGN                32
 #else
-#define SH2_SH3_SKB_RX_ALIGN   2
+#define SH_ETH_RX_ALIGN                2
 #endif
 
 /* Register's bits
@@ -522,6 +522,7 @@ struct sh_eth_private {
 
        unsigned no_ether_link:1;
        unsigned ether_link_active_low:1;
+       unsigned is_opened:1;
 };
 
 static inline void sh_eth_soft_swap(char *src, int len)
index 5b0da398621668402f06c64845fe44b9938d69ff..58a1a0a423d494e2ce4fcc0861a02ff97e9077dc 100644 (file)
@@ -265,6 +265,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
 
        plat_dat = dev_get_platdata(&pdev->dev);
 
+       if (!plat_dat)
+               plat_dat = devm_kzalloc(&pdev->dev,
+                                       sizeof(struct plat_stmmacenet_data),
+                                       GFP_KERNEL);
+       if (!plat_dat) {
+               pr_err("%s: ERROR: no memory\n", __func__);
+               return -ENOMEM;
+       }
+
        /* Set default value for multicast hash bins */
        plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
 
@@ -272,15 +281,6 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
        plat_dat->unicast_filter_entries = 1;
 
        if (pdev->dev.of_node) {
-               if (!plat_dat)
-                       plat_dat = devm_kzalloc(&pdev->dev,
-                                       sizeof(struct plat_stmmacenet_data),
-                                       GFP_KERNEL);
-               if (!plat_dat) {
-                       pr_err("%s: ERROR: no memory", __func__);
-                       return  -ENOMEM;
-               }
-
                ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
                if (ret) {
                        pr_err("%s: main dt probe failed", __func__);
index cca871346a0ff5cfe22ffbcdce0f8c973cf59214..ece8d1804d13606c71b8f39e90851cb8b82de8ac 100644 (file)
@@ -496,9 +496,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
                len = skb_frag_size(frag);
                offset = frag->page_offset;
 
-               /* Data must not cross a page boundary. */
-               BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
-
                /* Skip unused frames from start of page */
                page += offset >> PAGE_SHIFT;
                offset &= ~PAGE_MASK;
@@ -506,8 +503,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
                while (len > 0) {
                        unsigned long bytes;
 
-                       BUG_ON(offset >= PAGE_SIZE);
-
                        bytes = PAGE_SIZE - offset;
                        if (bytes > len)
                                bytes = len;
index 30e97bcc4f88293ff902df937446d0983db6d4f3..d134710de96dabcf873ed8ea135611784c60b1d0 100644 (file)
@@ -964,8 +964,6 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
 int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
                                        phys_addr_t size, bool nomap)
 {
-       if (memblock_is_region_reserved(base, size))
-               return -EBUSY;
        if (nomap)
                return memblock_remove(base, size);
        return memblock_reserve(base, size);
index 3d43874319bebb13999889522f8d85e61b7251a2..19bb19c7db4a77eb649ddc3c9fc77b95a1f7c479 100644 (file)
@@ -276,6 +276,7 @@ struct tegra_pcie {
 
        struct resource all;
        struct resource io;
+       struct resource pio;
        struct resource mem;
        struct resource prefetch;
        struct resource busn;
@@ -658,7 +659,6 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
 {
        struct tegra_pcie *pcie = sys_to_pcie(sys);
        int err;
-       phys_addr_t io_start;
 
        err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
        if (err < 0)
@@ -668,14 +668,12 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
        if (err)
                return err;
 
-       io_start = pci_pio_to_address(pcie->io.start);
-
        pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
        pci_add_resource_offset(&sys->resources, &pcie->prefetch,
                                sys->mem_offset);
        pci_add_resource(&sys->resources, &pcie->busn);
 
-       pci_ioremap_io(nr * SZ_64K, io_start);
+       pci_ioremap_io(pcie->pio.start, pcie->io.start);
 
        return 1;
 }
@@ -786,7 +784,6 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
 static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
 {
        u32 fpci_bar, size, axi_address;
-       phys_addr_t io_start = pci_pio_to_address(pcie->io.start);
 
        /* Bar 0: type 1 extended configuration space */
        fpci_bar = 0xfe100000;
@@ -799,7 +796,7 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
        /* Bar 1: downstream IO bar */
        fpci_bar = 0xfdfc0000;
        size = resource_size(&pcie->io);
-       axi_address = io_start;
+       axi_address = pcie->io.start;
        afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
        afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
        afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
@@ -1690,8 +1687,23 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
 
                switch (res.flags & IORESOURCE_TYPE_BITS) {
                case IORESOURCE_IO:
-                       memcpy(&pcie->io, &res, sizeof(res));
-                       pcie->io.name = np->full_name;
+                       memcpy(&pcie->pio, &res, sizeof(res));
+                       pcie->pio.name = np->full_name;
+
+                       /*
+                        * The Tegra PCIe host bridge uses this resource to
+                        * program the mapping of the I/O space to physical
+                        * addresses, so override here the .start and .end
+                        * fields that of_pci_range_to_resource() converted
+                        * to I/O space offsets.  Also set the type to
+                        * IORESOURCE_MEM to clarify that the resource lives
+                        * in the physical memory space.
+                        */
+                       pcie->io.start = range.cpu_addr;
+                       pcie->io.end = range.cpu_addr + range.size - 1;
+                       pcie->io.flags = IORESOURCE_MEM;
+                       pcie->io.name = "I/O";
+
+                       memcpy(&res, &pcie->io, sizeof(res));
                        break;
 
                case IORESOURCE_MEM:
index 8532c3e2aea7d6455c1548e1a0cba6491ae3a5da..1626dc66e7636837a340a6faaf762df74c25df54 100644 (file)
@@ -161,7 +161,7 @@ static const struct s3c2410_wdt_variant drv_data_exynos5420 = {
 static const struct s3c2410_wdt_variant drv_data_exynos7 = {
        .disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET,
        .mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET,
-       .mask_bit = 0,
+       .mask_bit = 23,
        .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
        .rst_stat_bit = 23,     /* A57 WDTRESET */
        .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
index 6df8d3d885e5a56374dfeb61c90a7b6d6e148e15..b8b92c2f96834baa310a20b007557f1b9e69ae91 100644 (file)
@@ -736,7 +736,12 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
        }
 
        alias = d_find_alias(inode);
-       if (alias && !vfat_d_anon_disconn(alias)) {
+       /*
+        * Check "alias->d_parent == dentry->d_parent" to make sure the
+        * FS is not corrupted (especially by a doubly-linked directory).
+        */
+       if (alias && alias->d_parent == dentry->d_parent &&
+           !vfat_d_anon_disconn(alias)) {
                /*
                 * This inode has non anonymous-DCACHE_DISCONNECTED
                 * dentry. This means, the user did ->lookup() by an
@@ -755,12 +760,9 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
 
 out:
        mutex_unlock(&MSDOS_SB(sb)->s_lock);
-       dentry->d_time = dentry->d_parent->d_inode->i_version;
-       dentry = d_splice_alias(inode, dentry);
-       if (dentry)
-               dentry->d_time = dentry->d_parent->d_inode->i_version;
-       return dentry;
-
+       if (!inode)
+               dentry->d_time = dir->i_version;
+       return d_splice_alias(inode, dentry);
 error:
        mutex_unlock(&MSDOS_SB(sb)->s_lock);
        return ERR_PTR(err);
@@ -793,7 +795,6 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
        inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
        /* timestamp is already written, so mark_inode_dirty() is unneeded. */
 
-       dentry->d_time = dentry->d_parent->d_inode->i_version;
        d_instantiate(dentry, inode);
 out:
        mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -824,6 +825,7 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
        clear_nlink(inode);
        inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
        fat_detach(inode);
+       dentry->d_time = dir->i_version;
 out:
        mutex_unlock(&MSDOS_SB(sb)->s_lock);
 
@@ -849,6 +851,7 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry)
        clear_nlink(inode);
        inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
        fat_detach(inode);
+       dentry->d_time = dir->i_version;
 out:
        mutex_unlock(&MSDOS_SB(sb)->s_lock);
 
@@ -889,7 +892,6 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
        /* timestamp is already written, so mark_inode_dirty() is unneeded. */
 
-       dentry->d_time = dentry->d_parent->d_inode->i_version;
        d_instantiate(dentry, inode);
 
        mutex_unlock(&MSDOS_SB(sb)->s_lock);
index e4dc74713a4328eda4738823f51e0c6070937e0a..1df94fabe4eba015bba7e6681e5c17638a27d3df 100644 (file)
@@ -1853,13 +1853,12 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
                                journal->j_chksum_driver = NULL;
                                return 0;
                        }
-               }
 
-               /* Precompute checksum seed for all metadata */
-               if (jbd2_journal_has_csum_v2or3(journal))
+                       /* Precompute checksum seed for all metadata */
                        journal->j_csum_seed = jbd2_chksum(journal, ~0,
                                                           sb->s_uuid,
                                                           sizeof(sb->s_uuid));
+               }
        }
 
        /* If enabling v1 checksums, downgrade superblock */
index dd2c16e4333312194151cf47b946507871d2027d..b86329813ad3a3820955e836956be591fc1f7a0d 100644 (file)
@@ -137,6 +137,14 @@ struct drm_display_info {
        u8 cea_rev;
 };
 
+/* data corresponds to displayid vend/prod/serial */
+struct drm_tile_group {
+       struct kref refcount;
+       struct drm_device *dev;
+       int id;
+       u8 group_data[8];
+};
+
 struct drm_framebuffer_funcs {
        /* note: use drm_framebuffer_remove() */
        void (*destroy)(struct drm_framebuffer *framebuffer);
@@ -599,6 +607,15 @@ struct drm_encoder {
  * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
  * @debugfs_entry: debugfs directory for this connector
  * @state: current atomic state for this connector
+ * @has_tile: is this connector connected to a tiled monitor
+ * @tile_group: tile group for the connected monitor
+ * @tile_is_single_monitor: whether the tile group forms a single monitor housing
+ * @num_h_tile: number of horizontal tiles in the tile group
+ * @num_v_tile: number of vertical tiles in the tile group
+ * @tile_h_loc: horizontal location of this tile
+ * @tile_v_loc: vertical location of this tile
+ * @tile_h_size: horizontal size of this tile.
+ * @tile_v_size: vertical size of this tile.
  *
  * Each connector may be connected to one or more CRTCs, or may be clonable by
  * another connector if they can share a CRTC.  Each connector also has a specific
@@ -634,6 +651,8 @@ struct drm_connector {
 
        struct drm_property_blob *path_blob_ptr;
 
+       struct drm_property_blob *tile_blob_ptr;
+
        uint8_t polled; /* DRM_CONNECTOR_POLL_* */
 
        /* requested DPMS state */
@@ -661,6 +680,15 @@ struct drm_connector {
        struct dentry *debugfs_entry;
 
        struct drm_connector_state *state;
+
+       /* DisplayID bits */
+       bool has_tile;
+       struct drm_tile_group *tile_group;
+       bool tile_is_single_monitor;
+
+       uint8_t num_h_tile, num_v_tile;
+       uint8_t tile_h_loc, tile_v_loc;
+       uint16_t tile_h_size, tile_v_size;
 };
 
 /**
@@ -978,6 +1006,7 @@ struct drm_mode_config {
        struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
        struct mutex idr_mutex; /* for IDR management */
        struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
+       struct idr tile_idr; /* use this idr for tile group IDs */
        /* this is limited to one for now */
 
        struct mutex fb_lock; /* protects global and per-file fb lists */
@@ -1021,6 +1050,7 @@ struct drm_mode_config {
        struct drm_property *edid_property;
        struct drm_property *dpms_property;
        struct drm_property *path_property;
+       struct drm_property *tile_property;
        struct drm_property *plane_type_property;
        struct drm_property *rotation_property;
 
@@ -1190,6 +1220,7 @@ extern void drm_mode_config_cleanup(struct drm_device *dev);
 
 extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
                                                const char *path);
+int drm_mode_connector_set_tile_property(struct drm_connector *connector);
 extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
                                                   const struct edid *edid);
 
@@ -1326,6 +1357,13 @@ extern void drm_set_preferred_mode(struct drm_connector *connector,
 extern int drm_edid_header_is_valid(const u8 *raw_edid);
 extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
 extern bool drm_edid_is_valid(struct edid *edid);
+
+extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+                                                        char topology[8]);
+extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
+                                              char topology[8]);
+extern void drm_mode_put_tile_group(struct drm_device *dev,
+                                  struct drm_tile_group *tg);
 struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
                                           int hsize, int vsize, int fresh,
                                           bool rb);
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
new file mode 100644 (file)
index 0000000..623b4e9
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * Copyright © 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef DRM_DISPLAYID_H
+#define DRM_DISPLAYID_H
+
+#define DATA_BLOCK_PRODUCT_ID 0x00
+#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
+#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
+#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
+#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
+#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
+#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
+#define DATA_BLOCK_VESA_TIMING 0x07
+#define DATA_BLOCK_CEA_TIMING 0x08
+#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
+#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
+#define DATA_BLOCK_GP_ASCII_STRING 0x0b
+#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
+#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
+#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
+#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
+#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
+#define DATA_BLOCK_TILED_DISPLAY 0x12
+
+#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
+
+#define PRODUCT_TYPE_EXTENSION 0
+#define PRODUCT_TYPE_TEST 1
+#define PRODUCT_TYPE_PANEL 2
+#define PRODUCT_TYPE_MONITOR 3
+#define PRODUCT_TYPE_TV 4
+#define PRODUCT_TYPE_REPEATER 5
+#define PRODUCT_TYPE_DIRECT_DRIVE 6
+
+struct displayid_hdr {
+       u8 rev;
+       u8 bytes;
+       u8 prod_id;
+       u8 ext_count;
+} __packed;
+
+struct displayid_block {
+       u8 tag;
+       u8 rev;
+       u8 num_bytes;
+} __packed;
+
+struct displayid_tiled_block {
+       struct displayid_block base;
+       u8 tile_cap;
+       u8 topo[3];
+       u8 tile_size[4];
+       u8 tile_pixel_bezel[5];
+       u8 topology_id[8];
+} __packed;
+
+#endif
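
For a sense of how this block might feed the drm_connector tile fields added elsewhere in this merge, here is a hedged decoding sketch. The bit packing of topo[] and tile_size[] (low/high nibbles, little-endian sizes stored minus one) is an assumption drawn from the DisplayID tiled-display block layout, not copied from the drm_edid.c parser:

#include <drm/drm_crtc.h>
#include <drm/drm_displayid.h>

static void example_parse_tile(const struct displayid_tiled_block *tile,
			       struct drm_connector *connector)
{
	connector->has_tile = true;

	/* assumed packing: counts in topo[0] nibbles, locations in topo[1] */
	connector->num_h_tile = (tile->topo[0] & 0xf) + 1;
	connector->num_v_tile = ((tile->topo[0] >> 4) & 0xf) + 1;
	connector->tile_h_loc = tile->topo[1] & 0xf;
	connector->tile_v_loc = (tile->topo[1] >> 4) & 0xf;

	/* assumed packing: 16-bit little-endian sizes, stored minus one */
	connector->tile_h_size = (tile->tile_size[0] | tile->tile_size[1] << 8) + 1;
	connector->tile_v_size = (tile->tile_size[2] | tile->tile_size[3] << 8) + 1;
}
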
index cec6383bbdb8afbdd092ff0a869446070a37adfe..00c1da9272456e8e9bdec207affa286028664421 100644 (file)
@@ -92,6 +92,8 @@ struct drm_dp_mst_port {
        struct drm_dp_vcpi vcpi;
        struct drm_connector *connector;
        struct drm_dp_mst_topology_mgr *mgr;
+
+       struct edid *cached_edid; /* for DP logical ports - make tiling work */
 };
 
 /**
@@ -474,7 +476,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
 
 
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
 
 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
 
index d59240ffb1f7cc9f14b41a23b3cd9ba099a959c8..87d85e81d3a7e076a36e35eb65b94ca7ecbc8751 100644 (file)
 
 #define EDID_LENGTH 128
 #define DDC_ADDR 0x50
+#define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */
 
 #define CEA_EXT            0x02
 #define VTB_EXT            0x10
 #define DI_EXT     0x40
 #define LS_EXT     0x50
 #define MI_EXT     0x60
+#define DISPLAYID_EXT 0x70
 
 struct est_timings {
        u8 t1;
index f4ad254e3488ea4ebdcf98410757f7e5d01e7cd9..b597068103aa9d8d1bf66841f76858e904f72c34 100644 (file)
@@ -34,9 +34,14 @@ struct drm_fb_helper;
 
 #include <linux/kgdb.h>
 
+struct drm_fb_offset {
+       int x, y;
+};
+
 struct drm_fb_helper_crtc {
        struct drm_mode_set mode_set;
        struct drm_display_mode *desired_mode;
+       int x, y;
 };
 
 struct drm_fb_helper_surface_size {
@@ -72,6 +77,7 @@ struct drm_fb_helper_funcs {
        bool (*initial_config)(struct drm_fb_helper *fb_helper,
                               struct drm_fb_helper_crtc **crtcs,
                               struct drm_display_mode **modes,
+                              struct drm_fb_offset *offsets,
                               bool *enabled, int width, int height);
 };
 
index 460441714413c620b8313ca9390778e089257651..b620c317c7720a27127ca3fd3c54e67e374652ce 100644 (file)
@@ -68,6 +68,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
  *           non-blocking reserves should be tried.
  * @list:    thread private list of ttm_validate_buffer structs.
  * @intr:    should the wait be interruptible
+ * @dups:    [out] optional list of duplicates.
  *
  * Tries to reserve bos pointed to by the list entries for validation.
  * If the function returns 0, all buffers are marked as "unfenced",
@@ -83,6 +84,11 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
  * calling process receives a signal while waiting. In that case, no
  * buffers on the list will be reserved upon return.
  *
+ * If dups is non-NULL, all buffers already reserved by the current thread
+ * (i.e. duplicates) are added to this list; otherwise -EALREADY is returned
+ * on the first already-reserved buffer and all buffers from the list are
+ * unreserved again.
+ *
  * Buffers reserved by this function should be unreserved by
  * a call to either ttm_eu_backoff_reservation() or
  * ttm_eu_fence_buffer_objects() when command submission is complete or
@@ -90,7 +96,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
  */
 
 extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
-                                 struct list_head *list, bool intr);
+                                 struct list_head *list, bool intr,
+                                 struct list_head *dups);
 
 /**
  * function ttm_eu_fence_buffer_objects.
index 4c94f31a8c99b68e3ada83ed75b3726de3043a08..8523f9bb72f2d0039963078d765084ee5dfdf155 100644 (file)
@@ -427,7 +427,7 @@ header-y += virtio_net.h
 header-y += virtio_pci.h
 header-y += virtio_ring.h
 header-y += virtio_rng.h
-header=y += vm_sockets.h
+header-y += vm_sockets.h
 header-y += vt.h
 header-y += wait.h
 header-y += wanrouter.h
index 454f6c6020a8d98dccb167e46d3a5225d5f4ce2d..53c3310f41c6867fd4b5f3f0493a150184b8c617 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -507,13 +507,6 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
                return retval;
        }
 
-       id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
-       if (id < 0) {
-               ipc_rcu_putref(sma, sem_rcu_free);
-               return id;
-       }
-       ns->used_sems += nsems;
-
        sma->sem_base = (struct sem *) &sma[1];
 
        for (i = 0; i < nsems; i++) {
@@ -528,6 +521,14 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
        INIT_LIST_HEAD(&sma->list_id);
        sma->sem_nsems = nsems;
        sma->sem_ctime = get_seconds();
+
+       id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
+       if (id < 0) {
+               ipc_rcu_putref(sma, sem_rcu_free);
+               return id;
+       }
+       ns->used_sems += nsems;
+
        sem_unlock(sma, -1);
        rcu_read_unlock();
 
index 24beb9bb4c3e228ac17e8b931f37c44091987d18..89e7283015a61ae1806566c3bea08b87c6a3a35d 100644 (file)
@@ -2874,10 +2874,14 @@ asmlinkage __visible void __sched schedule_user(void)
         * or we have been woken up remotely but the IPI has not yet arrived,
         * we haven't yet exited the RCU idle mode. Do it here manually until
         * we find a better solution.
+        *
+        * NB: There are buggy callers of this function.  Ideally we
+        * should warn if prev_state != IN_USER, but that will trigger
+        * too frequently to make sense yet.
         */
-       user_exit();
+       enum ctx_state prev_state = exception_enter();
        schedule();
-       user_enter();
+       exception_exit(prev_state);
 }
 #endif
 
index cce4dd68c40da211948177f6903917d4ac81b176..2e65d206b01c13d3ad02a57c3d0842b89c8637bd 100644 (file)
@@ -598,6 +598,7 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
 
        return pool;
 }
+EXPORT_SYMBOL(devm_gen_pool_create);
 
 /**
  * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
index 09225796991a83a9281194d855719021b8b18282..5e256271b47b02b1a0265ad6a89b2bccf5c40002 100644 (file)
@@ -28,7 +28,7 @@ void show_mem(unsigned int filter)
                                continue;
 
                        total += zone->present_pages;
-                       reserved = zone->present_pages - zone->managed_pages;
+                       reserved += zone->present_pages - zone->managed_pages;
 
                        if (is_highmem_idx(zoneid))
                                highmem += zone->present_pages;
index c30eec536f03fb7148e3c7a08538f6a2c3571857..f2a3571c6e22573867cf6e4ba2d481a663a3d04e 100644 (file)
@@ -244,8 +244,10 @@ int __frontswap_store(struct page *page)
                  the (older) page from frontswap
                 */
                inc_frontswap_failed_stores();
-               if (dup)
+               if (dup) {
                        __frontswap_clear(sis, offset);
+                       frontswap_ops->invalidate_page(type, offset);
+               }
        }
        if (frontswap_writethrough_enabled)
                /* report failure so swap also writes to swap device */
index 655fd3d34bb0908e6e18d7b084342f6132f85521..d3cb2ef66ee21325e8d4230efe18d2f2fbef1314 100644 (file)
@@ -816,20 +816,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                if (!pte_file(pte)) {
                        swp_entry_t entry = pte_to_swp_entry(pte);
 
-                       if (swap_duplicate(entry) < 0)
-                               return entry.val;
-
-                       /* make sure dst_mm is on swapoff's mmlist. */
-                       if (unlikely(list_empty(&dst_mm->mmlist))) {
-                               spin_lock(&mmlist_lock);
-                               if (list_empty(&dst_mm->mmlist))
-                                       list_add(&dst_mm->mmlist,
-                                                &src_mm->mmlist);
-                               spin_unlock(&mmlist_lock);
-                       }
-                       if (likely(!non_swap_entry(entry)))
+                       if (likely(!non_swap_entry(entry))) {
+                               if (swap_duplicate(entry) < 0)
+                                       return entry.val;
+
+                               /* make sure dst_mm is on swapoff's mmlist. */
+                               if (unlikely(list_empty(&dst_mm->mmlist))) {
+                                       spin_lock(&mmlist_lock);
+                                       if (list_empty(&dst_mm->mmlist))
+                                               list_add(&dst_mm->mmlist,
+                                                        &src_mm->mmlist);
+                                       spin_unlock(&mmlist_lock);
+                               }
                                rss[MM_SWAPENTS]++;
-                       else if (is_migration_entry(entry)) {
+                       } else if (is_migration_entry(entry)) {
                                page = migration_entry_to_page(entry);
 
                                if (PageAnon(page))
index 87e82b38453c2cbca83f1dd7ad472c02b6a73b77..ae919891a087e0d7f3a76c5b47f4f1d8326f6dfa 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -776,8 +776,11 @@ again:                     remove_next = 1 + (end > next->vm_end);
                 * shrinking vma had, to cover any anon pages imported.
                 */
                if (exporter && exporter->anon_vma && !importer->anon_vma) {
-                       if (anon_vma_clone(importer, exporter))
-                               return -ENOMEM;
+                       int error;
+
+                       error = anon_vma_clone(importer, exporter);
+                       if (error)
+                               return error;
                        importer->anon_vma = exporter->anon_vma;
                }
        }
@@ -2469,7 +2472,8 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        if (err)
                goto out_free_vma;
 
-       if (anon_vma_clone(new, vma))
+       err = anon_vma_clone(new, vma);
+       if (err)
                goto out_free_mpol;
 
        if (new->vm_file)
index d3eb1e02d1c681fd21e1f1752b0327349b24accd..a2a1eab077b00968a0797087cf1ef3ac99e6a1e3 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -274,6 +274,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 {
        struct anon_vma_chain *avc;
        struct anon_vma *anon_vma;
+       int error;
 
        /* Don't bother if the parent process has no anon_vma here. */
        if (!pvma->anon_vma)
@@ -283,8 +284,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
         * First, attach the new VMA to the parent VMA's anon_vmas,
         * so rmap can find non-COWed pages in child processes.
         */
-       if (anon_vma_clone(vma, pvma))
-               return -ENOMEM;
+       error = anon_vma_clone(vma, pvma);
+       if (error)
+               return error;
 
        /* Then add our own anon_vma. */
        anon_vma = anon_vma_alloc();
index eb2b2ea301309887972c148bb80aedf8a6731b9b..f34e053ec46e24bb364a847ac498ef61b5011b27 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3076,7 +3076,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
        void *obj;
        int x;
 
-       VM_BUG_ON(nodeid > num_online_nodes());
+       VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
        n = get_node(cachep, nodeid);
        BUG_ON(!n);
 
index d4042e75f7c7e7c7d498c4fcc33c90f1d1de2bff..c5afd573d7da79afc814225043319cc66120addf 100644 (file)
@@ -165,6 +165,7 @@ static void vmpressure_work_fn(struct work_struct *work)
        unsigned long scanned;
        unsigned long reclaimed;
 
+       spin_lock(&vmpr->sr_lock);
        /*
         * Several contexts might be calling vmpressure(), so it is
         * possible that the work was rescheduled again before the old
@@ -173,11 +174,12 @@ static void vmpressure_work_fn(struct work_struct *work)
         * here. No need for any locks here since we don't care if
         * vmpr->reclaimed is in sync.
         */
-       if (!vmpr->scanned)
+       scanned = vmpr->scanned;
+       if (!scanned) {
+               spin_unlock(&vmpr->sr_lock);
                return;
+       }
 
-       spin_lock(&vmpr->sr_lock);
-       scanned = vmpr->scanned;
        reclaimed = vmpr->reclaimed;
        vmpr->scanned = 0;
        vmpr->reclaimed = 0;
index b9b7dfaf202b9be668bf29153593aa2c7dba86ee..76321ea442c3e06c289e86184c5c4e513cbad7ff 100644 (file)
@@ -1498,6 +1498,7 @@ static int do_setlink(const struct sk_buff *skb,
                        goto errout;
                }
                if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
+                       put_net(net);
                        err = -EPERM;
                        goto errout;
                }
index b8960c4959a5e53635180054d27d788105ebf134..200e37867336a3c2903437e97f591fbc302e15b7 100644 (file)
@@ -117,6 +117,7 @@ struct keyring_search_context {
 #define KEYRING_SEARCH_NO_UPDATE_TIME  0x0004  /* Don't update times */
 #define KEYRING_SEARCH_NO_CHECK_PERM   0x0008  /* Don't check permissions */
 #define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0010  /* Give an error on excessive depth */
+#define KEYRING_SEARCH_SKIP_EXPIRED    0x0020  /* Ignore expired keys (intention to replace) */
 
        int (*iterator)(const void *object, void *iterator_data);
 
index eff88a5f5d40da17611d381bcf4e219a90bcc972..4743d71e4aa6dd12f2456a5f00496c1222775c6a 100644 (file)
@@ -26,6 +26,8 @@
 #include <asm/uaccess.h>
 #include "internal.h"
 
+#define KEY_MAX_DESC_SIZE 4096
+
 static int key_get_type_from_user(char *type,
                                  const char __user *_type,
                                  unsigned len)
@@ -78,7 +80,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
 
        description = NULL;
        if (_description) {
-               description = strndup_user(_description, PAGE_SIZE);
+               description = strndup_user(_description, KEY_MAX_DESC_SIZE);
                if (IS_ERR(description)) {
                        ret = PTR_ERR(description);
                        goto error;
@@ -177,7 +179,7 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type,
                goto error;
 
        /* pull the description into kernel space */
-       description = strndup_user(_description, PAGE_SIZE);
+       description = strndup_user(_description, KEY_MAX_DESC_SIZE);
        if (IS_ERR(description)) {
                ret = PTR_ERR(description);
                goto error;
@@ -287,7 +289,7 @@ long keyctl_join_session_keyring(const char __user *_name)
        /* fetch the name from userspace */
        name = NULL;
        if (_name) {
-               name = strndup_user(_name, PAGE_SIZE);
+               name = strndup_user(_name, KEY_MAX_DESC_SIZE);
                if (IS_ERR(name)) {
                        ret = PTR_ERR(name);
                        goto error;
@@ -562,8 +564,9 @@ long keyctl_describe_key(key_serial_t keyid,
 {
        struct key *key, *instkey;
        key_ref_t key_ref;
-       char *tmpbuf;
+       char *infobuf;
        long ret;
+       int desclen, infolen;
 
        key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
        if (IS_ERR(key_ref)) {
@@ -586,38 +589,31 @@ long keyctl_describe_key(key_serial_t keyid,
        }
 
 okay:
-       /* calculate how much description we're going to return */
-       ret = -ENOMEM;
-       tmpbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!tmpbuf)
-               goto error2;
-
        key = key_ref_to_ptr(key_ref);
+       desclen = strlen(key->description);
 
-       ret = snprintf(tmpbuf, PAGE_SIZE - 1,
-                      "%s;%d;%d;%08x;%s",
-                      key->type->name,
-                      from_kuid_munged(current_user_ns(), key->uid),
-                      from_kgid_munged(current_user_ns(), key->gid),
-                      key->perm,
-                      key->description ?: "");
-
-       /* include a NUL char at the end of the data */
-       if (ret > PAGE_SIZE - 1)
-               ret = PAGE_SIZE - 1;
-       tmpbuf[ret] = 0;
-       ret++;
+       /* calculate how much information we're going to return */
+       ret = -ENOMEM;
+       infobuf = kasprintf(GFP_KERNEL,
+                           "%s;%d;%d;%08x;",
+                           key->type->name,
+                           from_kuid_munged(current_user_ns(), key->uid),
+                           from_kgid_munged(current_user_ns(), key->gid),
+                           key->perm);
+       if (!infobuf)
+               goto error2;
+       infolen = strlen(infobuf);
+       ret = infolen + desclen + 1;
 
        /* consider returning the data */
-       if (buffer && buflen > 0) {
-               if (buflen > ret)
-                       buflen = ret;
-
-               if (copy_to_user(buffer, tmpbuf, buflen) != 0)
+       if (buffer && buflen >= ret) {
+               if (copy_to_user(buffer, infobuf, infolen) != 0 ||
+                   copy_to_user(buffer + infolen, key->description,
+                                desclen + 1) != 0)
                        ret = -EFAULT;
        }
 
-       kfree(tmpbuf);
+       kfree(infobuf);
 error2:
        key_ref_put(key_ref);
 error:
@@ -649,7 +645,7 @@ long keyctl_keyring_search(key_serial_t ringid,
        if (ret < 0)
                goto error;
 
-       description = strndup_user(_description, PAGE_SIZE);
+       description = strndup_user(_description, KEY_MAX_DESC_SIZE);
        if (IS_ERR(description)) {
                ret = PTR_ERR(description);
                goto error;
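
The keyctl.c hunks make two related changes. First, user-supplied key descriptions and keyring names are now bounded by an explicit KEY_MAX_DESC_SIZE (4096) rather than PAGE_SIZE, which varies by architecture and tied a userspace-visible limit to a hardware detail. Second, keyctl_describe_key() no longer formats into a fixed PAGE_SIZE buffer with silent truncation: it builds the fixed "type;uid;gid;perm;" header with kasprintf(), computes the exact size including the description and trailing NUL, and copies to user space only when the caller's buffer can hold all of it, while always returning the required size. A user-space sketch of that all-or-nothing pattern, with asprintf() standing in for kasprintf() and memcpy() for copy_to_user(); all names are illustrative.

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long describe(char *buffer, size_t buflen,
                     const char *type, int uid, int gid,
                     unsigned int perm, const char *desc)
{
        char *infobuf;
        long ret;
        int infolen, desclen = strlen(desc);

        /* build the fixed fields; asprintf() stands in for kasprintf() */
        if (asprintf(&infobuf, "%s;%d;%d;%08x;", type, uid, gid, perm) < 0)
                return -ENOMEM;
        infolen = strlen(infobuf);
        ret = infolen + desclen + 1;            /* required size, incl. NUL */

        /* all-or-nothing: copy only when the whole string fits */
        if (buffer && buflen >= (size_t)ret) {
                memcpy(buffer, infobuf, infolen);
                memcpy(buffer + infolen, desc, desclen + 1);
        }
        free(infobuf);
        return ret;     /* caller learns the size it needs, like the syscall */
}
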
index 8177010174f7b3d47773a43e48bf2b171b264c5f..e72548b5897ec237dd7463374871538c81a84fd7 100644 (file)
@@ -546,7 +546,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
                }
 
                if (key->expiry && ctx->now.tv_sec >= key->expiry) {
-                       ctx->result = ERR_PTR(-EKEYEXPIRED);
+                       if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
+                               ctx->result = ERR_PTR(-EKEYEXPIRED);
                        kleave(" = %d [expire]", ctx->skipped_ret);
                        goto skipped;
                }
@@ -628,6 +629,10 @@ static bool search_nested_keyrings(struct key *keyring,
               ctx->index_key.type->name,
               ctx->index_key.description);
 
+#define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK)
+       BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
+              (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
+
        if (ctx->index_key.description)
                ctx->index_key.desc_len = strlen(ctx->index_key.description);
 
@@ -637,7 +642,6 @@ static bool search_nested_keyrings(struct key *keyring,
        if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE ||
            keyring_compare_object(keyring, &ctx->index_key)) {
                ctx->skipped_ret = 2;
-               ctx->flags |= KEYRING_SEARCH_DO_STATE_CHECK;
                switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) {
                case 1:
                        goto found;
@@ -649,8 +653,6 @@ static bool search_nested_keyrings(struct key *keyring,
        }
 
        ctx->skipped_ret = 0;
-       if (ctx->flags & KEYRING_SEARCH_NO_STATE_CHECK)
-               ctx->flags &= ~KEYRING_SEARCH_DO_STATE_CHECK;
 
        /* Start processing a new keyring */
 descend_to_keyring:
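
Two behavioral changes land in keyring.c: keyring_search_iterator() now leaves ctx->result untouched for an expired key when KEYRING_SEARCH_SKIP_EXPIRED is set, so the search reports "not found" instead of -EKEYEXPIRED; and search_nested_keyrings() stops toggling the state-check flags itself, instead BUG()ing unless the caller set exactly one of NO_STATE_CHECK and DO_STATE_CHECK. A sketch of that "exactly one of two bits" assertion, with assert() in place of BUG_ON() and illustrative flag values:

#include <assert.h>

#define SEARCH_NO_STATE_CHECK  0x0001   /* illustrative values, not the kernel's */
#define SEARCH_DO_STATE_CHECK  0x0002
#define STATE_CHECKS (SEARCH_NO_STATE_CHECK | SEARCH_DO_STATE_CHECK)

static void check_state_flags(unsigned int flags)
{
        /* trips if the caller set neither bit ... */
        assert((flags & STATE_CHECKS) != 0);
        /* ... or set both, mirroring the BUG_ON() above */
        assert((flags & STATE_CHECKS) != STATE_CHECKS);
}
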
index bb4337c7ae1b3978fd5e36d692d8cacf27b89816..0c7aea4dea54d8d299edc09b38c9a8f7b5c82be8 100644 (file)
@@ -516,6 +516,8 @@ struct key *request_key_and_link(struct key_type *type,
                .match_data.cmp         = key_default_cmp,
                .match_data.raw_data    = description,
                .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+               .flags                  = (KEYRING_SEARCH_DO_STATE_CHECK |
+                                          KEYRING_SEARCH_SKIP_EXPIRED),
        };
        struct key *key;
        key_ref_t key_ref;
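
With search_nested_keyrings() no longer forcing DO_STATE_CHECK on, request_key_and_link() (security/keys/request_key.c) must request it explicitly, and it also opts in to skipping expired keys so request_key() can construct a replacement rather than failing with EKEYEXPIRED. The hunk uses C99 designated initializers to set .flags alongside the match fields; a minimal sketch of that initializer style, with illustrative types and values:

struct search_ctx {
        const char   *description;
        unsigned int  flags;
};

#define SEARCH_DO_STATE_CHECK 0x0040    /* hypothetical values */
#define SEARCH_SKIP_EXPIRED   0x0020

static const struct search_ctx ctx = {
        .description = "user;example",
        .flags       = SEARCH_DO_STATE_CHECK | SEARCH_SKIP_EXPIRED,
};
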
index 6639e2cb885322c6a43924496b2a68be25b9a5e6..5d672f7580dd5330516dd62f7d705cac46c319fb 100644 (file)
@@ -249,6 +249,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
                .match_data.cmp         = key_default_cmp,
                .match_data.raw_data    = description,
                .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+               .flags                  = KEYRING_SEARCH_DO_STATE_CHECK,
        };
        struct key *authkey;
        key_ref_t authkey_ref;
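
key_get_instantiation_authkey() (security/keys/request_key_auth.c) gets the same explicit DO_STATE_CHECK treatment, but notably without SKIP_EXPIRED: with that bit clear, an expired authorisation key still surfaces as -EKEYEXPIRED to the instantiating process rather than looking like a missing key.
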
index 14f16be3f3747a3c72f1ba2938d2a1ca3f8007b2..b118a5be18df7ea2a9bfeab4997c677e1ae176b5 100644 (file)
@@ -4790,6 +4790,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
        SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),